diff --git a/.ci/metrics/metrics.py b/.ci/metrics/metrics.py index 8edc00bc6bd37..48d2aa2f330ec 100644 --- a/.ci/metrics/metrics.py +++ b/.ci/metrics/metrics.py @@ -26,7 +26,67 @@ class JobMetrics: workflow_id: int -def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, int]): +@dataclass +class GaugeMetric: + name: str + value: int + time_ns: int + + +def get_sampled_workflow_metrics(github_repo: github.Repository): + """Gets global statistics about the Github workflow queue + + Args: + github_repo: A github repo object to use to query the relevant information. + + Returns: + Returns a list of GaugeMetric objects, containing the relevant metrics about + the workflow + """ + + # Other states are available (pending, waiting, etc), but the meaning + # is not documented (See #70540). + # "queued" seems to be the info we want. + queued_workflow_count = len( + [ + x + for x in github_repo.get_workflow_runs(status="queued") + if x.name in WORKFLOWS_TO_TRACK + ] + ) + running_workflow_count = len( + [ + x + for x in github_repo.get_workflow_runs(status="in_progress") + if x.name in WORKFLOWS_TO_TRACK + ] + ) + + workflow_metrics = [] + workflow_metrics.append( + GaugeMetric( + "workflow_queue_size", + queued_workflow_count, + time.time_ns(), + ) + ) + workflow_metrics.append( + GaugeMetric( + "running_workflow_count", + running_workflow_count, + time.time_ns(), + ) + ) + # Always send a heartbeat metric so we can monitor if this container is still able to log to Grafana. + workflow_metrics.append( + GaugeMetric("metrics_container_heartbeat", 1, time.time_ns()) + ) + return workflow_metrics + + +def get_per_workflow_metrics( + github_repo: github.Repository, workflows_to_track: dict[str, int] +): """Gets the metrics for specified Github workflows. 
This function takes in a list of workflows to track, and optionally the @@ -43,14 +103,14 @@ def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, in Returns a list of JobMetrics objects, containing the relevant metrics about the workflow. """ - workflow_runs = iter(github_repo.get_workflow_runs()) - workflow_metrics = [] workflows_to_include = set(workflows_to_track.keys()) - while len(workflows_to_include) > 0: - workflow_run = next(workflow_runs) + for workflow_run in iter(github_repo.get_workflow_runs()): + if len(workflows_to_include) == 0: + break + if workflow_run.status != "completed": continue @@ -139,12 +199,27 @@ def upload_metrics(workflow_metrics, metrics_userid, api_key): metrics_userid: The userid to use for the upload. api_key: The API key to use for the upload. """ + + if len(workflow_metrics) == 0: + print("No metrics found to upload.", file=sys.stderr) + return + metrics_batch = [] for workflow_metric in workflow_metrics: - workflow_formatted_name = workflow_metric.job_name.lower().replace(" ", "_") - metrics_batch.append( - f"{workflow_formatted_name} queue_time={workflow_metric.queue_time},run_time={workflow_metric.run_time},status={workflow_metric.status} {workflow_metric.created_at_ns}" - ) + if isinstance(workflow_metric, GaugeMetric): + name = workflow_metric.name.lower().replace(" ", "_") + metrics_batch.append( + f"{name} value={workflow_metric.value} {workflow_metric.time_ns}" + ) + elif isinstance(workflow_metric, JobMetrics): + name = workflow_metric.job_name.lower().replace(" ", "_") + metrics_batch.append( + f"{name} queue_time={workflow_metric.queue_time},run_time={workflow_metric.run_time},status={workflow_metric.status} {workflow_metric.created_at_ns}" + ) + else: + raise ValueError( + f"Unsupported object type {type(workflow_metric)}: {str(workflow_metric)}" + ) request_data = "\n".join(metrics_batch) response = requests.post( @@ -176,16 +251,21 @@ def main(): # Enter the main loop. 
Every five minutes we wake up and dump metrics for # the relevant jobs. while True: - current_metrics = get_metrics(github_repo, workflows_to_track) - if len(current_metrics) == 0: - print("No metrics found to upload.", file=sys.stderr) - continue + current_metrics = get_per_workflow_metrics(github_repo, workflows_to_track) + current_metrics += get_sampled_workflow_metrics(github_repo) + # Always send a heartbeat metric so we can monitor if this container is still able to log to Grafana. + current_metrics.append( + GaugeMetric("metrics_container_heartbeat", 1, time.time_ns()) + ) upload_metrics(current_metrics, grafana_metrics_userid, grafana_api_key) print(f"Uploaded {len(current_metrics)} metrics", file=sys.stderr) for workflow_metric in reversed(current_metrics): - workflows_to_track[workflow_metric.job_name] = workflow_metric.workflow_id + if isinstance(workflow_metric, JobMetrics): + workflows_to_track[ + workflow_metric.job_name + ] = workflow_metric.workflow_id time.sleep(SCRAPE_INTERVAL_SECONDS) diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h index 115e59ca0697e..94fe4aa8aa0e5 100644 --- a/bolt/include/bolt/Core/BinaryContext.h +++ b/bolt/include/bolt/Core/BinaryContext.h @@ -1363,6 +1363,12 @@ class BinaryContext { if (std::optional Size = MIB->getSize(Inst)) return *Size; + if (MIB->isPseudo(Inst)) + return 0; + + if (std::optional Size = MIB->getInstructionSize(Inst)) + return *Size; + if (!Emitter) Emitter = this->MCE.get(); SmallString<256> Code; diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h index 3634fed9757ce..5d77e6faff2fc 100644 --- a/bolt/include/bolt/Core/MCPlusBuilder.h +++ b/bolt/include/bolt/Core/MCPlusBuilder.h @@ -1204,6 +1204,11 @@ class MCPlusBuilder { /// Get instruction size specified via annotation. std::optional getSize(const MCInst &Inst) const; + /// Get target-specific instruction size. 
+ virtual std::optional getInstructionSize(const MCInst &Inst) const { + return std::nullopt; + } + /// Set instruction size. void setSize(MCInst &Inst, uint32_t Size) const; diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp index 679c9774c767f..d752751c17932 100644 --- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp +++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp @@ -1792,6 +1792,11 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { } uint16_t getMinFunctionAlignment() const override { return 4; } + + std::optional + getInstructionSize(const MCInst &Inst) const override { + return 4; + } }; } // end anonymous namespace diff --git a/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp b/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp index 80ee31368fe9a..30bc8be1719d5 100644 --- a/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp +++ b/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp @@ -118,35 +118,6 @@ findMembersUsedInInitExpr(const CXXCtorInitializer *Initializer, return Results; } -/// Returns the next token after `Loc` (including comment tokens). -static std::optional getTokenAfter(SourceLocation Loc, - const SourceManager &SM, - const LangOptions &LangOpts) { - if (Loc.isMacroID()) { - return std::nullopt; - } - Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts); - - // Break down the source location. - std::pair LocInfo = SM.getDecomposedLoc(Loc); - - // Try to load the file buffer. - bool InvalidTemp = false; - StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp); - if (InvalidTemp) - return std::nullopt; - - const char *TokenBegin = File.data() + LocInfo.second; - - Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(), - TokenBegin, File.end()); - lexer.SetCommentRetentionState(true); - // Find the token. 
- Token Tok; - lexer.LexFromRawLexer(Tok); - return Tok; -} - /// Returns the end of the trailing comments after `Loc`. static SourceLocation getEndOfTrailingComment(SourceLocation Loc, const SourceManager &SM, @@ -154,11 +125,12 @@ static SourceLocation getEndOfTrailingComment(SourceLocation Loc, // We consider any following comment token that is indented more than the // first comment to be part of the trailing comment. const unsigned Column = SM.getPresumedColumnNumber(Loc); - std::optional Tok = getTokenAfter(Loc, SM, LangOpts); + std::optional Tok = + Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true); while (Tok && Tok->is(tok::comment) && SM.getPresumedColumnNumber(Tok->getLocation()) > Column) { Loc = Tok->getEndLoc(); - Tok = getTokenAfter(Loc, SM, LangOpts); + Tok = Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true); } return Loc; } diff --git a/clang-tools-extra/clang-tidy/utils/LexerUtils.cpp b/clang-tools-extra/clang-tidy/utils/LexerUtils.cpp index 92c3e0ed7894e..50da196315d3b 100644 --- a/clang-tools-extra/clang-tidy/utils/LexerUtils.cpp +++ b/clang-tools-extra/clang-tidy/utils/LexerUtils.cpp @@ -86,29 +86,6 @@ SourceLocation findNextTerminator(SourceLocation Start, const SourceManager &SM, return findNextAnyTokenKind(Start, SM, LangOpts, tok::comma, tok::semi); } -std::optional -findNextTokenIncludingComments(SourceLocation Start, const SourceManager &SM, - const LangOptions &LangOpts) { - // `Lexer::findNextToken` will ignore comment - if (Start.isMacroID()) - return std::nullopt; - Start = Lexer::getLocForEndOfToken(Start, 0, SM, LangOpts); - // Break down the source location. - std::pair LocInfo = SM.getDecomposedLoc(Start); - bool InvalidTemp = false; - StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp); - if (InvalidTemp) - return std::nullopt; - // Lex from the start of the given location. 
- Lexer L(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(), - File.data() + LocInfo.second, File.end()); - L.SetCommentRetentionState(true); - // Find the token. - Token Tok; - L.LexFromRawLexer(Tok); - return Tok; -} - std::optional findNextTokenSkippingComments(SourceLocation Start, const SourceManager &SM, const LangOptions &LangOpts) { diff --git a/clang-tools-extra/clang-tidy/utils/LexerUtils.h b/clang-tools-extra/clang-tidy/utils/LexerUtils.h index ea9bd512b68b8..afd63885e388c 100644 --- a/clang-tools-extra/clang-tidy/utils/LexerUtils.h +++ b/clang-tools-extra/clang-tidy/utils/LexerUtils.h @@ -89,9 +89,11 @@ SourceLocation findNextAnyTokenKind(SourceLocation Start, } } -std::optional +inline std::optional findNextTokenIncludingComments(SourceLocation Start, const SourceManager &SM, - const LangOptions &LangOpts); + const LangOptions &LangOpts) { + return Lexer::findNextToken(Start, SM, LangOpts, true); +} // Finds next token that's not a comment. std::optional findNextTokenSkippingComments(SourceLocation Start, diff --git a/clang/docs/Multilib.rst b/clang/docs/Multilib.rst index 7637d0db9565b..d36b73dce68cd 100644 --- a/clang/docs/Multilib.rst +++ b/clang/docs/Multilib.rst @@ -122,6 +122,73 @@ subclass and a suitable base multilib variant is present then the It is the responsibility of layered multilib authors to ensure that headers and libraries in each layer are complete enough to mask any incompatibilities. +Multilib custom flags +===================== + +Introduction +------------ + +The multilib mechanism supports library variants that correspond to target, +code generation or language command-line flags. Examples include ``--target``, +``-mcpu``, ``-mfpu``, ``-mbranch-protection``, ``-fno-rtti``. However, some library +variants are particular to features that do not correspond to any command-line +option. Multithreading and semihosting, for instance, have no associated +compiler option. 
+ +In order to support the selection of variants for which no compiler option +exists, the multilib specification includes the concept of *custom flags*. +These flags have no impact on code generation and are only used in the multilib +processing. + +Multilib custom flags follow this format in the driver invocation: + +:: + + -fmultilib-flag= + +They are fed into the multilib system alongside the remaining flags. + +Custom flag declarations +------------------------ + +Custom flags can be declared in the YAML file under the *Flags* section. + +.. code-block:: yaml + + Flags: + - Name: multithreaded + Values: + - Name: no-multithreaded + MacroDefines: [__SINGLE_THREAD__] + - Name: multithreaded + Default: no-multithreaded + +* Name: the name to categorize a flag. +* Values: a list of flag Values (defined below). +* Default: it specifies the name of the value this flag should take if not + specified in the command-line invocation. It must be one value from the Values + field. + +Each flag *Value* is defined as: + +* Name: name of the value. This is the string to be used in + ``-fmultilib-flag=``. +* MacroDefines: a list of strings to be used as macro definitions. Each string + is fed into the driver as ``-D``. + +The namespace of flag values is common across all flags. This means that flag +value names must be unique. + +Usage of custom flags in the *Variants* specifications +------------------------------------------------------ + +Library variants should list their requirement on one or more custom flags like +they do for any other flag. Each requirement must be listed as +``-fmultilib-flag=``. + +A variant that does not specify a requirement on one particular flag can be +matched against any value of that flag. + Stability ========= @@ -222,6 +289,23 @@ For a more comprehensive example see # Flags is a list of one or more strings. Flags: [--target=thumbv7m-none-eabi] + # Custom flag declarations. Each item is a different declaration. 
+ Flags: + # Name of the flag + - Name: multithreaded + # List of custom flag values + Values: + # Name of the custom flag value. To be used in -fmultilib-flag=. + - Name: no-multithreaded + # Macro definitions. Useful for defining extra macros for building the + # associated library variant(s). + MacroDefines: [__SINGLE_THREAD__] + - Name: multithreaded + # Default flag value. If no value for this flag declaration is used in the + # command-line, the multilib system will use this one. Must be equal to one + # of the flag value names from this flag declaration. + Default: no-multithreaded + Design principles ================= diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 6ac91f43e66d8..aa1c02d04f7ca 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -316,6 +316,8 @@ C++23 Feature Support C++20 Feature Support ^^^^^^^^^^^^^^^^^^^^^ +- Implemented module level lookup for C++20 modules. (#GH90154) + Resolutions to C++ Defect Reports ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -787,6 +789,7 @@ Improvements to Clang's diagnostics scope.Unlock(); require(scope); // Warning! Requires mu1. } +- Diagnose invalid declarators in the declaration of constructors and destructors (#GH121706). Improvements to Clang's time-trace ---------------------------------- @@ -950,6 +953,10 @@ Bug Fixes to C++ Support - Clang now identifies unexpanded parameter packs within the type constraint on a non-type template parameter. (#GH88866) - Fixed an issue while resolving type of expression indexing into a pack of values of non-dependent type (#GH121242) - Fixed a crash when __PRETTY_FUNCTION__ or __FUNCSIG__ (clang-cl) appears in the trailing return type of the lambda (#GH121274) +- Fixed a crash caused by the incorrect construction of template arguments for CTAD alias guides when type + constraints are applied. (#GH122134) +- Fixed canonicalization of pack indexing types - Clang did not always recognize identical pack indexing. 
(#GH123033) + Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h index 27a91a2d07210..186a3e7fca59d 100644 --- a/clang/include/clang/AST/Decl.h +++ b/clang/include/clang/AST/Decl.h @@ -4021,7 +4021,7 @@ class EnumDecl : public TagDecl { QualType getIntegerType() const { if (!IntegerType) return QualType(); - if (const Type *T = IntegerType.dyn_cast()) + if (const Type *T = dyn_cast(IntegerType)) return QualType(T, 0); return cast(IntegerType)->getType().getUnqualifiedType(); } diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h index 71ab9178509b2..573b46a2321c5 100644 --- a/clang/include/clang/AST/DeclBase.h +++ b/clang/include/clang/AST/DeclBase.h @@ -836,6 +836,10 @@ class alignas(8) Decl { return isFromASTFile() ? getImportedOwningModule() : getLocalOwningModule(); } + /// Get the top level owning named module that owns this declaration if any. + /// \returns nullptr if the declaration is not owned by a named module. + Module *getTopLevelOwningNamedModule() const; + /// Get the module that owns this declaration for linkage purposes. /// There only ever is such a standard C++ module. Module *getOwningModuleForLinkage() const; @@ -2722,6 +2726,9 @@ class DeclContext { bool Deserialize = false) const; private: + lookup_result lookupImpl(DeclarationName Name, + const DeclContext *OriginalLookupDC) const; + /// Whether this declaration context has had externally visible /// storage added since the last lookup. 
In this case, \c LookupPtr's /// invariant may not hold and needs to be fixed before we perform diff --git a/clang/include/clang/AST/ExternalASTMerger.h b/clang/include/clang/AST/ExternalASTMerger.h index ec4cfbe2175c0..2c6f2a941311b 100644 --- a/clang/include/clang/AST/ExternalASTMerger.h +++ b/clang/include/clang/AST/ExternalASTMerger.h @@ -141,7 +141,8 @@ class ExternalASTMerger : public ExternalASTSource { /// Implementation of the ExternalASTSource API. bool FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) override; + DeclarationName Name, + const DeclContext *OriginalDC) override; /// Implementation of the ExternalASTSource API. void diff --git a/clang/include/clang/AST/ExternalASTSource.h b/clang/include/clang/AST/ExternalASTSource.h index 4d7ff822fceb7..42aed56d42e07 100644 --- a/clang/include/clang/AST/ExternalASTSource.h +++ b/clang/include/clang/AST/ExternalASTSource.h @@ -145,12 +145,20 @@ class ExternalASTSource : public RefCountedBase { /// Find all declarations with the given name in the given context, /// and add them to the context by calling SetExternalVisibleDeclsForName /// or SetNoExternalVisibleDeclsForName. + /// \param DC The context for lookup in. \c DC should be a primary context. + /// \param Name The name to look for. + /// \param OriginalDC The original context for lookup. \c OriginalDC can + /// provide more information than \c DC. e.g., The same namespace can appear + /// in multiple module units. So we need the \c OriginalDC to tell us which + /// module the lookup comes from. + /// + /// \return \c true if any declarations might have been found, \c false if + /// we definitely have no declarations with this name. /// /// The default implementation of this method is a no-op returning \c false. 
- virtual bool - FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name); + virtual bool FindExternalVisibleDeclsByName(const DeclContext *DC, + DeclarationName Name, + const DeclContext *OriginalDC); /// Load all the external specializations for the Decl \param D if \param /// OnlyPartial is false. Otherwise, load all the external **partial** diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index e903c2c000ee9..bbf4886b5cf05 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -4795,6 +4795,12 @@ def HLSLWaveActiveCountBits : LangBuiltin<"HLSL_LANG"> { let Prototype = "unsigned int(bool)"; } +def HLSLWaveActiveSum : LangBuiltin<"HLSL_LANG"> { + let Spellings = ["__builtin_hlsl_wave_active_sum"]; + let Attributes = [NoThrow, Const]; + let Prototype = "void (...)"; +} + def HLSLWaveGetLaneIndex : LangBuiltin<"HLSL_LANG"> { let Spellings = ["__builtin_hlsl_wave_get_lane_index"]; let Attributes = [NoThrow, Const]; diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index fe88fab0c26f8..db54312ad965e 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -2204,6 +2204,8 @@ def err_invalid_qualified_constructor : Error< "'%0' qualifier is not allowed on a constructor">; def err_ref_qualifier_constructor : Error< "ref-qualifier '%select{&&|&}0' is not allowed on a constructor">; +def err_invalid_ctor_dtor_decl : Error< + "invalid %select{constructor|destructor}0 declaration">; def err_constructor_return_type : Error< "constructor cannot have a return type">; @@ -9303,7 +9305,7 @@ def err_typecheck_expect_scalar_or_vector : Error< "invalid operand of type %0 where %1 or " "a vector of such type is required">; def err_typecheck_expect_any_scalar_or_vector : Error< - "invalid operand of type %0 where a scalar or vector is required">; + "invalid 
operand of type %0%select{| where a scalar or vector is required}1">; def err_typecheck_expect_flt_or_vector : Error< "invalid operand of type %0 where floating, complex or " "a vector of such types is required">; diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td index 47f1754aeb629..ac1c139b20943 100644 --- a/clang/include/clang/Basic/arm_sve.td +++ b/clang/include/clang/Basic/arm_sve.td @@ -1954,13 +1954,13 @@ let SVETargetGuard = "sve2,lut", SMETargetGuard = "sme2,lut" in { def SVLUTI4_B : SInst<"svluti4_lane[_{d}]", "dd[i", "cUc", MergeNone, "aarch64_sve_luti4_lane", [VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_1>]>; def SVLUTI4_H : SInst<"svluti4_lane[_{d}]", "dd[i", "sUsh", MergeNone, "aarch64_sve_luti4_lane", [VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; - def SVLUTI4_x2 : SInst<"svluti4_lane[_{d}]_x2", "d2.d[i", "sUsh", MergeNone, "aarch64_sve_luti4_lane_x2", [VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; + def SVLUTI4_x2 : SInst<"svluti4_lane[_{d}_x2]", "d2.d[i", "sUsh", MergeNone, "aarch64_sve_luti4_lane_x2", [VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; } let SVETargetGuard = "sve2,lut,bf16", SMETargetGuard = "sme2,lut,bf16" in { def SVLUTI2_BF16 : SInst<"svluti2_lane[_{d}]", "dd[i", "b", MergeNone, "aarch64_sve_luti2_lane", [ VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_7>]>; def SVLUTI4_BF16 : SInst<"svluti4_lane[_{d}]", "dd[i", "b", MergeNone, "aarch64_sve_luti4_lane", [ VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; - def SVLUTI4_BF16_x2 : SInst<"svluti4_lane[_{d}]_x2", "d2.d[i", "b", MergeNone, "aarch64_sve_luti4_lane_x2", [ VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; + def SVLUTI4_BF16_x2 : SInst<"svluti4_lane[_{d}_x2]", "d2.d[i", "b", MergeNone, "aarch64_sve_luti4_lane_x2", [ VerifyRuntimeMode], [ImmCheck<2, ImmCheck0_3>]>; } //////////////////////////////////////////////////////////////////////////////// diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h 
index e2eec58ba99d3..f4a52cc529b79 100644 --- a/clang/include/clang/Driver/Driver.h +++ b/clang/include/clang/Driver/Driver.h @@ -491,7 +491,7 @@ class Driver { /// ArgList. llvm::opt::InputArgList ParseArgStrings(ArrayRef Args, bool UseDriverMode, - bool &ContainsError); + bool &ContainsError) const; /// BuildInputs - Construct the list of inputs and their types from /// the given arguments. diff --git a/clang/include/clang/Driver/Multilib.h b/clang/include/clang/Driver/Multilib.h index 0a533ed2804e2..fc071ef48ca0f 100644 --- a/clang/include/clang/Driver/Multilib.h +++ b/clang/include/clang/Driver/Multilib.h @@ -168,9 +168,18 @@ class MultilibSet { const_iterator begin() const { return Multilibs.begin(); } const_iterator end() const { return Multilibs.end(); } + /// Process custom flags from \p Flags and returns an expanded flags list and + /// a list of macro defines. + /// Returns a pair where: + /// - first: the new flags list including custom flags after processing. + /// - second: the extra macro defines to be fed to the driver. 
+ std::pair> + processCustomFlags(const Driver &D, const Multilib::flags_list &Flags) const; + /// Select compatible variants, \returns false if none are compatible bool select(const Driver &D, const Multilib::flags_list &Flags, - llvm::SmallVectorImpl &) const; + llvm::SmallVectorImpl &, + llvm::SmallVector * = nullptr) const; unsigned size() const { return Multilibs.size(); } diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 4bab2ae4d8dd5..d38dd2b4e3cf0 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -6670,6 +6670,7 @@ def fbinutils_version_EQ : Joined<["-"], "fbinutils-version=">, def fuse_ld_EQ : Joined<["-"], "fuse-ld=">, Group, Flags<[LinkOption]>, Visibility<[ClangOption, FlangOption, CLOption]>; def ld_path_EQ : Joined<["--"], "ld-path=">, Group; +def fuse_lipo_EQ : Joined<["-"], "fuse-lipo=">, Group, Flags<[LinkOption]>; defm align_labels : BooleanFFlag<"align-labels">, Group; def falign_labels_EQ : Joined<["-"], "falign-labels=">, Group; diff --git a/clang/include/clang/Driver/ToolChain.h b/clang/include/clang/Driver/ToolChain.h index 701a1d25ca4c8..7d1d8feebf35e 100644 --- a/clang/include/clang/Driver/ToolChain.h +++ b/clang/include/clang/Driver/ToolChain.h @@ -686,6 +686,13 @@ class ToolChain { /// Add warning options that need to be passed to cc1 for this target. virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const; + // Get the list of extra macro defines requested by the multilib + // configuration. + virtual SmallVector + getMultilibMacroDefinesStr(llvm::opt::ArgList &Args) const { + return {}; + }; + // GetRuntimeLibType - Determine the runtime library type to use with the // given compilation arguments. 
virtual RuntimeLibType diff --git a/clang/include/clang/Lex/Lexer.h b/clang/include/clang/Lex/Lexer.h index b6ecc7e5ded9e..82a041ea3f848 100644 --- a/clang/include/clang/Lex/Lexer.h +++ b/clang/include/clang/Lex/Lexer.h @@ -554,7 +554,8 @@ class Lexer : public PreprocessorLexer { /// Returns the next token, or std::nullopt if the location is inside a macro. static std::optional findNextToken(SourceLocation Loc, const SourceManager &SM, - const LangOptions &LangOpts); + const LangOptions &LangOpts, + bool IncludeComments = false); /// Checks that the given token is the first token that occurs after /// the given location (this excludes comments and whitespace). Returns the diff --git a/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/clang/include/clang/Sema/MultiplexExternalSemaSource.h index 0c92c52854c9e..921bebe3a44af 100644 --- a/clang/include/clang/Sema/MultiplexExternalSemaSource.h +++ b/clang/include/clang/Sema/MultiplexExternalSemaSource.h @@ -95,7 +95,8 @@ class MultiplexExternalSemaSource : public ExternalSemaSource { /// Find all declarations with the given name in the /// given context. bool FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) override; + DeclarationName Name, + const DeclContext *OriginalDC) override; bool LoadExternalSpecializations(const Decl *D, bool OnlyPartial) override; diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index aac165130b719..40dae25f7b54b 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -738,6 +738,8 @@ enum ASTRecordTypes { CXX_ADDED_TEMPLATE_SPECIALIZATION = 74, CXX_ADDED_TEMPLATE_PARTIAL_SPECIALIZATION = 75, + + UPDATE_MODULE_LOCAL_VISIBLE = 76, }; /// Record types used within a source manager block. @@ -1334,6 +1336,10 @@ enum DeclCode { /// into a DeclContext via DeclContext::lookup. 
DECL_CONTEXT_VISIBLE, + /// A record containing the set of declarations that are + /// only visible from DeclContext in the same module. + DECL_CONTEXT_MODULE_LOCAL_VISIBLE, + /// A LabelDecl record. DECL_LABEL, diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h index 9f978762a6fb6..d77bb01c5aa59 100644 --- a/clang/include/clang/Serialization/ASTReader.h +++ b/clang/include/clang/Serialization/ASTReader.h @@ -353,6 +353,7 @@ class ASTIdentifierLookupTrait; /// The on-disk hash table(s) used for DeclContext name lookup. struct DeclContextLookupTable; +struct ModuleLocalLookupTable; /// The on-disk hash table(s) used for specialization decls. struct LazySpecializationInfoLookupTable; @@ -523,9 +524,14 @@ class ASTReader /// in the chain. DeclUpdateOffsetsMap DeclUpdateOffsets; + struct LookupBlockOffsets { + uint64_t LexicalOffset; + uint64_t VisibleOffset; + uint64_t ModuleLocalOffset; + }; + using DelayedNamespaceOffsetMapTy = - llvm::DenseMap>; + llvm::DenseMap; /// Mapping from global declaration IDs to the lexical and visible block /// offset for delayed namespace in reduced BMI. @@ -631,6 +637,9 @@ class ASTReader /// Map from a DeclContext to its lookup tables. llvm::DenseMap Lookups; + llvm::DenseMap + ModuleLocalLookups; using SpecLookupTableTy = llvm::DenseMap PendingVisibleUpdates; + llvm::DenseMap + PendingModuleLocalVisibleUpdates; using SpecializationsUpdate = SmallVector; using SpecializationsUpdateMap = @@ -696,7 +707,8 @@ class ASTReader /// Read the record that describes the visible contents of a DC. bool ReadVisibleDeclContextStorage(ModuleFile &M, llvm::BitstreamCursor &Cursor, - uint64_t Offset, GlobalDeclID ID); + uint64_t Offset, GlobalDeclID ID, + bool IsModuleLocal); bool ReadSpecializations(ModuleFile &M, llvm::BitstreamCursor &Cursor, uint64_t Offset, Decl *D, bool IsPartial); @@ -1132,6 +1144,10 @@ class ASTReader /// Number of visible decl contexts read/total. 
unsigned NumVisibleDeclContextsRead = 0, TotalVisibleDeclContexts = 0; + /// Number of module local visible decl contexts read/total. + unsigned NumModuleLocalVisibleDeclContexts = 0, + TotalModuleLocalVisibleDeclContexts = 0; + /// Total size of modules, in bits, currently loaded uint64_t TotalModulesSizeInBits = 0; @@ -1444,6 +1460,9 @@ class ASTReader const serialization::reader::DeclContextLookupTable * getLoadedLookupTables(DeclContext *Primary) const; + const serialization::reader::ModuleLocalLookupTable * + getModuleLocalLookupTables(DeclContext *Primary) const; + /// Get the loaded specializations lookup tables for \p D, /// if any. serialization::reader::LazySpecializationInfoLookupTable * @@ -2119,7 +2138,8 @@ class ASTReader /// The current implementation of this method just loads the entire /// lookup table as unmaterialized references. bool FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) override; + DeclarationName Name, + const DeclContext *OriginalDC) override; /// Read all of the declarations lexically stored in a /// declaration context. @@ -2607,6 +2627,10 @@ inline bool shouldSkipCheckingODR(const Decl *D) { (D->isFromGlobalModule() || D->isFromHeaderUnit()); } +/// Calculate a hash value for the primary module name of the given module. +/// \returns std::nullopt if M is not a C++ standard module. +std::optional getPrimaryModuleHash(const Module *M); + } // namespace clang #endif // LLVM_CLANG_SERIALIZATION_ASTREADER_H diff --git a/clang/include/clang/Serialization/ASTWriter.h b/clang/include/clang/Serialization/ASTWriter.h index adb7cce522a80..53b09cc914392 100644 --- a/clang/include/clang/Serialization/ASTWriter.h +++ b/clang/include/clang/Serialization/ASTWriter.h @@ -492,6 +492,10 @@ class ASTWriter : public ASTDeserializationListener, /// file. unsigned NumVisibleDeclContexts = 0; + /// The number of module local visible declcontexts written to the AST + /// file. 
+ unsigned NumModuleLocalDeclContexts = 0; + /// A mapping from each known submodule to its ID number, which will /// be a positive integer. llvm::DenseMap SubmoduleIDs; @@ -587,11 +591,15 @@ class ASTWriter : public ASTDeserializationListener, uint64_t WriteSpecializationInfoLookupTable( const NamedDecl *D, llvm::SmallVectorImpl &Specializations, bool IsPartial); - void GenerateNameLookupTable(ASTContext &Context, const DeclContext *DC, - llvm::SmallVectorImpl &LookupTable); + void + GenerateNameLookupTable(ASTContext &Context, const DeclContext *DC, + llvm::SmallVectorImpl &LookupTable, + llvm::SmallVectorImpl &ModuleLocalLookupTable); uint64_t WriteDeclContextLexicalBlock(ASTContext &Context, const DeclContext *DC); - uint64_t WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC); + void WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC, + uint64_t &VisibleBlockOffset, + uint64_t &ModuleLocalBlockOffset); void WriteTypeDeclOffsets(); void WriteFileDeclIDsMap(); void WriteComments(ASTContext &Context); @@ -624,7 +632,9 @@ class ASTWriter : public ASTDeserializationListener, unsigned DeclParmVarAbbrev = 0; unsigned DeclContextLexicalAbbrev = 0; unsigned DeclContextVisibleLookupAbbrev = 0; + unsigned DeclModuleLocalVisibleLookupAbbrev = 0; unsigned UpdateVisibleAbbrev = 0; + unsigned ModuleLocalUpdateVisibleAbbrev = 0; unsigned DeclRecordAbbrev = 0; unsigned DeclTypedefAbbrev = 0; unsigned DeclVarAbbrev = 0; diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index be1dd29d46278..d0ce4c511aedd 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -6248,7 +6248,8 @@ QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr, Canonical = getCanonicalType(Expansions[Index]); } else { llvm::FoldingSetNodeID ID; - PackIndexingType::Profile(ID, *this, Pattern, IndexExpr, FullySubstituted); + PackIndexingType::Profile(ID, *this, Pattern.getCanonicalType(), IndexExpr, + 
FullySubstituted); void *InsertPos = nullptr; PackIndexingType *Canon = DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos); @@ -6256,8 +6257,9 @@ QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr, void *Mem = Allocate( PackIndexingType::totalSizeToAlloc(Expansions.size()), TypeAlignment); - Canon = new (Mem) PackIndexingType(*this, QualType(), Pattern, IndexExpr, - FullySubstituted, Expansions); + Canon = new (Mem) + PackIndexingType(*this, QualType(), Pattern.getCanonicalType(), + IndexExpr, FullySubstituted, Expansions); DependentPackIndexingTypes.InsertNode(Canon, InsertPos); } Canonical = QualType(Canon, 0); diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp index fb701f76231bc..2886aebdf52e9 100644 --- a/clang/lib/AST/DeclBase.cpp +++ b/clang/lib/AST/DeclBase.cpp @@ -130,6 +130,14 @@ void Decl::setOwningModuleID(unsigned ID) { *IDAddress |= (uint64_t)ID << 48; } +Module *Decl::getTopLevelOwningNamedModule() const { + if (getOwningModule() && + getOwningModule()->getTopLevelModule()->isNamedModule()) + return getOwningModule()->getTopLevelModule(); + + return nullptr; +} + Module *Decl::getOwningModuleSlow() const { assert(isFromASTFile() && "Not from AST file?"); return getASTContext().getExternalSource()->getModule(getOwningModuleID()); @@ -1856,9 +1864,16 @@ DeclContext::lookup(DeclarationName Name) const { if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export) return getParent()->lookup(Name); - const DeclContext *PrimaryContext = getPrimaryContext(); - if (PrimaryContext != this) - return PrimaryContext->lookup(Name); + return getPrimaryContext()->lookupImpl(Name, this); +} + +DeclContext::lookup_result +DeclContext::lookupImpl(DeclarationName Name, + const DeclContext *OriginalLookupDC) const { + assert(this == getPrimaryContext() && + "lookupImpl should only be called with primary DC!"); + assert(getDeclKind() != Decl::LinkageSpec && getDeclKind() != Decl::Export && + "We shouldn't 
lookup in transparent DC."); // If we have an external source, ensure that any later redeclarations of this // context have been loaded, since they may add names to the result of this @@ -1889,7 +1904,8 @@ DeclContext::lookup(DeclarationName Name) const { if (!R.second && !R.first->second.hasExternalDecls()) return R.first->second.getLookupResult(); - if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) { + if (Source->FindExternalVisibleDeclsByName(this, Name, OriginalLookupDC) || + !R.second) { if (StoredDeclsMap *Map = LookupPtr) { StoredDeclsMap::iterator I = Map->find(Name); if (I != Map->end()) @@ -2115,7 +2131,8 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) { if (ExternalASTSource *Source = getParentASTContext().getExternalSource()) if (hasExternalVisibleStorage() && Map->find(D->getDeclName()) == Map->end()) - Source->FindExternalVisibleDeclsByName(this, D->getDeclName()); + Source->FindExternalVisibleDeclsByName(this, D->getDeclName(), + D->getDeclContext()); // Insert this declaration into the map. 
StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()]; diff --git a/clang/lib/AST/ExternalASTMerger.cpp b/clang/lib/AST/ExternalASTMerger.cpp index 7f7816e1b10ea..257e8338dedef 100644 --- a/clang/lib/AST/ExternalASTMerger.cpp +++ b/clang/lib/AST/ExternalASTMerger.cpp @@ -471,8 +471,9 @@ static bool importSpecializationsIfNeeded(Decl *D, ASTImporter *Importer) { return false; } -bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) { +bool ExternalASTMerger::FindExternalVisibleDeclsByName( + const DeclContext *DC, DeclarationName Name, + const DeclContext *OriginalDC) { llvm::SmallVector Decls; llvm::SmallVector Candidates; diff --git a/clang/lib/AST/ExternalASTSource.cpp b/clang/lib/AST/ExternalASTSource.cpp index 543846c0093af..e2451f294741d 100644 --- a/clang/lib/AST/ExternalASTSource.cpp +++ b/clang/lib/AST/ExternalASTSource.cpp @@ -90,9 +90,9 @@ ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) { return nullptr; } -bool -ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) { +bool ExternalASTSource::FindExternalVisibleDeclsByName( + const DeclContext *DC, DeclarationName Name, + const DeclContext *OriginalDC) { return false; } diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp index bef5fa8624ce4..a9aff39df6474 100644 --- a/clang/lib/Analysis/UnsafeBufferUsage.cpp +++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp @@ -453,11 +453,8 @@ AST_MATCHER(ArraySubscriptExpr, isSafeArraySubscript) { return false; } - Expr::EvalResult EVResult; - if (Node.getIdx()->EvaluateAsInt(EVResult, Finder->getASTContext())) { - llvm::APSInt ArrIdx = EVResult.Val.getInt(); - // FIXME: ArrIdx.isNegative() we could immediately emit an error as that's a - // bug + if (const auto *IdxLit = dyn_cast(Node.getIdx())) { + const APInt ArrIdx = IdxLit->getValue(); if (ArrIdx.isNonNegative() && ArrIdx.getLimitedValue() < limit) return true; 
} diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp index 4e211deb9faba..0b899137bbb5c 100644 --- a/clang/lib/Basic/Targets/AArch64.cpp +++ b/clang/lib/Basic/Targets/AArch64.cpp @@ -421,7 +421,7 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts, #define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P)) Builder.defineMacro("__ARM_ACLE", Twine(ARM_ACLE_VERSION(2024, 2, 0))); Builder.defineMacro("__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL", - Twine(ARM_ACLE_VERSION(2024, 2, 0))); + Twine(ARM_ACLE_VERSION(2024, 3, 0))); #undef ARM_ACLE_VERSION Builder.defineMacro("__ARM_ARCH", std::to_string(ArchInfo->Version.getMajor())); diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 2b41a87525f19..2385f2a320b62 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -19186,6 +19186,23 @@ static Intrinsic::ID getFirstBitHighIntrinsic(CGHLSLRuntime &RT, QualType QT) { return RT.getFirstBitUHighIntrinsic(); } +// Return wave active sum that corresponds to the QT scalar type +static Intrinsic::ID getWaveActiveSumIntrinsic(llvm::Triple::ArchType Arch, + CGHLSLRuntime &RT, QualType QT) { + switch (Arch) { + case llvm::Triple::spirv: + return llvm::Intrinsic::spv_wave_reduce_sum; + case llvm::Triple::dxil: { + if (QT->isUnsignedIntegerType()) + return llvm::Intrinsic::dx_wave_reduce_usum; + return llvm::Intrinsic::dx_wave_reduce_sum; + } + default: + llvm_unreachable("Intrinsic WaveActiveSum" + " not supported by target architecture"); + } +} + Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -19498,6 +19515,23 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: { Intrinsic::getOrInsertDeclaration(&CGM.getModule(), ID), ArrayRef{OpExpr}); } + case Builtin::BI__builtin_hlsl_wave_active_sum: { + // Due to the use of variadic arguments, explicitly retreive argument + Value *OpExpr = 
EmitScalarExpr(E->getArg(0)); + llvm::FunctionType *FT = llvm::FunctionType::get( + OpExpr->getType(), ArrayRef{OpExpr->getType()}, false); + Intrinsic::ID IID = getWaveActiveSumIntrinsic( + getTarget().getTriple().getArch(), CGM.getHLSLRuntime(), + E->getArg(0)->getType()); + + // Get overloaded name + std::string Name = + Intrinsic::getName(IID, ArrayRef{OpExpr->getType()}, &CGM.getModule()); + return EmitRuntimeCall(CGM.CreateRuntimeFunction(FT, Name, {}, + /*Local=*/false, + /*AssumeConvergent=*/true), + ArrayRef{OpExpr}, "hlsl.wave.active.sum"); + } case Builtin::BI__builtin_hlsl_wave_get_lane_index: { // We don't define a SPIR-V intrinsic, instead it is a SPIR-V built-in // defined in SPIRVBuiltins.td. So instead we manually get the matching name diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 6e5a21c8f01e7..9a9a8c7f6eae0 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -3607,18 +3607,15 @@ void CodeGenFunction::EmitCheck( llvm::Value *RecoverableCond = nullptr; llvm::Value *TrapCond = nullptr; bool NoMerge = false; - for (int i = 0, n = Checked.size(); i < n; ++i) { - llvm::Value *Check = Checked[i].first; + for (auto &[Check, Ord] : Checked) { // -fsanitize-trap= overrides -fsanitize-recover=. - llvm::Value *&Cond = - CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) - ? TrapCond - : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) - ? RecoverableCond - : FatalCond; + llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond + : CGM.getCodeGenOpts().SanitizeRecover.has(Ord) + ? RecoverableCond + : FatalCond; Cond = Cond ? 
Builder.CreateAnd(Cond, Check) : Check; - if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Checked[i].second)) + if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord)) NoMerge = true; } diff --git a/clang/lib/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CodeGen/ConstantInitBuilder.cpp index ddbf3ef743370..ce1fe137c1919 100644 --- a/clang/lib/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CodeGen/ConstantInitBuilder.cpp @@ -29,7 +29,7 @@ llvm::Type *ConstantInitFuture::getType() const { void ConstantInitFuture::abandon() { assert(Data && "abandoning null future"); - if (auto builder = Data.dyn_cast()) { + if (auto *builder = dyn_cast(Data)) { builder->abandon(0); } Data = nullptr; diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 7767c81d654dc..87855fdb79971 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -308,7 +308,8 @@ void Driver::setDriverMode(StringRef Value) { } InputArgList Driver::ParseArgStrings(ArrayRef ArgStrings, - bool UseDriverMode, bool &ContainsError) { + bool UseDriverMode, + bool &ContainsError) const { llvm::PrettyStackTraceString CrashInfo("Command line argument parsing"); ContainsError = false; @@ -1674,13 +1675,31 @@ Compilation *Driver::BuildCompilation(ArrayRef ArgList) { std::unique_ptr UArgs = std::make_unique(std::move(Args)); + // Owned by the host. 
+ const ToolChain &TC = + getToolChain(*UArgs, computeTargetTriple(*this, TargetTriple, *UArgs)); + + { + SmallVector MultilibMacroDefinesStr = + TC.getMultilibMacroDefinesStr(*UArgs); + SmallVector MLMacroDefinesChar( + llvm::map_range(MultilibMacroDefinesStr, [&UArgs](const auto &S) { + return UArgs->MakeArgString(Twine("-D") + Twine(S)); + })); + bool MLContainsError; + auto MultilibMacroDefineList = + std::make_unique(ParseArgStrings( + MLMacroDefinesChar, /*UseDriverMode=*/false, MLContainsError)); + if (!MLContainsError) { + for (auto *Opt : *MultilibMacroDefineList) { + appendOneArg(*UArgs, Opt); + } + } + } + // Perform the default argument translations. DerivedArgList *TranslatedArgs = TranslateInputArgs(*UArgs); - // Owned by the host. - const ToolChain &TC = getToolChain( - *UArgs, computeTargetTriple(*this, TargetTriple, *UArgs)); - // Check if the environment version is valid except wasm case. llvm::Triple Triple = TC.getTriple(); if (!Triple.isWasm()) { diff --git a/clang/lib/Driver/Multilib.cpp b/clang/lib/Driver/Multilib.cpp index ccf747e90cb2c..efb99d3ffc752 100644 --- a/clang/lib/Driver/Multilib.cpp +++ b/clang/lib/Driver/Multilib.cpp @@ -92,12 +92,145 @@ MultilibSet &MultilibSet::FilterOut(FilterCallback F) { void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); } -bool MultilibSet::select(const Driver &D, const Multilib::flags_list &Flags, - llvm::SmallVectorImpl &Selected) const { - llvm::StringSet<> FlagSet(expandFlags(Flags)); +static void DiagnoseUnclaimedMultilibCustomFlags( + const Driver &D, const SmallVector &UnclaimedCustomFlagValues, + const SmallVector &CustomFlagDecls) { + struct EditDistanceInfo { + StringRef FlagValue; + unsigned EditDistance; + }; + const unsigned MaxEditDistance = 5; + + for (StringRef Unclaimed : UnclaimedCustomFlagValues) { + std::optional BestCandidate; + for (const auto &Decl : CustomFlagDecls) { + for (const auto &Value : Decl.ValueList) { + const std::string &FlagValueName = Value.Name; + 
unsigned EditDistance = + Unclaimed.edit_distance(FlagValueName, /*AllowReplacements=*/true, + /*MaxEditDistance=*/MaxEditDistance); + if (!BestCandidate || (EditDistance <= MaxEditDistance && + EditDistance < BestCandidate->EditDistance)) { + BestCandidate = {FlagValueName, EditDistance}; + } + } + } + if (!BestCandidate) + D.Diag(clang::diag::err_drv_unsupported_opt) + << (custom_flag::Prefix + Unclaimed).str(); + else + D.Diag(clang::diag::err_drv_unsupported_opt_with_suggestion) + << (custom_flag::Prefix + Unclaimed).str() + << (custom_flag::Prefix + BestCandidate->FlagValue).str(); + } +} + +namespace clang::driver::custom_flag { +// Map implemented using linear searches as the expected size is too small for +// the overhead of a search tree or a hash table. +class ValueNameToDetailMap { + SmallVector> Mapping; + +public: + template + ValueNameToDetailMap(It FlagDeclsBegin, It FlagDeclsEnd) { + for (auto DeclIt = FlagDeclsBegin; DeclIt != FlagDeclsEnd; ++DeclIt) { + const Declaration &Decl = *DeclIt; + for (const auto &Value : Decl.ValueList) + Mapping.emplace_back(Value.Name, &Value); + } + } + + const ValueDetail *get(StringRef Key) const { + auto Iter = llvm::find_if( + Mapping, [&](const auto &Pair) { return Pair.first == Key; }); + return Iter != Mapping.end() ? Iter->second : nullptr; + } +}; +} // namespace clang::driver::custom_flag + +std::pair> +MultilibSet::processCustomFlags(const Driver &D, + const Multilib::flags_list &Flags) const { + Multilib::flags_list Result; + SmallVector MacroDefines; + + // Custom flag values detected in the flags list + SmallVector ClaimedCustomFlagValues; + + // Arguments to -fmultilib-flag= that don't correspond to any valid + // custom flag value. An error will be printed out for each of these. 
+ SmallVector UnclaimedCustomFlagValueStrs; + + const auto ValueNameToValueDetail = custom_flag::ValueNameToDetailMap( + CustomFlagDecls.begin(), CustomFlagDecls.end()); + + for (StringRef Flag : Flags) { + if (!Flag.starts_with(custom_flag::Prefix)) { + Result.push_back(Flag.str()); + continue; + } + + StringRef CustomFlagValueStr = Flag.substr(custom_flag::Prefix.size()); + const custom_flag::ValueDetail *Detail = + ValueNameToValueDetail.get(CustomFlagValueStr); + if (Detail) + ClaimedCustomFlagValues.push_back(Detail); + else + UnclaimedCustomFlagValueStrs.push_back(CustomFlagValueStr); + } + + // Set of custom flag declarations for which a value was passed in the flags + // list. This is used to, firstly, detect multiple values for the same flag + // declaration (in this case, the last one wins), and secondly, to detect + // which declarations had no value passed in (in this case, the default value + // is selected). + llvm::SmallPtrSet TriggeredCustomFlagDecls; + + // Detect multiple values for the same flag declaration. Last one wins. + for (auto *CustomFlagValue : llvm::reverse(ClaimedCustomFlagValues)) { + if (!TriggeredCustomFlagDecls.insert(CustomFlagValue->Decl).second) + continue; + Result.push_back(std::string(custom_flag::Prefix) + CustomFlagValue->Name); + if (CustomFlagValue->MacroDefines) + MacroDefines.append(CustomFlagValue->MacroDefines->begin(), + CustomFlagValue->MacroDefines->end()); + } + + // Detect flag declarations with no value passed in. Select default value. 
+ for (const auto &Decl : CustomFlagDecls) { + if (TriggeredCustomFlagDecls.contains(&Decl)) + continue; + const custom_flag::ValueDetail &CustomFlagValue = + Decl.ValueList[*Decl.DefaultValueIdx]; + Result.push_back(std::string(custom_flag::Prefix) + CustomFlagValue.Name); + if (CustomFlagValue.MacroDefines) + MacroDefines.append(CustomFlagValue.MacroDefines->begin(), + CustomFlagValue.MacroDefines->end()); + } + + DiagnoseUnclaimedMultilibCustomFlags(D, UnclaimedCustomFlagValueStrs, + CustomFlagDecls); + + return {Result, MacroDefines}; +} + +bool MultilibSet::select( + const Driver &D, const Multilib::flags_list &Flags, + llvm::SmallVectorImpl &Selected, + llvm::SmallVector *CustomFlagMacroDefines) const { + auto [FlagsWithCustom, CFMacroDefines] = processCustomFlags(D, Flags); + llvm::StringSet<> FlagSet(expandFlags(FlagsWithCustom)); Selected.clear(); bool AnyErrors = false; + // Determining the list of macro defines depends only on the custom flags + // passed in. The library variants actually selected are not relevant in + // this. Therefore this assignment can take place before the selection + // happens. + if (CustomFlagMacroDefines) + *CustomFlagMacroDefines = std::move(CFMacroDefines); + // Decide which multilibs we're going to select at all. llvm::DenseSet ExclusiveGroupsSelected; for (const Multilib &M : llvm::reverse(Multilibs)) { diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp index 2fb16d2e41320..c648fb66085c7 100644 --- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp +++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp @@ -659,13 +659,21 @@ llvm::ARM::FPUKind arm::getARMTargetFeatures(const Driver &D, CPUArgFPUKind != llvm::ARM::FK_INVALID ? 
CPUArgFPUKind : ArchArgFPUKind; (void)llvm::ARM::getFPUFeatures(FPUKind, Features); } else { + bool Generic = true; if (!ForAS) { std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple); + if (CPU != "generic") + Generic = false; llvm::ARM::ArchKind ArchKind = arm::getLLVMArchKindForARM(CPU, ArchName, Triple); FPUKind = llvm::ARM::getDefaultFPU(CPU, ArchKind); (void)llvm::ARM::getFPUFeatures(FPUKind, Features); } + if (Generic && (Triple.isOSWindows() || Triple.isOSDarwin()) && + getARMSubArchVersionNumber(Triple) >= 7) { + FPUKind = llvm::ARM::parseFPU("neon"); + (void)llvm::ARM::getFPUFeatures(FPUKind, Features); + } } // Now we've finished accumulating features from arch, cpu and fpu, diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp index eecaaa9a42930..ffb1c6e34d603 100644 --- a/clang/lib/Driver/ToolChains/BareMetal.cpp +++ b/clang/lib/Driver/ToolChains/BareMetal.cpp @@ -162,9 +162,11 @@ static bool isPPCBareMetal(const llvm::Triple &Triple) { Triple.getEnvironment() == llvm::Triple::EABI; } -static void findMultilibsFromYAML(const ToolChain &TC, const Driver &D, - StringRef MultilibPath, const ArgList &Args, - DetectedMultilibs &Result) { +static void +findMultilibsFromYAML(const ToolChain &TC, const Driver &D, + StringRef MultilibPath, const ArgList &Args, + DetectedMultilibs &Result, + SmallVector &CustomFlagsMacroDefines) { llvm::ErrorOr> MB = D.getVFS().getBufferForFile(MultilibPath); if (!MB) @@ -175,7 +177,8 @@ static void findMultilibsFromYAML(const ToolChain &TC, const Driver &D, if (ErrorOrMultilibSet.getError()) return; Result.Multilibs = ErrorOrMultilibSet.get(); - if (Result.Multilibs.select(D, Flags, Result.SelectedMultilibs)) + if (Result.Multilibs.select(D, Flags, Result.SelectedMultilibs, + &CustomFlagsMacroDefines)) return; D.Diag(clang::diag::warn_drv_missing_multilib) << llvm::join(Flags, " "); std::stringstream ss; @@ -234,9 +237,13 @@ void BareMetal::findMultilibs(const Driver &D, 
const llvm::Triple &Triple, // If multilib.yaml is found, update sysroot so it doesn't use a target // specific suffix SysRoot = computeBaseSysRoot(D, /*IncludeTriple=*/false); - findMultilibsFromYAML(*this, D, *MultilibPath, Args, Result); + SmallVector CustomFlagMacroDefines; + findMultilibsFromYAML(*this, D, *MultilibPath, Args, Result, + CustomFlagMacroDefines); SelectedMultilibs = Result.SelectedMultilibs; Multilibs = Result.Multilibs; + MultilibMacroDefines.append(CustomFlagMacroDefines.begin(), + CustomFlagMacroDefines.end()); } else if (isRISCVBareMetal(Triple)) { if (findRISCVMultilibs(D, Triple, Args, Result)) { SelectedMultilibs = Result.SelectedMultilibs; @@ -551,3 +558,8 @@ SanitizerMask BareMetal::getSupportedSanitizers() const { } return Res; } + +SmallVector +BareMetal::getMultilibMacroDefinesStr(llvm::opt::ArgList &Args) const { + return MultilibMacroDefines; +} diff --git a/clang/lib/Driver/ToolChains/BareMetal.h b/clang/lib/Driver/ToolChains/BareMetal.h index 483b5efab5e6e..f6295bda0a6a2 100644 --- a/clang/lib/Driver/ToolChains/BareMetal.h +++ b/clang/lib/Driver/ToolChains/BareMetal.h @@ -70,12 +70,17 @@ class LLVM_LIBRARY_VISIBILITY BareMetal : public ToolChain { std::string computeSysRoot() const override; SanitizerMask getSupportedSanitizers() const override; + SmallVector + getMultilibMacroDefinesStr(llvm::opt::ArgList &Args) const override; + private: using OrderedMultilibs = llvm::iterator_range::const_reverse_iterator>; OrderedMultilibs getOrderedMultilibs() const; std::string SysRoot; + + SmallVector MultilibMacroDefines; }; } // namespace toolchains diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp index e5dffb11d1a5e..84ef8199de049 100644 --- a/clang/lib/Driver/ToolChains/Darwin.cpp +++ b/clang/lib/Driver/ToolChains/Darwin.cpp @@ -910,7 +910,9 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(II.getFilename()); } - const char *Exec = 
Args.MakeArgString(getToolChain().GetProgramPath("lipo")); + StringRef LipoName = Args.getLastArgValue(options::OPT_fuse_lipo_EQ, "lipo"); + const char *Exec = + Args.MakeArgString(getToolChain().GetProgramPath(LipoName.data())); C.addCommand(std::make_unique(JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, Inputs, Output)); } diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h index 855f4a6d8133f..d1e4eb08aa764 100644 --- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h +++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h @@ -2468,6 +2468,105 @@ __attribute__((convergent)) double3 WaveReadLaneAt(double3, int32_t); _HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_read_lane_at) __attribute__((convergent)) double4 WaveReadLaneAt(double4, int32_t); +//===----------------------------------------------------------------------===// +// WaveActiveSum builtins +//===----------------------------------------------------------------------===// + +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) half WaveActiveSum(half); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) half2 WaveActiveSum(half2); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) half3 WaveActiveSum(half3); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) half4 WaveActiveSum(half4); + +#ifdef __HLSL_ENABLE_16_BIT +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int16_t WaveActiveSum(int16_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int16_t2 WaveActiveSum(int16_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) 
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int16_t3 WaveActiveSum(int16_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int16_t4 WaveActiveSum(int16_t4); + +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint16_t WaveActiveSum(uint16_t); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint16_t2 WaveActiveSum(uint16_t2); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint16_t3 WaveActiveSum(uint16_t3); +_HLSL_AVAILABILITY(shadermodel, 6.0) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint16_t4 WaveActiveSum(uint16_t4); +#endif + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int WaveActiveSum(int); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int2 WaveActiveSum(int2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int3 WaveActiveSum(int3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int4 WaveActiveSum(int4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint WaveActiveSum(uint); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint2 WaveActiveSum(uint2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint3 WaveActiveSum(uint3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint4 WaveActiveSum(uint4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int64_t WaveActiveSum(int64_t); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int64_t2 
WaveActiveSum(int64_t2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int64_t3 WaveActiveSum(int64_t3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) int64_t4 WaveActiveSum(int64_t4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint64_t WaveActiveSum(uint64_t); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint64_t2 WaveActiveSum(uint64_t2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint64_t3 WaveActiveSum(uint64_t3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) uint64_t4 WaveActiveSum(uint64_t4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) float WaveActiveSum(float); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) float2 WaveActiveSum(float2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) float3 WaveActiveSum(float3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) float4 WaveActiveSum(float4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) double WaveActiveSum(double); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) double2 WaveActiveSum(double2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) double3 WaveActiveSum(double3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_sum) +__attribute__((convergent)) double4 WaveActiveSum(double4); + //===----------------------------------------------------------------------===// // sign builtins //===----------------------------------------------------------------------===// diff --git a/clang/lib/Interpreter/CodeCompletion.cpp b/clang/lib/Interpreter/CodeCompletion.cpp index bbc8830d76bc0..aa90663538128 100644 --- a/clang/lib/Interpreter/CodeCompletion.cpp 
+++ b/clang/lib/Interpreter/CodeCompletion.cpp @@ -228,7 +228,8 @@ class ExternalSource : public clang::ExternalASTSource { ExternalSource(ASTContext &ChildASTCtxt, FileManager &ChildFM, ASTContext &ParentASTCtxt, FileManager &ParentFM); bool FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) override; + DeclarationName Name, + const DeclContext *OriginalDC) override; void completeVisibleDeclsMap(const clang::DeclContext *childDeclContext) override; }; @@ -270,8 +271,9 @@ ExternalSource::ExternalSource(ASTContext &ChildASTCtxt, FileManager &ChildFM, Importer.reset(importer); } -bool ExternalSource::FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) { +bool ExternalSource::FindExternalVisibleDeclsByName( + const DeclContext *DC, DeclarationName Name, + const DeclContext *OriginalDC) { IdentifierTable &ParentIdTable = ParentASTCtxt.Idents; diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp index 72364500a48f9..115b6c1606a02 100644 --- a/clang/lib/Lex/Lexer.cpp +++ b/clang/lib/Lex/Lexer.cpp @@ -1323,7 +1323,8 @@ const char *Lexer::SkipEscapedNewLines(const char *P) { std::optional Lexer::findNextToken(SourceLocation Loc, const SourceManager &SM, - const LangOptions &LangOpts) { + const LangOptions &LangOpts, + bool IncludeComments) { if (Loc.isMacroID()) { if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc)) return std::nullopt; @@ -1344,6 +1345,7 @@ std::optional Lexer::findNextToken(SourceLocation Loc, // Lex from the start of the given location. Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(), TokenBegin, File.end()); + lexer.SetCommentRetentionState(IncludeComments); // Find the token. 
Token Tok; lexer.LexFromRawLexer(Tok); diff --git a/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/clang/lib/Sema/MultiplexExternalSemaSource.cpp index 54944267b4868..6d945300c386c 100644 --- a/clang/lib/Sema/MultiplexExternalSemaSource.cpp +++ b/clang/lib/Sema/MultiplexExternalSemaSource.cpp @@ -107,11 +107,13 @@ MultiplexExternalSemaSource::hasExternalDefinitions(const Decl *D) { return EK_ReplyHazy; } -bool MultiplexExternalSemaSource:: -FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) { +bool MultiplexExternalSemaSource::FindExternalVisibleDeclsByName( + const DeclContext *DC, DeclarationName Name, + const DeclContext *OriginalDC) { bool AnyDeclsFound = false; for (size_t i = 0; i < Sources.size(); ++i) - AnyDeclsFound |= Sources[i]->FindExternalVisibleDeclsByName(DC, Name); + AnyDeclsFound |= + Sources[i]->FindExternalVisibleDeclsByName(DC, Name, OriginalDC); return AnyDeclsFound; } diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 92b0a48ba5589..a867ed73bd403 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -10757,6 +10757,22 @@ static void checkMethodTypeQualifiers(Sema &S, Declarator &D, unsigned DiagID) { } } +static void diagnoseInvalidDeclaratorChunks(Sema &S, Declarator &D, + unsigned Kind) { + if (D.isInvalidType() || D.getNumTypeObjects() <= 1) + return; + + DeclaratorChunk &Chunk = D.getTypeObject(D.getNumTypeObjects() - 1); + if (Chunk.Kind == DeclaratorChunk::Paren || + Chunk.Kind == DeclaratorChunk::Function) + return; + + SourceLocation PointerLoc = Chunk.getSourceRange().getBegin(); + S.Diag(PointerLoc, diag::err_invalid_ctor_dtor_decl) + << Kind << Chunk.getSourceRange(); + D.setInvalidType(); +} + QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass &SC) { bool isVirtual = D.getDeclSpec().isVirtualSpecified(); @@ -10792,6 +10808,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R, } 
checkMethodTypeQualifiers(*this, D, diag::err_invalid_qualified_constructor); + diagnoseInvalidDeclaratorChunks(*this, D, /*constructor*/ 0); // C++0x [class.ctor]p4: // A constructor shall not be declared with a ref-qualifier. @@ -10958,6 +10975,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R, } checkMethodTypeQualifiers(*this, D, diag::err_invalid_qualified_destructor); + diagnoseInvalidDeclaratorChunks(*this, D, /*destructor*/ 1); // C++0x [class.dtor]p2: // A destructor shall not be declared with a ref-qualifier. diff --git a/clang/lib/Sema/SemaFunctionEffects.cpp b/clang/lib/Sema/SemaFunctionEffects.cpp index 0d56a74b066e8..31980abd23fd1 100644 --- a/clang/lib/Sema/SemaFunctionEffects.cpp +++ b/clang/lib/Sema/SemaFunctionEffects.cpp @@ -567,7 +567,7 @@ class Analyzer { while (!VerificationQueue.empty()) { const Decl *D = VerificationQueue.back(); if (FuncAnalysisPtr AP = DeclAnalysis.lookup(D)) { - if (auto *Pending = AP.dyn_cast()) { + if (auto *Pending = dyn_cast(AP)) { // All children have been traversed; finish analysis. 
finishPendingAnalysis(D, Pending); } diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index fceb4e68bb3ff..238e19651dc6b 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -1859,7 +1859,24 @@ static bool CheckAnyScalarOrVector(Sema *S, CallExpr *TheCall, (VTy && VTy->getElementType()->isScalarType()))) { S->Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_typecheck_expect_any_scalar_or_vector) - << ArgType; + << ArgType << 1; + return true; + } + return false; +} + +static bool CheckWaveActive(Sema *S, CallExpr *TheCall) { + QualType BoolType = S->getASTContext().BoolTy; + assert(TheCall->getNumArgs() >= 1); + QualType ArgType = TheCall->getArg(0)->getType(); + auto *VTy = ArgType->getAs(); + // Diagnose when the argument is a bool or a vector of bool. + if (S->Context.hasSameUnqualifiedType(ArgType, BoolType) || + (VTy && + S->Context.hasSameUnqualifiedType(VTy->getElementType(), BoolType))) { + S->Diag(TheCall->getArg(0)->getBeginLoc(), + diag::err_typecheck_expect_any_scalar_or_vector) + << ArgType << 0; + return true; + } + return false; + } @@ -2156,6 +2173,20 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { TheCall->setType(ArgTyA); break; } + case Builtin::BI__builtin_hlsl_wave_active_sum: { + if (SemaRef.checkArgCount(TheCall, 1)) + return true; + + // Ensure input expr type is a scalar/vector and the same as the return type + if (CheckAnyScalarOrVector(&SemaRef, TheCall, 0)) + return true; + if (CheckWaveActive(&SemaRef, TheCall)) + return true; + ExprResult Expr = TheCall->getArg(0); + QualType ArgTyExpr = Expr.get()->getType(); + TheCall->setType(ArgTyExpr); + break; + } // Note these are llvm builtins that we want to catch invalid intrinsic // generation. Normal handling of these builitns will occur elsewhere. 
case Builtin::BI__builtin_elementwise_bitreverse: { diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp index d42c3765aa534..5f813ba3a597a 100644 --- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp +++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp @@ -996,7 +996,7 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef, F->getTemplateParameters()->size()); // FIXME: DeduceTemplateArguments stops immediately at the first - // non-deducible template argument. However, this doesn't seem to casue + // non-deducible template argument. However, this doesn't seem to cause // issues for practice cases, we probably need to extend it to continue // performing deduction for rest of arguments to align with the C++ // standard. @@ -1053,25 +1053,6 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef, TransformedDeducedAliasArgs[AliasTemplateParamIdx] = NewTemplateArgument; } unsigned FirstUndeducedParamIdx = FPrimeTemplateParams.size(); - // ...followed by the template parameters of f that were not deduced - // (including their default template arguments) - for (unsigned FTemplateParamIdx : NonDeducedTemplateParamsInFIndex) { - auto *TP = F->getTemplateParameters()->getParam(FTemplateParamIdx); - MultiLevelTemplateArgumentList Args; - Args.setKind(TemplateSubstitutionKind::Rewrite); - // We take a shortcut here, it is ok to reuse the - // TemplateArgsForBuildingFPrime. 
- Args.addOuterTemplateArguments(TemplateArgsForBuildingFPrime); - NamedDecl *NewParam = transformTemplateParameter( - SemaRef, F->getDeclContext(), TP, Args, FPrimeTemplateParams.size(), - getDepthAndIndex(TP).first); - FPrimeTemplateParams.push_back(NewParam); - - assert(TemplateArgsForBuildingFPrime[FTemplateParamIdx].isNull() && - "The argument must be null before setting"); - TemplateArgsForBuildingFPrime[FTemplateParamIdx] = - Context.getInjectedTemplateArg(NewParam); - } // To form a deduction guide f' from f, we leverage clang's instantiation // mechanism, we construct a template argument list where the template @@ -1080,24 +1061,21 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef, // f, this ensures all template parameter occurrences are updated // correctly. // - // The template argument list is formed from the `DeducedArgs`, two parts: - // 1) appeared template parameters of alias: transfrom the deduced - // template argument; - // 2) non-deduced template parameters of f: rebuild a - // template argument; + // The template argument list is formed, in order, from + // 1) For the template parameters of the alias, the corresponding deduced + // template arguments + // 2) For the non-deduced template parameters of f. the + // (rebuilt) template arguments corresponding. // - // 2) has been built already (when rebuilding the new template - // parameters), we now perform 1). + // Note: the non-deduced template arguments of `f` might refer to arguments + // deduced in 1), as in a type constraint. MultiLevelTemplateArgumentList Args; Args.setKind(TemplateSubstitutionKind::Rewrite); Args.addOuterTemplateArguments(TransformedDeducedAliasArgs); for (unsigned Index = 0; Index < DeduceResults.size(); ++Index) { const auto &D = DeduceResults[Index]; if (D.isNull()) { - // 2): Non-deduced template parameter has been built already. 
- assert(!TemplateArgsForBuildingFPrime[Index].isNull() && - "template arguments for non-deduced template parameters should " - "be been set!"); + // 2): Non-deduced template parameters would be substituted later. continue; } TemplateArgumentLoc Input = @@ -1110,6 +1088,27 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef, } } + // Case 2) + // ...followed by the template parameters of f that were not deduced + // (including their default template arguments) + for (unsigned FTemplateParamIdx : NonDeducedTemplateParamsInFIndex) { + auto *TP = F->getTemplateParameters()->getParam(FTemplateParamIdx); + MultiLevelTemplateArgumentList Args; + Args.setKind(TemplateSubstitutionKind::Rewrite); + // We take a shortcut here, it is ok to reuse the + // TemplateArgsForBuildingFPrime. + Args.addOuterTemplateArguments(TemplateArgsForBuildingFPrime); + NamedDecl *NewParam = transformTemplateParameter( + SemaRef, F->getDeclContext(), TP, Args, FPrimeTemplateParams.size(), + getDepthAndIndex(TP).first); + FPrimeTemplateParams.push_back(NewParam); + + assert(TemplateArgsForBuildingFPrime[FTemplateParamIdx].isNull() && + "The argument must be null before setting"); + TemplateArgsForBuildingFPrime[FTemplateParamIdx] = + Context.getInjectedTemplateArg(NewParam); + } + auto *TemplateArgListForBuildingFPrime = TemplateArgumentList::CreateCopy(Context, TemplateArgsForBuildingFPrime); // Form the f' by substituting the template arguments into f. 
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 7361cace49dd7..202227b195585 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -1235,7 +1235,7 @@ unsigned DeclarationNameKey::getHash() const { } ModuleFile * -ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) { +ASTDeclContextNameLookupTraitBase::ReadFileRef(const unsigned char *&d) { using namespace llvm::support; uint32_t ModuleFileID = @@ -1244,12 +1244,12 @@ ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) { } std::pair -ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char *&d) { +ASTDeclContextNameLookupTraitBase::ReadKeyDataLength(const unsigned char *&d) { return readULEBKeyDataLength(d); } -ASTDeclContextNameLookupTrait::internal_key_type -ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) { +DeclarationNameKey +ASTDeclContextNameLookupTraitBase::ReadKeyBase(const unsigned char *&d) { using namespace llvm::support; auto Kind = (DeclarationName::NameKind)*d++; @@ -1283,10 +1283,13 @@ ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) { return DeclarationNameKey(Kind, Data); } -void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type, - const unsigned char *d, - unsigned DataLen, - data_type_builder &Val) { +ASTDeclContextNameLookupTrait::internal_key_type +ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) { + return ReadKeyBase(d); +} + +void ASTDeclContextNameLookupTraitBase::ReadDataIntoImpl( + const unsigned char *d, unsigned DataLen, data_type_builder &Val) { using namespace llvm::support; for (unsigned NumDecls = DataLen / sizeof(DeclID); NumDecls; --NumDecls) { @@ -1296,6 +1299,47 @@ void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type, } } +void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type, + const unsigned char *d, + unsigned DataLen, + 
data_type_builder &Val) { + ReadDataIntoImpl(d, DataLen, Val); +} + +ModuleLocalNameLookupTrait::hash_value_type +ModuleLocalNameLookupTrait::ComputeHash(const internal_key_type &Key) { + llvm::FoldingSetNodeID ID; + ID.AddInteger(Key.first.getHash()); + ID.AddInteger(Key.second); + return ID.computeStableHash(); +} + +ModuleLocalNameLookupTrait::internal_key_type +ModuleLocalNameLookupTrait::GetInternalKey(const external_key_type &Key) { + DeclarationNameKey Name(Key.first); + + std::optional ModuleHash = getPrimaryModuleHash(Key.second); + if (!ModuleHash) + return {Name, 0}; + + return {Name, *ModuleHash}; +} + +ModuleLocalNameLookupTrait::internal_key_type +ModuleLocalNameLookupTrait::ReadKey(const unsigned char *d, unsigned) { + DeclarationNameKey Name = ReadKeyBase(d); + unsigned PrimaryModuleHash = + llvm::support::endian::readNext(d); + return {Name, PrimaryModuleHash}; +} + +void ModuleLocalNameLookupTrait::ReadDataInto(internal_key_type, + const unsigned char *d, + unsigned DataLen, + data_type_builder &Val) { + ReadDataIntoImpl(d, DataLen, Val); +} + ModuleFile * LazySpecializationInfoLookupTrait::ReadFileRef(const unsigned char *&d) { using namespace llvm::support; @@ -1383,8 +1427,8 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M, bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M, BitstreamCursor &Cursor, - uint64_t Offset, - GlobalDeclID ID) { + uint64_t Offset, GlobalDeclID ID, + bool IsModuleLocal) { assert(Offset != 0); SavedStreamPosition SavedPosition(Cursor); @@ -1408,15 +1452,22 @@ bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M, return true; } unsigned RecCode = MaybeRecCode.get(); - if (RecCode != DECL_CONTEXT_VISIBLE) { + if (!IsModuleLocal && RecCode != DECL_CONTEXT_VISIBLE) { Error("Expected visible lookup table block"); return true; } + if (IsModuleLocal && RecCode != DECL_CONTEXT_MODULE_LOCAL_VISIBLE) { + Error("Expected module local visible lookup table block"); + return true; + } // We can't safely 
determine the primary context yet, so delay attaching the // lookup table until we're done with recursive deserialization. auto *Data = (const unsigned char*)Blob.data(); - PendingVisibleUpdates[ID].push_back(UpdateData{&M, Data}); + if (!IsModuleLocal) + PendingVisibleUpdates[ID].push_back(UpdateData{&M, Data}); + else + PendingModuleLocalVisibleUpdates[ID].push_back(UpdateData{&M, Data}); return false; } @@ -3549,6 +3600,19 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F, break; } + case UPDATE_MODULE_LOCAL_VISIBLE: { + unsigned Idx = 0; + GlobalDeclID ID = ReadDeclID(F, Record, Idx); + auto *Data = (const unsigned char *)Blob.data(); + PendingModuleLocalVisibleUpdates[ID].push_back(UpdateData{&F, Data}); + // If we've already loaded the decl, perform the updates when we finish + // loading this block. + if (Decl *D = GetExistingDecl(ID)) + PendingUpdateRecords.push_back( + PendingUpdateRecord(ID, D, /*JustLoaded=*/false)); + break; + } + case CXX_ADDED_TEMPLATE_SPECIALIZATION: { unsigned Idx = 0; GlobalDeclID ID = ReadDeclID(F, Record, Idx); @@ -3652,6 +3716,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F, TotalNumMacros += Record[1]; TotalLexicalDeclContexts += Record[2]; TotalVisibleDeclContexts += Record[3]; + TotalModuleLocalVisibleDeclContexts += Record[4]; break; case UNUSED_FILESCOPED_DECLS: @@ -3937,7 +4002,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F, break; case DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD: { - if (Record.size() % 3 != 0) + if (Record.size() % 4 != 0) return llvm::createStringError( std::errc::illegal_byte_sequence, "invalid DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD block in AST " @@ -3953,8 +4018,12 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F, uint64_t LocalVisibleOffset = Record[I++]; uint64_t VisibleOffset = LocalVisibleOffset ? BaseOffset + LocalVisibleOffset : 0; + uint64_t LocalModuleLocalOffset = Record[I++]; + uint64_t ModuleLocalOffset = + LocalModuleLocalOffset ? 
BaseOffset + LocalModuleLocalOffset : 0; - DelayedNamespaceOffsetMap[ID] = {LexicalOffset, VisibleOffset}; + DelayedNamespaceOffsetMap[ID] = {LexicalOffset, VisibleOffset, + ModuleLocalOffset}; assert(!GetExistingDecl(ID) && "We shouldn't load the namespace in the front of delayed " @@ -8366,31 +8435,44 @@ void ASTReader::FindFileRegionDecls(FileID File, *DInfo.Mod, LocalDeclID::get(*this, *DInfo.Mod, *DIt)))); } -bool -ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC, - DeclarationName Name) { +bool ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC, + DeclarationName Name, + const DeclContext *OriginalDC) { assert(DC->hasExternalVisibleStorage() && DC == DC->getPrimaryContext() && "DeclContext has no visible decls in storage"); if (!Name) return false; - auto It = Lookups.find(DC); - if (It == Lookups.end()) - return false; - - Deserializing LookupResults(this); - // Load the list of declarations. SmallVector Decls; llvm::SmallPtrSet Found; - for (GlobalDeclID ID : It->second.Table.find(Name)) { - NamedDecl *ND = cast(GetDecl(ID)); - if (ND->getDeclName() == Name && Found.insert(ND).second) - Decls.push_back(ND); + Deserializing LookupResults(this); + + // FIXME: Clear the redundancy with templated lambda in C++20 when that's + // available. + if (auto It = Lookups.find(DC); It != Lookups.end()) { + ++NumVisibleDeclContextsRead; + for (GlobalDeclID ID : It->second.Table.find(Name)) { + NamedDecl *ND = cast(GetDecl(ID)); + if (ND->getDeclName() == Name && Found.insert(ND).second) + Decls.push_back(ND); + } + } + + if (auto *NamedModule = + OriginalDC ? 
cast(OriginalDC)->getTopLevelOwningNamedModule() + : nullptr) { + if (auto It = ModuleLocalLookups.find(DC); It != ModuleLocalLookups.end()) { + ++NumModuleLocalVisibleDeclContexts; + for (GlobalDeclID ID : It->second.Table.find({Name, NamedModule})) { + NamedDecl *ND = cast(GetDecl(ID)); + if (ND->getDeclName() == Name && Found.insert(ND).second) + Decls.push_back(ND); + } + } } - ++NumVisibleDeclContextsRead; SetExternalVisibleDeclsForName(DC, Name, Decls); return !Decls.empty(); } @@ -8399,18 +8481,25 @@ void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) { if (!DC->hasExternalVisibleStorage()) return; - auto It = Lookups.find(DC); - assert(It != Lookups.end() && - "have external visible storage but no lookup tables"); - DeclsMap Decls; - for (GlobalDeclID ID : It->second.Table.findAll()) { - NamedDecl *ND = cast(GetDecl(ID)); - Decls[ND->getDeclName()].push_back(ND); - } + auto findAll = [&](auto &LookupTables, unsigned &NumRead) { + auto It = LookupTables.find(DC); + if (It == LookupTables.end()) + return; + + NumRead++; - ++NumVisibleDeclContextsRead; + for (GlobalDeclID ID : It->second.Table.findAll()) { + NamedDecl *ND = cast(GetDecl(ID)); + Decls[ND->getDeclName()].push_back(ND); + } + + // FIXME: Why a PCH test is failing if we remove the iterator after findAll? + }; + + findAll(Lookups, NumVisibleDeclContextsRead); + findAll(ModuleLocalLookups, NumModuleLocalVisibleDeclContexts); for (DeclsMap::iterator I = Decls.begin(), E = Decls.end(); I != E; ++I) { SetExternalVisibleDeclsForName(DC, I->first, I->second); @@ -8424,6 +8513,12 @@ ASTReader::getLoadedLookupTables(DeclContext *Primary) const { return I == Lookups.end() ? nullptr : &I->second; } +const serialization::reader::ModuleLocalLookupTable * +ASTReader::getModuleLocalLookupTables(DeclContext *Primary) const { + auto I = ModuleLocalLookups.find(Primary); + return I == ModuleLocalLookups.end() ? 
nullptr : &I->second; +} + serialization::reader::LazySpecializationInfoLookupTable * ASTReader::getLoadedSpecializationsLookupTables(const Decl *D, bool IsPartial) { assert(D->isCanonicalDecl()); @@ -8533,6 +8628,12 @@ void ASTReader::PrintStats() { NumVisibleDeclContextsRead, TotalVisibleDeclContexts, ((float)NumVisibleDeclContextsRead/TotalVisibleDeclContexts * 100)); + if (TotalModuleLocalVisibleDeclContexts) + std::fprintf( + stderr, " %u/%u module local visible declcontexts read (%f%%)\n", + NumModuleLocalVisibleDeclContexts, TotalModuleLocalVisibleDeclContexts, + ((float)NumModuleLocalVisibleDeclContexts / + TotalModuleLocalVisibleDeclContexts * 100)); if (TotalNumMethodPoolEntries) std::fprintf(stderr, " %u/%u method pool entries read (%f%%)\n", NumMethodPoolEntriesRead, TotalNumMethodPoolEntries, @@ -12639,3 +12740,25 @@ void ASTRecordReader::readOpenACCClauseList( for (unsigned I = 0; I < Clauses.size(); ++I) Clauses[I] = readOpenACCClause(); } + +static unsigned getStableHashForModuleName(StringRef PrimaryModuleName) { + // TODO: Maybe it is better to check that PrimaryModuleName is a valid + // module name? 
+ llvm::FoldingSetNodeID ID; + ID.AddString(PrimaryModuleName); + return ID.computeStableHash(); +} + +std::optional clang::getPrimaryModuleHash(const Module *M) { + if (!M) + return std::nullopt; + + if (M->isHeaderLikeModule()) + return std::nullopt; + + if (M->isGlobalModule()) + return std::nullopt; + + StringRef PrimaryModuleName = M->getPrimaryModuleInterfaceName(); + return getStableHashForModuleName(PrimaryModuleName); +} diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp index 95abd75920c8f..1c51a7b5e460f 100644 --- a/clang/lib/Serialization/ASTReaderDecl.cpp +++ b/clang/lib/Serialization/ASTReaderDecl.cpp @@ -413,7 +413,8 @@ class ASTDeclReader : public DeclVisitor { void VisitEmptyDecl(EmptyDecl *D); void VisitLifetimeExtendedTemporaryDecl(LifetimeExtendedTemporaryDecl *D); - std::pair VisitDeclContext(DeclContext *DC); + void VisitDeclContext(DeclContext *DC, uint64_t &LexicalOffset, + uint64_t &VisibleOffset, uint64_t &ModuleLocalOffset); template RedeclarableResult VisitRedeclarable(Redeclarable *D); @@ -1855,7 +1856,10 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) { void ASTDeclReader::VisitHLSLBufferDecl(HLSLBufferDecl *D) { VisitNamedDecl(D); - VisitDeclContext(D); + uint64_t LexicalOffset = 0; + uint64_t VisibleOffset = 0; + uint64_t ModuleLocalOffset = 0; + VisitDeclContext(D, LexicalOffset, VisibleOffset, ModuleLocalOffset); D->IsCBuffer = Record.readBool(); D->KwLoc = readSourceLocation(); D->LBraceLoc = readSourceLocation(); @@ -2764,11 +2768,12 @@ void ASTDeclReader::VisitLifetimeExtendedTemporaryDecl( mergeMergeable(D); } -std::pair -ASTDeclReader::VisitDeclContext(DeclContext *DC) { - uint64_t LexicalOffset = ReadLocalOffset(); - uint64_t VisibleOffset = ReadLocalOffset(); - return std::make_pair(LexicalOffset, VisibleOffset); +void ASTDeclReader::VisitDeclContext(DeclContext *DC, uint64_t &LexicalOffset, + uint64_t &VisibleOffset, + uint64_t &ModuleLocalOffset) { + LexicalOffset 
= ReadLocalOffset(); + VisibleOffset = ReadLocalOffset(); + ModuleLocalOffset = ReadLocalOffset(); } template @@ -3869,6 +3874,7 @@ Decl *ASTReader::ReadDeclRecord(GlobalDeclID ID) { switch ((DeclCode)MaybeDeclCode.get()) { case DECL_CONTEXT_LEXICAL: case DECL_CONTEXT_VISIBLE: + case DECL_CONTEXT_MODULE_LOCAL_VISIBLE: case DECL_SPECIALIZATIONS: case DECL_PARTIAL_SPECIALIZATIONS: llvm_unreachable("Record cannot be de-serialized with readDeclRecord"); @@ -4176,21 +4182,35 @@ Decl *ASTReader::ReadDeclRecord(GlobalDeclID ID) { // If this declaration is also a declaration context, get the // offsets for its tables of lexical and visible declarations. if (auto *DC = dyn_cast(D)) { - std::pair Offsets = Reader.VisitDeclContext(DC); + uint64_t LexicalOffset = 0; + uint64_t VisibleOffset = 0; + uint64_t ModuleLocalOffset = 0; + + Reader.VisitDeclContext(DC, LexicalOffset, VisibleOffset, + ModuleLocalOffset); // Get the lexical and visible block for the delayed namespace. // It is sufficient to judge if ID is in DelayedNamespaceOffsetMap. // But it may be more efficient to filter the other cases. 
- if (!Offsets.first && !Offsets.second && isa(D)) + if (!LexicalOffset && !VisibleOffset && !ModuleLocalOffset && + isa(D)) if (auto Iter = DelayedNamespaceOffsetMap.find(ID); - Iter != DelayedNamespaceOffsetMap.end()) - Offsets = Iter->second; + Iter != DelayedNamespaceOffsetMap.end()) { + LexicalOffset = Iter->second.LexicalOffset; + VisibleOffset = Iter->second.VisibleOffset; + ModuleLocalOffset = Iter->second.ModuleLocalOffset; + } - if (Offsets.first && - ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC)) + if (LexicalOffset && + ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, LexicalOffset, DC)) + return nullptr; + if (VisibleOffset && + ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, VisibleOffset, ID, + /*IsModuleLocal=*/false)) return nullptr; - if (Offsets.second && - ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, Offsets.second, ID)) + if (ModuleLocalOffset && + ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, ModuleLocalOffset, + ID, /*IsModuleLocal=*/true)) return nullptr; } assert(Record.getIdx() == Record.size()); @@ -4328,8 +4348,8 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) { } // Load the pending visible updates for this decl context, if it has any. 
- auto I = PendingVisibleUpdates.find(ID); - if (I != PendingVisibleUpdates.end()) { + if (auto I = PendingVisibleUpdates.find(ID); + I != PendingVisibleUpdates.end()) { auto VisibleUpdates = std::move(I->second); PendingVisibleUpdates.erase(I); @@ -4341,6 +4361,21 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) { DC->setHasExternalVisibleStorage(true); } + if (auto I = PendingModuleLocalVisibleUpdates.find(ID); + I != PendingModuleLocalVisibleUpdates.end()) { + auto ModuleLocalVisibleUpdates = std::move(I->second); + PendingModuleLocalVisibleUpdates.erase(I); + + auto *DC = cast(D)->getPrimaryContext(); + for (const auto &Update : ModuleLocalVisibleUpdates) + ModuleLocalLookups[DC].Table.add( + Update.Mod, Update.Data, + reader::ModuleLocalNameLookupTrait(*this, *Update.Mod)); + // NOTE: Can we optimize the case that the data being loaded + // is not related to current module? + DC->setHasExternalVisibleStorage(true); + } + // Load any pending related decls. if (D->isCanonicalDecl()) { if (auto IT = RelatedDeclsMap.find(ID); IT != RelatedDeclsMap.end()) { diff --git a/clang/lib/Serialization/ASTReaderInternals.h b/clang/lib/Serialization/ASTReaderInternals.h index be0d22d1f4094..4be2b2323ec40 100644 --- a/clang/lib/Serialization/ASTReaderInternals.h +++ b/clang/lib/Serialization/ASTReaderInternals.h @@ -31,6 +31,7 @@ class FileEntry; struct HeaderFileInfo; class HeaderSearch; class ObjCMethodDecl; +class Module; namespace serialization { @@ -38,9 +39,8 @@ class ModuleFile; namespace reader { -/// Class that performs name lookup into a DeclContext stored -/// in an AST file. 
-class ASTDeclContextNameLookupTrait { +class ASTDeclContextNameLookupTraitBase { +protected: ASTReader &Reader; ModuleFile &F; @@ -80,11 +80,37 @@ class ASTDeclContextNameLookupTrait { using offset_type = unsigned; using file_type = ModuleFile *; - using external_key_type = DeclarationName; - using internal_key_type = DeclarationNameKey; +protected: + explicit ASTDeclContextNameLookupTraitBase(ASTReader &Reader, ModuleFile &F) + : Reader(Reader), F(F) {} + +public: + static std::pair + ReadKeyDataLength(const unsigned char *&d); + + void ReadDataIntoImpl(const unsigned char *d, unsigned DataLen, + data_type_builder &Val); + + static void MergeDataInto(const data_type &From, data_type_builder &To) { + To.Data.reserve(To.Data.size() + From.size()); + for (GlobalDeclID ID : From) + To.insert(ID); + } + + file_type ReadFileRef(const unsigned char *&d); + + DeclarationNameKey ReadKeyBase(const unsigned char *&d); +}; +/// Class that performs name lookup into a DeclContext stored +/// in an AST file. 
+class ASTDeclContextNameLookupTrait : public ASTDeclContextNameLookupTraitBase { +public: explicit ASTDeclContextNameLookupTrait(ASTReader &Reader, ModuleFile &F) - : Reader(Reader), F(F) {} + : ASTDeclContextNameLookupTraitBase(Reader, F) {} + + using external_key_type = DeclarationName; + using internal_key_type = DeclarationNameKey; static bool EqualKey(const internal_key_type &a, const internal_key_type &b) { return a == b; @@ -98,25 +124,39 @@ class ASTDeclContextNameLookupTrait { return Name; } - static std::pair - ReadKeyDataLength(const unsigned char *&d); - internal_key_type ReadKey(const unsigned char *d, unsigned); void ReadDataInto(internal_key_type, const unsigned char *d, unsigned DataLen, data_type_builder &Val); +}; - static void MergeDataInto(const data_type &From, data_type_builder &To) { - To.Data.reserve(To.Data.size() + From.size()); - for (GlobalDeclID ID : From) - To.insert(ID); +struct DeclContextLookupTable { + MultiOnDiskHashTable Table; +}; + +class ModuleLocalNameLookupTrait : public ASTDeclContextNameLookupTraitBase { +public: + explicit ModuleLocalNameLookupTrait(ASTReader &Reader, ModuleFile &F) + : ASTDeclContextNameLookupTraitBase(Reader, F) {} + + using external_key_type = std::pair; + using internal_key_type = std::pair; + + static bool EqualKey(const internal_key_type &a, const internal_key_type &b) { + return a == b; } - file_type ReadFileRef(const unsigned char *&d); + static hash_value_type ComputeHash(const internal_key_type &Key); + static internal_key_type GetInternalKey(const external_key_type &Key); + + internal_key_type ReadKey(const unsigned char *d, unsigned); + + void ReadDataInto(internal_key_type, const unsigned char *d, unsigned DataLen, + data_type_builder &Val); }; -struct DeclContextLookupTable { - MultiOnDiskHashTable Table; +struct ModuleLocalLookupTable { + MultiOnDiskHashTable Table; }; using LazySpecializationInfo = GlobalDeclID; diff --git a/clang/lib/Serialization/ASTWriter.cpp 
b/clang/lib/Serialization/ASTWriter.cpp index 0ae2157eed4ec..55d3c2bb56f2c 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -1088,6 +1088,7 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(DECL_BLOCK); RECORD(DECL_CONTEXT_LEXICAL); RECORD(DECL_CONTEXT_VISIBLE); + RECORD(DECL_CONTEXT_MODULE_LOCAL_VISIBLE); RECORD(DECL_NAMESPACE); RECORD(DECL_NAMESPACE_ALIAS); RECORD(DECL_USING); @@ -4026,15 +4027,13 @@ void ASTWriter::handleVTable(CXXRecordDecl *RD) { namespace { -// Trait used for the on-disk hash table used in the method pool. -class ASTDeclContextNameLookupTrait { +class ASTDeclContextNameLookupTraitBase { +protected: ASTWriter &Writer; - llvm::SmallVector DeclIDs; + using DeclIDsTy = llvm::SmallVector; + DeclIDsTy DeclIDs; public: - using key_type = DeclarationNameKey; - using key_type_ref = key_type; - /// A start and end index into DeclIDs, representing a sequence of decls. using data_type = std::pair; using data_type_ref = const data_type &; @@ -4042,31 +4041,11 @@ class ASTDeclContextNameLookupTrait { using hash_value_type = unsigned; using offset_type = unsigned; - explicit ASTDeclContextNameLookupTrait(ASTWriter &Writer) : Writer(Writer) {} - - template - data_type getData(const Coll &Decls) { - unsigned Start = DeclIDs.size(); - for (NamedDecl *D : Decls) { - NamedDecl *DeclForLocalLookup = - getDeclForLocalLookup(Writer.getLangOpts(), D); - - if (Writer.getDoneWritingDeclsAndTypes() && - !Writer.wasDeclEmitted(DeclForLocalLookup)) - continue; - - // Try to avoid writing internal decls to reduced BMI. - // See comments in ASTWriter::WriteDeclContextLexicalBlock for details. 
- if (Writer.isGeneratingReducedBMI() && - !DeclForLocalLookup->isFromExplicitGlobalModule() && - IsInternalDeclFromFileContext(DeclForLocalLookup)) - continue; - - DeclIDs.push_back(Writer.GetDeclRef(DeclForLocalLookup)); - } - return std::make_pair(Start, DeclIDs.size()); - } +protected: + explicit ASTDeclContextNameLookupTraitBase(ASTWriter &Writer) + : Writer(Writer) {} +public: data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) { unsigned Start = DeclIDs.size(); DeclIDs.insert( @@ -4076,14 +4055,6 @@ class ASTDeclContextNameLookupTrait { return std::make_pair(Start, DeclIDs.size()); } - static bool EqualKey(key_type_ref a, key_type_ref b) { - return a == b; - } - - hash_value_type ComputeHash(DeclarationNameKey Name) { - return Name.getHash(); - } - void EmitFileRef(raw_ostream &Out, ModuleFile *F) const { assert(Writer.hasChain() && "have reference to loaded module file but no chain?"); @@ -4094,9 +4065,9 @@ class ASTDeclContextNameLookupTrait { llvm::endianness::little); } - std::pair EmitKeyDataLength(raw_ostream &Out, - DeclarationNameKey Name, - data_type_ref Lookup) { + std::pair EmitKeyDataLengthBase(raw_ostream &Out, + DeclarationNameKey Name, + data_type_ref Lookup) { unsigned KeyLen = 1; switch (Name.getKind()) { case DeclarationName::Identifier: @@ -4122,10 +4093,10 @@ class ASTDeclContextNameLookupTrait { // length of DeclIDs. 
unsigned DataLen = sizeof(DeclID) * (Lookup.second - Lookup.first); - return emitULEBKeyDataLength(KeyLen, DataLen, Out); + return {KeyLen, DataLen}; } - void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) { + void EmitKeyBase(raw_ostream &Out, DeclarationNameKey Name) { using namespace llvm::support; endian::Writer LE(Out, llvm::endianness::little); @@ -4156,8 +4127,7 @@ class ASTDeclContextNameLookupTrait { llvm_unreachable("Invalid name kind?"); } - void EmitData(raw_ostream &Out, key_type_ref, data_type Lookup, - unsigned DataLen) { + void EmitDataBase(raw_ostream &Out, data_type Lookup, unsigned DataLen) { using namespace llvm::support; endian::Writer LE(Out, llvm::endianness::little); @@ -4168,6 +4138,148 @@ class ASTDeclContextNameLookupTrait { } }; +class ModuleLocalNameLookupTrait : public ASTDeclContextNameLookupTraitBase { +public: + using primary_module_hash_type = unsigned; + + using key_type = std::pair; + using key_type_ref = key_type; + + explicit ModuleLocalNameLookupTrait(ASTWriter &Writer) + : ASTDeclContextNameLookupTraitBase(Writer) {} + + data_type getData(const DeclIDsTy &LocalIDs) { + unsigned Start = DeclIDs.size(); + for (auto ID : LocalIDs) + DeclIDs.push_back(ID); + return std::make_pair(Start, DeclIDs.size()); + } + + static bool EqualKey(key_type_ref a, key_type_ref b) { return a == b; } + + hash_value_type ComputeHash(key_type Key) { + llvm::FoldingSetNodeID ID; + ID.AddInteger(Key.first.getHash()); + ID.AddInteger(Key.second); + return ID.computeStableHash(); + } + + std::pair + EmitKeyDataLength(raw_ostream &Out, key_type Key, data_type_ref Lookup) { + auto [KeyLen, DataLen] = EmitKeyDataLengthBase(Out, Key.first, Lookup); + KeyLen += sizeof(Key.second); + return emitULEBKeyDataLength(KeyLen, DataLen, Out); + } + + void EmitKey(raw_ostream &Out, key_type Key, unsigned) { + EmitKeyBase(Out, Key.first); + llvm::support::endian::Writer LE(Out, llvm::endianness::little); + LE.write(Key.second); + } + + void 
EmitData(raw_ostream &Out, key_type_ref, data_type Lookup, + unsigned DataLen) { + EmitDataBase(Out, Lookup, DataLen); + } +}; + +static bool isModuleLocalDecl(NamedDecl *D) { + // For decls not in a file context, they should have the same visibility + // with their parent. + if (auto *Parent = dyn_cast(D->getNonTransparentDeclContext()); + Parent && !D->getNonTransparentDeclContext()->isFileContext()) + return isModuleLocalDecl(Parent); + + // Deduction Guide are special here. Since their logical parent context are + // not their actual parent. + if (auto *FTD = dyn_cast(D)) + if (auto *CDGD = dyn_cast(FTD->getTemplatedDecl())) + return isModuleLocalDecl(CDGD->getDeducedTemplate()); + + if (D->getFormalLinkage() == Linkage::Module) + return true; + + return false; +} + +// Trait used for the on-disk hash table used in the method pool. +class ASTDeclContextNameLookupTrait : public ASTDeclContextNameLookupTraitBase { +public: + using ModuleLocalDeclsMapTy = + llvm::DenseMap; + +private: + ModuleLocalDeclsMapTy ModuleLocalDeclsMap; + +public: + using key_type = DeclarationNameKey; + using key_type_ref = key_type; + + explicit ASTDeclContextNameLookupTrait(ASTWriter &Writer) + : ASTDeclContextNameLookupTraitBase(Writer) {} + + template data_type getData(const Coll &Decls) { + unsigned Start = DeclIDs.size(); + for (NamedDecl *D : Decls) { + NamedDecl *DeclForLocalLookup = + getDeclForLocalLookup(Writer.getLangOpts(), D); + + if (Writer.getDoneWritingDeclsAndTypes() && + !Writer.wasDeclEmitted(DeclForLocalLookup)) + continue; + + // Try to avoid writing internal decls to reduced BMI. + // See comments in ASTWriter::WriteDeclContextLexicalBlock for details. 
+ if (Writer.isGeneratingReducedBMI() && + !DeclForLocalLookup->isFromExplicitGlobalModule() && + IsInternalDeclFromFileContext(DeclForLocalLookup)) + continue; + + auto ID = Writer.GetDeclRef(DeclForLocalLookup); + + if (isModuleLocalDecl(D)) { + if (std::optional PrimaryModuleHash = + getPrimaryModuleHash(D->getOwningModule())) { + auto Key = std::make_pair(D->getDeclName(), *PrimaryModuleHash); + auto Iter = ModuleLocalDeclsMap.find(Key); + if (Iter == ModuleLocalDeclsMap.end()) + ModuleLocalDeclsMap.insert({Key, DeclIDsTy{ID}}); + else + Iter->second.push_back(ID); + continue; + } + } + + DeclIDs.push_back(ID); + } + return std::make_pair(Start, DeclIDs.size()); + } + + const ModuleLocalDeclsMapTy &getModuleLocalDecls() { + return ModuleLocalDeclsMap; + } + + static bool EqualKey(key_type_ref a, key_type_ref b) { return a == b; } + + hash_value_type ComputeHash(key_type Name) { return Name.getHash(); } + + std::pair EmitKeyDataLength(raw_ostream &Out, + DeclarationNameKey Name, + data_type_ref Lookup) { + auto [KeyLen, DataLen] = EmitKeyDataLengthBase(Out, Name, Lookup); + return emitULEBKeyDataLength(KeyLen, DataLen, Out); + } + + void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) { + return EmitKeyBase(Out, Name); + } + + void EmitData(raw_ostream &Out, key_type_ref, data_type Lookup, + unsigned DataLen) { + EmitDataBase(Out, Lookup, DataLen); + } +}; + } // namespace namespace { @@ -4373,7 +4485,8 @@ static bool isLookupResultNotInteresting(ASTWriter &Writer, void ASTWriter::GenerateNameLookupTable( ASTContext &Context, const DeclContext *ConstDC, - llvm::SmallVectorImpl &LookupTable) { + llvm::SmallVectorImpl &LookupTable, + llvm::SmallVectorImpl &ModuleLocalLookupTable) { assert(!ConstDC->hasLazyLocalLexicalLookups() && !ConstDC->hasLazyExternalLexicalLookups() && "must call buildLookups first"); @@ -4555,6 +4668,28 @@ void ASTWriter::GenerateNameLookupTable( // merged table if there is one. auto *Lookups = Chain ? 
Chain->getLoadedLookupTables(DC) : nullptr; Generator.emit(LookupTable, Trait, Lookups ? &Lookups->Table : nullptr); + + const auto &ModuleLocalDecls = Trait.getModuleLocalDecls(); + if (ModuleLocalDecls.empty()) + return; + + MultiOnDiskHashTableGenerator + ModuleLocalLookupGenerator; + ModuleLocalNameLookupTrait ModuleLocalTrait(*this); + + for (const auto &ModuleLocalIter : ModuleLocalDecls) { + const auto &Key = ModuleLocalIter.first; + const auto &IDs = ModuleLocalIter.second; + ModuleLocalLookupGenerator.insert(Key, ModuleLocalTrait.getData(IDs), + ModuleLocalTrait); + } + + auto *ModuleLocalLookups = + Chain ? Chain->getModuleLocalLookupTables(DC) : nullptr; + ModuleLocalLookupGenerator.emit( + ModuleLocalLookupTable, ModuleLocalTrait, + ModuleLocalLookups ? &ModuleLocalLookups->Table : nullptr); } /// Write the block containing all of the declaration IDs @@ -4562,8 +4697,10 @@ void ASTWriter::GenerateNameLookupTable( /// /// \returns the offset of the DECL_CONTEXT_VISIBLE block within the /// bitstream, or 0 if no block was written. -uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, - DeclContext *DC) { +void ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, + DeclContext *DC, + uint64_t &VisibleBlockOffset, + uint64_t &ModuleLocalBlockOffset) { // If we imported a key declaration of this namespace, write the visible // lookup results as an update record for it rather than including them // on this declaration. We will only look at key declarations on reload. @@ -4573,7 +4710,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, for (auto *Prev = cast(DC)->getPreviousDecl(); Prev; Prev = Prev->getPreviousDecl()) if (!Prev->isFromASTFile()) - return 0; + return; // Note that we need to emit an update record for the primary context. 
UpdatedDeclContexts.insert(DC->getPrimaryContext()); @@ -4622,41 +4759,53 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, } } - return 0; + return; } if (DC->getPrimaryContext() != DC) - return 0; + return; // Skip contexts which don't support name lookup. if (!DC->isLookupContext()) - return 0; + return; // If not in C++, we perform name lookup for the translation unit via the // IdentifierInfo chains, don't bother to build a visible-declarations table. if (DC->isTranslationUnit() && !Context.getLangOpts().CPlusPlus) - return 0; + return; // Serialize the contents of the mapping used for lookup. Note that, // although we have two very different code paths, the serialized // representation is the same for both cases: a declaration name, // followed by a size, followed by references to the visible // declarations that have that name. - uint64_t Offset = Stream.GetCurrentBitNo(); StoredDeclsMap *Map = DC->buildLookup(); if (!Map || Map->empty()) - return 0; + return; + VisibleBlockOffset = Stream.GetCurrentBitNo(); // Create the on-disk hash table in a buffer. SmallString<4096> LookupTable; - GenerateNameLookupTable(Context, DC, LookupTable); + SmallString<4096> ModuleLocalLookupTable; + GenerateNameLookupTable(Context, DC, LookupTable, ModuleLocalLookupTable); // Write the lookup table RecordData::value_type Record[] = {DECL_CONTEXT_VISIBLE}; Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record, LookupTable); ++NumVisibleDeclContexts; - return Offset; + + if (ModuleLocalLookupTable.empty()) + return; + + ModuleLocalBlockOffset = Stream.GetCurrentBitNo(); + assert(ModuleLocalBlockOffset > VisibleBlockOffset); + // Write the lookup table + RecordData::value_type ModuleLocalRecord[] = { + DECL_CONTEXT_MODULE_LOCAL_VISIBLE}; + Stream.EmitRecordWithBlob(DeclModuleLocalVisibleLookupAbbrev, + ModuleLocalRecord, ModuleLocalLookupTable); + ++NumModuleLocalDeclContexts; } /// Write an UPDATE_VISIBLE block for the given context. 
@@ -4673,7 +4822,8 @@ void ASTWriter::WriteDeclContextVisibleUpdate(ASTContext &Context, // Create the on-disk hash table in a buffer. SmallString<4096> LookupTable; - GenerateNameLookupTable(Context, DC, LookupTable); + SmallString<4096> ModuleLocalLookupTable; + GenerateNameLookupTable(Context, DC, LookupTable, ModuleLocalLookupTable); // If we're updating a namespace, select a key declaration as the key for the // update record; those are the only ones that will be checked on reload. @@ -4684,6 +4834,15 @@ void ASTWriter::WriteDeclContextVisibleUpdate(ASTContext &Context, RecordData::value_type Record[] = {UPDATE_VISIBLE, getDeclID(cast(DC)).getRawValue()}; Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable); + + if (ModuleLocalLookupTable.empty()) + return; + + // Write the module local lookup table + RecordData::value_type ModuleLocalRecord[] = { + UPDATE_MODULE_LOCAL_VISIBLE, getDeclID(cast(DC)).getRawValue()}; + Stream.EmitRecordWithBlob(ModuleLocalUpdateVisibleAbbrev, ModuleLocalRecord, + ModuleLocalLookupTable); } /// Write an FP_PRAGMA_OPTIONS block for the given FPOptions. 
@@ -5867,7 +6026,8 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema *SemaPtr, StringRef isysroot, // Some simple statistics RecordData::value_type Record[] = { - NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts}; + NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts, + NumModuleLocalDeclContexts}; Stream.EmitRecord(STATISTICS, Record); Stream.ExitBlock(); Stream.FlushToWord(); @@ -5944,7 +6104,9 @@ void ASTWriter::WriteDeclAndTypes(ASTContext &Context) { RecordData DelayedNamespaceRecord; for (NamespaceDecl *NS : DelayedNamespace) { uint64_t LexicalOffset = WriteDeclContextLexicalBlock(Context, NS); - uint64_t VisibleOffset = WriteDeclContextVisibleBlock(Context, NS); + uint64_t VisibleOffset = 0; + uint64_t ModuleLocalOffset = 0; + WriteDeclContextVisibleBlock(Context, NS, VisibleOffset, ModuleLocalOffset); // Write the offset relative to current block. if (LexicalOffset) @@ -5953,9 +6115,13 @@ void ASTWriter::WriteDeclAndTypes(ASTContext &Context) { if (VisibleOffset) VisibleOffset -= DeclTypesBlockStartOffset; + if (ModuleLocalOffset) + ModuleLocalOffset -= DeclTypesBlockStartOffset; + AddDeclRef(NS, DelayedNamespaceRecord); DelayedNamespaceRecord.push_back(LexicalOffset); DelayedNamespaceRecord.push_back(VisibleOffset); + DelayedNamespaceRecord.push_back(ModuleLocalOffset); } // The process of writing lexical and visible block for delayed namespace @@ -6035,6 +6201,12 @@ void ASTWriter::WriteDeclAndTypes(ASTContext &Context) { Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob)); UpdateVisibleAbbrev = Stream.EmitAbbrev(std::move(Abv)); + Abv = std::make_shared(); + Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_MODULE_LOCAL_VISIBLE)); + Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6)); + Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob)); + ModuleLocalUpdateVisibleAbbrev = Stream.EmitAbbrev(std::move(Abv)); + // And a visible updates block for the translation unit. 
WriteDeclContextVisibleUpdate(Context, TU); diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp index 3b357f3c50dad..7a494cfe1ac64 100644 --- a/clang/lib/Serialization/ASTWriterDecl.cpp +++ b/clang/lib/Serialization/ASTWriterDecl.cpp @@ -2068,6 +2068,7 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC) { uint64_t LexicalOffset = 0; uint64_t VisibleOffset = 0; + uint64_t ModuleLocalOffset = 0; if (Writer.isGeneratingReducedBMI() && isa(DC) && cast(DC)->isFromExplicitGlobalModule()) { @@ -2078,12 +2079,13 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC) { } else { LexicalOffset = Writer.WriteDeclContextLexicalBlock(Record.getASTContext(), DC); - VisibleOffset = - Writer.WriteDeclContextVisibleBlock(Record.getASTContext(), DC); + Writer.WriteDeclContextVisibleBlock(Record.getASTContext(), DC, + VisibleOffset, ModuleLocalOffset); } Record.AddOffset(LexicalOffset); Record.AddOffset(VisibleOffset); + Record.AddOffset(ModuleLocalOffset); } const Decl *ASTWriter::getFirstLocalDecl(const Decl *D) { @@ -2438,6 +2440,7 @@ void ASTWriter::WriteDeclAbbrevs() { // DC Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset + Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ModuleLocalOffset DeclEnumAbbrev = Stream.EmitAbbrev(std::move(Abv)); // Abbreviation for DECL_RECORD @@ -2490,6 +2493,7 @@ void ASTWriter::WriteDeclAbbrevs() { // DC Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset + Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ModuleLocalOffset DeclRecordAbbrev = Stream.EmitAbbrev(std::move(Abv)); // Abbreviation for DECL_PARM_VAR @@ -2827,6 +2831,11 @@ void ASTWriter::WriteDeclAbbrevs() { Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); DeclContextVisibleLookupAbbrev = Stream.EmitAbbrev(std::move(Abv)); + Abv = std::make_shared(); 
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_MODULE_LOCAL_VISIBLE)); + Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); + DeclModuleLocalVisibleLookupAbbrev = Stream.EmitAbbrev(std::move(Abv)); + Abv = std::make_shared(); Abv->Add(BitCodeAbbrevOp(serialization::DECL_SPECIALIZATIONS)); Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); diff --git a/clang/lib/Serialization/GeneratePCH.cpp b/clang/lib/Serialization/GeneratePCH.cpp index a3189bb40b191..12751beb8d715 100644 --- a/clang/lib/Serialization/GeneratePCH.cpp +++ b/clang/lib/Serialization/GeneratePCH.cpp @@ -12,7 +12,7 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTContext.h" -#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Basic/DiagnosticFrontend.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/Preprocessor.h" diff --git a/clang/test/CXX/basic/basic.scope/basic.scope.namespace/p2.cpp b/clang/test/CXX/basic/basic.scope/basic.scope.namespace/p2.cpp index d69db40062dae..54ec6aa61ec37 100644 --- a/clang/test/CXX/basic/basic.scope/basic.scope.namespace/p2.cpp +++ b/clang/test/CXX/basic/basic.scope/basic.scope.namespace/p2.cpp @@ -62,8 +62,8 @@ void test_late() { not_exported = 1; #ifndef IMPLEMENTATION - // expected-error@-2 {{declaration of 'not_exported' must be imported from module 'A' before it is required}} - // expected-note@p2.cpp:19 {{declaration here is not visible}} + // expected-error@-2 {{use of undeclared identifier 'not_exported'; did you mean 'exported'?}} + // expected-note@p2.cpp:18 {{'exported' declared here}} #endif internal = 1; diff --git a/clang/test/CXX/module/basic/basic.link/p2.cppm b/clang/test/CXX/module/basic/basic.link/p2.cppm index 19761fb3359ce..5a497304201dc 100644 --- a/clang/test/CXX/module/basic/basic.link/p2.cppm +++ b/clang/test/CXX/module/basic/basic.link/p2.cppm @@ -62,12 +62,11 @@ import M; void use_from_module_impl() { 
external_linkage_fn(); - module_linkage_fn(); // expected-error {{declaration of 'module_linkage_fn' must be imported}} + module_linkage_fn(); // expected-error {{use of undeclared identifier 'module_linkage_fn'}} internal_linkage_fn(); // expected-error {{declaration of 'internal_linkage_fn' must be imported}} (void)external_linkage_class{}; (void)module_linkage_class{}; // expected-error {{undeclared identifier}} expected-error 0+{{}} (void)internal_linkage_class{}; // expected-error {{undeclared identifier}} expected-error 0+{{}} - // expected-note@M.cppm:9 {{declaration here is not visible}} // expected-note@M.cppm:10 {{declaration here is not visible}} (void)external_linkage_var; (void)module_linkage_var; // expected-error {{undeclared identifier}} diff --git a/clang/test/CXX/module/module.import/p2.cpp b/clang/test/CXX/module/module.import/p2.cpp index 6b8e32f746b62..0ad3bc815beac 100644 --- a/clang/test/CXX/module/module.import/p2.cpp +++ b/clang/test/CXX/module/module.import/p2.cpp @@ -23,10 +23,7 @@ export A f(); //--- Use.cpp import M; void test() { - A a; // expected-error {{definition of 'A' must be imported from module 'M' before it is required}} - // expected-error@-1 {{definition of 'A' must be imported from module 'M' before it is required}} expected-error@-1 {{}} - // expected-note@impl.cppm:2 {{declaration here is not visible}} - // expected-note@impl.cppm:2 {{definition here is not reachable}} expected-note@impl.cppm:2 {{}} + A a; // expected-error {{unknown type name 'A'}} } //--- UseInPartA.cppm @@ -40,10 +37,7 @@ void test() { export module B; import M; void test() { - A a; // expected-error {{declaration of 'A' must be imported from module 'M'}} - // expected-error@-1 {{definition of 'A' must be imported from module 'M'}} expected-error@-1 {{}} - // expected-note@impl.cppm:2 {{declaration here is not visible}} - // expected-note@impl.cppm:2 {{definition here is not reachable}} expected-note@impl.cppm:2 {{}} + A a; // expected-error {{unknown 
type name 'A'}} } //--- Private.cppm diff --git a/clang/test/CXX/module/module.interface/p7.cpp b/clang/test/CXX/module/module.interface/p7.cpp index 1572390f0d289..cff5df91e43d4 100644 --- a/clang/test/CXX/module/module.interface/p7.cpp +++ b/clang/test/CXX/module/module.interface/p7.cpp @@ -57,12 +57,10 @@ void test() { void test2() { auto a = E1::e1; // OK, namespace-scope name E1 is visible and e1 is reachable auto b = e1; // OK, namespace-scope name e1 is visible - auto c = E2::e2; // expected-error {{declaration of 'E2' must be imported from module}} - // expected-note@* {{declaration here is not visible}} - auto d = e2; // should be error, namespace-scope name e2 is not visible + auto c = E2::e2; // expected-error {{use of undeclared identifier 'E2'}} + auto d = e2; // expected-error {{use of undeclared identifier 'e2'}} auto e = E2U::e2; // OK, namespace-scope name E2U is visible and E2::e2 is reachable - auto f = E3::e3; // expected-error {{declaration of 'E3' must be imported from module 'p7' before it is required}} - // expected-note@* {{declaration here is not visible}} - auto g = e3; // should be error, namespace-scope name e3 is not visible + auto f = E3::e3; // expected-error {{use of undeclared identifier 'E3'}} + auto g = e3; // expected-error {{use of undeclared identifier 'e3'}} auto h = decltype(func())::e3; // OK, namespace-scope name f is visible and E3::e3 is reachable } diff --git a/clang/test/CXX/module/module.reach/p5.cpp b/clang/test/CXX/module/module.reach/p5.cpp index 9c498a260530f..947fd082553ec 100644 --- a/clang/test/CXX/module/module.reach/p5.cpp +++ b/clang/test/CXX/module/module.reach/p5.cpp @@ -14,5 +14,4 @@ export using Y = X; export module B; import A; Y y; // OK, definition of X is reachable -X x; // expected-error {{declaration of 'X' must be imported from module 'A' before it is required}} - // expected-note@* {{declaration here is not visible}} +X x; // expected-error {{unknown type name 'X'}} diff --git 
a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_luti.c b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_luti.c index 4b3f97d13c7eb..82e318a7460c2 100644 --- a/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_luti.c +++ b/clang/test/CodeGen/AArch64/sve2-intrinsics/acle_sve2_luti.c @@ -19,9 +19,9 @@ #ifdef SVE_OVERLOADED_FORMS // A simple used,unused... macro, long enough to represent any SVE builtin. -#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3 +#define SVE_ACLE_FUNC(A1,A2_UNUSED) A1 #else -#define SVE_ACLE_FUNC(A1,A2,A3) A1##A2##A3 +#define SVE_ACLE_FUNC(A1,A2) A1##A2 #endif // SME-CHECK-LABEL: @test_svluti2_lane_s8( @@ -39,7 +39,7 @@ // CPP-CHECK-NEXT: ret [[TMP0]] // svint8_t test_svluti2_lane_s8(svint8_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_s8,)(table, indices, 0); + return SVE_ACLE_FUNC(svluti2_lane,_s8)(table, indices, 0); } // SME-CHECK-LABEL: @test_svluti2_lane_u8( @@ -57,7 +57,7 @@ svint8_t test_svluti2_lane_s8(svint8_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svuint8_t test_svluti2_lane_u8(svuint8_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_u8,)(table, indices, 3); + return SVE_ACLE_FUNC(svluti2_lane,_u8)(table, indices, 3); } // SME-CHECK-LABEL: @test_svluti2_lane_s16( @@ -75,7 +75,7 @@ svuint8_t test_svluti2_lane_u8(svuint8_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svint16_t test_svluti2_lane_s16(svint16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_s16,)(table, indices, 0); + return SVE_ACLE_FUNC(svluti2_lane,_s16)(table, indices, 0); } // SME-CHECK-LABEL: @test_svluti2_lane_u16( @@ -93,7 +93,7 @@ svint16_t test_svluti2_lane_s16(svint16_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svuint16_t test_svluti2_lane_u16(svuint16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_u16,)(table, indices, 7); + return 
SVE_ACLE_FUNC(svluti2_lane,_u16)(table, indices, 7); } // SME-CHECK-LABEL: @test_svluti2_lane_f16( @@ -111,7 +111,7 @@ svuint16_t test_svluti2_lane_u16(svuint16_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat16_t test_svluti2_lane_f16(svfloat16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_f16,)(table, indices, 5); + return SVE_ACLE_FUNC(svluti2_lane,_f16)(table, indices, 5); } // SME-CHECK-LABEL: @test_svluti2_lane_bf16( @@ -129,7 +129,7 @@ svfloat16_t test_svluti2_lane_f16(svfloat16_t table, svuint8_t indices) MODE_ATT // CPP-CHECK-NEXT: ret [[TMP0]] // svbfloat16_t test_svluti2_lane_bf16(svbfloat16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti2_lane,_bf16,)(table, indices, 2); + return SVE_ACLE_FUNC(svluti2_lane,_bf16)(table, indices, 2); } // SME-CHECK-LABEL: @test_svluti4_lane_s8( @@ -147,7 +147,7 @@ svbfloat16_t test_svluti2_lane_bf16(svbfloat16_t table, svuint8_t indices) MODE_ // CPP-CHECK-NEXT: ret [[TMP0]] // svint8_t test_svluti4_lane_s8(svint8_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_s8,)(table, indices, 0); + return SVE_ACLE_FUNC(svluti4_lane,_s8)(table, indices, 0); } // SME-CHECK-LABEL: @test_svluti4_lane_u8( @@ -165,7 +165,7 @@ svint8_t test_svluti4_lane_s8(svint8_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svuint8_t test_svluti4_lane_u8(svuint8_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_u8,)(table, indices, 1); + return SVE_ACLE_FUNC(svluti4_lane,_u8)(table, indices, 1); } // SME-CHECK-LABEL: @test_svluti4_lane_s16( @@ -183,7 +183,7 @@ svuint8_t test_svluti4_lane_u8(svuint8_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svint16_t test_svluti4_lane_s16(svint16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_s16,)(table, indices, 0); + return SVE_ACLE_FUNC(svluti4_lane,_s16)(table, indices, 0); } // 
SME-CHECK-LABEL: @test_svluti4_lane_u16( @@ -201,7 +201,7 @@ svint16_t test_svluti4_lane_s16(svint16_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svuint16_t test_svluti4_lane_u16(svuint16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_u16,)(table, indices, 3); + return SVE_ACLE_FUNC(svluti4_lane,_u16)(table, indices, 3); } // SME-CHECK-LABEL: @test_svluti4_lane_f16( @@ -219,7 +219,7 @@ svuint16_t test_svluti4_lane_u16(svuint16_t table, svuint8_t indices) MODE_ATTR{ // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat16_t test_svluti4_lane_f16(svfloat16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_f16,)(table, indices, 2); + return SVE_ACLE_FUNC(svluti4_lane,_f16)(table, indices, 2); } // SME-CHECK-LABEL: @test_svluti4_lane_bf16( @@ -237,7 +237,7 @@ svfloat16_t test_svluti4_lane_f16(svfloat16_t table, svuint8_t indices) MODE_ATT // CPP-CHECK-NEXT: ret [[TMP0]] // svbfloat16_t test_svluti4_lane_bf16(svbfloat16_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_bf16,)(table, indices, 1); + return SVE_ACLE_FUNC(svluti4_lane,_bf16)(table, indices, 1); } // SME-CHECK-LABEL: @test_svluti4_lane_s16_x2( @@ -257,7 +257,7 @@ svbfloat16_t test_svluti4_lane_bf16(svbfloat16_t table, svuint8_t indices) MODE_ // CPP-CHECK-NEXT: ret [[TMP0]] // svint16_t test_svluti4_lane_s16_x2(svint16x2_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_s16,_x2)(table, indices, 0); + return SVE_ACLE_FUNC(svluti4_lane,_s16_x2)(table, indices, 0); } // SME-CHECK-LABEL: @test_svluti4_lane_u16_x2( @@ -277,7 +277,7 @@ svint16_t test_svluti4_lane_s16_x2(svint16x2_t table, svuint8_t indices) MODE_AT // CPP-CHECK-NEXT: ret [[TMP0]] // svuint16_t test_svluti4_lane_u16_x2(svuint16x2_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_u16,_x2)(table, indices, 3); + return SVE_ACLE_FUNC(svluti4_lane,_u16_x2)(table, indices, 3); } // SME-CHECK-LABEL: 
@test_svluti4_lane_f16_x2( @@ -297,7 +297,7 @@ svuint16_t test_svluti4_lane_u16_x2(svuint16x2_t table, svuint8_t indices) MODE_ // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat16_t test_svluti4_lane_f16_x2(svfloat16x2_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_f16,_x2)(table, indices, 2); + return SVE_ACLE_FUNC(svluti4_lane,_f16_x2)(table, indices, 2); } // SME-CHECK-LABEL: @test_svluti4_lane_bf16_x2( @@ -317,5 +317,5 @@ svfloat16_t test_svluti4_lane_f16_x2(svfloat16x2_t table, svuint8_t indices) MOD // CPP-CHECK-NEXT: ret [[TMP0]] // svbfloat16_t test_svluti4_lane_bf16_x2(svbfloat16x2_t table, svuint8_t indices) MODE_ATTR{ - return SVE_ACLE_FUNC(svluti4_lane,_bf16,_x2)(table, indices, 1); + return SVE_ACLE_FUNC(svluti4_lane,_bf16_x2)(table, indices, 1); } diff --git a/clang/test/CodeGenHLSL/builtins/WaveActiveSum.hlsl b/clang/test/CodeGenHLSL/builtins/WaveActiveSum.hlsl new file mode 100644 index 0000000000000..4bf423ccc1b82 --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/WaveActiveSum.hlsl @@ -0,0 +1,45 @@ +// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \ +// RUN: dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes -o - | \ +// RUN: FileCheck %s --check-prefixes=CHECK,CHECK-DXIL +// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \ +// RUN: spirv-pc-vulkan-compute %s -emit-llvm -disable-llvm-passes -o - | \ +// RUN: FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV + +// Test basic lowering to runtime function call. 
+ +// CHECK-LABEL: test_int +int test_int(int expr) { + // CHECK-SPIRV: %[[RET:.*]] = call spir_func [[TY:.*]] @llvm.spv.wave.reduce.sum.i32([[TY]] %[[#]]) + // CHECK-DXIL: %[[RET:.*]] = call [[TY:.*]] @llvm.dx.wave.reduce.sum.i32([[TY]] %[[#]]) + // CHECK: ret [[TY]] %[[RET]] + return WaveActiveSum(expr); +} + +// CHECK-DXIL: declare [[TY]] @llvm.dx.wave.reduce.sum.i32([[TY]]) #[[#attr:]] +// CHECK-SPIRV: declare spir_func [[TY]] @llvm.spv.wave.reduce.sum.i32([[TY]]) #[[#attr:]] + +// CHECK-LABEL: test_uint64_t +uint64_t test_uint64_t(uint64_t expr) { + // CHECK-SPIRV: %[[RET:.*]] = call spir_func [[TY:.*]] @llvm.spv.wave.reduce.sum.i64([[TY]] %[[#]]) + // CHECK-DXIL: %[[RET:.*]] = call [[TY:.*]] @llvm.dx.wave.reduce.usum.i64([[TY]] %[[#]]) + // CHECK: ret [[TY]] %[[RET]] + return WaveActiveSum(expr); +} + +// CHECK-DXIL: declare [[TY]] @llvm.dx.wave.reduce.usum.i64([[TY]]) #[[#attr:]] +// CHECK-SPIRV: declare spir_func [[TY]] @llvm.spv.wave.reduce.sum.i64([[TY]]) #[[#attr:]] + +// Test basic lowering to runtime function call with array and float value. 
+ +// CHECK-LABEL: test_floatv4 +float4 test_floatv4(float4 expr) { + // CHECK-SPIRV: %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn spir_func [[TY1:.*]] @llvm.spv.wave.reduce.sum.v4f32([[TY1]] %[[#]] + // CHECK-DXIL: %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn [[TY1:.*]] @llvm.dx.wave.reduce.sum.v4f32([[TY1]] %[[#]]) + // CHECK: ret [[TY1]] %[[RET1]] + return WaveActiveSum(expr); +} + +// CHECK-DXIL: declare [[TY1]] @llvm.dx.wave.reduce.sum.v4f32([[TY1]]) #[[#attr]] +// CHECK-SPIRV: declare spir_func [[TY1]] @llvm.spv.wave.reduce.sum.v4f32([[TY1]]) #[[#attr]] + +// CHECK: attributes #[[#attr]] = {{{.*}} convergent {{.*}}} diff --git a/clang/test/Driver/arm-mfpu.c b/clang/test/Driver/arm-mfpu.c index babfa16741ad7..5ea2230044dfb 100644 --- a/clang/test/Driver/arm-mfpu.c +++ b/clang/test/Driver/arm-mfpu.c @@ -356,8 +356,10 @@ // CHECK-HF-DAG: "-target-cpu" "arm1176jzf-s" // RUN: %clang -target armv7-apple-darwin -x assembler %s -### -c 2>&1 \ -// RUN: | FileCheck --check-prefix=ASM %s -// ASM-NOT: -target-feature +// RUN: | FileCheck --check-prefix=ASM-NEON %s +// RUN: %clang -target armv7-windows -x assembler %s -### -c 2>&1 \ +// RUN: | FileCheck --check-prefix=ASM-NEON %s +// ASM-NEON: "-target-feature" "+neon" // RUN: %clang -target armv8-linux-gnueabi -mfloat-abi=soft -mfpu=none %s -### -c 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-SOFT-ABI-FP %s diff --git a/clang/test/Driver/baremetal-multilib-custom-flags.yaml b/clang/test/Driver/baremetal-multilib-custom-flags.yaml new file mode 100644 index 0000000000000..9c0320ea16117 --- /dev/null +++ b/clang/test/Driver/baremetal-multilib-custom-flags.yaml @@ -0,0 +1,81 @@ +# UNSUPPORTED: system-windows + +# RUN: %clang --multi-lib-config=%s -no-canonical-prefixes -x c %s -### -o /dev/null 2>&1 \ +# RUN: --target=thumbv8m.main-none-eabi -mfpu=none --sysroot= \ +# RUN: | FileCheck --check-prefix=CHECK-DEFAULT %s + +# CHECK-DEFAULT: "-cc1" "-triple" "thumbv8m.main-unknown-none-eabi" +# CHECK-DEFAULT-SAME: 
"-internal-isystem" "[[SYSROOT:[^"]*]]/bin/../lib/clang-runtimes/arm-none-eabi/thumb/v8-m.main/nofp/include" +# CHECK-DEFAULT-NEXT: "-L[[SYSROOT]]/bin/../lib/clang-runtimes/arm-none-eabi/thumb/v8-m.main/nofp/lib" + +# RUN: %clang --multi-lib-config=%s -no-canonical-prefixes -x c %s -### -o /dev/null 2>&1 \ +# RUN: --target=thumbv8m.main-none-eabi -mfpu=none -fmultilib-flag=no-multithreaded --sysroot= \ +# RUN: | FileCheck --check-prefix=CHECK-NOMULTI %s + +# CHECK-NOMULTI: "-cc1" "-triple" "thumbv8m.main-unknown-none-eabi" +# CHECK-NOMULTI-SAME: "-internal-isystem" "[[SYSROOT:[^"]*]]/bin/../lib/clang-runtimes/arm-none-eabi/thumb/v8-m.main/nofp/include" +# CHECK-NOMULTI-NEXT: "-L[[SYSROOT]]/bin/../lib/clang-runtimes/arm-none-eabi/thumb/v8-m.main/nofp/lib" + +# RUN: %clang --multi-lib-config=%s -no-canonical-prefixes -x c %s -### -o /dev/null 2>&1 \ +# RUN: --target=thumbv8m.main-none-eabi -mfpu=none -fmultilib-flag=multithreaded --sysroot= \ +# RUN: | FileCheck --check-prefix=CHECK-MULTI %s + +# CHECK-MULTI: "-cc1" "-triple" "thumbv8m.main-unknown-none-eabi" +# CHECK-MULTI-SAME: "-internal-isystem" "[[SYSROOT:[^"]*]]/bin/../lib/clang-runtimes/arm-none-eabi/multithreaded/thumb/v8-m.main/nofp/include" +# CHECK-MULTI-NEXT: "-L[[SYSROOT]]/bin/../lib/clang-runtimes/arm-none-eabi/multithreaded/thumb/v8-m.main/nofp/lib" + +# RUN: not %clang --multi-lib-config=%s -no-canonical-prefixes -x c %s -### -o /dev/null 2>&1 \ +# RUN: --target=thumbv8m.main-none-eabi -mfpu=none -fmultilib-flag=singlethreaded -fmultilib-flag=no-io --sysroot= \ +# RUN: | FileCheck --check-prefix=CHECK-ERROR %s +# CHECK-ERROR-DAG: error: unsupported option '-fmultilib-flag=singlethreaded' +# CHECK-ERROR-DAG: error: unsupported option '-fmultilib-flag=no-io'; did you mean '-fmultilib-flag=io-none'? 
+ +# RUN: %clang --multi-lib-config=%s -no-canonical-prefixes -x c %s -### -o /dev/null 2>&1 \ +# RUN: --target=thumbv8m.main-none-eabi -mfpu=none -print-multi-lib --sysroot= \ +# RUN: | FileCheck --check-prefix=CHECK-PRINT-MULTI-LIB %s +# CHECK-PRINT-MULTI-LIB: arm-none-eabi/thumb/v8-m.main/nofp;@-target=thumbv8m.main-unknown-none-eabi@mfpu=none@fmultilib-flag=no-multithreaded +# CHECK-PRINT-MULTI-LIB: arm-none-eabi/multithreaded/thumb/v8-m.main/nofp;@-target=thumbv8m.main-unknown-none-eabi@mfpu=none@fmultilib-flag=multithreaded + +# RUN: %clang --target=arm-none-eabi --multi-lib-config=%s -x c %s -fmultilib-flag=no-multithreaded -### -o /dev/null 2>&1 \ +# RUN: | FileCheck --check-prefix=CHECK-MACRODEFINES-NOMULTI %s +# CHECK-MACRODEFINES-NOMULTI: "-D" "__SINGLE_THREAD__" + +# RUN: %clang --target=arm-none-eabi --multi-lib-config=%s -x c %s -fmultilib-flag=io-semihosting -### -o /dev/null 2>&1 \ +# RUN: | FileCheck --check-prefix=CHECK-MACRODEFINES-IO-SEMIHOSTING %s +# CHECK-MACRODEFINES-IO-SEMIHOSTING: "-D" "SEMIHOSTING" + +# RUN: %clang --target=arm-none-eabi --multi-lib-config=%s -x c %s -fmultilib-flag=io-linux-syscalls -### -o /dev/null 2>&1 \ +# RUN: | FileCheck --check-prefix=CHECK-MACRODEFINES-IO-LINUX %s +# CHECK-MACRODEFINES-IO-LINUX: "-D" "LINUX_SYSCALLS" +# CHECK-MACRODEFINES-IO-LINUX-SAME: "-D" "HOSTED" + +--- +MultilibVersion: 1.0 + +Groups: +- Name: stdlib + Type: Exclusive + +Variants: +- Dir: arm-none-eabi/thumb/v8-m.main/nofp + Flags: [--target=thumbv8m.main-unknown-none-eabi, -mfpu=none, -fmultilib-flag=no-multithreaded] + Group: stdlib +- Dir: arm-none-eabi/multithreaded/thumb/v8-m.main/nofp + Flags: [--target=thumbv8m.main-unknown-none-eabi, -mfpu=none, -fmultilib-flag=multithreaded] + Group: stdlib + +Flags: + - Name: multithreading + Values: + - Name: no-multithreaded + MacroDefines: [__SINGLE_THREAD__] + - Name: multithreaded + Default: no-multithreaded + - Name: io + Values: + - Name: io-none + - Name: io-semihosting + MacroDefines: 
[SEMIHOSTING] + - Name: io-linux-syscalls + MacroDefines: [LINUX_SYSCALLS, HOSTED] + Default: io-none \ No newline at end of file diff --git a/clang/test/Driver/fuse-lipo.c b/clang/test/Driver/fuse-lipo.c new file mode 100644 index 0000000000000..2dedb86ddc527 --- /dev/null +++ b/clang/test/Driver/fuse-lipo.c @@ -0,0 +1,15 @@ +// RUN: %clang %s -### --target=arm64-apple-darwin -arch x86_64 -arch arm64 -fuse-lipo=llvm-lipo 2>&1 | FileCheck -check-prefix=TEST1 %s +// TEST1: llvm-lipo + +// RUN: %clang %s -### --target=arm64-apple-darwin -arch x86_64 -arch arm64 -fuse-lipo=nonexistant-lipo 2>&1 | FileCheck -check-prefix=TEST2 %s +// TEST2: nonexistant-lipo + +// RUN: %clang %s -### --target=arm64-apple-darwin -fuse-lipo=llvm-lipo 2>&1 | FileCheck -check-prefix=TEST3 %s +// TEST3: clang: warning: argument unused during compilation: '-fuse-lipo=llvm-lipo' + +// RUN: %clang %s -### --target=arm64-apple-darwin -Wno-unused-command-line-argument -fuse-lipo=llvm-lipo 2>&1 | FileCheck -check-prefix=TEST4 %s +// TEST4-NOT: llvm-lipo + +// RUN: %clang %s -### --target=arm64-apple-darwin -arch x86_64 -arch arm64 2>&1 | FileCheck -check-prefix=TEST5 %s +// TEST5: lipo +// TEST5-NOT: llvm-lipo diff --git a/clang/test/Modules/Reachability-template-default-arg.cpp b/clang/test/Modules/Reachability-template-default-arg.cpp index 35c647d0d344b..a7da86b8cc2d5 100644 --- a/clang/test/Modules/Reachability-template-default-arg.cpp +++ b/clang/test/Modules/Reachability-template-default-arg.cpp @@ -21,6 +21,5 @@ struct A { import template_default_arg; void bar() { A<> a0; - A a1; // expected-error {{declaration of 't' must be imported from module 'template_default_arg' before it is required}} - // expected-note@* {{declaration here is not visible}} + A a1; // expected-error {{use of undeclared identifier 't'}} } diff --git a/clang/test/Modules/cxx20-10-1-ex2.cpp b/clang/test/Modules/cxx20-10-1-ex2.cpp index fc61d89926d44..8611d6d64c851 100644 --- a/clang/test/Modules/cxx20-10-1-ex2.cpp +++ 
b/clang/test/Modules/cxx20-10-1-ex2.cpp @@ -78,8 +78,7 @@ int &c = n; // OK //--- std10-1-ex2-tu6.cpp import B; // error, n is module-local and this is not a module. -int &c = n; // expected-error {{declaration of 'n' must be imported}} - // expected-note@* {{declaration here is not visible}} +int &c = n; // expected-error {{use of undeclared identifier 'n'}} //--- std10-1-ex2-tu7.cpp // expected-no-diagnostics diff --git a/clang/test/Modules/deduction-guide3.cppm b/clang/test/Modules/deduction-guide3.cppm index 1165dd40bcfb8..f7990004cec7c 100644 --- a/clang/test/Modules/deduction-guide3.cppm +++ b/clang/test/Modules/deduction-guide3.cppm @@ -22,8 +22,6 @@ Templ(T t) -> Templ; //--- Use.cpp import Templ; void func() { - Templ t(5); // expected-error {{declaration of 'Templ' must be imported from module 'Templ' before it is required}} - // expected-error@-1 {{unknown type name 'Templ'}} - // expected-note@Templ.cppm:3 {{declaration here is not visible}} + Templ t(5); // expected-error {{unknown type name 'Templ'}} } diff --git a/clang/test/Modules/module-local-with-templates.cppm b/clang/test/Modules/module-local-with-templates.cppm new file mode 100644 index 0000000000000..87955bdd3f99e --- /dev/null +++ b/clang/test/Modules/module-local-with-templates.cppm @@ -0,0 +1,79 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-module-interface -o %t/a.pcm +// RUN: %clang_cc1 -std=c++20 %t/use.cc -fmodule-file=a=%t/a.pcm -fsyntax-only -verify +// RUN: %clang_cc1 -std=c++20 %t/a-part.cppm -fmodule-file=a=%t/a.pcm -fsyntax-only -verify +// +// Test again with reduced BMI +// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/a.pcm +// RUN: %clang_cc1 -std=c++20 %t/use.cc -fmodule-file=a=%t/a.pcm -fsyntax-only -verify +// RUN: %clang_cc1 -std=c++20 %t/a-part.cppm -fmodule-file=a=%t/a.pcm -fsyntax-only -verify +// RUN: %clang_cc1 -std=c++20 %t/a.cc -fmodule-file=a=%t/a.pcm 
-fsyntax-only -verify + + +//--- a.cppm +export module a; + +constexpr int x = 43; + +export constexpr int f() { return x; } + +export template +constexpr T g() { + return x; +} + +namespace nn { + +constexpr int x = 88; + +export constexpr int f() { return x; } + +export template +constexpr T g() { + return x; +} +} + +//--- use.cc +// expected-no-diagnostics +import a; + +static_assert(f() == 43, ""); + +constexpr int x = 99; + +static_assert(g() == 43, ""); + +static_assert(x == 99, ""); + +namespace nn { +static_assert(f() == 88, ""); + +constexpr int x = 1000; + +static_assert(g() == 88, ""); + +static_assert(x == 1000, ""); + +} + +//--- a-part.cppm +module a:impl; +import a; + +static_assert(x == 43, ""); + +constexpr int x = 1000; // expected-error {{redefinition of 'x'}} + // expected-note@* {{previous definition is here}} + +//--- a.cc +module a; + +static_assert(x == 43, ""); + +constexpr int x = 1000; // expected-error {{redefinition of 'x'}} + // expected-note@* {{previous definition is here}} + diff --git a/clang/test/Modules/pr90154.cppm b/clang/test/Modules/pr90154.cppm new file mode 100644 index 0000000000000..d626646fbc488 --- /dev/null +++ b/clang/test/Modules/pr90154.cppm @@ -0,0 +1,25 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-module-interface -o %t/a.pcm +// RUN: %clang_cc1 -std=c++20 %t/use.cc -fmodule-file=a=%t/a.pcm -fsyntax-only -verify +// +// Test again with reduced BMI +// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/a.pcm +// RUN: %clang_cc1 -std=c++20 %t/use.cc -fmodule-file=a=%t/a.pcm -fsyntax-only -verify + +//--- a.cppm +export module a; +int b = 99; +namespace a { int a = 43; } + +//--- use.cc +// expected-no-diagnostics +import a; + +namespace a { + double a = 43.0; +} + +int b = 883; diff --git a/clang/test/Preprocessor/arm-target-features.c b/clang/test/Preprocessor/arm-target-features.c index 2999ee0d9e4d8..95ca7d0cbc3c2 
100644 --- a/clang/test/Preprocessor/arm-target-features.c +++ b/clang/test/Preprocessor/arm-target-features.c @@ -132,6 +132,30 @@ // CHECK-V7VE-DEFAULT-ABI-SOFT: #define __ARM_ARCH_EXT_IDIV__ 1 // CHECK-V7VE-DEFAULT-ABI-SOFT: #define __ARM_FP 0xc +// RUN: %clang -target x86_64-apple-macosx10.10 -arch armv7 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-DARWIN-V7 %s +// CHECK-DARWIN-V7: #define __ARMEL__ 1 +// CHECK-DARWIN-V7: #define __ARM_ARCH 7 +// CHECK-DARWIN-V7: #define __ARM_ARCH_7A__ 1 +// CHECK-DARWIN-V7-NOT: __ARM_FEATURE_CRC32 +// CHECK-DARWIN-V7-NOT: __ARM_FEATURE_NUMERIC_MAXMIN +// CHECK-DARWIN-V7-NOT: __ARM_FEATURE_DIRECTED_ROUNDING +// CHECK-DARWIN-V7: #define __ARM_FP 0xc +// CHECK-DARWIN-V7: #define __ARM_NEON 1 +// CHECK-DARWIN-V7: #define __ARM_NEON_FP 0x4 +// CHECK-DARWIN-V7: #define __ARM_NEON__ 1 + +// RUN: %clang -target armv7-windows -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-WINDOWS-V7 %s +// CHECK-WINDOWS-V7: #define __ARMEL__ 1 +// CHECK-WINDOWS-V7: #define __ARM_ARCH 7 +// CHECK-WINDOWS-V7: #define __ARM_ARCH_7A__ 1 +// CHECK-WINDOWS-V7-NOT: __ARM_FEATURE_CRC32 +// CHECK-WINDOWS-V7-NOT: __ARM_FEATURE_NUMERIC_MAXMIN +// CHECK-WINDOWS-V7-NOT: __ARM_FEATURE_DIRECTED_ROUNDING +// CHECK-WINDOWS-V7: #define __ARM_FP 0xe +// CHECK-WINDOWS-V7: #define __ARM_NEON 1 +// CHECK-WINDOWS-V7: #define __ARM_NEON_FP 0x6 +// CHECK-WINDOWS-V7: #define __ARM_NEON__ 1 + // RUN: %clang -target x86_64-apple-macosx10.10 -arch armv7s -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-V7S %s // CHECK-V7S: #define __ARMEL__ 1 // CHECK-V7S: #define __ARM_ARCH 7 @@ -140,6 +164,9 @@ // CHECK-V7S-NOT: __ARM_FEATURE_NUMERIC_MAXMIN // CHECK-V7S-NOT: __ARM_FEATURE_DIRECTED_ROUNDING // CHECK-V7S: #define __ARM_FP 0xe +// CHECK-V7S: #define __ARM_NEON 1 +// CHECK-V7S: #define __ARM_NEON_FP 0x6 +// CHECK-V7S: #define __ARM_NEON__ 1 // RUN: %clang -target arm-arm-none-eabi -march=armv7-m 
-mfloat-abi=soft -x c -E -dM %s | FileCheck -match-full-lines --check-prefix=CHECK-VFP-FP %s // RUN: %clang -target arm-arm-none-eabi -march=armv7-m -mfloat-abi=softfp -x c -E -dM %s | FileCheck -match-full-lines --check-prefix=CHECK-VFP-FP %s diff --git a/clang/test/Preprocessor/init-aarch64.c b/clang/test/Preprocessor/init-aarch64.c index 3d2f4b83abcb8..8578993dbfaeb 100644 --- a/clang/test/Preprocessor/init-aarch64.c +++ b/clang/test/Preprocessor/init-aarch64.c @@ -123,7 +123,7 @@ // AARCH64-NEXT: #define __FPCLASS_SNAN 0x0001 // AARCH64-NEXT: #define __FP_FAST_FMA 1 // AARCH64-NEXT: #define __FP_FAST_FMAF 1 -// AARCH64-NEXT: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202420 +// AARCH64-NEXT: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202430 // AARCH64-NEXT: #define __GCC_ASM_FLAG_OUTPUTS__ 1 // AARCH64-NEXT: #define __GCC_CONSTRUCTIVE_SIZE {{.+}} // AARCH64-NEXT: #define __GCC_DESTRUCTIVE_SIZE {{.+}} @@ -434,7 +434,7 @@ // AARCH64-DARWIN: #define __FLT_MIN_EXP__ (-125) // AARCH64-DARWIN: #define __FLT_MIN__ 1.17549435e-38F // AARCH64-DARWIN: #define __FLT_RADIX__ 2 -// AARCH64-DARWIN: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202420 +// AARCH64-DARWIN: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202430 // AARCH64-DARWIN: #define __INT16_C_SUFFIX__ // AARCH64-DARWIN: #define __INT16_FMTd__ "hd" // AARCH64-DARWIN: #define __INT16_FMTi__ "hi" @@ -651,7 +651,7 @@ // AARCH64-MSVC: #define __FLT_MIN_EXP__ (-125) // AARCH64-MSVC: #define __FLT_MIN__ 1.17549435e-38F // AARCH64-MSVC: #define __FLT_RADIX__ 2 -// AARCH64-MSVC: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202420 +// AARCH64-MSVC: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202430 // AARCH64-MSVC: #define __INT_MAX__ 2147483647 // AARCH64-MSVC: #define __LDBL_DECIMAL_DIG__ 17 // AARCH64-MSVC: #define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L @@ -859,7 +859,7 @@ // ARM64EC-MSVC: #define __FPCLASS_SNAN 0x0001 // ARM64EC-MSVC: #define __FP_FAST_FMA 1 // ARM64EC-MSVC: 
#define __FP_FAST_FMAF 1 -// ARM64EC-MSVC: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202420 +// ARM64EC-MSVC: #define __FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL 202430 // ARM64EC-MSVC: #define __GCC_ASM_FLAG_OUTPUTS__ 1 // ARM64EC-MSVC: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 1 // ARM64EC-MSVC: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 1 diff --git a/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_imm_lane.cpp b/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_imm_lane.cpp index e405077b3de93..9be83f442de5d 100644 --- a/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_imm_lane.cpp +++ b/clang/test/Sema/aarch64-sve2-intrinsics/acle_sve2_imm_lane.cpp @@ -5,9 +5,9 @@ #ifdef SVE_OVERLOADED_FORMS // A simple used,unused... macro, long enough to represent any SVE builtin. -#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#define SVE_ACLE_FUNC(A1, A2_UNUSED) A1 #else -#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#define SVE_ACLE_FUNC(A1,A2) A1##A2 #endif #include @@ -15,201 +15,201 @@ void test_range_0_7() { // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmla_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), 8); + SVE_ACLE_FUNC(svmla_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmla_lane,_u16,,)(svundef_u16(), svundef_u16(), svundef_u16(), -1); + SVE_ACLE_FUNC(svmla_lane,_u16)(svundef_u16(), svundef_u16(), svundef_u16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalb_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmlalb_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalb_lane,_u32,,)(svundef_u32(), svundef_u16(), svundef_u16(), 8); + 
SVE_ACLE_FUNC(svmlalb_lane,_u32)(svundef_u32(), svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalb_lane,_f32,,)(svundef_f32(), svundef_f16(), svundef_f16(), -1); + SVE_ACLE_FUNC(svmlalb_lane,_f32)(svundef_f32(), svundef_f16(), svundef_f16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalt_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmlalt_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalt_lane,_u32,,)(svundef_u32(), svundef_u16(), svundef_u16(), 8); + SVE_ACLE_FUNC(svmlalt_lane,_u32)(svundef_u32(), svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlalt_lane,_f32,,)(svundef_f32(), svundef_f16(), svundef_f16(), -1); + SVE_ACLE_FUNC(svmlalt_lane,_f32)(svundef_f32(), svundef_f16(), svundef_f16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmls_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmls_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmls_lane,_u16,,)(svundef_u16(), svundef_u16(), svundef_u16(), 8); + SVE_ACLE_FUNC(svmls_lane,_u16)(svundef_u16(), svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlslb_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmlslb_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - 
SVE_ACLE_FUNC(svmlslb_lane,_u32,,)(svundef_u32(), svundef_u16(), svundef_u16(), 8); + SVE_ACLE_FUNC(svmlslb_lane,_u32)(svundef_u32(), svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlslb_lane,_f32,,)(svundef_f32(), svundef_f16(), svundef_f16(), -1); + SVE_ACLE_FUNC(svmlslb_lane,_f32)(svundef_f32(), svundef_f16(), svundef_f16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmlslt_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(svundef_u32(), svundef_u16(), svundef_u16(), 8); + SVE_ACLE_FUNC(svmlslt_lane,_u32)(svundef_u32(), svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmlslt_lane,_f32,,)(svundef_f32(), svundef_f16(), svundef_f16(), -1); + SVE_ACLE_FUNC(svmlslt_lane,_f32)(svundef_f32(), svundef_f16(), svundef_f16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmul_lane,_s16,,)(svundef_s16(), svundef_s16(), 8); + SVE_ACLE_FUNC(svmul_lane,_s16)(svundef_s16(), svundef_s16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmul_lane,_u16,,)(svundef_u16(), svundef_u16(), -1); + SVE_ACLE_FUNC(svmul_lane,_u16)(svundef_u16(), svundef_u16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmullb_lane,_s32,,)(svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svmullb_lane,_s32)(svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - 
SVE_ACLE_FUNC(svmullb_lane,_u32,,)(svundef_u16(), svundef_u16(), 8); + SVE_ACLE_FUNC(svmullb_lane,_u32)(svundef_u16(), svundef_u16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmullt_lane,_s32,,)(svundef_s16(), svundef_s16(), 8); + SVE_ACLE_FUNC(svmullt_lane,_s32)(svundef_s16(), svundef_s16(), 8); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svmullt_lane,_u32,,)(svundef_u16(), svundef_u16(), -1); + SVE_ACLE_FUNC(svmullt_lane,_u32)(svundef_u16(), svundef_u16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmlalb_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmlalt_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmulh_lane,_s16,,)(svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmulh_lane,_s16)(svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmlslb_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(svundef_s32(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmlslt_lane,_s32)(svundef_s32(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - 
SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmullb_lane,_s32)(svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqdmullt_lane,_s32)(svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqrdmlah_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqrdmlah_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqrdmlsh_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqrdmlsh_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svqrdmulh_lane,_s16,,)(svundef_s16(), svundef_s16(), -1); + SVE_ACLE_FUNC(svqrdmulh_lane,_s16)(svundef_s16(), svundef_s16(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svluti2_lane,_s16,,)(svundef_s16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti2_lane,_s16)(svundef_s16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svluti2_lane,_u16,,)(svundef_u16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti2_lane,_u16)(svundef_u16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svluti2_lane,_f16,,)(svundef_f16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti2_lane,_f16)(svundef_f16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} - SVE_ACLE_FUNC(svluti2_lane,_bf16,,)(svundef_bf16(), svundef_u8(), -1); + 
SVE_ACLE_FUNC(svluti2_lane,_bf16)(svundef_bf16(), svundef_u8(), -1); } void test_range_0_3() { // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svcdot_lane,_s32,,)(svundef_s32(), svundef_s8(), svundef_s8(), -1, 0); + SVE_ACLE_FUNC(svcdot_lane,_s32)(svundef_s32(), svundef_s8(), svundef_s8(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svcmla_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), -1, 0); + SVE_ACLE_FUNC(svcmla_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svcmla_lane,_u16,,)(svundef_u16(), svundef_u16(), svundef_u16(), -1, 0); + SVE_ACLE_FUNC(svcmla_lane,_u16)(svundef_u16(), svundef_u16(), svundef_u16(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmla_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmla_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmla_lane,_u32,,)(svundef_u32(), svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmla_lane,_u32)(svundef_u32(), svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlalb_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmlalb_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlalb_lane,_u64,,)(svundef_u64(), svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmlalb_lane,_u64)(svundef_u64(), svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} 
- SVE_ACLE_FUNC(svmlalt_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmlalt_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlalt_lane,_u64,,)(svundef_u64(), svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmlalt_lane,_u64)(svundef_u64(), svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmls_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svmls_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmls_lane,_u32,,)(svundef_u32(), svundef_u32(), svundef_u32(), -1); + SVE_ACLE_FUNC(svmls_lane,_u32)(svundef_u32(), svundef_u32(), svundef_u32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlslb_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmlslb_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlslb_lane,_u64,,)(svundef_u64(), svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmlslb_lane,_u64)(svundef_u64(), svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmlslt_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(svundef_u64(), svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmlslt_lane,_u64)(svundef_u64(), svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 
{{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmul_lane,_s32,,)(svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmul_lane,_s32)(svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmul_lane,_u32,,)(svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmul_lane,_u32)(svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmullb_lane,_s64,,)(svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svmullb_lane,_s64)(svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmullb_lane,_u64,,)(svundef_u32(), svundef_u32(), -1); + SVE_ACLE_FUNC(svmullb_lane,_u64)(svundef_u32(), svundef_u32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmullt_lane,_s64,,)(svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svmullt_lane,_s64)(svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svmullt_lane,_u64,,)(svundef_u32(), svundef_u32(), 4); + SVE_ACLE_FUNC(svmullt_lane,_u64)(svundef_u32(), svundef_u32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmulh_lane,_s32,,)(svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmulh_lane,_s32)(svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqrdcmlah_lane,_s16,,)(svundef_s16(), svundef_s16(), svundef_s16(), -1, 0); + SVE_ACLE_FUNC(svqrdcmlah_lane,_s16)(svundef_s16(), svundef_s16(), svundef_s16(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqrdmlah_lane,_s32,,)(svundef_s32(), 
svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqrdmlah_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmlalb_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmlalt_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqrdmlsh_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqrdmlsh_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmlslb_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(svundef_s64(), svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmlslt_lane,_s64)(svundef_s64(), svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqrdmulh_lane,_s32,,)(svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqrdmulh_lane,_s32)(svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(svundef_s32(), svundef_s32(), 4); + SVE_ACLE_FUNC(svqdmullb_lane,_s64)(svundef_s32(), svundef_s32(), 4); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - 
SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(svundef_s32(), svundef_s32(), -1); + SVE_ACLE_FUNC(svqdmullt_lane,_s64)(svundef_s32(), svundef_s32(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti2_lane,_s8,,)(svundef_s8(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti2_lane,_s8)(svundef_s8(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti2_lane,_u8,,)(svundef_u8(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti2_lane,_u8)(svundef_u8(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_s16,,)(svundef_s16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_s16)(svundef_s16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_u16,,)(svundef_u16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_u16)(svundef_u16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_f16,,)(svundef_f16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_f16)(svundef_f16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_bf16,,)(svundef_bf16(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_bf16)(svundef_bf16(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_s16,_x2,)(svcreate2_s16(svundef_s16(),svundef_s16()), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_s16_x2)(svcreate2_s16(svundef_s16(),svundef_s16()), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_u16,_x2,)(svcreate2_u16(svundef_u16(),svundef_u16()), svundef_u8(), -1); + 
SVE_ACLE_FUNC(svluti4_lane,_u16_x2)(svcreate2_u16(svundef_u16(),svundef_u16()), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_f16,_x2,)(svcreate2_f16(svundef_f16(),svundef_f16()), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_f16_x2)(svcreate2_f16(svundef_f16(),svundef_f16()), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} - SVE_ACLE_FUNC(svluti4_lane,_bf16,_x2,)(svcreate2_bf16(svundef_bf16(),svundef_bf16()), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_bf16_x2)(svcreate2_bf16(svundef_bf16(),svundef_bf16()), svundef_u8(), -1); } void test_range_0_1() { // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svcdot_lane,_s64,,)(svundef_s64(), svundef_s16(), svundef_s16(), -1, 0); + SVE_ACLE_FUNC(svcdot_lane,_s64)(svundef_s64(), svundef_s16(), svundef_s16(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svcmla_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), -1, 0); + SVE_ACLE_FUNC(svcmla_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svcmla_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), 2, 0); + SVE_ACLE_FUNC(svcmla_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), 2, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svcmla_lane,_u32,,)(svundef_u32(), svundef_u32(), svundef_u32(), -1, 0); + SVE_ACLE_FUNC(svcmla_lane,_u32)(svundef_u32(), svundef_u32(), svundef_u32(), -1, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmla_lane,_s64,,)(svundef_s64(), svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svmla_lane,_s64)(svundef_s64(), 
svundef_s64(), svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmla_lane,_u64,,)(svundef_u64(), svundef_u64(), svundef_u64(), -1); + SVE_ACLE_FUNC(svmla_lane,_u64)(svundef_u64(), svundef_u64(), svundef_u64(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmls_lane,_s64,,)(svundef_s64(), svundef_s64(), svundef_s64(), -1); + SVE_ACLE_FUNC(svmls_lane,_s64)(svundef_s64(), svundef_s64(), svundef_s64(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmls_lane,_u64,,)(svundef_u64(), svundef_u64(), svundef_u64(), 2); + SVE_ACLE_FUNC(svmls_lane,_u64)(svundef_u64(), svundef_u64(), svundef_u64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmul_lane,_s64,,)(svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svmul_lane,_s64)(svundef_s64(), svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svmul_lane,_u64,,)(svundef_u64(), svundef_u64(), -1); + SVE_ACLE_FUNC(svmul_lane,_u64)(svundef_u64(), svundef_u64(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svqdmulh_lane,_s64,,)(svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svqdmulh_lane,_s64)(svundef_s64(), svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svqrdcmlah_lane,_s32,,)(svundef_s32(), svundef_s32(), svundef_s32(), 2, 0); + SVE_ACLE_FUNC(svqrdcmlah_lane,_s32)(svundef_s32(), svundef_s32(), svundef_s32(), 2, 0); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svqrdmlah_lane,_s64,,)(svundef_s64(), svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svqrdmlah_lane,_s64)(svundef_s64(), svundef_s64(), 
svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svqrdmlsh_lane,_s64,,)(svundef_s64(), svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svqrdmlsh_lane,_s64)(svundef_s64(), svundef_s64(), svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svqrdmulh_lane,_s64,,)(svundef_s64(), svundef_s64(), 2); + SVE_ACLE_FUNC(svqrdmulh_lane,_s64)(svundef_s64(), svundef_s64(), 2); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svluti4_lane,_s8,,)(svundef_s8(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_s8)(svundef_s8(), svundef_u8(), -1); // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} - SVE_ACLE_FUNC(svluti4_lane,_u8,,)(svundef_u8(), svundef_u8(), -1); + SVE_ACLE_FUNC(svluti4_lane,_u8)(svundef_u8(), svundef_u8(), -1); } diff --git a/clang/test/SemaCXX/constructor.cpp b/clang/test/SemaCXX/constructor.cpp index abd7dbe18a0e6..b069d55118f56 100644 --- a/clang/test/SemaCXX/constructor.cpp +++ b/clang/test/SemaCXX/constructor.cpp @@ -96,3 +96,80 @@ namespace PR38286 { template struct C; // expected-note {{non-type declaration found}} template C::~C() {} // expected-error {{identifier 'C' after '~' in destructor name does not name a type}} } + +namespace GH121706 { + +struct A { + *&A(); // expected-error {{invalid constructor declaration}} +}; + +struct B { + *&&B(); // expected-error {{invalid constructor declaration}} +}; + +struct C { + *const C(); // expected-error {{invalid constructor declaration}} +}; + +struct D { + *const *D(); // expected-error {{invalid constructor declaration}} +}; + +struct E { + *E::*E(); // expected-error {{invalid constructor declaration}} +}; + +struct F { + *F::*const F(); // expected-error {{invalid constructor declaration}} +}; + +struct G { + ****G(); // expected-error {{invalid constructor declaration}} 
+}; + +struct H { + **H(const H &); // expected-error {{invalid constructor declaration}} +}; + +struct I { + *I(I &&); // expected-error {{invalid constructor declaration}} +}; + +struct J { + *&(J)(); // expected-error {{invalid constructor declaration}} +}; + +struct K { + **&&(K)(); // expected-error {{invalid constructor declaration}} +}; + +struct L { + *L(L&& other); // expected-error {{invalid constructor declaration}} +}; + +struct M { + *M(M& other); // expected-error {{invalid constructor declaration}} +}; + +struct N { + int N(); // expected-error {{constructor cannot have a return type}} +}; + +struct O { + static O(); // expected-error {{constructor cannot be declared 'static'}} +}; + +struct P { + explicit P(); +}; + +struct Q { + constexpr Q(); +}; + +struct R { + R(); + friend R::R(); +}; + +} diff --git a/clang/test/SemaCXX/conversion-function.cpp b/clang/test/SemaCXX/conversion-function.cpp index 749e2fc1b452b..b653a3bf1a1d2 100644 --- a/clang/test/SemaCXX/conversion-function.cpp +++ b/clang/test/SemaCXX/conversion-function.cpp @@ -494,3 +494,10 @@ using Result = B::Lookup; using Result = int (A2::*)(); } #endif + +namespace GH121706 { +struct S { + *operator int(); // expected-error {{cannot specify any part of a return type in the declaration of a conversion function; put the complete type after 'operator'}} + **operator char(); // expected-error {{cannot specify any part of a return type in the declaration of a conversion function; put the complete type after 'operator'}} +}; +} diff --git a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp index 58b642d2735b6..202a819655217 100644 --- a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp +++ b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp @@ -321,3 +321,26 @@ namespace GH121242 { (void)z; } } // namespace GH121242 + +namespace GH123033 { + template + requires __is_same_as(Types...[0], int) + void print(double d); + + template + requires __is_same_as(Types...[0], int) 
+ void print(double d); + + template + Types...[0] convert(double d); + + template + Types...[0] convert(double d) { + return static_cast(d); + } + + void f() { + print(12.34); + convert(12.34); + } +} diff --git a/clang/test/SemaCXX/destructor.cpp b/clang/test/SemaCXX/destructor.cpp index dfcd1b033af5a..589616ef8e437 100644 --- a/clang/test/SemaCXX/destructor.cpp +++ b/clang/test/SemaCXX/destructor.cpp @@ -586,4 +586,50 @@ struct Y : X {} y1{ }; // expected-error {{call to implicitly-deleted default co // expected-note {{default constructor of 'Y' is implicitly deleted because base class 'X' has no destructor}} } +namespace GH121706 { +struct A { + *&~A(); // expected-error {{invalid destructor declaration}} +}; + +struct B { + *&&~B(); // expected-error {{invalid destructor declaration}} +}; + +struct C { + *const ~C(); // expected-error {{invalid destructor declaration}} +}; + +struct D { + *const * ~D(); // expected-error {{invalid destructor declaration}} +}; + +struct E { + *E::*~E(); // expected-error {{invalid destructor declaration}} +}; + +struct F { + *F::*const ~F(); // expected-error {{invalid destructor declaration}} +}; + +struct G { + ****~G(); // expected-error {{invalid destructor declaration}} +}; + +struct H { + **~H(); // expected-error {{invalid destructor declaration}} +}; + +struct I { + *~I(); // expected-error {{invalid destructor declaration}} +}; + +struct J { + *&~J(); // expected-error {{invalid destructor declaration}} +}; + +struct K { + **&&~K(); // expected-error {{invalid destructor declaration}} +}; +} + #endif // BE_THE_HEADER diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp index e80b54b7c6967..7dd6c83dbba2a 100644 --- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp +++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-array.cpp @@ -92,35 +92,3 @@ char access_strings() { c = array_string[5]; return c; } - -struct T { - int array[10]; -}; - -const int 
index = 1; - -constexpr int get_const(int x) { - if(x < 3) - return ++x; - else - return x + 5; -}; - -void array_indexed_const_expr(unsigned idx) { - // expected-note@+2 {{change type of 'arr' to 'std::array' to label it for hardening}} - // expected-warning@+1{{'arr' is an unsafe buffer that does not perform bounds checks}} - int arr[10]; - arr[sizeof(int)] = 5; - - int array[sizeof(T)]; - array[sizeof(int)] = 5; - array[sizeof(T) -1 ] = 3; - - int k = arr[6 & 5]; - k = arr[2 << index]; - k = arr[8 << index]; // expected-note {{used in buffer access here}} - k = arr[16 >> 1]; - k = arr[get_const(index)]; - k = arr[get_const(5)]; // expected-note {{used in buffer access here}} - k = arr[get_const(4)]; -} diff --git a/clang/test/SemaHLSL/BuiltIns/WaveActiveSum-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/WaveActiveSum-errors.hlsl new file mode 100644 index 0000000000000..406e8fc57ca95 --- /dev/null +++ b/clang/test/SemaHLSL/BuiltIns/WaveActiveSum-errors.hlsl @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -emit-llvm-only -disable-llvm-passes -verify + +int test_too_few_arg() { + return __builtin_hlsl_wave_active_sum(); + // expected-error@-1 {{too few arguments to function call, expected 1, have 0}} +} + +float2 test_too_many_arg(float2 p0) { + return __builtin_hlsl_wave_active_sum(p0, p0); + // expected-error@-1 {{too many arguments to function call, expected 1, have 2}} +} + +bool test_expr_bool_type_check(bool p0) { + return __builtin_hlsl_wave_active_sum(p0); + // expected-error@-1 {{invalid operand of type 'bool'}} +} + +bool2 test_expr_bool_vec_type_check(bool2 p0) { + return __builtin_hlsl_wave_active_sum(p0); + // expected-error@-1 {{invalid operand of type 'bool2' (aka 'vector')}} +} + +struct S { float f; }; + +S test_expr_struct_type_check(S p0) { + return __builtin_hlsl_wave_active_sum(p0); + // expected-error@-1 {{invalid operand of type 'S' where a scalar or vector is required}} +} diff --git 
a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp index d03c783313dd7..39250f0617f4b 100644 --- a/clang/test/SemaTemplate/deduction-guide.cpp +++ b/clang/test/SemaTemplate/deduction-guide.cpp @@ -478,3 +478,53 @@ A a{.f1 = {1}}; // CHECK-NEXT: `-DeclRefExpr {{.+}} 'int' NonTypeTemplateParm {{.+}} 'N' 'int' } // namespace GH83368 + +namespace GH122134 { + +template +concept Constraint = true; + +template struct Struct { + Struct(Constraint auto) {} +}; + +template using Test = Struct; + +Test test(42); + +// CHECK-LABEL: Dumping GH122134::: +// CHECK-NEXT: FunctionTemplateDecl {{.*}} implicit +// CHECK-NEXT: |-NonTypeTemplateParmDecl {{.*}} 'int' depth 0 index 0 N +// CHECK-NEXT: | `-TemplateArgument {{.*}} expr '0' +// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 0 +// CHECK-NEXT: |-TemplateTypeParmDecl {{.*}} Concept {{.*}} 'Constraint' depth 0 index 1 auto:1 +// CHECK-NEXT: | `-ConceptSpecializationExpr {{.*}} 'bool' Concept {{.*}} 'Constraint' +// CHECK-NEXT: | |-ImplicitConceptSpecializationDecl {{.*}} +// CHECK-NEXT: | | |-TemplateArgument type 'type-parameter-0-1' +// CHECK-NEXT: | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-1' dependent depth 0 index 1 +// CHECK-NEXT: | | `-TemplateArgument type 'int' +// CHECK-NEXT: | | `-BuiltinType {{.*}} 'int' +// CHECK-NEXT: | |-TemplateArgument {{.*}} type 'auto:1':'type-parameter-0-1' +// CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'auto:1' dependent depth 0 index 1 +// CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'auto:1' +// CHECK-NEXT: | `-TemplateArgument {{.*}} type 'int' +// CHECK-NEXT: | `-BuiltinType {{.*}} 'int' +// CHECK-NEXT: |-TypeTraitExpr {{.*}} 'bool' __is_deducible +// CHECK-NEXT: | |-DeducedTemplateSpecializationType {{.*}} 'GH122134::Test' dependent +// CHECK-NEXT: | | `-name: 'GH122134::Test' +// CHECK-NEXT: | | `-TypeAliasTemplateDecl {{.*}} Test +// CHECK-NEXT: | `-TemplateSpecializationType {{.*}} 'Struct' dependent +// CHECK-NEXT: | |-name: 
'Struct':'GH122134::Struct' qualified +// CHECK-NEXT: | | `-ClassTemplateDecl {{.*}} Struct +// CHECK-NEXT: | |-TemplateArgument type 'int' +// CHECK-NEXT: | | `-SubstTemplateTypeParmType {{.*}} 'int' sugar class depth 0 index 0 T +// CHECK-NEXT: | | |-FunctionTemplate {{.*}} '' +// CHECK-NEXT: | | `-BuiltinType {{.*}} 'int' +// CHECK-NEXT: | `-TemplateArgument expr 'N' +// CHECK-NEXT: | `-SubstNonTypeTemplateParmExpr {{.*}} 'int' +// CHECK-NEXT: | |-NonTypeTemplateParmDecl {{.*}} 'int' depth 0 index 1 +// CHECK-NEXT: | `-DeclRefExpr {{.*}} 'int' NonTypeTemplateParm {{.*}} 'N' 'int' +// CHECK-NEXT: |-CXXDeductionGuideDecl {{.*}} implicit 'auto (auto:1) -> Struct' +// CHECK-NEXT: | `-ParmVarDecl {{.*}} 'auto:1' + +} // namespace GH122134 diff --git a/clang/unittests/AST/ExternalASTSourceTest.cpp b/clang/unittests/AST/ExternalASTSourceTest.cpp index 8e1bde1247f66..512f21e8efff4 100644 --- a/clang/unittests/AST/ExternalASTSourceTest.cpp +++ b/clang/unittests/AST/ExternalASTSourceTest.cpp @@ -67,8 +67,9 @@ TEST(ExternalASTSourceTest, FailedLookupOccursOnce) { struct TestSource : ExternalASTSource { TestSource(unsigned &Calls) : Calls(Calls) {} - bool FindExternalVisibleDeclsByName(const DeclContext *, - DeclarationName Name) override { + bool + FindExternalVisibleDeclsByName(const DeclContext *, DeclarationName Name, + const DeclContext *OriginalDC) override { if (Name.getAsString() == "j") ++Calls; return false; diff --git a/clang/unittests/Lex/LexerTest.cpp b/clang/unittests/Lex/LexerTest.cpp index aead7fb899d0a..c897998cabe66 100644 --- a/clang/unittests/Lex/LexerTest.cpp +++ b/clang/unittests/Lex/LexerTest.cpp @@ -603,6 +603,7 @@ TEST_F(LexerTest, CharRangeOffByOne) { TEST_F(LexerTest, FindNextToken) { Lex("int abcd = 0;\n" + "// A comment.\n" "int xyz = abcd;\n"); std::vector GeneratedByNextToken; SourceLocation Loc = @@ -619,6 +620,26 @@ TEST_F(LexerTest, FindNextToken) { "xyz", "=", "abcd", ";")); } +TEST_F(LexerTest, FindNextTokenIncludingComments) { + Lex("int 
abcd = 0;\n" + "// A comment.\n" + "int xyz = abcd;\n"); + std::vector GeneratedByNextToken; + SourceLocation Loc = + SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); + while (true) { + auto T = Lexer::findNextToken(Loc, SourceMgr, LangOpts, true); + ASSERT_TRUE(T); + if (T->is(tok::eof)) + break; + GeneratedByNextToken.push_back(getSourceText(*T, *T)); + Loc = T->getLocation(); + } + EXPECT_THAT(GeneratedByNextToken, + ElementsAre("abcd", "=", "0", ";", "// A comment.", "int", "xyz", + "=", "abcd", ";")); +} + TEST_F(LexerTest, CreatedFIDCountForPredefinedBuffer) { TrivialModuleLoader ModLoader; auto PP = CreatePP("", ModLoader); diff --git a/flang/cmake/modules/AddFlang.cmake b/flang/cmake/modules/AddFlang.cmake index 9ed1a3050b7e8..1f178772067ed 100644 --- a/flang/cmake/modules/AddFlang.cmake +++ b/flang/cmake/modules/AddFlang.cmake @@ -18,7 +18,7 @@ endmacro() function(add_flang_library name) set(options SHARED STATIC INSTALL_WITH_TOOLCHAIN) - set(multiValueArgs ADDITIONAL_HEADERS CLANG_LIBS) + set(multiValueArgs ADDITIONAL_HEADERS CLANG_LIBS MLIR_LIBS) cmake_parse_arguments(ARG "${options}" "" @@ -66,6 +66,9 @@ function(add_flang_library name) llvm_add_library(${name} ${LIBTYPE} ${ARG_UNPARSED_ARGUMENTS} ${srcs}) clang_target_link_libraries(${name} PRIVATE ${ARG_CLANG_LIBS}) + if (ARG_MLIR_LIBS) + mlir_target_link_libraries(${name} PRIVATE ${ARG_MLIR_LIBS}) + endif() if (TARGET ${name}) diff --git a/flang/docs/OpenACC-descriptor-management.md b/flang/docs/OpenACC-descriptor-management.md index 0b5103000d8e7..52d00ae4daef8 100644 --- a/flang/docs/OpenACC-descriptor-management.md +++ b/flang/docs/OpenACC-descriptor-management.md @@ -348,7 +348,7 @@ acc.attach.recipe @attach_ref : %offset : index, %size : index): fir.call _FortranAOpenACCAttachDescriptor(%aug_ptr, %base_addr_val, %offset, %size) : - (!fir.ref, !fir.ref>, index, index) -> none + (!fir.ref, !fir.ref>, index, index) -> () acc.yield } diff --git a/flang/docs/ParameterizedDerivedTypes.md 
b/flang/docs/ParameterizedDerivedTypes.md index 851775b123b43..0ed9f8b494e78 100644 --- a/flang/docs/ParameterizedDerivedTypes.md +++ b/flang/docs/ParameterizedDerivedTypes.md @@ -435,16 +435,16 @@ allocate(t1(2)::p) **FIR** ``` // For allocatable -%5 = fir.call @_FortranAAllocatableInitDerived(%desc, %type) : (!fir.box, ) -> () +fir.call @_FortranAAllocatableInitDerived(%desc, %type) : (!fir.box, ) -> () // The AllocatableSetDerivedLength functions is called for each length type parameters. -%6 = fir.call @_FortranAAllocatableSetDerivedLength(%desc, %pos, %value) : (!fir.box, i32, i64) -> () -%7 = fir.call @_FortranAAllocatableAllocate(%3) : (!fir.box) -> () +fir.call @_FortranAAllocatableSetDerivedLength(%desc, %pos, %value) : (!fir.box, i32, i64) -> () +fir.call @_FortranAAllocatableAllocate(%3) : (!fir.box) -> () // For pointer -%5 = fir.call @_FortranAPointerNullifyDerived(%desc, %type) : (!fir.box, ) -> () +fir.call @_FortranAPointerNullifyDerived(%desc, %type) : (!fir.box, ) -> () // The PointerSetDerivedLength functions is called for each length type parameters. 
-%6 = fir.call @_FortranAPointerSetDerivedLength(%desc, %pos, %value) : (!fir.box, i32, i64) -> () -%7 = fir.call @_FortranAPointerAllocate(%3) : (!fir.box) -> () +fir.call @_FortranAPointerSetDerivedLength(%desc, %pos, %value) : (!fir.box, i32, i64) -> () +fir.call @_FortranAPointerAllocate(%3) : (!fir.box) -> () ``` `DEALLOCATE` @@ -478,7 +478,7 @@ NULLIFY(p) **FIR** ``` -%0 = fir.call @_FortranAPointerNullifyDerived(%desc, %type) : (!fir.box, !fir.tdesc) -> () +fir.call @_FortranAPointerNullifyDerived(%desc, %type) : (!fir.box, !fir.tdesc) -> () ``` #### Formatted I/O @@ -518,7 +518,7 @@ func.func @_QMpdtPprint_pdt() { %c8_i32 = arith.constant 8 : i32 %3 = fir.convert %1 : (!fir.box}>>) -> !fir.box %4 = fir.convert %2 : (!fir.ref>) -> !fir.ref - %5 = fir.call @_FortranAInitialize(%3, %4, %c8_i32) : (!fir.box, !fir.ref, i32) -> none + fir.call @_FortranAInitialize(%3, %4, %c8_i32) : (!fir.box, !fir.ref, i32) -> () %c-1_i32 = arith.constant -1 : i32 %6 = fir.address_of(@_QQcl.2E2F6669725F7064745F6578616D706C652E66393000) : !fir.ref> %7 = fir.convert %6 : (!fir.ref>) -> !fir.ref @@ -882,7 +882,7 @@ func.func @_QMpdt_initPlocal() { %c8_i32 = arith.constant 8 : i32 %3 = fir.convert %1 : (!fir.box}>>) -> !fir.box %4 = fir.convert %2 : (!fir.ref>) -> !fir.ref - %5 = fir.call @_FortranAInitialize(%3, %4, %c8_i32) : (!fir.box, !fir.ref, i32) -> none + fir.call @_FortranAInitialize(%3, %4, %c8_i32) : (!fir.box, !fir.ref, i32) -> () return } ``` diff --git a/flang/docs/PolymorphicEntities.md b/flang/docs/PolymorphicEntities.md index befcc53127a4a..6583068508584 100644 --- a/flang/docs/PolymorphicEntities.md +++ b/flang/docs/PolymorphicEntities.md @@ -609,7 +609,7 @@ finalization with a call the the `@_FortranADestroy` function **FIR** ``` -%5 = fir.call @_FortranADestroy(%desc) : (!fir.box) -> none +fir.call @_FortranADestroy(%desc) : (!fir.box) -> () ``` The `@_FortranADestroy` function will take care to call the final subroutines diff --git 
a/flang/include/flang/Lower/AbstractConverter.h b/flang/include/flang/Lower/AbstractConverter.h index 607aff41f6459..c24f43737df50 100644 --- a/flang/include/flang/Lower/AbstractConverter.h +++ b/flang/include/flang/Lower/AbstractConverter.h @@ -130,9 +130,18 @@ class AbstractConverter { virtual void createHostAssociateVarCloneDealloc(const Fortran::semantics::Symbol &sym) = 0; - virtual void copyHostAssociateVar( - const Fortran::semantics::Symbol &sym, - mlir::OpBuilder::InsertPoint *copyAssignIP = nullptr) = 0; + /// For a host-associated symbol (a symbol associated with another symbol from + /// an enclosing scope), either: + /// + /// * if \p hostIsSource == true: copy \p sym's value *from* its corresponding + /// host symbol, + /// + /// * if \p hostIsSource == false: copy \p sym's value *to* its corresponding + /// host symbol. + virtual void + copyHostAssociateVar(const Fortran::semantics::Symbol &sym, + mlir::OpBuilder::InsertPoint *copyAssignIP = nullptr, + bool hostIsSource = true) = 0; virtual void copyVar(mlir::Location loc, mlir::Value dst, mlir::Value src, fir::FortranVariableFlagsEnum attrs) = 0; diff --git a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h index 09b49b95fefe5..eaa1de76154d9 100644 --- a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h +++ b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h @@ -320,7 +320,7 @@ constexpr TypeBuilderFunc getModel() { template <> constexpr TypeBuilderFunc getModel() { return [](mlir::MLIRContext *context) -> mlir::Type { - return mlir::FloatType::getF64(context); + return mlir::Float64Type::get(context); }; } template <> @@ -347,11 +347,11 @@ constexpr TypeBuilderFunc getModel() { static_assert(size == 16 || size == 10 || size == 8, "unsupported long double size"); if constexpr (size == 16) - return mlir::FloatType::getF128(context); + return mlir::Float128Type::get(context); if constexpr (size == 10) - return 
mlir::FloatType::getF80(context); + return mlir::Float80Type::get(context); if constexpr (size == 8) - return mlir::FloatType::getF64(context); + return mlir::Float64Type::get(context); llvm_unreachable("failed static assert"); }; } @@ -369,7 +369,7 @@ constexpr TypeBuilderFunc getModel() { template <> constexpr TypeBuilderFunc getModel() { return [](mlir::MLIRContext *context) -> mlir::Type { - return mlir::FloatType::getF32(context); + return mlir::Float32Type::get(context); }; } template <> @@ -674,6 +674,8 @@ struct RuntimeTableKey { llvm::SmallVector argTys; for (auto f : args) argTys.push_back(f(ctxt)); + if (mlir::isa(retTy)) + return mlir::FunctionType::get(ctxt, argTys, {}); return mlir::FunctionType::get(ctxt, argTys, {retTy}); }; } diff --git a/flang/include/flang/Optimizer/Dialect/FIRType.h b/flang/include/flang/Optimizer/Dialect/FIRType.h index ac638d98980d1..78257ab703086 100644 --- a/flang/include/flang/Optimizer/Dialect/FIRType.h +++ b/flang/include/flang/Optimizer/Dialect/FIRType.h @@ -439,7 +439,7 @@ inline mlir::Type wrapInClassOrBoxType(mlir::Type eleTy, /// Return the elementType where intrinsic types are replaced with none for /// unlimited polymorphic entities. 
/// -/// i32 -> none +/// i32 -> () /// !fir.array<2xf32> -> !fir.array<2xnone> /// !fir.heap> -> !fir.heap> inline mlir::Type updateTypeForUnlimitedPolymorphic(mlir::Type ty) { diff --git a/flang/lib/Common/CMakeLists.txt b/flang/lib/Common/CMakeLists.txt index de6bea396f3cb..4b5df0a49f403 100644 --- a/flang/lib/Common/CMakeLists.txt +++ b/flang/lib/Common/CMakeLists.txt @@ -47,6 +47,6 @@ add_flang_library(FortranCommon LINK_COMPONENTS Support - LINK_LIBS + MLIR_LIBS MLIRIR ) diff --git a/flang/lib/Frontend/CMakeLists.txt b/flang/lib/Frontend/CMakeLists.txt index 0a0482505b747..d063ed36d00b4 100644 --- a/flang/lib/Frontend/CMakeLists.txt +++ b/flang/lib/Frontend/CMakeLists.txt @@ -41,13 +41,6 @@ add_flang_library(flangFrontend flangPasses FIROpenACCSupport FlangOpenMPTransforms - MLIRTransforms - MLIRBuiltinToLLVMIRTranslation - MLIRLLVMToLLVMIRTranslation - MLIRSCFToControlFlow - MLIRTargetLLVMIRImport - ${dialect_libs} - ${extension_libs} LINK_COMPONENTS Passes @@ -63,6 +56,15 @@ add_flang_library(flangFrontend FrontendOpenACC FrontendOpenMP + MLIR_LIBS + MLIRTransforms + MLIRBuiltinToLLVMIRTranslation + MLIRLLVMToLLVMIRTranslation + MLIRSCFToControlFlow + MLIRTargetLLVMIRImport + ${dialect_libs} + ${extension_libs} + CLANG_LIBS clangBasic clangDriver diff --git a/flang/lib/FrontendTool/CMakeLists.txt b/flang/lib/FrontendTool/CMakeLists.txt index 2acaffbe50380..faf56e9d955a1 100644 --- a/flang/lib/FrontendTool/CMakeLists.txt +++ b/flang/lib/FrontendTool/CMakeLists.txt @@ -8,12 +8,14 @@ add_flang_library(flangFrontendTool LINK_LIBS flangFrontend - MLIRPass LINK_COMPONENTS Option Support + MLIR_LIBS + MLIRPass + CLANG_LIBS clangBasic clangDriver diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 37f51d74d23f8..700ca56141a32 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -891,9 +891,10 @@ class FirConverter : public Fortran::lower::AbstractConverter { isPointer, Fortran::semantics::Symbol::Flags()); } - void 
copyHostAssociateVar( - const Fortran::semantics::Symbol &sym, - mlir::OpBuilder::InsertPoint *copyAssignIP = nullptr) override final { + void + copyHostAssociateVar(const Fortran::semantics::Symbol &sym, + mlir::OpBuilder::InsertPoint *copyAssignIP = nullptr, + bool hostIsSource = true) override final { // 1) Fetch the original copy of the variable. assert(sym.has() && "No host-association found"); @@ -908,16 +909,14 @@ class FirConverter : public Fortran::lower::AbstractConverter { "Host and associated symbol boxes are the same"); // 3) Perform the assignment. - mlir::OpBuilder::InsertPoint insPt = builder->saveInsertionPoint(); + mlir::OpBuilder::InsertionGuard guard(*builder); if (copyAssignIP && copyAssignIP->isSet()) builder->restoreInsertionPoint(*copyAssignIP); else builder->setInsertionPointAfter(sb.getAddr().getDefiningOp()); Fortran::lower::SymbolBox *lhs_sb, *rhs_sb; - if (copyAssignIP && copyAssignIP->isSet() && - sym.test(Fortran::semantics::Symbol::Flag::OmpLastPrivate)) { - // lastprivate case + if (!hostIsSource) { lhs_sb = &hsb; rhs_sb = &sb; } else { @@ -926,11 +925,6 @@ class FirConverter : public Fortran::lower::AbstractConverter { } copyVar(sym, *lhs_sb, *rhs_sb, sym.flags()); - - if (copyAssignIP && copyAssignIP->isSet() && - sym.test(Fortran::semantics::Symbol::Flag::OmpLastPrivate)) { - builder->restoreInsertionPoint(insPt); - } } void genEval(Fortran::lower::pft::Evaluation &eval, diff --git a/flang/lib/Lower/CMakeLists.txt b/flang/lib/Lower/CMakeLists.txt index f57f0e7a77a01..f611010765cb5 100644 --- a/flang/lib/Lower/CMakeLists.txt +++ b/flang/lib/Lower/CMakeLists.txt @@ -55,17 +55,19 @@ add_flang_library(FortranLower FIRSupport FIRTransforms HLFIRDialect - ${dialect_libs} - ${extension_libs} FortranCommon FortranParser FortranEvaluate FortranSemantics + + LINK_COMPONENTS + Support + + MLIR_LIBS + ${dialect_libs} + ${extension_libs} MLIRAffineToStandard MLIRFuncDialect MLIRLLVMDialect MLIRSCFToControlFlow - - LINK_COMPONENTS - Support ) 
diff --git a/flang/lib/Lower/ConvertType.cpp b/flang/lib/Lower/ConvertType.cpp index 037d4335fedf1..2fab520e6c475 100644 --- a/flang/lib/Lower/ConvertType.cpp +++ b/flang/lib/Lower/ConvertType.cpp @@ -36,17 +36,17 @@ static mlir::Type genRealType(mlir::MLIRContext *context, int kind) { Fortran::common::TypeCategory::Real, kind)) { switch (kind) { case 2: - return mlir::FloatType::getF16(context); + return mlir::Float16Type::get(context); case 3: - return mlir::FloatType::getBF16(context); + return mlir::BFloat16Type::get(context); case 4: - return mlir::FloatType::getF32(context); + return mlir::Float32Type::get(context); case 8: - return mlir::FloatType::getF64(context); + return mlir::Float64Type::get(context); case 10: - return mlir::FloatType::getF80(context); + return mlir::Float80Type::get(context); case 16: - return mlir::FloatType::getF128(context); + return mlir::Float128Type::get(context); } } llvm_unreachable("REAL type translation not implemented"); diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp index 9dfdbd8337ae9..5b89816850bed 100644 --- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp +++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp @@ -145,7 +145,7 @@ void DataSharingProcessor::copyFirstPrivateSymbol( void DataSharingProcessor::copyLastPrivateSymbol( const semantics::Symbol *sym, mlir::OpBuilder::InsertPoint *lastPrivIP) { if (sym->test(semantics::Symbol::Flag::OmpLastPrivate)) - converter.copyHostAssociateVar(*sym, lastPrivIP); + converter.copyHostAssociateVar(*sym, lastPrivIP, /*hostIsSource=*/false); } void DataSharingProcessor::collectOmpObjectListSymbol( diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 158f76250572e..52541bb91481d 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -2082,7 +2082,7 @@ genSectionsOp(lower::AbstractConverter &converter, lower::SymMap &symTable, const auto &objList = 
std::get(lastp->t); for (const Object &object : objList) { semantics::Symbol *sym = object.sym(); - converter.copyHostAssociateVar(*sym, &insp); + converter.copyHostAssociateVar(*sym, &insp, /*hostIsSource=*/false); } } } @@ -2511,7 +2511,7 @@ static void genStandaloneDo(lower::AbstractConverter &converter, DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval, /*shouldCollectPreDeterminedSymbols=*/true, - enableDelayedPrivatizationStaging, symTable); + enableDelayedPrivatization, symTable); dsp.processStep1(&wsloopClauseOps); mlir::omp::LoopNestOperands loopNestClauseOps; diff --git a/flang/lib/Lower/Runtime.cpp b/flang/lib/Lower/Runtime.cpp index 3474832bdb225..2be5ef76e46b8 100644 --- a/flang/lib/Lower/Runtime.cpp +++ b/flang/lib/Lower/Runtime.cpp @@ -210,7 +210,7 @@ void Fortran::lower::genPointerAssociate(fir::FirOpBuilder &builder, fir::runtime::getRuntimeFunc(loc, builder); llvm::SmallVector args = fir::runtime::createArguments( builder, loc, func.getFunctionType(), pointer, target); - builder.create(loc, func, args).getResult(0); + builder.create(loc, func, args); } void Fortran::lower::genPointerAssociateRemapping(fir::FirOpBuilder &builder, @@ -228,7 +228,7 @@ void Fortran::lower::genPointerAssociateRemapping(fir::FirOpBuilder &builder, llvm::SmallVector args = fir::runtime::createArguments( builder, loc, func.getFunctionType(), pointer, target, bounds, sourceFile, sourceLine); - builder.create(loc, func, args).getResult(0); + builder.create(loc, func, args); } void Fortran::lower::genPointerAssociateLowerBounds(fir::FirOpBuilder &builder, @@ -241,5 +241,5 @@ void Fortran::lower::genPointerAssociateLowerBounds(fir::FirOpBuilder &builder, loc, builder); llvm::SmallVector args = fir::runtime::createArguments( builder, loc, func.getFunctionType(), pointer, target, lbounds); - builder.create(loc, func, args).getResult(0); + builder.create(loc, func, args); } diff --git a/flang/lib/Optimizer/Analysis/CMakeLists.txt 
b/flang/lib/Optimizer/Analysis/CMakeLists.txt index 1358219fd98d5..6fe9c70f83765 100644 --- a/flang/lib/Optimizer/Analysis/CMakeLists.txt +++ b/flang/lib/Optimizer/Analysis/CMakeLists.txt @@ -13,6 +13,8 @@ add_flang_library(FIRAnalysis FIRBuilder FIRDialect HLFIRDialect + + MLIR_LIBS MLIRFuncDialect MLIRLLVMDialect MLIRMathTransforms diff --git a/flang/lib/Optimizer/Builder/CMakeLists.txt b/flang/lib/Optimizer/Builder/CMakeLists.txt index 0960e858c4111..f8faeaa81c90c 100644 --- a/flang/lib/Optimizer/Builder/CMakeLists.txt +++ b/flang/lib/Optimizer/Builder/CMakeLists.txt @@ -51,6 +51,8 @@ add_flang_library(FIRBuilder FIRSupport FortranEvaluate HLFIRDialect + + MLIR_LIBS ${dialect_libs} ${extension_libs} ) diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp index 218f98ef9ef42..ad1244ef99b41 100644 --- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp +++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp @@ -105,17 +105,17 @@ mlir::Type fir::FirOpBuilder::getVarLenSeqTy(mlir::Type eleTy, unsigned rank) { mlir::Type fir::FirOpBuilder::getRealType(int kind) { switch (kindMap.getRealTypeID(kind)) { case llvm::Type::TypeID::HalfTyID: - return mlir::FloatType::getF16(getContext()); + return mlir::Float16Type::get(getContext()); case llvm::Type::TypeID::BFloatTyID: - return mlir::FloatType::getBF16(getContext()); + return mlir::BFloat16Type::get(getContext()); case llvm::Type::TypeID::FloatTyID: - return mlir::FloatType::getF32(getContext()); + return mlir::Float32Type::get(getContext()); case llvm::Type::TypeID::DoubleTyID: - return mlir::FloatType::getF64(getContext()); + return mlir::Float64Type::get(getContext()); case llvm::Type::TypeID::X86_FP80TyID: - return mlir::FloatType::getF80(getContext()); + return mlir::Float80Type::get(getContext()); case llvm::Type::TypeID::FP128TyID: - return mlir::FloatType::getF128(getContext()); + return mlir::Float128Type::get(getContext()); default: 
fir::emitFatalError(mlir::UnknownLoc::get(getContext()), "unsupported type !fir.real"); diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index f6f2e15e469e6..6a343645ab878 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -2367,7 +2367,7 @@ mlir::Value IntrinsicLibrary::genAcosd(mlir::Type resultType, mlir::FunctionType::get(context, {resultType}, {args[0].getType()}); llvm::APFloat pi = llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), pi / llvm::APFloat(180.0)); + loc, mlir::Float64Type::get(context), pi / llvm::APFloat(180.0)); mlir::Value factor = builder.createConvert(loc, args[0].getType(), dfactor); mlir::Value arg = builder.create(loc, args[0], factor); return getRuntimeCallGenerator("acos", ftype)(builder, loc, {arg}); @@ -2518,7 +2518,7 @@ mlir::Value IntrinsicLibrary::genAsind(mlir::Type resultType, mlir::FunctionType::get(context, {resultType}, {args[0].getType()}); llvm::APFloat pi = llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), pi / llvm::APFloat(180.0)); + loc, mlir::Float64Type::get(context), pi / llvm::APFloat(180.0)); mlir::Value factor = builder.createConvert(loc, args[0].getType(), dfactor); mlir::Value arg = builder.create(loc, args[0], factor); return getRuntimeCallGenerator("asin", ftype)(builder, loc, {arg}); @@ -2544,7 +2544,7 @@ mlir::Value IntrinsicLibrary::genAtand(mlir::Type resultType, } llvm::APFloat pi = llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), llvm::APFloat(180.0) / pi); + loc, mlir::Float64Type::get(context), llvm::APFloat(180.0) / pi); mlir::Value factor = builder.createConvert(loc, resultType, dfactor); return builder.create(loc, atan, factor); } @@ -2569,7 +2569,7 @@ 
mlir::Value IntrinsicLibrary::genAtanpi(mlir::Type resultType, } llvm::APFloat inv_pi = llvm::APFloat(llvm::numbers::inv_pi); mlir::Value dfactor = - builder.createRealConstant(loc, mlir::FloatType::getF64(context), inv_pi); + builder.createRealConstant(loc, mlir::Float64Type::get(context), inv_pi); mlir::Value factor = builder.createConvert(loc, resultType, dfactor); return builder.create(loc, atan, factor); } @@ -3124,7 +3124,7 @@ mlir::Value IntrinsicLibrary::genCosd(mlir::Type resultType, mlir::FunctionType::get(context, {resultType}, {args[0].getType()}); llvm::APFloat pi = llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), pi / llvm::APFloat(180.0)); + loc, mlir::Float64Type::get(context), pi / llvm::APFloat(180.0)); mlir::Value factor = builder.createConvert(loc, args[0].getType(), dfactor); mlir::Value arg = builder.create(loc, args[0], factor); return getRuntimeCallGenerator("cos", ftype)(builder, loc, {arg}); @@ -4418,12 +4418,12 @@ IntrinsicLibrary::genIeeeCopySign(mlir::Type resultType, mlir::FloatType yRealType = mlir::dyn_cast(yRealVal.getType()); - if (yRealType == mlir::FloatType::getBF16(builder.getContext())) { + if (yRealType == mlir::BFloat16Type::get(builder.getContext())) { // Workaround: CopySignOp and BitcastOp don't work for kind 3 arg Y. // This conversion should always preserve the sign bit. yRealVal = builder.createConvert( - loc, mlir::FloatType::getF32(builder.getContext()), yRealVal); - yRealType = mlir::FloatType::getF32(builder.getContext()); + loc, mlir::Float32Type::get(builder.getContext()), yRealVal); + yRealType = mlir::Float32Type::get(builder.getContext()); } // Args have the same type. 
@@ -4979,7 +4979,7 @@ mlir::Value IntrinsicLibrary::genIeeeReal(mlir::Type resultType, assert(args.size() == 2); mlir::Type i1Ty = builder.getI1Type(); - mlir::Type f32Ty = mlir::FloatType::getF32(builder.getContext()); + mlir::Type f32Ty = mlir::Float32Type::get(builder.getContext()); mlir::Value a = args[0]; mlir::Type aType = a.getType(); @@ -5179,7 +5179,7 @@ mlir::Value IntrinsicLibrary::genIeeeRem(mlir::Type resultType, mlir::Value x = args[0]; mlir::Value y = args[1]; if (mlir::dyn_cast(resultType).getWidth() < 32) { - mlir::Type f32Ty = mlir::FloatType::getF32(builder.getContext()); + mlir::Type f32Ty = mlir::Float32Type::get(builder.getContext()); x = builder.create(loc, f32Ty, x); y = builder.create(loc, f32Ty, y); } else { @@ -5213,7 +5213,7 @@ mlir::Value IntrinsicLibrary::genIeeeRint(mlir::Type resultType, } if (mlir::cast(resultType).getWidth() == 16) a = builder.create( - loc, mlir::FloatType::getF32(builder.getContext()), a); + loc, mlir::Float32Type::get(builder.getContext()), a); mlir::Value result = builder.create( loc, resultType, genRuntimeCall("nearbyint", a.getType(), a)); if (isStaticallyPresent(args[1])) { @@ -5298,10 +5298,10 @@ mlir::Value IntrinsicLibrary::genIeeeSignbit(mlir::Type resultType, mlir::Value realVal = args[0]; mlir::FloatType realType = mlir::dyn_cast(realVal.getType()); int bitWidth = realType.getWidth(); - if (realType == mlir::FloatType::getBF16(builder.getContext())) { + if (realType == mlir::BFloat16Type::get(builder.getContext())) { // Workaround: can't bitcast or convert real(3) to integer(2) or real(2). 
realVal = builder.createConvert( - loc, mlir::FloatType::getF32(builder.getContext()), realVal); + loc, mlir::Float32Type::get(builder.getContext()), realVal); bitWidth = 32; } mlir::Type intType = builder.getIntegerType(bitWidth); @@ -6065,7 +6065,7 @@ mlir::Value IntrinsicLibrary::genModulo(mlir::Type resultType, auto fastMathFlags = builder.getFastMathFlags(); // F128 arith::RemFOp may be lowered to a runtime call that may be unsupported // on the target, so generate a call to Fortran Runtime's ModuloReal16. - if (resultType == mlir::FloatType::getF128(builder.getContext()) || + if (resultType == mlir::Float128Type::get(builder.getContext()) || (fastMathFlags & mlir::arith::FastMathFlags::ninf) == mlir::arith::FastMathFlags::none) return builder.createConvert( @@ -6254,7 +6254,7 @@ mlir::Value IntrinsicLibrary::genNearest(mlir::Type resultType, mlir::FloatType yType = mlir::dyn_cast(args[1].getType()); const unsigned yBitWidth = yType.getWidth(); if (xType != yType) { - mlir::Type f32Ty = mlir::FloatType::getF32(builder.getContext()); + mlir::Type f32Ty = mlir::Float32Type::get(builder.getContext()); if (xBitWidth < 32) x1 = builder.createConvert(loc, f32Ty, x1); if (yBitWidth > 32 && yBitWidth > xBitWidth) @@ -7205,7 +7205,7 @@ mlir::Value IntrinsicLibrary::genSind(mlir::Type resultType, mlir::FunctionType::get(context, {resultType}, {args[0].getType()}); llvm::APFloat pi = llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), pi / llvm::APFloat(180.0)); + loc, mlir::Float64Type::get(context), pi / llvm::APFloat(180.0)); mlir::Value factor = builder.createConvert(loc, args[0].getType(), dfactor); mlir::Value arg = builder.create(loc, args[0], factor); return getRuntimeCallGenerator("sin", ftype)(builder, loc, {arg}); @@ -7286,7 +7286,7 @@ mlir::Value IntrinsicLibrary::genTand(mlir::Type resultType, mlir::FunctionType::get(context, {resultType}, {args[0].getType()}); llvm::APFloat pi = 
llvm::APFloat(llvm::numbers::pi); mlir::Value dfactor = builder.createRealConstant( - loc, mlir::FloatType::getF64(context), pi / llvm::APFloat(180.0)); + loc, mlir::Float64Type::get(context), pi / llvm::APFloat(180.0)); mlir::Value factor = builder.createConvert(loc, args[0].getType(), dfactor); mlir::Value arg = builder.create(loc, args[0], factor); return getRuntimeCallGenerator("tan", ftype)(builder, loc, {arg}); diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp index b3b07d18a956b..fcc91752552c3 100644 --- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp @@ -1579,7 +1579,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType, return callOp.getResult(0); } else if (width == 64) { - auto fTy{mlir::FloatType::getF64(context)}; + auto fTy{mlir::Float64Type::get(context)}; auto ty{mlir::VectorType::get(2, fTy)}; // vec_vtf(arg1, arg2) = fmul(1.0 / (1 << arg2), llvm.sitofp(arg1)) @@ -1639,7 +1639,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType, newArgs[0] = builder.create(loc, funcOp, newArgs).getResult(0); auto fvf32Ty{newArgs[0].getType()}; - auto f32type{mlir::FloatType::getF32(context)}; + auto f32type{mlir::Float32Type::get(context)}; auto mvf32Ty{mlir::VectorType::get(4, f32type)}; newArgs[0] = builder.createConvert(loc, mvf32Ty, newArgs[0]); @@ -1949,7 +1949,7 @@ PPCIntrinsicLibrary::genVecLdCallGrp(mlir::Type resultType, fname = isBEVecElemOrderOnLE() ? "llvm.ppc.vsx.lxvd2x.be" : "llvm.ppc.vsx.lxvd2x"; // llvm.ppc.altivec.lxvd2x* returns <2 x double> - intrinResTy = mlir::VectorType::get(2, mlir::FloatType::getF64(context)); + intrinResTy = mlir::VectorType::get(2, mlir::Float64Type::get(context)); } break; case VecOp::Xlw4: fname = isBEVecElemOrderOnLE() ? 
"llvm.ppc.vsx.lxvw4x.be" @@ -2092,7 +2092,7 @@ PPCIntrinsicLibrary::genVecPerm(mlir::Type resultType, auto mlirTy{vecTyInfo.toMlirVectorType(context)}; auto vi32Ty{mlir::VectorType::get(4, mlir::IntegerType::get(context, 32))}; - auto vf64Ty{mlir::VectorType::get(2, mlir::FloatType::getF64(context))}; + auto vf64Ty{mlir::VectorType::get(2, mlir::Float64Type::get(context))}; auto mArg0{builder.createConvert(loc, mlirTy, argBases[0])}; auto mArg1{builder.createConvert(loc, mlirTy, argBases[1])}; diff --git a/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp b/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp index e01a6f05b5fdd..f4d03c95ae518 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp @@ -40,7 +40,7 @@ void fir::runtime::genLbound(fir::FirOpBuilder &builder, mlir::Location loc, fir::factory::locationToLineNo(builder, loc, fTy.getInput(4)); auto args = fir::runtime::createArguments( builder, loc, fTy, resultAddr, array, kind, sourceFile, sourceLine); - builder.create(loc, func, args).getResult(0); + builder.create(loc, func, args); } /// Generate call to `Ubound` runtime routine. Calls to UBOUND with a DIM @@ -57,7 +57,7 @@ void fir::runtime::genUbound(fir::FirOpBuilder &builder, mlir::Location loc, fir::factory::locationToLineNo(builder, loc, fTy.getInput(2)); auto args = fir::runtime::createArguments(builder, loc, fTy, resultBox, array, kind, sourceFile, sourceLine); - builder.create(loc, uboundFunc, args).getResult(0); + builder.create(loc, uboundFunc, args); } /// Generate call to `Size` runtime routine. 
This routine is a version when @@ -113,5 +113,5 @@ void fir::runtime::genShape(fir::FirOpBuilder &builder, mlir::Location loc, fir::factory::locationToLineNo(builder, loc, fTy.getInput(4)); auto args = fir::runtime::createArguments( builder, loc, fTy, resultAddr, array, kind, sourceFile, sourceLine); - builder.create(loc, func, args).getResult(0); + builder.create(loc, func, args); } diff --git a/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp b/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp index ded9579f2c1df..963051ccdc379 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Intrinsics.cpp @@ -38,8 +38,7 @@ struct ForcedRandomNumberReal16 { auto strTy = fir::runtime::getModel()(ctx); auto intTy = fir::runtime::getModel()(ctx); ; - return mlir::FunctionType::get(ctx, {boxTy, strTy, intTy}, - mlir::NoneType::get(ctx)); + return mlir::FunctionType::get(ctx, {boxTy, strTy, intTy}, {}); }; } }; diff --git a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp index d0092add0118f..4ff7c86bb0a24 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp @@ -27,7 +27,7 @@ struct ForcedErfcScaled10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ErfcScaled10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } @@ -38,7 +38,7 @@ struct ForcedErfcScaled16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ErfcScaled16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } @@ -49,7 +49,7 @@ struct 
ForcedExponent10_4 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Exponent10_4)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 32); return mlir::FunctionType::get(ctx, fltTy, intTy); }; @@ -60,7 +60,7 @@ struct ForcedExponent10_8 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Exponent10_8)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, fltTy, intTy); }; @@ -72,7 +72,7 @@ struct ForcedExponent16_4 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Exponent16_4)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 32); return mlir::FunctionType::get(ctx, fltTy, intTy); }; @@ -83,7 +83,7 @@ struct ForcedExponent16_8 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Exponent16_8)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, fltTy, intTy); }; @@ -95,7 +95,7 @@ struct ForcedFraction10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Fraction10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); 
}; } @@ -106,7 +106,7 @@ struct ForcedFraction16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Fraction16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } @@ -117,7 +117,7 @@ struct ForcedMod10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ModReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int)); return mlir::FunctionType::get(ctx, {fltTy, fltTy, strTy, intTy}, @@ -131,7 +131,7 @@ struct ForcedMod16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ModReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int)); return mlir::FunctionType::get(ctx, {fltTy, fltTy, strTy, intTy}, @@ -145,7 +145,7 @@ struct ForcedModulo10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ModuloReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int)); return mlir::FunctionType::get(ctx, {fltTy, fltTy, strTy, intTy}, @@ -159,7 +159,7 @@ struct ForcedModulo16 { static constexpr const char *name = 
ExpandAndQuoteKey(RTNAME(ModuloReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int)); return mlir::FunctionType::get(ctx, {fltTy, fltTy, strTy, intTy}, @@ -173,7 +173,7 @@ struct ForcedNearest10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Nearest10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto boolTy = mlir::IntegerType::get(ctx, 1); return mlir::FunctionType::get(ctx, {fltTy, boolTy}, {fltTy}); }; @@ -185,7 +185,7 @@ struct ForcedNearest16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Nearest16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto boolTy = mlir::IntegerType::get(ctx, 1); return mlir::FunctionType::get(ctx, {fltTy, boolTy}, {fltTy}); }; @@ -197,7 +197,7 @@ struct ForcedRRSpacing10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(RRSpacing10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } @@ -208,7 +208,7 @@ struct ForcedRRSpacing16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(RRSpacing16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, 
{ty}); }; } @@ -219,7 +219,7 @@ struct ForcedScale10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Scale10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, {fltTy, intTy}, {fltTy}); }; @@ -231,7 +231,7 @@ struct ForcedScale16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Scale16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, {fltTy, intTy}, {fltTy}); }; @@ -243,7 +243,7 @@ struct ForcedSetExponent10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(SetExponent10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF80(ctx); + auto fltTy = mlir::Float80Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, {fltTy, intTy}, {fltTy}); }; @@ -255,7 +255,7 @@ struct ForcedSetExponent16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(SetExponent16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto fltTy = mlir::FloatType::getF128(ctx); + auto fltTy = mlir::Float128Type::get(ctx); auto intTy = mlir::IntegerType::get(ctx, 64); return mlir::FunctionType::get(ctx, {fltTy, intTy}, {fltTy}); }; @@ -267,7 +267,7 @@ struct ForcedSpacing10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Spacing10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = 
mlir::Float80Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } @@ -278,7 +278,7 @@ struct ForcedSpacing16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Spacing16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); return mlir::FunctionType::get(ctx, {ty}, {ty}); }; } diff --git a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp index 1aa941bd2131c..f778b963c59ca 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp @@ -27,7 +27,7 @@ struct ForcedMaxvalReal10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(MaxvalReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -43,7 +43,7 @@ struct ForcedMaxvalReal16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(MaxvalReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -94,7 +94,7 @@ struct ForcedMinvalReal10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(MinvalReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -110,7 
+110,7 @@ struct ForcedMinvalReal16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(MinvalReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -173,7 +173,7 @@ struct ForcedNorm2Real10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Norm2_10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -188,7 +188,7 @@ struct ForcedNorm2Real16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Norm2_16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -209,7 +209,7 @@ struct ForcedNorm2DimReal16 { auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int)); return mlir::FunctionType::get( ctx, {fir::ReferenceType::get(boxTy), boxTy, intTy, strTy, intTy}, - mlir::NoneType::get(ctx)); + {}); }; } }; @@ -219,7 +219,7 @@ struct ForcedProductReal10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ProductReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -235,7 +235,7 @@ struct ForcedProductReal16 { static 
constexpr const char *name = ExpandAndQuoteKey(RTNAME(ProductReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -287,7 +287,7 @@ struct ForcedProductComplex10 { ExpandAndQuoteKey(RTNAME(CppProductComplex10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -305,7 +305,7 @@ struct ForcedProductComplex16 { ExpandAndQuoteKey(RTNAME(CppProductComplex16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -323,7 +323,7 @@ struct ForcedDotProductReal10 { ExpandAndQuoteKey(RTNAME(DotProductReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -339,7 +339,7 @@ struct ForcedDotProductReal16 { ExpandAndQuoteKey(RTNAME(DotProductReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = 
fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -355,7 +355,7 @@ struct ForcedDotProductComplex10 { ExpandAndQuoteKey(RTNAME(CppDotProductComplex10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -373,7 +373,7 @@ struct ForcedDotProductComplex16 { ExpandAndQuoteKey(RTNAME(CppDotProductComplex16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -423,7 +423,7 @@ struct ForcedSumReal10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(SumReal10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -439,7 +439,7 @@ struct ForcedSumReal16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(SumReal16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -489,7 +489,7 @@ struct ForcedSumComplex10 { ExpandAndQuoteKey(RTNAME(CppSumComplex10)); static constexpr fir::runtime::FuncTypeBuilderFunc 
getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -507,7 +507,7 @@ struct ForcedSumComplex16 { ExpandAndQuoteKey(RTNAME(CppSumComplex16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); @@ -573,7 +573,7 @@ struct ForcedReduceReal10Ref { ExpandAndQuoteKey(RTNAME(ReduceReal10Ref)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -593,7 +593,7 @@ struct ForcedReduceReal10Value { ExpandAndQuoteKey(RTNAME(ReduceReal10Value)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -613,7 +613,7 @@ struct ForcedReduceReal16Ref { ExpandAndQuoteKey(RTNAME(ReduceReal16Ref)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -633,7 +633,7 @@ struct ForcedReduceReal16Value { ExpandAndQuoteKey(RTNAME(ReduceReal16Value)); static constexpr 
fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -653,7 +653,7 @@ struct ForcedReduceReal10DimRef { ExpandAndQuoteKey(RTNAME(ReduceReal10DimRef)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -675,7 +675,7 @@ struct ForcedReduceReal10DimValue { ExpandAndQuoteKey(RTNAME(ReduceReal10DimValue)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -697,7 +697,7 @@ struct ForcedReduceReal16DimRef { ExpandAndQuoteKey(RTNAME(ReduceReal16DimRef)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -719,7 +719,7 @@ struct ForcedReduceReal16DimValue { ExpandAndQuoteKey(RTNAME(ReduceReal16DimValue)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -911,7 +911,7 @@ struct ForcedReduceComplex10Ref { ExpandAndQuoteKey(RTNAME(CppReduceComplex10Ref)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = 
mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -932,7 +932,7 @@ struct ForcedReduceComplex10Value { ExpandAndQuoteKey(RTNAME(CppReduceComplex10Value)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -953,7 +953,7 @@ struct ForcedReduceComplex10DimRef { ExpandAndQuoteKey(RTNAME(CppReduceComplex10DimRef)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -975,7 +975,7 @@ struct ForcedReduceComplex10DimValue { ExpandAndQuoteKey(RTNAME(CppReduceComplex10DimValue)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF80(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float80Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -997,7 +997,7 @@ struct ForcedReduceComplex16Ref { ExpandAndQuoteKey(RTNAME(CppReduceComplex16Ref)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -1018,7 +1018,7 @@ struct ForcedReduceComplex16Value { 
ExpandAndQuoteKey(RTNAME(CppReduceComplex16Value)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -1039,7 +1039,7 @@ struct ForcedReduceComplex16DimRef { ExpandAndQuoteKey(RTNAME(CppReduceComplex16DimRef)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); @@ -1061,7 +1061,7 @@ struct ForcedReduceComplex16DimValue { ExpandAndQuoteKey(RTNAME(CppReduceComplex16DimValue)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::ComplexType::get(mlir::FloatType::getF128(ctx)); + auto ty = mlir::ComplexType::get(mlir::Float128Type::get(ctx)); auto boxTy = fir::runtime::getModel()(ctx); auto refTy = fir::ReferenceType::get(ty); diff --git a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp index 517ba3799798f..978524494af9b 100644 --- a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp +++ b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp @@ -25,14 +25,13 @@ struct ForcedBesselJn_10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(BesselJn_10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = 
mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get( - ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {noneTy}); + ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {}); }; } }; @@ -42,14 +41,13 @@ struct ForcedBesselJn_16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(BesselJn_16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get( - ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {noneTy}); + ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {}); }; } }; @@ -63,9 +61,8 @@ struct ForcedBesselJnX0_10 { fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get(ctx, {boxTy, intTy, intTy, strTy, intTy}, - {noneTy}); + {}); }; } }; @@ -79,9 +76,8 @@ struct ForcedBesselJnX0_16 { fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get(ctx, {boxTy, intTy, intTy, strTy, intTy}, - {noneTy}); + {}); }; } }; @@ -91,14 +87,13 @@ struct ForcedBesselYn_10 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(BesselYn_10)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF80(ctx); + auto ty = mlir::Float80Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = 
fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get( - ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {noneTy}); + ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {}); }; } }; @@ -108,14 +103,13 @@ struct ForcedBesselYn_16 { static constexpr const char *name = ExpandAndQuoteKey(RTNAME(BesselYn_16)); static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() { return [](mlir::MLIRContext *ctx) { - auto ty = mlir::FloatType::getF128(ctx); + auto ty = mlir::Float128Type::get(ctx); auto boxTy = fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get( - ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {noneTy}); + ctx, {boxTy, intTy, intTy, ty, ty, ty, strTy, intTy}, {}); }; } }; @@ -129,9 +123,8 @@ struct ForcedBesselYnX0_10 { fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get(ctx, {boxTy, intTy, intTy, strTy, intTy}, - {noneTy}); + {}); }; } }; @@ -145,9 +138,8 @@ struct ForcedBesselYnX0_16 { fir::runtime::getModel()(ctx); auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8)); auto intTy = mlir::IntegerType::get(ctx, 32); - auto noneTy = mlir::NoneType::get(ctx); return mlir::FunctionType::get(ctx, {boxTy, intTy, intTy, strTy, intTy}, - {noneTy}); + {}); }; } }; @@ -339,9 +331,8 @@ struct ForcedMatmulTypeModel { fir::runtime::getModel()(ctx); auto strTy = fir::runtime::getModel()(ctx); auto intTy = fir::runtime::getModel()(ctx); - auto voidTy = fir::runtime::getModel()(ctx); return mlir::FunctionType::get( - ctx, {boxRefTy, boxTy, boxTy, strTy, intTy}, {voidTy}); + ctx, {boxRefTy, 
boxTy, boxTy, strTy, intTy}, {}); }; } }; diff --git a/flang/lib/Optimizer/CodeGen/CMakeLists.txt b/flang/lib/Optimizer/CodeGen/CMakeLists.txt index f47d11875f04d..81c8a68b95367 100644 --- a/flang/lib/Optimizer/CodeGen/CMakeLists.txt +++ b/flang/lib/Optimizer/CodeGen/CMakeLists.txt @@ -21,6 +21,14 @@ add_flang_library(FIRCodeGen FIRDialect FIRDialectSupport FIRSupport + + LINK_COMPONENTS + AsmParser + AsmPrinter + Remarks + TargetParser + + MLIR_LIBS MLIRComplexToLLVM MLIRComplexToStandard MLIRGPUDialect @@ -34,10 +42,4 @@ add_flang_library(FIRCodeGen MLIRLLVMToLLVMIRTranslation MLIRTargetLLVMIRExport MLIRVectorToLLVM - - LINK_COMPONENTS - AsmParser - AsmPrinter - Remarks - TargetParser ) diff --git a/flang/lib/Optimizer/CodeGen/Target.cpp b/flang/lib/Optimizer/CodeGen/Target.cpp index c332493eb8072..1bc673bb34e32 100644 --- a/flang/lib/Optimizer/CodeGen/Target.cpp +++ b/flang/lib/Optimizer/CodeGen/Target.cpp @@ -572,12 +572,12 @@ struct TargetX86_64 : public GenericTarget { // select an fp type of the right size, and it makes things simpler // here. 
if (partByteSize > 8) - return mlir::FloatType::getF128(context); + return mlir::Float128Type::get(context); if (partByteSize > 4) - return mlir::FloatType::getF64(context); + return mlir::Float64Type::get(context); if (partByteSize > 2) - return mlir::FloatType::getF32(context); - return mlir::FloatType::getF16(context); + return mlir::Float32Type::get(context); + return mlir::Float16Type::get(context); } assert(partByteSize <= 8 && "expect integer part of aggregate argument to fit into eight bytes"); diff --git a/flang/lib/Optimizer/Dialect/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CMakeLists.txt index 08caa15700d4c..d39dca8ed0000 100644 --- a/flang/lib/Optimizer/Dialect/CMakeLists.txt +++ b/flang/lib/Optimizer/Dialect/CMakeLists.txt @@ -20,14 +20,16 @@ add_flang_library(FIRDialect LINK_LIBS CUFAttrs FIRDialectSupport - MLIRArithDialect - MLIRBuiltinToLLVMIRTranslation - MLIROpenMPToLLVM - MLIRLLVMToLLVMIRTranslation - MLIRTargetLLVMIRExport LINK_COMPONENTS AsmParser AsmPrinter Remarks + + MLIR_LIBS + MLIRArithDialect + MLIRBuiltinToLLVMIRTranslation + MLIROpenMPToLLVM + MLIRLLVMToLLVMIRTranslation + MLIRTargetLLVMIRExport ) diff --git a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt index ec5484c1d6108..a0f58504eff05 100644 --- a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt +++ b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt @@ -7,11 +7,11 @@ add_flang_library(CUFAttrs CUFAttrsIncGen CUFOpsIncGen - LINK_LIBS - MLIRTargetLLVMIRExport - LINK_COMPONENTS AsmParser AsmPrinter Remarks + + MLIR_LIBS + MLIRTargetLLVMIRExport ) diff --git a/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt index 5d4bd0785971f..e483b4a164113 100644 --- a/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt +++ b/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt @@ -14,12 +14,14 @@ add_flang_library(CUFDialect CUFAttrs FIRDialect FIRDialectSupport - MLIRIR 
- MLIRGPUDialect - MLIRTargetLLVMIRExport LINK_COMPONENTS AsmParser AsmPrinter Remarks + + MLIR_LIBS + MLIRIR + MLIRGPUDialect + MLIRTargetLLVMIRExport ) diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp index d25e5651f1142..d8ce231d1b5a7 100644 --- a/flang/lib/Optimizer/Dialect/FIRType.cpp +++ b/flang/lib/Optimizer/Dialect/FIRType.cpp @@ -1249,17 +1249,17 @@ mlir::Type fir::fromRealTypeID(mlir::MLIRContext *context, llvm::Type::TypeID typeID, fir::KindTy kind) { switch (typeID) { case llvm::Type::TypeID::HalfTyID: - return mlir::FloatType::getF16(context); + return mlir::Float16Type::get(context); case llvm::Type::TypeID::BFloatTyID: - return mlir::FloatType::getBF16(context); + return mlir::BFloat16Type::get(context); case llvm::Type::TypeID::FloatTyID: - return mlir::FloatType::getF32(context); + return mlir::Float32Type::get(context); case llvm::Type::TypeID::DoubleTyID: - return mlir::FloatType::getF64(context); + return mlir::Float64Type::get(context); case llvm::Type::TypeID::X86_FP80TyID: - return mlir::FloatType::getF80(context); + return mlir::Float80Type::get(context); case llvm::Type::TypeID::FP128TyID: - return mlir::FloatType::getF128(context); + return mlir::Float128Type::get(context); default: mlir::emitError(mlir::UnknownLoc::get(context)) << "unsupported type: !fir.real<" << kind << ">"; diff --git a/flang/lib/Optimizer/Dialect/Support/CMakeLists.txt b/flang/lib/Optimizer/Dialect/Support/CMakeLists.txt index c37b0549f7fc1..bfdd5279b6f29 100644 --- a/flang/lib/Optimizer/Dialect/Support/CMakeLists.txt +++ b/flang/lib/Optimizer/Dialect/Support/CMakeLists.txt @@ -8,6 +8,6 @@ add_flang_library(FIRDialectSupport MLIRIR intrinsics_gen - LINK_LIBS + MLIR_LIBS ${dialect_libs} ) diff --git a/flang/lib/Optimizer/HLFIR/IR/CMakeLists.txt b/flang/lib/Optimizer/HLFIR/IR/CMakeLists.txt index 267d6469ee7ab..8a646bedf94b8 100644 --- a/flang/lib/Optimizer/HLFIR/IR/CMakeLists.txt +++ 
b/flang/lib/Optimizer/HLFIR/IR/CMakeLists.txt @@ -13,11 +13,13 @@ add_flang_library(HLFIRDialect LINK_LIBS CUFAttrs FIRDialect - MLIRIR - ${dialect_libs} LINK_COMPONENTS AsmParser AsmPrinter Remarks + + MLIR_LIBS + MLIRIR + ${dialect_libs} ) diff --git a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt index 25a532204dd05..09286aced6089 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt +++ b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt @@ -27,11 +27,13 @@ add_flang_library(HLFIRTransforms FIRTransforms FlangOpenMPTransforms HLFIRDialect - MLIRIR - ${dialect_libs} LINK_COMPONENTS AsmParser AsmPrinter Remarks + + MLIR_LIBS + MLIRIR + ${dialect_libs} ) diff --git a/flang/lib/Optimizer/OpenACC/CMakeLists.txt b/flang/lib/Optimizer/OpenACC/CMakeLists.txt index ed673121353c1..04d351ac265d6 100644 --- a/flang/lib/Optimizer/OpenACC/CMakeLists.txt +++ b/flang/lib/Optimizer/OpenACC/CMakeLists.txt @@ -18,5 +18,7 @@ add_flang_library(FIROpenACCSupport FIRDialectSupport FIRSupport HLFIRDialect + + MLIR_LIBS MLIROpenACCDialect ) diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt index 026889cca238a..9fe2d3947c26d 100644 --- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt +++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt @@ -23,9 +23,11 @@ add_flang_library(FlangOpenMPTransforms FIRSupport FortranCommon FortranEvaluate + HLFIRDialect + + MLIR_LIBS MLIRFuncDialect MLIROpenMPDialect - HLFIRDialect MLIRIR MLIRPass MLIRTransformUtils diff --git a/flang/lib/Optimizer/Passes/CMakeLists.txt b/flang/lib/Optimizer/Passes/CMakeLists.txt index 40abbdfbdd651..eb25beba309bf 100644 --- a/flang/lib/Optimizer/Passes/CMakeLists.txt +++ b/flang/lib/Optimizer/Passes/CMakeLists.txt @@ -12,16 +12,18 @@ add_flang_library(flangPasses FIRCodeGen FIRTransforms FlangOpenMPTransforms - ${dialect_libs} - ${extension_libs} FortranCommon HLFIRTransforms + + LINK_COMPONENTS + Passes + + 
MLIR_LIBS + ${dialect_libs} + ${extension_libs} MLIRPass MLIRReconcileUnrealizedCasts MLIRSCFToControlFlow MLIRSupport MLIRTransforms - - LINK_COMPONENTS - Passes ) diff --git a/flang/lib/Optimizer/Support/CMakeLists.txt b/flang/lib/Optimizer/Support/CMakeLists.txt index 8794c24712417..f8e4fc5bcefea 100644 --- a/flang/lib/Optimizer/Support/CMakeLists.txt +++ b/flang/lib/Optimizer/Support/CMakeLists.txt @@ -16,6 +16,11 @@ add_flang_library(FIRSupport LINK_LIBS FIRDialect + + LINK_COMPONENTS + TargetParser + + MLIR_LIBS ${dialect_libs} ${extension_libs} MLIRBuiltinToLLVMIRTranslation @@ -24,7 +29,4 @@ add_flang_library(FIRSupport MLIRLLVMToLLVMIRTranslation MLIRTargetLLVMIRExport MLIRTargetLLVMIRImport - - LINK_COMPONENTS - TargetParser ) diff --git a/flang/lib/Optimizer/Transforms/CMakeLists.txt b/flang/lib/Optimizer/Transforms/CMakeLists.txt index d20d3bc4108ce..9c550f983434a 100644 --- a/flang/lib/Optimizer/Transforms/CMakeLists.txt +++ b/flang/lib/Optimizer/Transforms/CMakeLists.txt @@ -48,6 +48,8 @@ add_flang_library(FIRTransforms FIRSupport FortranCommon HLFIRDialect + + MLIR_LIBS MLIRAffineUtils MLIRFuncDialect MLIRGPUDialect diff --git a/flang/lib/Support/CMakeLists.txt b/flang/lib/Support/CMakeLists.txt index 9c7887aecafbd..12183f590316d 100644 --- a/flang/lib/Support/CMakeLists.txt +++ b/flang/lib/Support/CMakeLists.txt @@ -1,9 +1,9 @@ add_flang_library(FortranSupport Timing.cpp - LINK_LIBS - MLIRSupport - LINK_COMPONENTS Support + + MLIR_LIBS + MLIRSupport ) diff --git a/flang/module/__cuda_device.f90 b/flang/module/__cuda_device.f90 index 81b1f5aa334bb..73f3d19c98a31 100644 --- a/flang/module/__cuda_device.f90 +++ b/flang/module/__cuda_device.f90 @@ -14,19 +14,4 @@ ! Set PRIVATE by default to explicitly only export what is meant ! to be exported by this MODULE. 
- interface - attributes(device) function __fadd_rd(x, y) bind(c, name='__nv_fadd_rd') - real, intent(in), value :: x, y - real :: __fadd_rd - end function - end interface - public :: __fadd_rd - - interface - attributes(device) function __fadd_ru(x, y) bind(c, name='__nv_fadd_ru') - real, intent(in), value :: x, y - real :: __fadd_ru - end function - end interface - public :: __fadd_ru end module diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90 index 5712a7724ae49..3d487fd000a09 100644 --- a/flang/module/cudadevice.f90 +++ b/flang/module/cudadevice.f90 @@ -75,4 +75,22 @@ attributes(device) subroutine threadfence_system() end interface public :: threadfence_system + ! Math API + + interface + attributes(device) function __fadd_rd(x, y) bind(c, name='__nv_fadd_rd') + real, intent(in), value :: x, y + real :: __fadd_rd + end function + end interface + public :: __fadd_rd + + interface + attributes(device) function __fadd_ru(x, y) bind(c, name='__nv_fadd_ru') + real, intent(in), value :: x, y + real :: __fadd_ru + end function + end interface + public :: __fadd_ru + end module diff --git a/flang/runtime/tools.h b/flang/runtime/tools.h index 3fe3283415ba9..facbd23161057 100644 --- a/flang/runtime/tools.h +++ b/flang/runtime/tools.h @@ -206,6 +206,23 @@ inline RT_API_ATTRS RESULT ApplyType( default: terminator.Crash("not yet implemented: INTEGER(KIND=%d)", kind); } + case TypeCategory::Unsigned: + switch (kind) { + case 1: + return FUNC{}(std::forward(x)...); + case 2: + return FUNC{}(std::forward(x)...); + case 4: + return FUNC{}(std::forward(x)...); + case 8: + return FUNC{}(std::forward(x)...); +#if defined __SIZEOF_INT128__ && !AVOID_NATIVE_UINT128_T + case 16: + return FUNC{}(std::forward(x)...); +#endif + default: + terminator.Crash("not yet implemented: UNSIGNED(KIND=%d)", kind); + } case TypeCategory::Real: switch (kind) { #if 0 // TODO: REAL(2 & 3) diff --git a/flang/test/Analysis/AliasAnalysis/ptr-component.fir 
b/flang/test/Analysis/AliasAnalysis/ptr-component.fir index 279143a581460..856d8e2d94c98 100644 --- a/flang/test/Analysis/AliasAnalysis/ptr-component.fir +++ b/flang/test/Analysis/AliasAnalysis/ptr-component.fir @@ -101,7 +101,7 @@ func.func @_QMmPfoo.fir(%arg0: !fir.ref>>,i:i32}>>>) -> !fir.ref> %18 = fir.convert %15 : (!fir.box>>,i:i32}>>) -> !fir.box %19 = fir.convert %16 : (!fir.ref>) -> !fir.ref - %20 = fir.call @_FortranAAssign(%17, %18, %19, %c14_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + fir.call @_FortranAAssign(%17, %18, %19, %c14_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () %21 = fir.field_index next, !fir.type<_QMmTt{next:!fir.box>>,i:i32}> %22 = fir.coordinate_of %5, %21 {test.ptr="xnext2.fir"}: (!fir.ref>>,i:i32}>>, !fir.field) -> !fir.ref>>,i:i32}>>>> %23 = fir.load %22 : !fir.ref>>,i:i32}>>>> diff --git a/flang/test/Fir/CUDA/cuda-alloc-free.fir b/flang/test/Fir/CUDA/cuda-alloc-free.fir index abf2d56695b17..6194f0071cd79 100644 --- a/flang/test/Fir/CUDA/cuda-alloc-free.fir +++ b/flang/test/Fir/CUDA/cuda-alloc-free.fir @@ -15,7 +15,7 @@ func.func @_QPsub1() { // CHECK: %[[CONV:.*]] = fir.convert %3 : (!fir.llvm_ptr) -> !fir.ref // CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[CONV]] {data_attr = #cuf.cuda, uniq_name = "_QFsub1Eidev"} : (!fir.ref) -> (!fir.ref, !fir.ref) // CHECK: %[[DEVPTR:.*]] = fir.convert %[[DECL]]#1 : (!fir.ref) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFMemFree(%[[DEVPTR]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFMemFree(%[[DEVPTR]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, i32, !fir.ref, i32) -> () func.func @_QPsub2() { %0 = cuf.alloc !fir.array<10xf32> {bindc_name = "a", data_attr = #cuf.cuda, uniq_name = "_QMcuda_varFcuda_alloc_freeEa"} -> !fir.ref> diff --git a/flang/test/Fir/CUDA/cuda-allocate.fir b/flang/test/Fir/CUDA/cuda-allocate.fir index 2f805d4a2b6bb..35c6e2a77a697 100644 --- a/flang/test/Fir/CUDA/cuda-allocate.fir +++ 
b/flang/test/Fir/CUDA/cuda-allocate.fir @@ -24,7 +24,7 @@ func.func @_QPsub1() { // CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DECL_DESC]]#1 : (!fir.ref>>>) -> !fir.ref> // CHECK: %{{.*}} = fir.call @_FortranAAllocatableDeallocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 // CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DECL_DESC]]#1 : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFFreeDescriptor(%[[BOX_NONE]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFFreeDescriptor(%[[BOX_NONE]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref, i32) -> () fir.global @_QMmod1Ea {data_attr = #cuf.cuda} : !fir.box>> { %0 = fir.zero_bits !fir.heap> @@ -80,7 +80,7 @@ func.func @_QPsub5() { %6 = fir.convert %5#1 : (!fir.ref>>>) -> !fir.ref> %7 = fir.convert %c1 : (index) -> i64 %8 = fir.convert %c10_i32 : (i32) -> i64 - %9 = fir.call @_FortranAAllocatableSetBounds(%6, %c0_i32, %7, %8) fastmath : (!fir.ref>, i32, i64, i64) -> none + fir.call @_FortranAAllocatableSetBounds(%6, %c0_i32, %7, %8) fastmath : (!fir.ref>, i32, i64, i64) -> () %10 = cuf.allocate %5#1 : !fir.ref>>> {data_attr = #cuf.cuda} -> i32 %11 = cuf.deallocate %5#1 : !fir.ref>>> {data_attr = #cuf.cuda} -> i32 return @@ -108,7 +108,7 @@ func.func @_QQsub6() attributes {fir.bindc_name = "test"} { %2 = fir.convert %1#1 : (!fir.ref>>>) -> !fir.ref> %3 = fir.convert %c1 : (index) -> i64 %4 = fir.convert %c10_i32 : (i32) -> i64 - %5 = fir.call @_FortranAAllocatableSetBounds(%2, %c0_i32, %3, %4) fastmath : (!fir.ref>, i32, i64, i64) -> none + fir.call @_FortranAAllocatableSetBounds(%2, %c0_i32, %3, %4) fastmath : (!fir.ref>, i32, i64, i64) -> () %6 = cuf.allocate %1#1 : !fir.ref>>> {data_attr = #cuf.cuda} -> i32 return } diff --git a/flang/test/Fir/CUDA/cuda-code-gen.mlir b/flang/test/Fir/CUDA/cuda-code-gen.mlir index 0f1b8b1cd6a8e..3ad28fa7bd517 100644 --- a/flang/test/Fir/CUDA/cuda-code-gen.mlir +++ 
b/flang/test/Fir/CUDA/cuda-code-gen.mlir @@ -91,8 +91,8 @@ module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64> %16 = fir.convert %6 : (!fir.ref>>>) -> !fir.ref> %17 = fir.convert %c1 : (index) -> i64 %18 = fir.convert %c16_i32 : (i32) -> i64 - %19 = fir.call @_FortranAAllocatableSetBounds(%16, %c0_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> none - %20 = fir.call @_FortranAAllocatableSetBounds(%16, %c1_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> none + fir.call @_FortranAAllocatableSetBounds(%16, %c0_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> () + fir.call @_FortranAAllocatableSetBounds(%16, %c1_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> () %21 = fir.address_of(@_QQclX64756D6D792E6D6C697200) : !fir.ref> %c31_i32 = arith.constant 31 : i32 %false = arith.constant false @@ -102,7 +102,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64> %24 = fir.convert %21 : (!fir.ref>) -> !fir.ref %25 = fir.call @_FortranACUFAllocatableAllocate(%23, %c-1_i64, %false, %22, %24, %c31_i32) : (!fir.ref>, i64, i1, !fir.box, !fir.ref, i32) -> i32 %26 = fir.convert %13 : (!fir.ref>>>) -> !fir.ref> - %27 = fir.call @_FortranAAllocatableSetBounds(%26, %c0_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> none + fir.call @_FortranAAllocatableSetBounds(%26, %c0_i32, %17, %18) fastmath : (!fir.ref>, i32, i64, i64) -> () %28 = fir.address_of(@_QQclX64756D6D792E6D6C697200) : !fir.ref> %c34_i32 = arith.constant 34 : i32 %false_0 = arith.constant false @@ -115,7 +115,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64> %34 = fircg.ext_rebox %33 : (!fir.box>>) -> !fir.box> return } - func.func private @_FortranAAllocatableSetBounds(!fir.ref>, i32, i64, i64) -> none attributes {fir.runtime} + func.func private @_FortranAAllocatableSetBounds(!fir.ref>, i32, i64, i64) -> () attributes {fir.runtime} fir.global linkonce @_QQclX64756D6D792E6D6C697200 constant : !fir.char<1,11> { %0 = fir.string_lit 
"dummy.mlir\00"(11) : !fir.char<1,11> fir.has_value %0 : !fir.char<1,11> @@ -165,7 +165,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec = dense<32> : vec fir.has_value %0 : !fir.char<1,8> } func.func private @_FortranACUFAllocDescriptor(i64, !fir.ref, i32) -> !fir.ref> attributes {fir.runtime} - func.func private @_FortranACUFFreeDescriptor(!fir.ref>, !fir.ref, i32) -> none attributes {fir.runtime} + func.func private @_FortranACUFFreeDescriptor(!fir.ref>, !fir.ref, i32) -> () attributes {fir.runtime} } // CHECK-LABEL: llvm.func @_QQmain() diff --git a/flang/test/Fir/CUDA/cuda-constructor-2.f90 b/flang/test/Fir/CUDA/cuda-constructor-2.f90 index eb118ccee311c..89fc99b736f4f 100644 --- a/flang/test/Fir/CUDA/cuda-constructor-2.f90 +++ b/flang/test/Fir/CUDA/cuda-constructor-2.f90 @@ -25,7 +25,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry>) -> !fir.ref // CHECK-DAG: %[[CST:.*]] = arith.constant 20 : index // CHECK-DAG: %[[CST2:.*]] = fir.convert %[[CST]] : (index) -> i64 -// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE2]], %[[VAR_ADDR2]], %[[VAR_NAME2]], %[[CST2]]) : (!fir.ref>, !fir.ref, !fir.ref, i64) -> none +// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE2]], %[[VAR_ADDR2]], %[[VAR_NAME2]], %[[CST2]]) : (!fir.ref>, !fir.ref, !fir.ref, i64) -> () // CHECK-DAG: %[[BOX:.*]] = fir.address_of(@_QMmtestsEndev) : !fir.ref>>> // CHECK-DAG: %[[BOXREF:.*]] = fir.convert %[[BOX]] : (!fir.ref>>>) -> !fir.ref // CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE:.*]], %[[BOXREF]], %{{.*}}, %{{.*}}) @@ -59,4 +59,4 @@ module attributes {dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i gpu.return %6 : i32 } } -} \ No newline at end of file +} diff --git a/flang/test/Fir/CUDA/cuda-data-transfer.fir b/flang/test/Fir/CUDA/cuda-data-transfer.fir index 5ed27f1be0a43..415d0015918bb 100644 --- a/flang/test/Fir/CUDA/cuda-data-transfer.fir +++ b/flang/test/Fir/CUDA/cuda-data-transfer.fir @@ -17,7 +17,7 @@ func.func @_QPsub1() 
{ // CHECK: %[[AHOST:.*]]:2 = hlfir.declare %{{.*}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFsub1Eahost"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) // CHECK: %[[AHOST_BOX:.*]] = fir.convert %[[AHOST]]#0 : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsub2() { %0 = cuf.alloc !fir.box>> {bindc_name = "adev", data_attr = #cuf.cuda, uniq_name = "_QFsub2Eadev"} -> !fir.ref>>> @@ -38,7 +38,7 @@ func.func @_QPsub2() { // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX]] : !fir.ref> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[TEMP_CONV:.*]] = fir.convert %[[TEMP_BOX]] : (!fir.ref>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferCstDesc(%[[ADEV_BOX]], %[[TEMP_CONV]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferCstDesc(%[[ADEV_BOX]], %[[TEMP_CONV]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsub3() { %0 = cuf.alloc !fir.box>> {bindc_name = "adev", data_attr = #cuf.cuda, uniq_name = "_QFsub3Eadev"} -> !fir.ref>>> @@ -58,7 +58,7 @@ func.func @_QPsub3() { // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX]] : !fir.ref> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[V_CONV:.*]] = fir.convert %[[TEMP_BOX]] : (!fir.ref>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferCstDesc(%[[ADEV_BOX]], %[[V_CONV]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call 
@_FortranACUFDataTransferCstDesc(%[[ADEV_BOX]], %[[V_CONV]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsub4() { %0 = cuf.alloc !fir.box>> {bindc_name = "adev", data_attr = #cuf.cuda, uniq_name = "_QFsub4Eadev"} -> !fir.ref>>> @@ -81,12 +81,12 @@ func.func @_QPsub4() { // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX]] : !fir.ref>> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[AHOST_BOX:.*]] = fir.convert %[[TEMP_BOX]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[ADEV_BOX]], %[[AHOST_BOX]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[ADEV_BOX]], %[[AHOST_BOX]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () // CHECK: %[[EMBOX:.*]] = fir.embox %[[AHOST]]#0(%[[AHOST_SHAPE]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX1]] : !fir.ref>> // CHECK: %[[AHOST_BOX:.*]] = fir.convert %[[TEMP_BOX1]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsub5(%arg0: !fir.ref {fir.bindc_name = "n"}) { %0 = fir.dummy_scope : !fir.dscope @@ -123,12 +123,12 @@ func.func @_QPsub5(%arg0: !fir.ref {fir.bindc_name = "n"}) { // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX]] : !fir.ref>> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[AHOST_BOX:.*]] = fir.convert %[[TEMP_BOX]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call 
@_FortranACUFDataTransferDescDesc(%[[ADEV_BOX]], %[[AHOST_BOX]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[ADEV_BOX]], %[[AHOST_BOX]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () // CHECK: %[[EMBOX:.*]] = fir.embox %[[AHOST]]#1(%[[SHAPE]]) : (!fir.ref>, !fir.shape<2>) -> !fir.box> // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX1]] : !fir.ref>> // CHECK: %[[AHOST_BOX:.*]] = fir.convert %[[TEMP_BOX1]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[ADEV_BOX:.*]] = fir.convert %[[ADEV]]#0 : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[AHOST_BOX]], %[[ADEV_BOX]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsub6() { %0 = cuf.alloc i32 {bindc_name = "idev", data_attr = #cuf.cuda, uniq_name = "_QFsub6Eidev"} -> !fir.ref @@ -149,12 +149,12 @@ func.func @_QPsub6() { // CHECK: %[[IHOST:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFsub6Eihost"} : (!fir.ref) -> (!fir.ref, !fir.ref) // CHECK: %[[DST:.*]] = fir.convert %[[IHOST]]#0 : (!fir.ref) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[IDEV]]#0 : (!fir.ref) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %c4{{.*}}, %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %c4{{.*}}, %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () // CHECK: %[[LOAD:.*]] = fir.load %[[IHOST]]#0 : !fir.ref // CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[LOAD]] {uniq_name = ".cuf_host_tmp"} : (i32) -> (!fir.ref, !fir.ref, i1) // CHECK: %[[DST:.*]] = 
fir.convert %[[IDEV]]#0 : (!fir.ref) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[ASSOC]]#0 : (!fir.ref) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %c4{{.*}}, %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %c4{{.*}}, %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func @_QPsub7() { %c10 = arith.constant 10 : index @@ -177,11 +177,11 @@ func.func @_QPsub7() { // CHECK: %[[BYTES:.*]] = arith.muli %c10{{.*}}, %c4{{.*}} : i64 // CHECK: %[[DST:.*]] = fir.convert %[[IHOST]]#0 : (!fir.ref>) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[IDEV]]#0 : (!fir.ref>) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %[[BYTES]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %[[BYTES]], %c1{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () // CHECK: %[[BYTES:.*]] = arith.muli %c10{{.*}}, %c4{{.*}} : i64 // CHECK: %[[DST:.*]] = fir.convert %[[IDEV]]#0 : (!fir.ref>) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[IHOST]]#0 : (!fir.ref>) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () fir.global @_QMmtestsEn(dense<[3, 4, 5, 6, 7]> : tensor<5xi32>) {data_attr = #cuf.cuda} : !fir.array<5xi32> func.func @_QPsub8() attributes {fir.bindc_name = "t"} { @@ -206,7 +206,7 @@ func.func @_QPsub8() attributes {fir.bindc_name = "t"} { // CHECK: 
%[[DECL:.*]] = fir.declare %[[ADDR_CONV]] // CHECK: %[[DST:.*]] = fir.convert %[[LOCAL]] : (!fir.ref>) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[DECL]] : (!fir.ref>) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func @_QPsub9() { @@ -231,7 +231,7 @@ func.func @_QPsub9() { // CHECK: %[[DECL:.*]] = fir.declare %[[ADDR_CONV]] // CHECK: %[[DST:.*]] = fir.convert %[[DECL]] : (!fir.ref>) -> !fir.llvm_ptr // CHECK: %[[SRC:.*]] = fir.convert %[[LOCAL]] : (!fir.ref>) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () fir.global @_QMmod1Ea {data_attr = #cuf.cuda} : !fir.box>> { %c0 = arith.constant 0 : index @@ -254,7 +254,7 @@ func.func @_QQdesc_global() attributes {fir.bindc_name = "host_sub"} { // CHECK: %[[GLOBAL_ADDRESS:.*]] = fir.address_of(@_QMmod1Ea) : !fir.ref>>> // CHECK: %[[GLOBAL_DECL:.*]]:2 = hlfir.declare %[[GLOBAL_ADDRESS]] {data_attr = #cuf.cuda, fortran_attrs = #fir.var_attrs, uniq_name = "_QMmod1Ea"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) // CHECK: %[[BOX_NONE:.*]] = fir.convert %[[GLOBAL_DECL:.*]]#0 : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferGlobalDescDesc(%[[BOX_NONE]],{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferGlobalDescDesc(%[[BOX_NONE]],{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () fir.global 
@_QMmod2Eadev {data_attr = #cuf.cuda} : !fir.box>> { %c0 = arith.constant 0 : index @@ -285,7 +285,7 @@ func.func @_QPdesc_global_ptr() { // CHECK: fir.store %[[EMBOX]] to %[[TEMP_BOX]] : !fir.ref>> // CHECK: %[[ADEV_BOXNONE:.*]] = fir.convert %[[DECL_ADEV]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[AHOST_BOXNONE:.*]] = fir.convert %[[TEMP_BOX]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferGlobalDescDesc(%[[ADEV_BOXNONE]], %[[AHOST_BOXNONE]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferGlobalDescDesc(%[[ADEV_BOXNONE]], %[[AHOST_BOXNONE]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPscalar_to_array() { %c1_i32 = arith.constant 1 : i32 @@ -312,7 +312,7 @@ func.func @_QPtest_type() { } // CHECK-LABEL: func.func @_QPtest_type() -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %c12{{.*}}, %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %c12{{.*}}, %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func @_QPtest_array_type() { %c10 = arith.constant 10 : index @@ -331,7 +331,7 @@ func.func @_QPtest_array_type() { // CHECK: %[[CONV_BYTES:.*]] = fir.convert %[[BYTES]] : (index) -> i64 // CHECK: fir.call @_FortranACUFMemAlloc(%[[CONV_BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (i64, i32, !fir.ref, i32) -> !fir.llvm_ptr // CHECK: %[[BYTES:.*]] = arith.muli %c10{{.*}}, %c12{{.*}} : i64 -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func 
@_QPshape_shift() { %c0_i32 = arith.constant 0 : i32 @@ -365,7 +365,7 @@ func.func @_QPshape_shift2() { // CHECK-LABEL: func.func @_QPshape_shift2() // CHECK: %[[C10:.*]] = fir.convert %c10{{.*}} : (index) -> i64 // CHECK: %[[BYTES:.*]] = arith.muli %[[C10]], %c4{{.*}} : i64 -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () fir.global @_QMmod1Ea_dev {data_attr = #cuf.cuda} : !fir.array<4xf32> { %0 = fir.zero_bits !fir.array<4xf32> @@ -407,7 +407,7 @@ func.func @_QQchar_transfer() attributes {fir.bindc_name = "char_transfer"} { // CHECK-LABEL: func.func @_QQchar_transfer() // CHECK: fir.call @_FortranACUFMemAlloc // CHECK: %[[BYTES:.*]] = arith.muli %c10{{.*}}, %c1{{.*}} : i64 -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %{{.*}}, %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func @_QPdevmul(%arg0: !fir.ref> {fir.bindc_name = "b"}, %arg1: !fir.ref {fir.bindc_name = "wa"}, %arg2: !fir.ref {fir.bindc_name = "wb"}) { %c0_i64 = arith.constant 0 : i64 @@ -447,10 +447,10 @@ func.func @_QPdevmul(%arg0: !fir.ref> {fir.bindc_name = "b"} // CHECK: %[[EMBOX:.*]] = fir.embox %{{.*}}(%{{.*}}) [%{{.*}}] : (!fir.ref>, !fir.shape<2>, !fir.slice<2>) -> !fir.box> // CHECK: fir.store %[[EMBOX]] to %[[ALLOCA1]] : !fir.ref>> // CHECK: %[[SRC:.*]] = fir.convert %[[ALLOCA1]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%{{.*}}, %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) 
-> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%{{.*}}, %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () // CHECK: fir.store %[[EMBOX]] to %[[ALLOCA0]] : !fir.ref>> // CHECK: %[[DST:.*]] = fir.convert %[[ALLOCA0]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[DST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%[[DST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPlogical_cst() { %c0_i64 = arith.constant 0 : i64 @@ -470,7 +470,7 @@ func.func @_QPlogical_cst() { // CHECK: %[[EMBOX:.*]] = fir.embox %[[CONST]] : (!fir.ref>) -> !fir.box> // CHECK: fir.store %[[EMBOX]] to %[[DESC]] : !fir.ref>> // CHECK: %[[BOX_NONE:.*]] = fir.convert %[[DESC]] : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferCstDesc(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferCstDesc(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPcallkernel(%arg0: !fir.box>> {fir.bindc_name = "a"}, %arg1: !fir.ref {fir.bindc_name = "b"}, %arg2: !fir.ref {fir.bindc_name = "c"}) { %c0_i64 = arith.constant 0 : i64 @@ -517,7 +517,7 @@ func.func @_QPcallkernel(%arg0: !fir.box>> {fir.bind // CHECK: %[[REBOX1:.*]] = fir.rebox %[[REBOX0]] [%{{.*}}] : (!fir.box>>, !fir.slice<2>) -> !fir.box>> // CHECK: fir.store %[[REBOX1]] to %[[ALLOCA]] : !fir.ref>>> // CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ALLOCA]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDesc(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) 
: (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () func.func @_QPsrc_cst() { %0 = fir.dummy_scope : !fir.dscope @@ -557,7 +557,7 @@ func.func @_QPsrc_cst() { // CHECK: %[[CST:.*]] = arith.constant -4.000000e+00 : f32 // CHECK: fir.store %[[CST]] to %[[ALLOCA]] : !fir.ref // CHECK: %[[CONV:.*]] = fir.convert %[[ALLOCA]] : (!fir.ref) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %[[CONV]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%{{.*}}, %[[CONV]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () func.func @_QPchecksums(%arg0: !fir.box> {cuf.data_attr = #cuf.cuda, fir.bindc_name = "a"}, %arg1: !fir.ref {fir.bindc_name = "n"}) { %c0 = arith.constant 0 : index @@ -580,6 +580,6 @@ func.func @_QPchecksums(%arg0: !fir.box> {cuf.data_attr = #cuf // CHECK-LABEL: func.func @_QPchecksums // CHECK: %[[DST:.*]] = fir.convert %{{.*}} : (!fir.ref>>) -> !fir.ref> // CHECK: %[[SRC:.*]] = fir.convert %{{.*}} : (!fir.ref>>) -> !fir.ref> -// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferDescDescNoRealloc(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.ref>, i32, !fir.ref, i32) -> () } // end of module diff --git a/flang/test/Fir/CUDA/cuda-global-addr.mlir b/flang/test/Fir/CUDA/cuda-global-addr.mlir index ee51875599da6..3e50c7a51f49c 100644 --- a/flang/test/Fir/CUDA/cuda-global-addr.mlir +++ b/flang/test/Fir/CUDA/cuda-global-addr.mlir @@ -34,7 +34,7 @@ func.func @_QQmain() attributes {fir.bindc_name = "test"} { // CHECK: %[[DECL:.*]] = fir.declare %[[DEVICE_ADDR_CONV]](%{{.*}}) {data_attr = #cuf.cuda, uniq_name = "_QMmod1Eadev"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> // CHECK: %[[ARRAY_COOR:.*]] = fir.array_coor 
%[[DECL]](%{{.*}}) %c4{{.*}} : (!fir.ref>, !fir.shape<1>, index) -> !fir.ref // CHECK: %[[ARRAY_COOR_PTR:.*]] = fir.convert %[[ARRAY_COOR]] : (!fir.ref) -> !fir.llvm_ptr -// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[ARRAY_COOR_PTR]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[ARRAY_COOR_PTR]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> () // ----- diff --git a/flang/test/Fir/abstract-result-2.fir b/flang/test/Fir/abstract-result-2.fir index af13d57476e8c..8e59f60c8c244 100644 --- a/flang/test/Fir/abstract-result-2.fir +++ b/flang/test/Fir/abstract-result-2.fir @@ -21,11 +21,11 @@ func.func @_QMi8Pintrinsic_pack0(%arg0: !fir.box> {fir.bin %9 = fir.convert %arg0 : (!fir.box>) -> !fir.box %10 = fir.convert %5 : (!fir.box>) -> !fir.box %11 = fir.convert %arg2 : (!fir.box>) -> !fir.box - %12 = fir.call @_FortranAPack(%8, %9, %10, %11) : (!fir.ref>, !fir.box, !fir.box, !fir.box) -> none + fir.call @_FortranAPack(%8, %9, %10, %11) : (!fir.ref>, !fir.box, !fir.box, !fir.box) -> () %13 = fir.load %1 : !fir.ref>>> return %13 : !fir.box>> } -func.func private @_FortranAPack(!fir.ref>, !fir.box, !fir.box, !fir.box) -> none attributes {fir.runtime} +func.func private @_FortranAPack(!fir.ref>, !fir.box, !fir.box, !fir.box) -> () attributes {fir.runtime} // CHECK-LABEL: func.func private @empty // CHECK-SAME:(!fir.ref>>>, !fir.box> {fir.bindc_name = "array"}, !fir.ref> {fir.bindc_name = "mask"}, !fir.box> {fir.bindc_name = "vector", fir.optional}) diff --git a/flang/test/Fir/array-value-copy-3.fir b/flang/test/Fir/array-value-copy-3.fir index 2840c3c68d701..945a857e46669 100644 --- a/flang/test/Fir/array-value-copy-3.fir +++ b/flang/test/Fir/array-value-copy-3.fir @@ -23,7 +23,7 @@ func.func @test_overlap_with_alloc_components(%arg0: !fir.ref (!fir.array<10x!t_with_alloc_comp>) { %10 = 
fir.array_access %7, %arg1 : (!fir.array<10x!t_with_alloc_comp>, index) -> !fir.ref %11 = fir.array_access %arg2, %arg1 : (!fir.array<10x!t_with_alloc_comp>, index) -> !fir.ref - fir.call @custom_assign(%11, %10) : (!fir.ref, !fir.ref) -> none + fir.call @custom_assign(%11, %10) : (!fir.ref, !fir.ref) -> () %19 = fir.array_amend %arg2, %11 : (!fir.array<10x!t_with_alloc_comp>, !fir.ref) -> !fir.array<10x!t_with_alloc_comp> fir.result %19 : !fir.array<10x!t_with_alloc_comp> } @@ -41,7 +41,7 @@ func.func @test_overlap_with_alloc_components(%arg0: !fir.ref>>}>> // CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]](%[[VAL_9]]) : (!fir.heap>>}>>>, !fir.shape<1>) -> !fir.box>>}>>>> // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (!fir.box>>}>>>>) -> !fir.box -// CHECK: fir.call @_FortranAInitialize(%[[VAL_16]], %{{.*}}, %{{.*}}) : (!fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAInitialize(%[[VAL_16]], %{{.*}}, %{{.*}}) : (!fir.box, !fir.ref, i32) -> () // CHECK: fir.do_loop {{.*}} { // CHECK: fir.call @_FortranAAssign // CHECK: } @@ -52,5 +52,5 @@ func.func @test_overlap_with_alloc_components(%arg0: !fir.ref>>}>>>>) -> !fir.box -// CHECK: %[[VAL_73:.*]] = fir.call @_FortranADestroy(%[[VAL_72]]) : (!fir.box) -> none +// CHECK: fir.call @_FortranADestroy(%[[VAL_72]]) : (!fir.box) -> () // CHECK: fir.freemem %[[VAL_11]] diff --git a/flang/test/Fir/array-value-copy-4.fir b/flang/test/Fir/array-value-copy-4.fir index f120f054f6abd..85def88e6887c 100644 --- a/flang/test/Fir/array-value-copy-4.fir +++ b/flang/test/Fir/array-value-copy-4.fir @@ -43,7 +43,7 @@ func.func @_QMmodPsub1(%arg0: !fir.box>>}>>>) -> !fir.ref> %24 = fir.convert %21 : (!fir.box>>}>>) -> !fir.box %25 = fir.convert %22 : (!fir.ref>) -> !fir.ref - %26 = fir.call @_FortranAAssign(%23, %24, %25, %c9_i32) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + fir.call @_FortranAAssign(%23, %24, %25, %c9_i32) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> () %27 = fir.array_amend %arg2, %19 
: (!fir.array>>}>>, !fir.ref>>}>>) -> !fir.array>>}>> fir.result %27 : !fir.array>>}>> } diff --git a/flang/test/Fir/boxproc-openmp.fir b/flang/test/Fir/boxproc-openmp.fir index 8b714539b5e85..9db053ad93c66 100644 --- a/flang/test/Fir/boxproc-openmp.fir +++ b/flang/test/Fir/boxproc-openmp.fir @@ -14,7 +14,7 @@ omp.private {type = private} @_QFsub1Et1_private_ref_rec__QFsub1Tt : !fir.ref> %4 = fir.convert %2 : (!fir.box ()>}>>) -> !fir.box %5 = fir.convert %3 : (!fir.ref>) -> !fir.ref - %6 = fir.call @_FortranAInitialize(%4, %5, %c1_i32) fastmath : (!fir.box, !fir.ref, i32) -> none + fir.call @_FortranAInitialize(%4, %5, %c1_i32) fastmath : (!fir.box, !fir.ref, i32) -> () //CHECK: omp.yield(%{{.*}} : !fir.ref ()}>>) omp.yield(%1 : !fir.ref ()>}>>) } @@ -52,7 +52,7 @@ omp.private {type = firstprivate} @_QFsub2Et1_firstprivate_ref_box_heap_rec__QFs %4 = fir.convert %arg1 : (!fir.ref ()>}>>>>) -> !fir.ref> %5 = fir.convert %2 : (!fir.box ()>}>>) -> !fir.box %6 = fir.convert %3 : (!fir.ref>) -> !fir.ref - %7 = fir.call @_FortranAAssign(%4, %5, %6, %c5_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + fir.call @_FortranAAssign(%4, %5, %6, %c5_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () //CHECK: omp.yield(%{{.*}} : !fir.ref ()}>>>>) omp.yield(%arg1 : !fir.ref ()>}>>>>) } dealloc { @@ -78,10 +78,10 @@ func.func @_QPsub2() { } return } -func.func private @_FortranAInitialize(!fir.box, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranAInitialize(!fir.box, !fir.ref, i32) -> () attributes {fir.runtime} fir.global linkonce @_QQclXea constant : !fir.char<1,8> { %0 = fir.string_lit "pp.f90\00"(8) : !fir.char<1,8> fir.has_value %0 : !fir.char<1,8> } func.func private @_FortranAAllocatableDeallocate(!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 attributes {fir.runtime} -func.func private @_FortranAAssign(!fir.ref>, !fir.box, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranAAssign(!fir.ref>, !fir.box, !fir.ref, i32) 
-> () attributes {fir.runtime} diff --git a/flang/test/Fir/polymorphic.fir b/flang/test/Fir/polymorphic.fir index 74b29ed6ca729..f9cf6fab6b707 100644 --- a/flang/test/Fir/polymorphic.fir +++ b/flang/test/Fir/polymorphic.fir @@ -169,16 +169,16 @@ func.func @_QMmod2Pinitp(%arg0: !fir.ref>> {fir.bindc_ %1 = fir.load %arg0 : !fir.ref>> %2 = fir.convert %0 : (!fir.ref>>) -> !fir.ref> %3 = fir.convert %1 : (!fir.class>) -> !fir.box - %4 = fir.call @_FortranAPointerAssociate(%2, %3) fastmath : (!fir.ref>, !fir.box) -> none + fir.call @_FortranAPointerAssociate(%2, %3) fastmath : (!fir.ref>, !fir.box) -> () return } -func.func private @_FortranAPointerAssociate(!fir.ref>, !fir.box) -> none attributes {fir.runtime} +func.func private @_FortranAPointerAssociate(!fir.ref>, !fir.box) -> () attributes {fir.runtime} // CHECK-LABEL: define void @_QMmod2Pinitp( // CHECK-SAME: ptr nocapture %[[ARG0:.*]]){{.*}}{ // CHECK: %[[ALLOCA_CLASS_NONE:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } // CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %[[ALLOCA_CLASS_NONE]], ptr %[[ARG0]], i32 40, i1 false) -// CHECK: %{{.*}} = call {} @_FortranAPointerAssociate(ptr @_QMmod2Ep, ptr %[[ALLOCA_CLASS_NONE]]) +// CHECK: call void @_FortranAPointerAssociate(ptr @_QMmod2Ep, ptr %[[ALLOCA_CLASS_NONE]]) // CHECK: ret void fir.global linkonce_odr @_QMmod1E.dt.p1 constant target : 
!fir.type<_QM__fortran_type_infoTderivedtype{binding:!fir.box,name:!fir.box>>}>>>>,name:!fir.box>>,sizeinbytes:i64,uninstantiated:!fir.box>>,kindparameter:!fir.box>>,lenparameterkind:!fir.box>>,component:!fir.box>>,genre:i8,category:i8,kind:i8,rank:i8,__padding0:!fir.array<4xi8>,offset:i64,characterlen:!fir.type<_QM__fortran_type_infoTvalue{genre:i8,__padding0:!fir.array<7xi8>,value:i64}>,derived:!fir.box>>,lenvalue:!fir.box,value:i64}>>>>,bounds:!fir.box,value:i64}>>>>,initialization:!fir.type<_QM__fortran_builtinsT__builtin_c_ptr{__address:i64}>}>>>>,procptr:!fir.box>>,offset:i64,initialization:!fir.type<_QM__fortran_builtinsT__builtin_c_funptr{__address:i64}>}>>>>,special:!fir.box,proc:!fir.type<_QM__fortran_builtinsT__builtin_c_funptr{__address:i64}>}>>>>,specialbitset:i32,hasparent:i8,noinitializationneeded:i8,nodestructionneeded:i8,nofinalizationneeded:i8,__padding0:!fir.array<4xi8>}> { diff --git a/flang/test/Fir/rebox_assumed_rank_codegen.fir b/flang/test/Fir/rebox_assumed_rank_codegen.fir index 3c4de0bef509f..b4336b9279493 100644 --- a/flang/test/Fir/rebox_assumed_rank_codegen.fir +++ b/flang/test/Fir/rebox_assumed_rank_codegen.fir @@ -55,7 +55,7 @@ func.func private @takes_assumed_rank_t(!fir.box>) // CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref // CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> none +// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> () // CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref>> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box>) -> !fir.box> // CHECK: fir.call @somefunc(%[[VAL_9]]) : (!fir.box>) -> () @@ -78,7 +78,7 @@ func.func 
private @takes_assumed_rank_t(!fir.box>) // CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref // CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> none +// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> () // CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref>>> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box>>) -> !fir.box>> // CHECK: fir.call @somefuncalloc(%[[VAL_9]]) : (!fir.box>>) -> () @@ -93,7 +93,7 @@ func.func private @takes_assumed_rank_t(!fir.box>) // CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref // CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> none +// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> () // CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref>>> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box>>) -> !fir.box>> // CHECK: fir.call @somefuncpointer(%[[VAL_9]]) : (!fir.box>>) -> () @@ -109,7 +109,7 @@ func.func private @takes_assumed_rank_t(!fir.box>) // CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box,x:f32}>>>) -> !fir.box // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.tdesc>) -> !fir.ref -// CHECK: %[[VAL_8:.*]] = fir.call 
@_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_7]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> none +// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_7]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref>, !fir.box, !fir.ref, i8, i32) -> () // CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_3]] : !fir.ref>>> // CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (!fir.box>>) -> !fir.box>> // CHECK: fir.call @somefunct1(%[[VAL_10]]) : (!fir.box>>) -> () @@ -119,6 +119,6 @@ func.func private @takes_assumed_rank_t(!fir.box>) // CHECK-LABEL: func.func @test_poly_to_nonepoly( // CHECK: %[[VAL_4:.*]] = fir.type_desc !fir.type // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.tdesc>) -> !fir.ref -// CHECK: %[[VAL_8:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%{{.*}}, %{{.*}}, %[[VAL_7]], +// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%{{.*}}, %{{.*}}, %[[VAL_7]], -// CHECK: func.func private @_FortranACopyAndUpdateDescriptor(!fir.ref> {llvm.nocapture}, !fir.box {llvm.nocapture}, !fir.ref, i8, i32) -> none attributes {fir.runtime} +// CHECK: func.func private @_FortranACopyAndUpdateDescriptor(!fir.ref> {llvm.nocapture}, !fir.box {llvm.nocapture}, !fir.ref, i8, i32) attributes {fir.runtime} diff --git a/flang/test/Fir/tbaa-codegen2.fir b/flang/test/Fir/tbaa-codegen2.fir index 12232a29aae4a..8f8b6a29129e7 100644 --- a/flang/test/Fir/tbaa-codegen2.fir +++ b/flang/test/Fir/tbaa-codegen2.fir @@ -43,7 +43,7 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.targ %9 = fir.convert %0 : (!fir.ref>>) -> !fir.ref> %10 = fir.convert %7 : (!fir.box>) -> !fir.box %11 = fir.convert %8 : (!fir.ref>) -> !fir.ref - %12 = fir.call @_FortranAAssign(%9, %10, %11, %c3_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + fir.call @_FortranAAssign(%9, %10, %11, %c3_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () fir.freemem %6 : !fir.heap> %13 = fir.array_coor %2 %c2 : (!fir.box>, index) -> 
!fir.ref // load modified not to have tbaa @@ -53,7 +53,7 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.targ fir.store %14 to %15 {tbaa = [#tbaa_tag]} : !fir.ref return } - func.func private @_FortranAAssign(!fir.ref>, !fir.box, !fir.ref, i32) -> none attributes {fir.runtime} + func.func private @_FortranAAssign(!fir.ref>, !fir.box, !fir.ref, i32) -> () attributes {fir.runtime} fir.global linkonce @_QQclX2F746D702F73696D706C652E66393000 constant : !fir.char<1,16> { %0 = fir.string_lit "/tmp/simple.f90\00"(16) : !fir.char<1,16> fir.has_value %0 : !fir.char<1,16> diff --git a/flang/test/HLFIR/all-lowering.fir b/flang/test/HLFIR/all-lowering.fir index e83378eacf9c9..df6771e565efc 100644 --- a/flang/test/HLFIR/all-lowering.fir +++ b/flang/test/HLFIR/all-lowering.fir @@ -50,7 +50,7 @@ func.func @_QPall2(%arg0: !fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -96,7 +96,7 @@ func.func @_QPall3(%arg0: !fir.ref>> {fir.bindc_nam // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) +// CHECK: fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = 
fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -144,7 +144,7 @@ func.func @_QPall4(%arg0: !fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAllDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] diff --git a/flang/test/HLFIR/any-elemental.fir b/flang/test/HLFIR/any-elemental.fir index 6e233068d2e9b..a7c559679d965 100644 --- a/flang/test/HLFIR/any-elemental.fir +++ b/flang/test/HLFIR/any-elemental.fir @@ -161,7 +161,7 @@ func.func @_Qtest_recursive() attributes {fir.bindc_name = "test"} { %25 = arith.xori %24, %true : i1 cf.cond_br %25, ^bb1, ^bb2 ^bb1: // pred: ^bb0 - %26 = fir.call @_FortranAStopStatement(%c2_i32, %false, %false) fastmath : (i32, i1, i1) -> none + fir.call @_FortranAStopStatement(%c2_i32, %false, %false) fastmath : (i32, i1, i1) -> () fir.unreachable ^bb2: // pred: ^bb0 return diff --git a/flang/test/HLFIR/any-lowering.fir b/flang/test/HLFIR/any-lowering.fir index 039146727d3f5..72fcdd37b6193 100644 --- a/flang/test/HLFIR/any-lowering.fir +++ b/flang/test/HLFIR/any-lowering.fir @@ -52,7 +52,7 @@ func.func @_QPany2(%arg0: !fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], 
%[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -99,7 +99,7 @@ func.func @_QPany3(%arg0: !fir.ref>> {fir.bindc_nam // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) +// CHECK: fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -148,7 +148,7 @@ func.func @_QPany4(%arg0: !fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAnyDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] diff --git a/flang/test/HLFIR/assign-codegen.fir b/flang/test/HLFIR/assign-codegen.fir index e0dcc06d75a9c..581d1ab0e7739 100644 --- a/flang/test/HLFIR/assign-codegen.fir +++ b/flang/test/HLFIR/assign-codegen.fir @@ -142,7 +142,7 @@ func.func @array(%arg0: !fir.box>, %arg1: !fir.ref>> // CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_10]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_29:.*]] = fir.call 
@_FortranAAssign(%[[VAL_26]], %[[VAL_27]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssign(%[[VAL_26]], %[[VAL_27]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @array_temp(%arg0: !fir.box>, %arg1: !fir.ref>) { @@ -167,7 +167,7 @@ func.func @array_temp(%arg0: !fir.box>, %arg1: !fir.ref>> // CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_10]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_29:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_26]], %[[VAL_27]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_26]], %[[VAL_27]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @test_scalar_to_array(%lhs: !fir.box>, %rhs: i32) { @@ -184,7 +184,7 @@ func.func @test_scalar_to_array(%lhs: !fir.box>, %rhs: i32) { // CHECK: fir.store %[[VAL_0]] to %[[VAL_2]] : !fir.ref>> // CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_6]] : (!fir.box) -> !fir.box -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]] +// CHECK: fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]] func.func @test_i1_scalar_to_array(%lhs: !fir.box>>, %rhs: i1) { @@ -196,7 +196,7 @@ func.func @test_i1_scalar_to_array(%lhs: !fir.box>> // CHECK: %[[VAL_6:.*]] = fir.alloca !fir.logical<4> // CHECK: %[[VAL_7:.*]] = fir.embox %[[VAL_6]] : (!fir.ref>) -> !fir.box> // CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_7]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_14:.*]] = fir.call @_FortranAAssign(%{{.*}}, %[[VAL_12]] +// CHECK: fir.call @_FortranAAssign(%{{.*}}, %[[VAL_12]] func.func @alloc_assign(%arg0: !fir.ref>>>, %arg1: !fir.box>) { hlfir.assign %arg1 to %arg0 realloc : !fir.box>, !fir.ref>>> @@ -207,7 +207,7 @@ func.func @alloc_assign(%arg0: !fir.ref>>>, // CHECK-SAME: 
%[[VAL_1:.*]]: !fir.box>) { // CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.box>) -> !fir.box -// CHECK: fir.call @_FortranAAssign(%[[VAL_2]], %[[VAL_3]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssign(%[[VAL_2]], %[[VAL_3]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @alloc_assign_temp(%arg0: !fir.ref>>>, %arg1: !fir.box>) { hlfir.assign %arg1 to %arg0 realloc temporary_lhs : !fir.box>, !fir.ref>>> @@ -218,7 +218,7 @@ func.func @alloc_assign_temp(%arg0: !fir.ref>) { // CHECK: %[[VAL_2:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.box>) -> !fir.box -// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_2]], %[[VAL_3]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_2]], %[[VAL_3]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @test_alloc_assign_explicit_length_character(%lhs: !fir.ref>>>>, %rhs: !fir.box>>) { hlfir.assign %rhs to %lhs realloc keep_lhs_len : !fir.box>>, !fir.ref>>>> @@ -229,7 +229,7 @@ func.func @test_alloc_assign_explicit_length_character(%lhs: !fir.ref>>) { // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.box>>) -> !fir.box -// CHECK: %[[VAL_10:.*]] = fir.call @_FortranAAssignExplicitLengthCharacter(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignExplicitLengthCharacter(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @test_alloc_assign_polymorphic(%lhs: !fir.ref>>>>, %rhs: !fir.class>>) { hlfir.assign %rhs to %lhs realloc : !fir.class>>, !fir.ref>>>> @@ -240,7 +240,7 @@ func.func 
@test_alloc_assign_polymorphic(%lhs: !fir.ref>>) { // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.class>>) -> !fir.box -// CHECK: %[[VAL_10:.*]] = fir.call @_FortranAAssignPolymorphic(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignPolymorphic(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @assing_scalar_int_to_polymorphic(%arg0: !fir.ref>>) { %c123_i32 = arith.constant 123 : i32 @@ -258,7 +258,7 @@ func.func @assing_scalar_int_to_polymorphic(%arg0: !fir.ref) -> !fir.box // CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_4]] : (!fir.box) -> !fir.box -// CHECK: %[[VAL_11:.*]] = fir.call @_FortranAAssignPolymorphic(%[[VAL_8]], %[[VAL_9]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignPolymorphic(%[[VAL_8]], %[[VAL_9]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @assign_i1_to_polymorphic(%arg0: !fir.ref>>) { %false = arith.constant false @@ -279,7 +279,7 @@ func.func @assign_i1_to_polymorphic(%arg0: !fir.ref>> // CHECK: %[[VAL_6:.*]] = fir.embox %[[VAL_5]] : (!fir.ref>) -> !fir.box> // CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_6]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranAAssignPolymorphic(%[[VAL_10]], %[[VAL_11]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignPolymorphic(%[[VAL_10]], %[[VAL_11]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @test_alloc_assign_polymorphic_temp(%lhs: !fir.ref>>>>, %rhs: !fir.class>>) { hlfir.assign %rhs to %lhs realloc temporary_lhs : !fir.class>>, !fir.ref>>>> @@ -290,7 +290,7 @@ 
func.func @test_alloc_assign_polymorphic_temp(%lhs: !fir.ref>>) { // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.class>>) -> !fir.box -// CHECK: %[[VAL_10:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () func.func @test_allocatable_component(%arg0: !fir.ref>>}>> {fir.bindc_name = "x", fir.target}, %arg1: !fir.ref>>}>> {fir.bindc_name = "y", fir.target}) { %4:2 = hlfir.declare %arg0 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtestEx"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) @@ -310,7 +310,7 @@ func.func @test_allocatable_component(%arg0: !fir.ref>>}>>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_6]] : (!fir.box>>}>>) -> !fir.box // CHECK: %[[VAL_12:.*]] = fir.convert %{{.*}} : (!fir.ref>) -> !fir.ref -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () // CHECK: return // CHECK: } @@ -332,7 +332,7 @@ func.func @test_allocatable_component_temp(%arg0: !fir.ref>>}>>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_6]] : (!fir.box>>}>>) -> !fir.box // CHECK: %[[VAL_12:.*]] = fir.convert %{{.*}} : (!fir.ref>) -> !fir.ref -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () // CHECK: return // CHECK: } @@ -357,7 +357,7 @@ func.func 
@_QFPtest_scalar_lhs_finalization(%arg0: !fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_5]] : (!fir.box>) -> !fir.box // CHECK: %[[VAL_12:.*]] = fir.convert %{{.*}} : (!fir.ref>) -> !fir.ref -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssign(%[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %{{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () // CHECK: return // CHECK: } @@ -423,7 +423,7 @@ func.func @test_upoly_expr_assignment(%arg0: !fir.class> {fir // CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_12]] : (!fir.class) -> !fir.box // CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_23]] : (!fir.ref>) -> !fir.ref -// CHECK: %[[VAL_29:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_26]], %[[VAL_27]], %[[VAL_28]], %[[VAL_25]]) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_26]], %[[VAL_27]], %[[VAL_28]], %[[VAL_25]]) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () // CHECK: } // CHECK: return // CHECK: } diff --git a/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 b/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 index feea3712a822c..0f904041b7101 100644 --- a/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 +++ b/flang/test/HLFIR/assumed_shape_with_value_keyword.f90 @@ -140,7 +140,7 @@ subroutine test_optional3(x) ! CHECK: %[[C0_I32:.*]] = arith.constant 0 : i32 ! CHECK: %[[FALSE:.*]] = arith.constant false ! CHECK: %[[FALSE_0:.*]] = arith.constant false -! CHECK: %[[VAL_2:.*]] = fir.call @_FortranAStopStatement(%[[C0_I32]], %[[FALSE]], %[[FALSE]]_0) fastmath : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[C0_I32]], %[[FALSE]], %[[FALSE]]_0) fastmath : (i32, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: b2: // pred: ^bb0 ! 
CHECK: return diff --git a/flang/test/HLFIR/boxchar_emboxing.f90 b/flang/test/HLFIR/boxchar_emboxing.f90 index c25a5c283e369..787aa8325a8c8 100644 --- a/flang/test/HLFIR/boxchar_emboxing.f90 +++ b/flang/test/HLFIR/boxchar_emboxing.f90 @@ -22,7 +22,7 @@ ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_10]] : (index) -> i64 ! CHECK: %[[VAL_14:.*]] = arith.constant false ! CHECK: %[[VAL_15:.*]] = arith.constant false -! CHECK: %[[VAL_16:.*]] = fir.call @_FortranAStopStatementText(%[[VAL_12]], %[[VAL_13]], %[[VAL_14]], %[[VAL_15]]) fastmath : (!fir.ref, i64, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatementText(%[[VAL_12]], %[[VAL_13]], %[[VAL_14]], %[[VAL_15]]) fastmath : (!fir.ref, i64, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: ^bb3: ! CHECK: return @@ -66,7 +66,7 @@ end subroutine test1 ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (index) -> i64 ! CHECK: %[[VAL_16:.*]] = arith.constant false ! CHECK: %[[VAL_17:.*]] = arith.constant false -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAStopStatementText(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref, i64, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatementText(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref, i64, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: ^bb3: ! 
CHECK: return diff --git a/flang/test/HLFIR/bufferize-destroy-for-derived.fir b/flang/test/HLFIR/bufferize-destroy-for-derived.fir index a3c756682777f..618ebf8028225 100644 --- a/flang/test/HLFIR/bufferize-destroy-for-derived.fir +++ b/flang/test/HLFIR/bufferize-destroy-for-derived.fir @@ -25,7 +25,7 @@ func.func @_QPtest1(%arg0: !fir.box>}>>>, !fir.box>}>>> // CHECK-NEXT: %[[VAL_18:.*]] = fir.box_addr %[[VAL_7]]#0 : (!fir.box>}>>>) -> !fir.heap>}>>> // CHECK-NEXT: %[[VAL_19:.*]] = fir.convert %[[VAL_7]]#0 : (!fir.box>}>>>) -> !fir.box -// CHECK-NEXT: %[[VAL_20:.*]] = fir.call @_FortranADestroyWithoutFinalization(%[[VAL_19]]) : (!fir.box) -> none +// CHECK-NEXT: fir.call @_FortranADestroyWithoutFinalization(%[[VAL_19]]) : (!fir.box) -> () // CHECK-NEXT: fir.freemem %[[VAL_18]] : !fir.heap>}>>> // CHECK-NEXT: return // CHECK-NEXT: } @@ -57,9 +57,9 @@ func.func @_QPtest2(%arg0: !fir.box>}>>>) -> !fir.box // CHECK-NEXT: %[[VAL_23:.*]] = fir.convert %[[VAL_19]] : (!fir.ref>) -> !fir.ref -// CHECK-NEXT: %[[VAL_24:.*]] = fir.call @_FortranAFinalize(%[[VAL_22]], %[[VAL_23]], %[[VAL_21]]) : (!fir.box, !fir.ref, i32) -> none +// CHECK-NEXT: fir.call @_FortranAFinalize(%[[VAL_22]], %[[VAL_23]], %[[VAL_21]]) : (!fir.box, !fir.ref, i32) -> () // CHECK-NEXT: %[[VAL_25:.*]] = fir.convert %[[VAL_7]]#0 : (!fir.box>}>>>) -> !fir.box -// CHECK-NEXT: %[[VAL_26:.*]] = fir.call @_FortranADestroyWithoutFinalization(%[[VAL_25]]) : (!fir.box) -> none +// CHECK-NEXT: fir.call @_FortranADestroyWithoutFinalization(%[[VAL_25]]) : (!fir.box) -> () // CHECK-NEXT: fir.freemem %[[VAL_18]] : !fir.heap>}>>> // CHECK-NEXT: return // CHECK-NEXT: } @@ -91,7 +91,7 @@ func.func @_QPtest3(%arg0: !fir.box>> // CHECK-NEXT: %[[VAL_21:.*]] = arith.constant {{[0-9]*}} : i32 // CHECK-NEXT: %[[VAL_22:.*]] = fir.convert %[[VAL_7]]#0 : (!fir.box>>) -> !fir.box // CHECK-NEXT: %[[VAL_23:.*]] = fir.convert %[[VAL_19]] : (!fir.ref>) -> !fir.ref -// CHECK-NEXT: %[[VAL_24:.*]] = fir.call @_FortranAFinalize(%[[VAL_22]], 
%[[VAL_23]], %[[VAL_21]]) : (!fir.box, !fir.ref, i32) -> none +// CHECK-NEXT: fir.call @_FortranAFinalize(%[[VAL_22]], %[[VAL_23]], %[[VAL_21]]) : (!fir.box, !fir.ref, i32) -> () // CHECK-NEXT: fir.freemem %[[VAL_18]] : !fir.heap>> // CHECK-NEXT: return // CHECK-NEXT: } diff --git a/flang/test/HLFIR/bufferize-end-associate-for-derived.fir b/flang/test/HLFIR/bufferize-end-associate-for-derived.fir index 089fe574893db..aad297d0b072f 100644 --- a/flang/test/HLFIR/bufferize-end-associate-for-derived.fir +++ b/flang/test/HLFIR/bufferize-end-associate-for-derived.fir @@ -16,7 +16,7 @@ func.func @_QPtest1(%arg0: !fir.box) -> none +// CHECK: fir.call @_FortranADestroyWithoutFinalization(%{{.*}}) : (!fir.box) -> () // CHECK-NOT: fir.call @_Fortran func.func @_QPtest2(%arg0: !fir.box>}>>> {fir.bindc_name = "x"}) { @@ -33,7 +33,7 @@ func.func @_QPtest2(%arg0: !fir.box) -> none +// CHECK: fir.call @_FortranADestroyWithoutFinalization(%{{.*}}) : (!fir.box) -> () // CHECK-NOT: fir.call @_Fortran func.func @_QPtest3(%arg0: !fir.box>> {fir.bindc_name = "x"}) { diff --git a/flang/test/HLFIR/bufferize-poly-expr.fir b/flang/test/HLFIR/bufferize-poly-expr.fir index dfa62a9ac5ab7..49c2347b2b26d 100644 --- a/flang/test/HLFIR/bufferize-poly-expr.fir +++ b/flang/test/HLFIR/bufferize-poly-expr.fir @@ -24,7 +24,7 @@ func.func @test_poly_expr_without_associate() { // CHECK: %[[VAL_9:.*]] = arith.constant 0 : i32 // CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_4]]#1 : (!fir.class>>) -> !fir.box -// CHECK: %[[VAL_12:.*]] = fir.call @_FortranAAllocatableApplyMold(%[[VAL_10]], %[[VAL_11]], %[[VAL_9]]) : (!fir.ref>, !fir.box, i32) -> none +// CHECK: fir.call @_FortranAAllocatableApplyMold(%[[VAL_10]], %[[VAL_11]], %[[VAL_9]]) : (!fir.ref>, !fir.box, i32) -> () // CHECK: hlfir.assign %[[VAL_4]]#0 to %[[VAL_8]]#0 realloc temporary_lhs : !fir.class>>, !fir.ref>>> // CHECK: %[[VAL_8B:.*]] = fir.load %[[VAL_8]]#0 // CHECK: 
%[[VAL_13:.*]] = fir.undefined tuple>>, i1> @@ -47,7 +47,7 @@ func.func @test_poly_expr_with_associate(%arg1: !fir.class !fir.shape<1> %9:3 = hlfir.associate %6(%8) {uniq_name = ".tmp.assign"} : (!hlfir.expr?>, !fir.shape<1>) -> (!fir.class>>>, !fir.class>>>, i1) %10 = fir.convert %0 : (!fir.ref>>>>) -> !fir.box - %11 = fir.call @_FortranADestroy(%10) fastmath : (!fir.box) -> none + fir.call @_FortranADestroy(%10) fastmath : (!fir.box) -> () %c3 = arith.constant 3 : index %12 = fir.shape %c3 : (index) -> !fir.shape<1> %c1 = arith.constant 1 : index @@ -79,7 +79,7 @@ func.func @test_poly_expr_with_associate(%arg1: !fir.class>>>>) -> !fir.ref> // CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_5]] : (!fir.class>>>) -> !fir.box -// CHECK: %[[VAL_18:.*]] = fir.call @_FortranAAllocatableApplyMold(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) : (!fir.ref>, !fir.box, i32) -> none +// CHECK: fir.call @_FortranAAllocatableApplyMold(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) : (!fir.ref>, !fir.box, i32) -> () // CHECK: hlfir.assign %[[VAL_5]] to %[[VAL_14]]#0 realloc temporary_lhs : !fir.class>>>, !fir.ref>>>> // CHECK: %[[VAL_14B:.*]] = fir.load %[[VAL_14]]#0 // CHECK: %[[VAL_19:.*]] = fir.undefined tuple>>>, i1> @@ -89,7 +89,7 @@ func.func @test_poly_expr_with_associate(%arg1: !fir.class>>>, index) -> (index, index, index) // CHECK: %[[VAL_24:.*]] = fir.shape %[[VAL_23]]#1 : (index) -> !fir.shape<1> // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>>) -> !fir.box -// CHECK: %[[VAL_28:.*]] = fir.call @_FortranADestroy(%[[VAL_27]]) fastmath : (!fir.box) -> none +// CHECK: fir.call @_FortranADestroy(%[[VAL_27]]) fastmath : (!fir.box) -> () // CHECK: %[[VAL_29:.*]] = arith.constant 3 : index // CHECK: %[[VAL_30:.*]] = fir.shape %[[VAL_29]] : (index) -> !fir.shape<1> // CHECK: %[[VAL_31:.*]] = arith.constant 1 : index diff --git a/flang/test/HLFIR/bufferize01.fir b/flang/test/HLFIR/bufferize01.fir index 02ac6076268af..40e2769e459c1 100644 --- a/flang/test/HLFIR/bufferize01.fir 
+++ b/flang/test/HLFIR/bufferize01.fir @@ -27,7 +27,7 @@ // CHECK: %[[VAL_15:.*]] = fir.address_of(@_QQclXce30ef70ff16a711a97719fb946c0b3d) : !fir.ref> // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_6]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_15]] : (!fir.ref>) -> !fir.ref -// CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_14]], %[[VAL_16]], %[[VAL_2]], %[[VAL_1]], %[[VAL_17]], %[[VAL_0]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_14]], %[[VAL_16]], %[[VAL_2]], %[[VAL_1]], %[[VAL_17]], %[[VAL_0]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> () // CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_10]]#0 : !fir.ref>>> // CHECK: %[[VAL_20:.*]] = fir.box_addr %[[VAL_19]] : (!fir.box>>) -> !fir.heap> // CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_10]]#0 : !fir.ref>>> @@ -43,7 +43,7 @@ // CHECK: %[[VAL_31:.*]]:2 = fir.unboxchar %[[VAL_30]] : (!fir.boxchar<1>) -> (!fir.ref>, index) // CHECK: %[[VAL_32:.*]] = fir.embox %[[VAL_31]]#0 typeparams %[[VAL_29]] : (!fir.ref>, index) -> !fir.box> // CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_32]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_34:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_14]], %[[VAL_33]]) fastmath : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_14]], %[[VAL_33]]) fastmath : (!fir.llvm_ptr, !fir.box) -> () // CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_6]] : !fir.ref>>>> // CHECK: %[[VAL_36:.*]] = fir.undefined tuple>>>, i1> // CHECK: %[[VAL_37:.*]] = fir.insert_value %[[VAL_36]], %[[VAL_2]], [1 : index] : (tuple>>>, i1>, i1) -> tuple>>>, i1> @@ -101,7 +101,7 @@ func.func @_QPtest1() { %10 = fir.address_of(@_QQclXce30ef70ff16a711a97719fb946c0b3d) : !fir.ref> %11 = fir.convert %1 : (!fir.ref>>>>) -> !fir.ref> %12 = fir.convert %10 : (!fir.ref>) -> !fir.ref - %13 = fir.call 
@_FortranAInitArrayConstructorVector(%9, %11, %true, %c80_i32, %12, %c1_i32) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> none + fir.call @_FortranAInitArrayConstructorVector(%9, %11, %true, %c80_i32, %12, %c1_i32) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> () %14 = fir.load %5#0 : !fir.ref>>> %15 = fir.box_addr %14 : (!fir.box>>) -> !fir.heap> %16 = fir.load %5#0 : !fir.ref>>> @@ -117,7 +117,7 @@ func.func @_QPtest1() { %26:2 = fir.unboxchar %25 : (!fir.boxchar<1>) -> (!fir.ref>, index) %27 = fir.embox %26#0 typeparams %24 : (!fir.ref>, index) -> !fir.box> %28 = fir.convert %27 : (!fir.box>) -> !fir.box - %29 = fir.call @_FortranAPushArrayConstructorValue(%9, %28) fastmath : (!fir.llvm_ptr, !fir.box) -> none + fir.call @_FortranAPushArrayConstructorValue(%9, %28) fastmath : (!fir.llvm_ptr, !fir.box) -> () %30 = fir.load %1 : !fir.ref>>>> %31 = hlfir.as_expr %30 move %true : (!fir.box>>>, i1) -> !hlfir.expr<1x!fir.char<1,?>> %32 = fir.box_elesize %30 : (!fir.box>>>) -> index @@ -137,12 +137,12 @@ func.func @_QPtest1() { hlfir.destroy %31 : !hlfir.expr<1x!fir.char<1,?>> return } -func.func private @_FortranAInitArrayConstructorVector(!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranAInitArrayConstructorVector(!fir.llvm_ptr, !fir.ref>, i1, i32, !fir.ref, i32) -> () attributes {fir.runtime} fir.global linkonce @_QQclXce30ef70ff16a711a97719fb946c0b3d constant : !fir.char<1,1> { %0 = fir.string_lit "\00"(1) : !fir.char<1,1> fir.has_value %0 : !fir.char<1,1> } -func.func private @_FortranAPushArrayConstructorValue(!fir.llvm_ptr, !fir.box) -> none attributes {fir.runtime} +func.func private @_FortranAPushArrayConstructorValue(!fir.llvm_ptr, !fir.box) -> () attributes {fir.runtime} // ----- diff --git a/flang/test/HLFIR/copy-in-out-codegen.fir b/flang/test/HLFIR/copy-in-out-codegen.fir index 8031536550bdf..f4ea36c2244e5 100644 --- 
a/flang/test/HLFIR/copy-in-out-codegen.fir +++ b/flang/test/HLFIR/copy-in-out-codegen.fir @@ -16,7 +16,7 @@ func.func @test_copy_in(%box: !fir.box>, %temp: !fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_0]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_11:.*]] = fir.call @_FortranACopyInAssign(%[[VAL_8]], %[[VAL_9]], +// CHECK: fir.call @_FortranACopyInAssign(%[[VAL_8]], %[[VAL_9]], // CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_1]] : !fir.ref>>> // CHECK: %[[VAL_13:.*]] = fir.rebox %[[VAL_12]] : (!fir.box>>) -> !fir.box> // CHECK: fir.result %[[VAL_13]] : !fir.box> @@ -42,7 +42,7 @@ func.func @test_copy_in_optional(%box: !fir.box>, %temp: !fir. // CHECK: } else { // CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_0]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranACopyInAssign(%[[VAL_10]], %[[VAL_11]], +// CHECK: fir.call @_FortranACopyInAssign(%[[VAL_10]], %[[VAL_11]], // CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_1]] : !fir.ref>>> // CHECK: %[[VAL_15:.*]] = fir.rebox %[[VAL_14]] : (!fir.box>>) -> !fir.box> // CHECK: fir.result %[[VAL_15]] : !fir.box> @@ -68,7 +68,7 @@ func.func @test_copy_out_no_copy_back(%temp: !fir.ref>>> // CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: %[[VAL_9:.*]] = fir.call @_FortranACopyOutAssign(%[[VAL_6]], %[[VAL_7]], +// CHECK: fir.call @_FortranACopyOutAssign(%[[VAL_6]], %[[VAL_7]], // CHECK: } // CHECK: return // CHECK: } @@ -86,7 +86,7 @@ func.func @test_copy_out_copy_back(%box: !fir.box>, %temp: !fi // CHECK: fir.store %[[VAL_0]] to %[[VAL_3]] : !fir.ref>> // CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>) -> !fir.ref> // CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: %[[VAL_10:.*]] = fir.call @_FortranACopyOutAssign(%[[VAL_7]], %[[VAL_8]], +// CHECK: 
fir.call @_FortranACopyOutAssign(%[[VAL_7]], %[[VAL_8]], // CHECK: } // CHECK: return // CHECK: } @@ -105,7 +105,7 @@ func.func @test_copy_in_poly(%poly : !fir.class>>>>) -> !fir.ref> // CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_0]] : (!fir.class>>) -> !fir.box -// CHECK: %[[VAL_11:.*]] = fir.call @_FortranACopyInAssign(%[[VAL_8]], %[[VAL_9]], +// CHECK: fir.call @_FortranACopyInAssign(%[[VAL_8]], %[[VAL_9]], // CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_1]] : !fir.ref>>>> // CHECK: %[[VAL_13:.*]] = fir.rebox %[[VAL_12]] : (!fir.class>>>) -> !fir.class>> // CHECK: fir.result %[[VAL_13]] : !fir.class>> diff --git a/flang/test/HLFIR/count-lowering-default-int-kinds.fir b/flang/test/HLFIR/count-lowering-default-int-kinds.fir index 68bc7fdbaad87..4869ec688c825 100644 --- a/flang/test/HLFIR/count-lowering-default-int-kinds.fir +++ b/flang/test/HLFIR/count-lowering-default-int-kinds.fir @@ -9,7 +9,7 @@ module attributes {fir.defaultkind = "a1c4d8i8l4r4", fir.kindmap = ""} { } // CHECK-LABEL: func.func @test_i8 // CHECK: %[[KIND:.*]] = arith.constant 8 : i32 -// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = ""} { func.func @test_i4(%arg0: !fir.box>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr { @@ -19,7 +19,7 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = ""} { } // CHECK-LABEL: func.func @test_i4 // CHECK: %[[KIND:.*]] = arith.constant 4 : i32 -// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, 
i32) -> () module attributes {fir.defaultkind = "a1c4d8i2l4r4", fir.kindmap = ""} { func.func @test_i2(%arg0: !fir.box>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr { @@ -29,7 +29,7 @@ module attributes {fir.defaultkind = "a1c4d8i2l4r4", fir.kindmap = ""} { } // CHECK-LABEL: func.func @test_i2 // CHECK: %[[KIND:.*]] = arith.constant 2 : i32 -// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () module attributes {fir.defaultkind = "a1c4d8i1l4r4", fir.kindmap = ""} { func.func @test_i1(%arg0: !fir.box>> {fir.bindc_name = "x"}, %arg1: i64) -> !hlfir.expr { @@ -39,4 +39,4 @@ module attributes {fir.defaultkind = "a1c4d8i1l4r4", fir.kindmap = ""} { } // CHECK-LABEL: func.func @test_i1 // CHECK: %[[KIND:.*]] = arith.constant 1 : i32 -// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%{{.*}}, %{{.*}}, %{{.*}}, %[[KIND]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () diff --git a/flang/test/HLFIR/count-lowering.fir b/flang/test/HLFIR/count-lowering.fir index c3309724981a3..a314b507d048c 100644 --- a/flang/test/HLFIR/count-lowering.fir +++ b/flang/test/HLFIR/count-lowering.fir @@ -51,7 +51,7 @@ func.func @_QPcount2(%arg0: !fir.box>> {fir.bindc // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], 
%[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -99,7 +99,7 @@ func.func @_QPcount3(%arg0: !fir.ref> {fir.bindc_name = "s"}) // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) +// CHECK: fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -150,7 +150,7 @@ func.func @_QPcount4(%arg0: !fir.box>> {fir.bindc // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]]#1 -// CHECK: %[[NONE:.*]] = fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim(%[[RET_ARG]], %[[MASK_ARG]], %[[DIM]], %[[KIND]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] diff --git a/flang/test/HLFIR/cshift-lowering.fir b/flang/test/HLFIR/cshift-lowering.fir index 386b81c4dbff6..44408d785f682 100644 --- a/flang/test/HLFIR/cshift-lowering.fir +++ b/flang/test/HLFIR/cshift-lowering.fir @@ -25,7 +25,7 @@ func.func @cshift1(%arg0: !fir.box> {fir.bindc_name = "a"}, %a // CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_6]]#1 : (!fir.box>) 
-> !fir.box // CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_11]] : (i32) -> i64 -// CHECK: %[[VAL_17:.*]] = fir.call @_FortranACshiftVector(%[[VAL_13]], %[[VAL_14]], %[[VAL_15]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshiftVector(%[[VAL_13]], %[[VAL_14]], %[[VAL_15]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> () // 2d boxed array shift by scalar func.func @cshift2(%arg0: !fir.box> {fir.bindc_name = "a"}, %arg1: i32 {fir.bindc_name = "sh"}) { @@ -53,7 +53,7 @@ func.func @cshift2(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_8]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (!fir.box) -> !fir.box // CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_4]] : (index) -> i32 -// CHECK: %[[VAL_19:.*]] = fir.call @_FortranACshift(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshift(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () // 2d boxed array shift by boxed array func.func @cshift3(%arg0: !fir.box> {fir.bindc_name = "a"}, %arg1: !fir.box> {fir.bindc_name = "sh"}) { @@ -80,7 +80,7 @@ func.func @cshift3(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_7]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_8]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_4]] : (index) -> i32 -// CHECK: %[[VAL_18:.*]] = fir.call @_FortranACshift(%[[VAL_13]], %[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshift(%[[VAL_13]], %[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () // 2d boxed array 
shift by array expr func.func @cshift4(%arg0: !fir.box> {fir.bindc_name = "a"}, %arg1: !hlfir.expr {fir.bindc_name = "sh"}) { @@ -110,7 +110,7 @@ func.func @cshift4(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_7]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_15]] : (!fir.box>) -> !fir.box // CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_4]] : (index) -> i32 -// CHECK: %[[VAL_22:.*]] = fir.call @_FortranACshift(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshift(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () // 2d array expr shift by array expr func.func @cshift5(%arg0: !hlfir.expr {fir.bindc_name = "a"}, %arg1: !hlfir.expr {fir.bindc_name = "sh"}) { @@ -144,7 +144,7 @@ func.func @cshift5(%arg0: !hlfir.expr {fir.bindc_name = "a"}, %arg1: !h // CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_15]] : (!fir.box>) -> !fir.box // CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_20]] : (!fir.box>) -> !fir.box // CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_4]] : (index) -> i32 -// CHECK: %[[VAL_27:.*]] = fir.call @_FortranACshift(%[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshift(%[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () // 2d array expr shift by array expr with explicit dim func.func @cshift6(%arg0: !hlfir.expr {fir.bindc_name = "a"}, %arg1: !hlfir.expr {fir.bindc_name = "sh"}, %dim : i16) { @@ -179,7 +179,7 @@ func.func @cshift6(%arg0: !hlfir.expr {fir.bindc_name = "a"}, %arg1: !h // CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_7]] : (!fir.ref>>>) -> !fir.ref> // CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_17]] : 
(!fir.box>) -> !fir.box // CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_22]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_28:.*]] = fir.call @_FortranACshift(%[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_15]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshift(%[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_15]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () // shift of polymorphic array func.func @cshift7(%arg0: !fir.ref>>>>, %arg1: !fir.ref) { @@ -214,4 +214,4 @@ func.func @cshift7(%arg0: !fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_11]] : (!fir.class>>>) -> !fir.box // CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64 -// CHECK: %[[VAL_21:.*]] = fir.call @_FortranACshiftVector(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACshiftVector(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %{{.*}}, %{{.*}}) : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> () diff --git a/flang/test/HLFIR/elemental-codegen.fir b/flang/test/HLFIR/elemental-codegen.fir index 0d5f343cb1771..2443217f557f8 100644 --- a/flang/test/HLFIR/elemental-codegen.fir +++ b/flang/test/HLFIR/elemental-codegen.fir @@ -171,20 +171,20 @@ func.func @test_polymorphic(%arg0: !fir.class> {fir.bindc_ // CHECK: %[[RANK:.*]] = arith.constant 2 : i32 // CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_5]]#1 : (!fir.class>) -> !fir.box -// CHECK: %[[VAL_17:.*]] = fir.call @_FortranAAllocatableApplyMold(%[[VAL_15]], %[[VAL_16]], %[[RANK]]) : (!fir.ref>, !fir.box, i32) -> none +// CHECK: fir.call @_FortranAAllocatableApplyMold(%[[VAL_15]], %[[VAL_16]], %[[RANK]]) : (!fir.ref>, !fir.box, i32) -> () // CHECK: %[[VAL_18:.*]] = arith.constant 1 : index // CHECK: %[[VAL_19:.*]] = arith.constant 0 : index // CHECK: %[[VAL_20:.*]] = 
fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_19]] : (index) -> i32 // CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_18]] : (index) -> i64 // CHECK: %[[VAL_23:.*]] = fir.convert %[[EX0]] : (index) -> i64 -// CHECK: %[[VAL_24:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_25:.*]] = arith.constant 1 : index // CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_25]] : (index) -> i32 // CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_18]] : (index) -> i64 // CHECK: %[[VAL_29:.*]] = fir.convert %[[EX1]] : (index) -> i64 -// CHECK: %[[VAL_30:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_26]], %[[VAL_27]], %[[VAL_28]], %[[VAL_29]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_26]], %[[VAL_27]], %[[VAL_28]], %[[VAL_29]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_31:.*]] = fir.address_of(@_QQclX // CHECK: %[[VAL_32:.*]] = arith.constant {{.*}} : index // CHECK: %[[VAL_33:.*]] = arith.constant {{.*}} : i32 @@ -255,20 +255,20 @@ func.func @test_polymorphic_expr(%arg0: !fir.class> {fir.b // CHECK: %[[VAL_15:.*]] = arith.constant 2 : i32 // CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_6]]#1 : (!fir.class>) -> !fir.box -// CHECK: %[[VAL_18:.*]] = fir.call @_FortranAAllocatableApplyMold(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) : (!fir.ref>, !fir.box, i32) -> none +// CHECK: fir.call @_FortranAAllocatableApplyMold(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) : (!fir.ref>, !fir.box, i32) -> () // CHECK: %[[VAL_19:.*]] = arith.constant 1 : index // CHECK: %[[VAL_20:.*]] = arith.constant 0 : 
index // CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_20]] : (index) -> i32 // CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_19]] : (index) -> i64 // CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_2]] : (index) -> i64 -// CHECK: %[[VAL_25:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_24]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_24]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_26:.*]] = arith.constant 1 : index // CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_26]] : (index) -> i32 // CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_19]] : (index) -> i64 // CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_3]] : (index) -> i64 -// CHECK: %[[VAL_31:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_27]], %[[VAL_28]], %[[VAL_29]], %[[VAL_30]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_27]], %[[VAL_28]], %[[VAL_29]], %[[VAL_30]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_32:.*]] = fir.address_of(@_QQcl // CHECK: %[[VAL_33:.*]] = arith.constant {{.*}} : index // CHECK: %[[VAL_34:.*]] = arith.constant {{.*}} : i32 @@ -308,20 +308,20 @@ func.func @test_polymorphic_expr(%arg0: !fir.class> {fir.b // CHECK: %[[VAL_64:.*]] = arith.constant 2 : i32 // CHECK: %[[VAL_65:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_66:.*]] = fir.convert %[[VAL_40]] : (!fir.class>>>) -> !fir.box -// CHECK: %[[VAL_67:.*]] = fir.call @_FortranAAllocatableApplyMold(%[[VAL_65]], %[[VAL_66]], %[[VAL_64]]) : (!fir.ref>, !fir.box, i32) -> none +// CHECK: fir.call @_FortranAAllocatableApplyMold(%[[VAL_65]], %[[VAL_66]], %[[VAL_64]]) : (!fir.ref>, !fir.box, i32) -> () // CHECK: %[[VAL_68:.*]] = arith.constant 1 : index // CHECK: 
%[[VAL_69:.*]] = arith.constant 0 : index // CHECK: %[[VAL_70:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_71:.*]] = fir.convert %[[VAL_69]] : (index) -> i32 // CHECK: %[[VAL_72:.*]] = fir.convert %[[VAL_68]] : (index) -> i64 // CHECK: %[[VAL_73:.*]] = fir.convert %[[VAL_2]] : (index) -> i64 -// CHECK: %[[VAL_74:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_70]], %[[VAL_71]], %[[VAL_72]], %[[VAL_73]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_70]], %[[VAL_71]], %[[VAL_72]], %[[VAL_73]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_75:.*]] = arith.constant 1 : index // CHECK: %[[VAL_76:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>>) -> !fir.ref> // CHECK: %[[VAL_77:.*]] = fir.convert %[[VAL_75]] : (index) -> i32 // CHECK: %[[VAL_78:.*]] = fir.convert %[[VAL_68]] : (index) -> i64 // CHECK: %[[VAL_79:.*]] = fir.convert %[[VAL_3]] : (index) -> i64 -// CHECK: %[[VAL_80:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_76]], %[[VAL_77]], %[[VAL_78]], %[[VAL_79]]) : (!fir.ref>, i32, i64, i64) -> none +// CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_76]], %[[VAL_77]], %[[VAL_78]], %[[VAL_79]]) : (!fir.ref>, i32, i64, i64) -> () // CHECK: %[[VAL_81:.*]] = fir.address_of(@_QQcl // CHECK: %[[VAL_82:.*]] = arith.constant {{.*}} : index // CHECK: %[[VAL_83:.*]] = arith.constant {{.*}} : i32 diff --git a/flang/test/HLFIR/matmul-lowering.fir b/flang/test/HLFIR/matmul-lowering.fir index fd76db2659516..51a859401bf4a 100644 --- a/flang/test/HLFIR/matmul-lowering.fir +++ b/flang/test/HLFIR/matmul-lowering.fir @@ -29,7 +29,7 @@ func.func @_QPmatmul1(%arg0: !fir.box> {fir.bindc_name = "lh // CHECK: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] : (!fir.ref>>>) -> !fir.ref> // CHECK-DAG: %[[LHS_ARG:.*]] = fir.convert %[[LHS_VAR]]#1 : (!fir.box>) -> !fir.box // CHECK-DAG: %[[RHS_ARG:.*]] = fir.convert %[[RHS_VAR]]#1 : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call 
@_FortranAMatmulInteger4Integer4(%[[RET_ARG]], %[[LHS_ARG]], %[[RHS_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) fastmath +// CHECK: fir.call @_FortranAMatmulInteger4Integer4(%[[RET_ARG]], %[[LHS_ARG]], %[[RHS_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) fastmath // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK-DAG: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] @@ -71,7 +71,7 @@ func.func @_QPtest(%arg0: !fir.ref> {fir.bindc_name = "a"}, } // just check that we apply the patterns successfully. The details are checked above // CHECK-LABEL: func.func @_QPtest( -// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none -// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () +// CHECK: fir.call @_FortranAMatmulReal4Real4({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () // CHECK: return // CHECK-NEXT: } diff --git a/flang/test/HLFIR/maxloc-lowering.fir b/flang/test/HLFIR/maxloc-lowering.fir index a51c9b483fa05..be52627564c49 100644 --- a/flang/test/HLFIR/maxloc-lowering.fir +++ b/flang/test/HLFIR/maxloc-lowering.fir @@ -28,7 +28,7 @@ func.func @_QPmaxloc1(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box) -> !fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], 
{{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -72,7 +72,7 @@ func.func @_QPmaxloc2(%arg0: !fir.box> {fir.bindc_name = "a" // CHECK: %[[V11:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V12:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V15:.*]] = fir.convert %[[V6]] : (!fir.box) -> !fir.box -// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMaxlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V17:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V19:.*]] = fir.box_addr %[[V17]] : (!fir.box>>) -> !fir.heap> @@ -114,7 +114,7 @@ func.func @_QPmaxloc3(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V9:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V10:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V13:.*]] = fir.convert %[[V4]] : (!fir.box>) -> !fir.box -// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, 
!fir.box, i1) -> () // CHECK-NEXT: %[[V15:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V15]] : (!fir.box>>) -> !fir.heap> @@ -155,7 +155,7 @@ func.func @_QPmaxloc4(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V2]]#1 : (!fir.box>>) -> !fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -226,7 +226,7 @@ func.func @_QPmaxloc5(%arg0: !fir.ref> {fir.bindc_name = "s"}) // CHECK: %[[V15:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V16:.*]] = fir.convert %[[V8]] : (!fir.box>) -> !fir.box // CHECK: %[[V19:.*]] = fir.convert %[[V10]] : (!fir.box>) -> !fir.box -// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMaxlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V21:.*]] = fir.load %[[V0]] : !fir.ref>>> // 
CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V23:.*]] = fir.box_addr %[[V21]] : (!fir.box>>) -> !fir.heap> @@ -265,7 +265,7 @@ func.func @_QPmaxloc6(%arg0: !fir.box>> {fir.bindc_n // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box>>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box) -> !fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMaxlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -310,7 +310,7 @@ func.func @_QPmaxloc7(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V10:.*]] = fir.convert %[[V0]] : (!fir.ref>>) -> !fir.ref> // CHECK-NEXT: %[[V11:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V14:.*]] = fir.convert %[[V4]]#1 : (!fir.box>>) -> !fir.box -// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMaxlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMaxlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V16:.*]] = fir.load %[[V0]] : !fir.ref>> // CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V16]] : (!fir.box>) -> !fir.heap // CHECK-NEXT: 
%[[V18:.*]] = fir.load %[[V17]] : !fir.heap diff --git a/flang/test/HLFIR/maxval-lowering.fir b/flang/test/HLFIR/maxval-lowering.fir index 5a49ed5273ef8..7e025c41c6aeb 100644 --- a/flang/test/HLFIR/maxval-lowering.fir +++ b/flang/test/HLFIR/maxval-lowering.fir @@ -56,7 +56,7 @@ func.func @_QPmaxval2(%arg0: !fir.box> {fir.bindc_name = "a" // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]]#1 : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] : (!fir.box) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMaxvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAMaxvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] @@ -175,7 +175,7 @@ func.func @_QPmaxval5(%arg0: !fir.ref> {fir.bindc_name = "s"}) // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY_BOX]] : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMaxvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAMaxvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // simple one argument maxval for character func.func @_QPmaxval6(%arg0: !fir.box>> {fir.bindc_name = "a"}, %arg1: !fir.boxchar<1> {fir.bindc_name = "s"}) { @@ -205,7 +205,7 @@ func.func @_QPmaxval6(%arg0: 
!fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]]#1 : (!fir.box>>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] : (!fir.box) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMaxvalCharacter(%[[RET_ARG]], %[[ARRAY_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAMaxvalCharacter(%[[RET_ARG]], %[[ARRAY_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_ELESIZE:.*]] = fir.box_elesize %[[RET]] diff --git a/flang/test/HLFIR/minloc-lowering.fir b/flang/test/HLFIR/minloc-lowering.fir index 6f3cbd171445c..76d788812e24c 100644 --- a/flang/test/HLFIR/minloc-lowering.fir +++ b/flang/test/HLFIR/minloc-lowering.fir @@ -28,7 +28,7 @@ func.func @_QPminloc1(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box) -> !fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -72,7 +72,7 @@ func.func @_QPminloc2(%arg0: !fir.box> {fir.bindc_name = "a" // CHECK: %[[V11:.*]] = 
fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V12:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V15:.*]] = fir.convert %[[V6]] : (!fir.box) -> !fir.box -// CHECK-NEXT: %[[V16:.*]] = fir.call @_FortranAMinlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocDim(%[[V11]], %[[V12]], %[[C4]], %[[V5]], {{.*}}, {{.*}}, %[[V15]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V17:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V18:.*]]:3 = fir.box_dims %[[V17]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V19:.*]] = fir.box_addr %[[V17]] : (!fir.box>>) -> !fir.heap> @@ -114,7 +114,7 @@ func.func @_QPminloc3(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V9:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V10:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V13:.*]] = fir.convert %[[V4]] : (!fir.box>) -> !fir.box -// CHECK-NEXT: %[[V14:.*]] = fir.call @_FortranAMinlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocInteger4(%[[V9]], %[[V10]], %[[C4]], {{.*}}, {{.*}}, %[[V13]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V15:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V16:.*]]:3 = fir.box_dims %[[V15]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V15]] : (!fir.box>>) -> !fir.heap> @@ -155,7 +155,7 @@ func.func @_QPminloc4(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert 
%[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V2]]#1 : (!fir.box>>) -> !fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocInteger4(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -226,7 +226,7 @@ func.func @_QPminloc5(%arg0: !fir.ref> {fir.bindc_name = "s"}) // CHECK: %[[V15:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V16:.*]] = fir.convert %[[V8]] : (!fir.box>) -> !fir.box // CHECK: %[[V19:.*]] = fir.convert %[[V10]] : (!fir.box>) -> !fir.box -// CHECK-NEXT: %[[V20:.*]] = fir.call @_FortranAMinlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocDim(%[[V15]], %[[V16]], %[[C4]], %[[C1]], {{.*}}, {{.*}}, %[[V19]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V21:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V22:.*]]:3 = fir.box_dims %[[V21]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V23:.*]] = fir.box_addr %[[V21]] : (!fir.box>>) -> !fir.heap> @@ -265,7 +265,7 @@ func.func @_QPminloc6(%arg0: !fir.box>> {fir.bindc_n // CHECK: %[[V8:.*]] = fir.convert %[[V0]] : (!fir.ref>>>) -> !fir.ref> // CHECK-NEXT: %[[V9:.*]] = fir.convert %[[V1]]#1 : (!fir.box>>) -> !fir.box // CHECK: %[[V12:.*]] = fir.convert %[[V3]] : (!fir.box) -> 
!fir.box -// CHECK-NEXT: %[[V13:.*]] = fir.call @_FortranAMinlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocCharacter(%[[V8]], %[[V9]], %[[C4]], {{.*}}, {{.*}}, %[[V12]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V14:.*]] = fir.load %[[V0]] : !fir.ref>>> // CHECK-NEXT: %[[V15:.*]]:3 = fir.box_dims %[[V14]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) // CHECK-NEXT: %[[V16:.*]] = fir.box_addr %[[V14]] : (!fir.box>>) -> !fir.heap> @@ -310,7 +310,7 @@ func.func @_QPminloc7(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK: %[[V10:.*]] = fir.convert %[[V0]] : (!fir.ref>>) -> !fir.ref> // CHECK-NEXT: %[[V11:.*]] = fir.convert %[[V1]]#1 : (!fir.box>) -> !fir.box // CHECK: %[[V14:.*]] = fir.convert %[[V4]]#1 : (!fir.box>>) -> !fir.box -// CHECK-NEXT: %[[V15:.*]] = fir.call @_FortranAMinlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK-NEXT: fir.call @_FortranAMinlocDim(%[[V10]], %[[V11]], %[[C4]], %[[V6]], {{.*}}, {{.*}}, %[[V14]], %[[FALSE]]) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NEXT: %[[V16:.*]] = fir.load %[[V0]] : !fir.ref>> // CHECK-NEXT: %[[V17:.*]] = fir.box_addr %[[V16]] : (!fir.box>) -> !fir.heap // CHECK-NEXT: %[[V18:.*]] = fir.load %[[V17]] : !fir.heap diff --git a/flang/test/HLFIR/minval-lowering.fir b/flang/test/HLFIR/minval-lowering.fir index d03dec1552309..c9c78e3b2e446 100644 --- a/flang/test/HLFIR/minval-lowering.fir +++ b/flang/test/HLFIR/minval-lowering.fir @@ -56,7 +56,7 @@ func.func @_QPminval2(%arg0: !fir.box> {fir.bindc_name = "a" // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]]#1 : (!fir.box>) -> 
!fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] : (!fir.box) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMinvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAMinvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] @@ -175,7 +175,7 @@ func.func @_QPminval5(%arg0: !fir.ref> {fir.bindc_name = "s"}) // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY_BOX]] : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMinvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAMinvalDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // simple one argument minval for character func.func @_QPminval6(%arg0: !fir.box>> {fir.bindc_name = "a"}, %arg1: !fir.boxchar<1> {fir.bindc_name = "s"}) { @@ -205,7 +205,7 @@ func.func @_QPminval6(%arg0: !fir.box>> {fir.bindc_n // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]]#1 : (!fir.box>>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] : (!fir.box) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAMinvalCharacter(%[[RET_ARG]], %[[ARRAY_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call 
@_FortranAMinvalCharacter(%[[RET_ARG]], %[[ARRAY_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_ELESIZE:.*]] = fir.box_elesize %[[RET]] diff --git a/flang/test/HLFIR/optional_dummy.f90 b/flang/test/HLFIR/optional_dummy.f90 index 8534a414eaaf5..ecb14f60fd7df 100644 --- a/flang/test/HLFIR/optional_dummy.f90 +++ b/flang/test/HLFIR/optional_dummy.f90 @@ -12,7 +12,7 @@ ! CHECK: %[[VAL_3:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_4:.*]] = arith.constant false ! CHECK: %[[VAL_5:.*]] = arith.constant false -! CHECK: %[[VAL_6:.*]] = fir.call @_FortranAStopStatement(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]]) fastmath : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]]) fastmath : (i32, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: ^bb2: ! CHECK: cf.br ^bb3 diff --git a/flang/test/HLFIR/order_assignments/lhs-conflicts-codegen.fir b/flang/test/HLFIR/order_assignments/lhs-conflicts-codegen.fir index 45ceb516a6863..ac6c0d89f73fb 100644 --- a/flang/test/HLFIR/order_assignments/lhs-conflicts-codegen.fir +++ b/flang/test/HLFIR/order_assignments/lhs-conflicts-codegen.fir @@ -96,7 +96,7 @@ func.func @save_box_in_stack(%arg0: !fir.box>) { // CHECK: fir.do_loop {{.*}} { // CHECK: %[[VAL_48:.*]] = hlfir.designate %[[VAL_9]]#0 {{.*}} : (!fir.box>, i32, i32, index, !fir.shape<1>) -> !fir.box> // CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_48]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_50:.*]] = fir.call @_FortranAPushDescriptor(%[[VAL_30]], %[[VAL_49]]) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushDescriptor(%[[VAL_30]], %[[VAL_49]]) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: } // CHECK: fir.store %{{.*}} to %[[VAL_2]] : !fir.ref // CHECK: fir.do_loop {{.*}} { @@ -104,12 +104,12 @@ func.func @save_box_in_stack(%arg0: !fir.box>) { // CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_60]], 
%{{.*}} : i64 // CHECK: fir.store %[[VAL_61]] to %[[VAL_2]] : !fir.ref // CHECK: %[[VAL_62:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: %[[VAL_63:.*]] = fir.call @_FortranADescriptorAt(%[[VAL_30]], %[[VAL_60]], %[[VAL_62]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> none +// CHECK: fir.call @_FortranADescriptorAt(%[[VAL_30]], %[[VAL_60]], %[[VAL_62]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> () // CHECK: %[[VAL_64:.*]] = fir.load %[[VAL_1]] : !fir.ref>>> // CHECK: %[[VAL_65:.*]] = fir.convert %[[VAL_64]] : (!fir.box>>) -> !fir.box> // CHECK: hlfir.assign %{{.*}} to %[[VAL_65]] : i32, !fir.box> // CHECK: } -// CHECK: fir.call @_FortranADestroyDescriptorStack(%[[VAL_30]]) : (!fir.llvm_ptr) -> none +// CHECK: fir.call @_FortranADestroyDescriptorStack(%[[VAL_30]]) : (!fir.llvm_ptr) -> () // Test simplified IR for: // @@ -171,13 +171,13 @@ func.func @test_vector_subscript_overlap(%arg0: !fir.ref>) { // CHECK: %[[VAL_52:.*]] = fir.embox %[[VAL_51]](%[[VAL_48]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> // CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_52]] : (!fir.box>) -> !fir.box // Save the vector subscripted designator shape. -// CHECK: %[[VAL_56:.*]] = fir.call @_FortranAPushDescriptor({{.*}}, {{.*}}) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushDescriptor({{.*}}, {{.*}}) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: fir.do_loop {{.*}} { // CHECK: %[[VAL_60:.*]] = hlfir.designate %[[VAL_11]]#0 (%{{.*}}) : (!fir.box>, i64) -> !fir.ref // CHECK: %[[VAL_61:.*]] = fir.embox %[[VAL_60]] : (!fir.ref) -> !fir.box // CHECK: %[[VAL_62:.*]] = fir.convert %[[VAL_61]] : (!fir.box) -> !fir.box // Save the vector subscripted designator element address. 
-// CHECK: %[[VAL_63:.*]] = fir.call @_FortranAPushDescriptor(%[[VAL_30]], %[[VAL_62]]) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushDescriptor(%[[VAL_30]], %[[VAL_62]]) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: } // CHECK: } // CHECK: fir.store %{{.*}} to %[[VAL_4]] : !fir.ref @@ -189,7 +189,7 @@ func.func @test_vector_subscript_overlap(%arg0: !fir.ref>) { // CHECK: fir.store %[[VAL_71]] to %[[VAL_2]] : !fir.ref // CHECK: %[[VAL_72:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> // Fetch the vector subscripted designator shape to create the elemental loop. -// CHECK: %[[VAL_73:.*]] = fir.call @_FortranADescriptorAt(%[[VAL_37]], %[[VAL_70]], %[[VAL_72]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> none +// CHECK: fir.call @_FortranADescriptorAt(%[[VAL_37]], %[[VAL_70]], %[[VAL_72]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> () // CHECK: %[[VAL_74:.*]] = fir.load %[[VAL_1]] : !fir.ref>>> // CHECK: %[[VAL_75:.*]] = fir.convert %[[VAL_74]] : (!fir.box>>) -> !fir.box> // CHECK: %[[VAL_76:.*]] = arith.constant 0 : index @@ -201,15 +201,15 @@ func.func @test_vector_subscript_overlap(%arg0: !fir.ref>) { // CHECK: fir.store %[[VAL_82]] to %[[VAL_4]] : !fir.ref // CHECK: %[[VAL_83:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>) -> !fir.ref> // Fetch the vector subscripted designator element address. 
-// CHECK: %[[VAL_84:.*]] = fir.call @_FortranADescriptorAt(%[[VAL_30]], %[[VAL_81]], %[[VAL_83]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> none +// CHECK: fir.call @_FortranADescriptorAt(%[[VAL_30]], %[[VAL_81]], %[[VAL_83]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> () // CHECK: %[[VAL_85:.*]] = fir.load %[[VAL_3]] : !fir.ref>> // CHECK: %[[VAL_86:.*]] = fir.box_addr %[[VAL_85]] : (!fir.box>) -> !fir.ptr // CHECK: %[[VAL_87:.*]] = fir.convert %[[VAL_86]] : (!fir.ptr) -> !fir.ref // CHECK: hlfir.assign %{{.*}} to %[[VAL_87]] : i64, !fir.ref // CHECK: } // CHECK: } -// CHECK: %[[VAL_88:.*]] = fir.call @_FortranADestroyDescriptorStack(%[[VAL_30]]) : (!fir.llvm_ptr) -> none -// CHECK: %[[VAL_89:.*]] = fir.call @_FortranADestroyDescriptorStack(%[[VAL_37]]) : (!fir.llvm_ptr) -> none +// CHECK: fir.call @_FortranADestroyDescriptorStack(%[[VAL_30]]) : (!fir.llvm_ptr) -> () +// CHECK: fir.call @_FortranADestroyDescriptorStack(%[[VAL_37]]) : (!fir.llvm_ptr) -> () func.func private @integer_to_real(!fir.ref, !fir.logical<4>) func.func private @foo(!fir.ref>, index) -> index diff --git a/flang/test/HLFIR/order_assignments/runtime-stack-temp.fir b/flang/test/HLFIR/order_assignments/runtime-stack-temp.fir index aa334c5ac56cf..4c2d416836671 100644 --- a/flang/test/HLFIR/order_assignments/runtime-stack-temp.fir +++ b/flang/test/HLFIR/order_assignments/runtime-stack-temp.fir @@ -89,7 +89,7 @@ func.func @test_runtime_stack(%arg0: !fir.box>, %n: !fir.ref !fir.shape<1> // CHECK: %[[VAL_37:.*]] = hlfir.designate %[[VAL_11]]#0 (%[[VAL_29]]:%[[VAL_30]]:%[[VAL_6]]) shape %[[VAL_36]] : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> // CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_37]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_39:.*]] = fir.call @_FortranAPushValue(%[[VAL_22]], %[[VAL_38]]) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushValue(%[[VAL_22]], %[[VAL_38]]) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: } // CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_9]] 
: (i32) -> index // CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_8]] : (i32) -> index @@ -101,7 +101,7 @@ func.func @test_runtime_stack(%arg0: !fir.box>, %n: !fir.ref // CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: %[[VAL_48:.*]] = fir.call @_FortranAValueAt(%[[VAL_22]], %[[VAL_45]], %[[VAL_47]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> none +// CHECK: fir.call @_FortranAValueAt(%[[VAL_22]], %[[VAL_45]], %[[VAL_47]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> () // CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> // CHECK: %[[VAL_50:.*]] = fir.load %[[VAL_10]]#0 : !fir.ref // CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_44]], %[[VAL_50]] : i32 @@ -116,7 +116,7 @@ func.func @test_runtime_stack(%arg0: !fir.box>, %n: !fir.ref>, index, index, index, !fir.shape<1>) -> !fir.box> // CHECK: hlfir.assign %[[VAL_49]] to %[[VAL_60]] : !fir.box>>, !fir.box> // CHECK: } -// CHECK: %[[VAL_61:.*]] = fir.call @_FortranADestroyValueStack(%[[VAL_22]]) : (!fir.llvm_ptr) -> none +// CHECK: fir.call @_FortranADestroyValueStack(%[[VAL_22]]) : (!fir.llvm_ptr) -> () // CHECK: return // CHECK: } @@ -164,13 +164,13 @@ func.func @_QPdealing_with_i1(%x: !fir.ref>) { // CHECK: fir.store %[[VAL_27]] to %[[VAL_1]] : !fir.ref> // CHECK: %[[VAL_28:.*]] = fir.embox %[[VAL_1]] : (!fir.ref>) -> !fir.box> // CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (!fir.box>) -> !fir.box -// CHECK: %[[VAL_30:.*]] = fir.call @_FortranAPushValue(%{{.*}}, %[[VAL_29]]) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushValue(%{{.*}}, %[[VAL_29]]) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: } // CHECK: } // CHECK: fir.do_loop // CHECK: fir.do_loop // CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> -// CHECK: %[[VAL_44:.*]] = fir.call @_FortranAValueAt(%{{.*}}, %{{.*}}, %[[VAL_43]]) +// CHECK: fir.call @_FortranAValueAt(%{{.*}}, %{{.*}}, %[[VAL_43]]) // CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> // CHECK: 
%[[VAL_46:.*]] = fir.box_addr %[[VAL_45]] : (!fir.box>>) -> !fir.heap> // CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_46]] : !fir.heap> diff --git a/flang/test/HLFIR/order_assignments/user-defined-assignment-finalization.fir b/flang/test/HLFIR/order_assignments/user-defined-assignment-finalization.fir index ae5329a2d2433..fbbc47185757f 100644 --- a/flang/test/HLFIR/order_assignments/user-defined-assignment-finalization.fir +++ b/flang/test/HLFIR/order_assignments/user-defined-assignment-finalization.fir @@ -56,7 +56,7 @@ func.func @_QPtest1() { hlfir.yield %4#0 : !fir.ref>}>> cleanup { %5 = fir.embox %0 : (!fir.ref>}>>) -> !fir.box>}>> %6 = fir.convert %5 : (!fir.box>}>>) -> !fir.box - %7 = fir.call @_FortranADestroy(%6) fastmath : (!fir.box) -> none + fir.call @_FortranADestroy(%6) fastmath : (!fir.box) -> () } } to { hlfir.yield %2#0 : !fir.ref>}>> @@ -86,7 +86,7 @@ func.func @_QPtest1() { // CHECK: hlfir.end_associate %[[VAL_6]]#1, %[[VAL_6]]#2 : !fir.ref>}>>, i1 // CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_0]] : (!fir.ref>}>>) -> !fir.box>}>> // CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (!fir.box>}>>) -> !fir.box -// CHECK: %[[VAL_13:.*]] = fir.call @_FortranADestroy(%[[VAL_12]]) fastmath : (!fir.box) -> none +// CHECK: fir.call @_FortranADestroy(%[[VAL_12]]) fastmath : (!fir.box) -> () // CHECK: return // CHECK: } @@ -115,7 +115,7 @@ func.func @_QPtest2() { hlfir.yield %6#0 : !fir.ref>}>>> cleanup { %7 = fir.embox %0(%2) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> %8 = fir.convert %7 : (!fir.box>}>>>) -> !fir.box - %9 = fir.call @_FortranADestroy(%8) fastmath : (!fir.box) -> none + fir.call @_FortranADestroy(%8) fastmath : (!fir.box) -> () fir.call @llvm.stackrestore.p0(%4) fastmath : (!fir.ref) -> () } } to { @@ -154,7 +154,7 @@ func.func @_QPtest2() { // CHECK: hlfir.end_associate %[[VAL_9]]#1, %[[VAL_9]]#2 : !fir.ref>}>>>, i1 // CHECK: %[[VAL_18:.*]] = fir.embox %[[VAL_1]](%[[VAL_3]]) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> // CHECK: 
%[[VAL_19:.*]] = fir.convert %[[VAL_18]] : (!fir.box>}>>>) -> !fir.box -// CHECK: %[[VAL_20:.*]] = fir.call @_FortranADestroy(%[[VAL_19]]) fastmath : (!fir.box) -> none +// CHECK: fir.call @_FortranADestroy(%[[VAL_19]]) fastmath : (!fir.box) -> () // CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_5]]) fastmath : (!fir.ref) -> () // CHECK: return // CHECK: } @@ -201,7 +201,7 @@ func.func @_QPtest3(%arg0: !fir.ref> {fir.bindc_name = "y"}) { hlfir.yield %9#0 : !fir.ref>}>>> cleanup { %10 = fir.embox %0(%2) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> %11 = fir.convert %10 : (!fir.box>}>>>) -> !fir.box - %12 = fir.call @_FortranADestroy(%11) fastmath : (!fir.box) -> none + fir.call @_FortranADestroy(%11) fastmath : (!fir.box) -> () fir.call @llvm.stackrestore.p0(%7) fastmath : (!fir.ref) -> () } } @@ -254,7 +254,7 @@ func.func @_QPtest3(%arg0: !fir.ref> {fir.bindc_name = "y"}) { // CHECK: %[[VAL_32:.*]] = hlfir.designate %[[VAL_20B]]#0 (%[[VAL_28]]) : (!fir.ref>}>>>, index) -> !fir.ref>}>> // CHECK: %[[VAL_33:.*]] = fir.embox %[[VAL_32]] : (!fir.ref>}>>) -> !fir.box>}>> // CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_33]] : (!fir.box>}>>) -> !fir.box -// CHECK: %[[VAL_35:.*]] = fir.call @_FortranAPushValue(%[[VAL_27]], %[[VAL_34]]) : (!fir.llvm_ptr, !fir.box) -> none +// CHECK: fir.call @_FortranAPushValue(%[[VAL_27]], %[[VAL_34]]) : (!fir.llvm_ptr, !fir.box) -> () // CHECK: } // CHECK: } // CHECK: fir.do_loop %[[VAL_37:.*]] = %{{.*}} to %[[VAL_4]] step %{{.*}} { @@ -266,7 +266,7 @@ func.func @_QPtest3(%arg0: !fir.ref> {fir.bindc_name = "y"}) { // CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_41]], %{{.*}} : i64 // CHECK: fir.store %[[VAL_42]] to %[[VAL_2]] : !fir.ref // CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>}>>>>) -> !fir.ref> -// CHECK: %[[VAL_44:.*]] = fir.call @_FortranAValueAt(%[[VAL_27]], %[[VAL_41]], %[[VAL_43]]) : (!fir.llvm_ptr, i64, !fir.ref>) -> none +// CHECK: fir.call @_FortranAValueAt(%[[VAL_27]], %[[VAL_41]], %[[VAL_43]]) : 
(!fir.llvm_ptr, i64, !fir.ref>) -> () // CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_1]] : !fir.ref>}>>>> // CHECK: %[[VAL_46:.*]] = fir.box_addr %[[VAL_45]] : (!fir.box>}>>>) -> !fir.heap>}>> // CHECK: %[[VAL_47:.*]] = hlfir.designate %[[VAL_8]]#0 (%[[VAL_37]]) : (!fir.ref>}>>>, index) -> !fir.ref>}>> @@ -279,10 +279,10 @@ func.func @_QPtest3(%arg0: !fir.ref> {fir.bindc_name = "y"}) { // CHECK: } // CHECK: } // CHECK: hlfir.end_associate %[[VAL_16]]#1, %[[VAL_16]]#2 : !fir.ref>>, i1 -// CHECK: %[[VAL_53:.*]] = fir.call @_FortranADestroyValueStack(%[[VAL_27]]) : (!fir.llvm_ptr) -> none +// CHECK: fir.call @_FortranADestroyValueStack(%[[VAL_27]]) : (!fir.llvm_ptr) -> () // CHECK: %[[VAL_54:.*]] = fir.embox %[[VAL_5]](%[[VAL_7]]) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> // CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_54]] : (!fir.box>}>>>) -> !fir.box -// CHECK: %[[VAL_56:.*]] = fir.call @_FortranADestroy(%[[VAL_55]]) fastmath : (!fir.box) -> none +// CHECK: fir.call @_FortranADestroy(%[[VAL_55]]) fastmath : (!fir.box) -> () // CHECK: fir.call @llvm.stackrestore.p0(%[[VAL_18]]) fastmath : (!fir.ref) -> () // CHECK: return // CHECK: } diff --git a/flang/test/HLFIR/product-lowering.fir b/flang/test/HLFIR/product-lowering.fir index dd3506937cacb..45ae1f7aeaf5a 100644 --- a/flang/test/HLFIR/product-lowering.fir +++ b/flang/test/HLFIR/product-lowering.fir @@ -59,7 +59,7 @@ func.func @_QPproduct2(%arg0: !fir.box> {fir.bindc_name = "a // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]] // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] -// CHECK: %[[NONE:.*]] = fir.call @_FortranAProductDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAProductDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = 
fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] // CHECK-NEXT: %[[ADDR:.*]] = fir.box_addr %[[RET]] @@ -165,4 +165,4 @@ func.func @_QPproduct5(%arg0: !fir.ref> {fir.bindc_name = "s"} // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY_BOX]] : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranAProductDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranAProductDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () diff --git a/flang/test/HLFIR/sum-lowering.fir b/flang/test/HLFIR/sum-lowering.fir index d4a79d278acc4..e34ac487e8f9b 100644 --- a/flang/test/HLFIR/sum-lowering.fir +++ b/flang/test/HLFIR/sum-lowering.fir @@ -56,7 +56,7 @@ func.func @_QPsum2(%arg0: !fir.box> {fir.bindc_name = "a"}, // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY]]#1 : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK]] : (!fir.box) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranASumDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranASumDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] @@ -175,4 +175,4 @@ func.func @_QPsum5(%arg0: !fir.ref> {fir.bindc_name = "s"}) { // CHECK-DAG: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] // 
CHECK-DAG: %[[ARRAY_ARG:.*]] = fir.convert %[[ARRAY_BOX]] : (!fir.box>) -> !fir.box // CHECK-DAG: %[[MASK_ARG:.*]] = fir.convert %[[MASK_BOX]] : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranASumDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +// CHECK: fir.call @_FortranASumDim(%[[RET_ARG]], %[[ARRAY_ARG]], %[[DIM]], %[[LOC_STR:.*]], %[[LOC_N:.*]], %[[MASK_ARG]]) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () diff --git a/flang/test/HLFIR/transpose-lowering.fir b/flang/test/HLFIR/transpose-lowering.fir index 9afe8a058b8b8..76d51c3438282 100644 --- a/flang/test/HLFIR/transpose-lowering.fir +++ b/flang/test/HLFIR/transpose-lowering.fir @@ -33,7 +33,7 @@ func.func @_QPtranspose1(%arg0: !fir.ref> {fir.bindc_name = // CHECK: %[[RET_ARG:.*]] = fir.convert %[[RET_BOX]] : (!fir.ref>>>) // CHECK: %[[M_ARG:.*]] = fir.convert %[[M_BOX]] : (!fir.box>) -> !fir.box -// CHECK: %[[NONE:.*]] = fir.call @_FortranATranspose(%[[RET_ARG]], %[[M_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) +// CHECK: fir.call @_FortranATranspose(%[[RET_ARG]], %[[M_ARG]], %[[LOC_STR:.*]], %[[LOC_N:.*]]) // CHECK: %[[RET:.*]] = fir.load %[[RET_BOX]] // CHECK-DAG: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[RET]] diff --git a/flang/test/Integration/OpenMP/parallel-private-reduction-worstcase.f90 b/flang/test/Integration/OpenMP/parallel-private-reduction-worstcase.f90 index fe3a326702e52..0173847b73235 100644 --- a/flang/test/Integration/OpenMP/parallel-private-reduction-worstcase.f90 +++ b/flang/test/Integration/OpenMP/parallel-private-reduction-worstcase.f90 @@ -96,9 +96,12 @@ subroutine worst_case(a, b, c, d) ! CHECK: omp.region.cont13: ; preds = %omp.private.copy16 ! CHECK-NEXT: %{{.*}} = phi ptr +! CHECK-NEXT: br label %omp.region.after_alloca + +! CHECK: omp.region.after_alloca: ! CHECK-NEXT: br label %omp.par.region -! 
CHECK: omp.par.region: ; preds = %omp.region.cont13 +! CHECK: omp.par.region: ; preds = %omp.region.after_alloca ! CHECK-NEXT: br label %omp.reduction.init ! CHECK: omp.reduction.init: ; preds = %omp.par.region @@ -232,7 +235,7 @@ subroutine worst_case(a, b, c, d) ! CHECK-NEXT: br label %omp.reduction.cleanup42 ! CHECK: omp.par.region28: ; preds = %omp.par.region27 -! CHECK-NEXT: call {} @_FortranAStopStatement +! CHECK-NEXT: call void @_FortranAStopStatement ! CHECK: omp.reduction.neutral23: ; preds = %omp.reduction.neutral22 ! [source length was zero: finish initializing array] diff --git a/flang/test/Integration/OpenMP/private-global.f90 b/flang/test/Integration/OpenMP/private-global.f90 index 63ac6fbe05ee0..07dbe86e5ec93 100644 --- a/flang/test/Integration/OpenMP/private-global.f90 +++ b/flang/test/Integration/OpenMP/private-global.f90 @@ -34,7 +34,7 @@ program bug ! CHECK : %[[TABLE_BOX_VAL2:.*]] = load { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[TABLE_BOX_ADDR]], align 8 ! CHECK : store { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } %[[TABLE_BOX_VAL2]], ptr %[[TABLE_BOX_ADDR2]], align 8 ! CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %[[TABLE_BOX_ADDR2]], ptr %[[TABLE_BOX_ADDR]], i32 48, i1 false) -! CHECK: %[[VAL_26:.*]] = call {} @_FortranAAssign(ptr %[[TABLE_BOX_ADDR2]], ptr %[[BOXED_FIFTY]], ptr @{{.*}}, i32 9) +! CHECK: call void @_FortranAAssign(ptr %[[TABLE_BOX_ADDR2]], ptr %[[BOXED_FIFTY]], ptr @{{.*}}, i32 9) ! ... ! check that we use the private copy of table for table/=50 ! CHECK: omp.par.region3: diff --git a/flang/test/Lower/CUDA/cuda-allocatable.cuf b/flang/test/Lower/CUDA/cuda-allocatable.cuf index 8b287f859aa76..ed78bec1b8f08 100644 --- a/flang/test/Lower/CUDA/cuda-allocatable.cuf +++ b/flang/test/Lower/CUDA/cuda-allocatable.cuf @@ -80,7 +80,7 @@ end subroutine ! CHECK: fir.embox {{.*}} {allocator_idx = 1 : i32} ! 
CHECK: %[[BOX_DECL:.*]]:2 = hlfir.declare %[[BOX]] {data_attr = #cuf.cuda, fortran_attrs = #fir.var_attrs, uniq_name = "_QFsub3Ea"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: %[[PLOG:.*]] = fir.alloca !fir.logical<4> {bindc_name = "plog", uniq_name = "_QFsub3Eplog"} -! CHECK: %[[PLOG_DECL:.*]]:2 = hlfir.declare %5 {uniq_name = "_QFsub3Eplog"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) +! CHECK: %[[PLOG_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFsub3Eplog"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK-2: fir.call @_FortranAAllocatableSetBounds ! CHECK: %{{.*}} = cuf.allocate %[[BOX_DECL]]#1 : !fir.ref>>> pinned(%[[PLOG_DECL]]#1 : !fir.ref>) {data_attr = #cuf.cuda} -> i32 ! CHECK: fir.if %{{.*}} { @@ -97,7 +97,7 @@ end subroutine ! CHECK-LABEL: func.func @_QPsub4() ! CHECK: %[[BOX:.*]] = cuf.alloc !fir.box>> {bindc_name = "a", data_attr = #cuf.cuda, uniq_name = "_QFsub4Ea"} -> !fir.ref>>> ! CHECK: fir.embox {{.*}} {allocator_idx = 2 : i32} -! CHECK: %[[BOX_DECL:.*]]:2 = hlfir.declare %0 {data_attr = #cuf.cuda, fortran_attrs = #fir.var_attrs, uniq_name = "_QFsub4Ea"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) +! CHECK: %[[BOX_DECL:.*]]:2 = hlfir.declare %{{.*}} {data_attr = #cuf.cuda, fortran_attrs = #fir.var_attrs, uniq_name = "_QFsub4Ea"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: %[[ISTREAM:.*]] = fir.alloca i32 {bindc_name = "istream", uniq_name = "_QFsub4Eistream"} ! CHECK: %[[ISTREAM_DECL:.*]]:2 = hlfir.declare %[[ISTREAM]] {uniq_name = "_QFsub4Eistream"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.call @_FortranAAllocatableSetBounds @@ -173,7 +173,7 @@ end subroutine ! CHECK: fir.store %[[STAT]] to %[[ISTAT_DECL]]#1 : !fir.ref ! CHECK: %[[ERR_BOX:.*]] = fir.embox %[[ERR_DECL]]#1 : (!fir.ref>) -> !fir.box> -! CHECK: %[[STAT:.*]] = cuf.deallocate %[[BOX_DECL]]#1 : !fir.ref>>> errmsg(%15 : !fir.box>) {data_attr = #cuf.cuda, hasStat} -> i32 +! 
CHECK: %[[STAT:.*]] = cuf.deallocate %[[BOX_DECL]]#1 : !fir.ref>>> errmsg(%{{.*}} : !fir.box>) {data_attr = #cuf.cuda, hasStat} -> i32 ! CHECK: fir.store %[[STAT]] to %[[ISTAT_DECL]]#1 : !fir.ref ! CHECK: fir.if %{{.*}} { ! CHECK: %{{.*}} = cuf.deallocate %[[BOX_DECL]]#1 : !fir.ref>>> {data_attr = #cuf.cuda} -> i32 diff --git a/flang/test/Lower/HLFIR/array-ctor-as-runtime-temp.f90 b/flang/test/Lower/HLFIR/array-ctor-as-runtime-temp.f90 index 727eff7613e48..704245caf3d6d 100644 --- a/flang/test/Lower/HLFIR/array-ctor-as-runtime-temp.f90 +++ b/flang/test/Lower/HLFIR/array-ctor-as-runtime-temp.f90 @@ -21,7 +21,7 @@ subroutine test_loops() ! CHECK: %[[VAL_11:.*]] = arith.constant 7 : i32 ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_10]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_8]], %[[VAL_12]], %[[VAL_7]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_8]], %[[VAL_12]], %[[VAL_7]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i64 ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (i64) -> index ! CHECK: %[[VAL_17:.*]] = fir.call @_QMarrayctorPibar() fastmath : () -> i32 @@ -42,7 +42,7 @@ subroutine test_loops() ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_31]] : (i64) -> i32 ! CHECK: fir.store %[[VAL_32]] to %[[VAL_0]] : !fir.ref ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_0]] : (!fir.ref) -> !fir.llvm_ptr -! CHECK: %[[VAL_34:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_8]], %[[VAL_33]]) fastmath : (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_8]], %[[VAL_33]]) fastmath : (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: } ! CHECK: } ! 
CHECK: %[[VAL_35:.*]] = arith.constant true @@ -85,11 +85,11 @@ subroutine test_arrays(a) ! CHECK: %[[VAL_26:.*]] = arith.constant false ! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> -! CHECK: %[[VAL_33:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_27]], %[[VAL_31]], %[[VAL_26]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_27]], %[[VAL_31]], %[[VAL_26]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_3]]#1 : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_35:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_27]], %[[VAL_34]]) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_27]], %[[VAL_34]]) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_3]]#1 : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_37:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_27]], %[[VAL_36]]) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_27]], %[[VAL_36]]) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: %[[VAL_38:.*]] = arith.constant true ! CHECK: hlfir.as_expr %[[VAL_24]]#0 move %[[VAL_38]] : (!fir.box>, i1) -> !hlfir.expr @@ -106,13 +106,13 @@ subroutine test_arrays_unpredictable_size() ! CHECK: %[[VAL_9:.*]] = arith.constant false ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>>>) -> !fir.ref> -! CHECK: %[[VAL_16:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_10]], %[[VAL_14]], %[[VAL_9]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_10]], %[[VAL_14]], %[[VAL_9]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: fir.call @_QMarrayctorPrank1() {{.*}}: () -> !fir.box>> -! CHECK: %[[VAL_21:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: fir.call @_QMarrayctorPrank3() {{.*}}: () -> !fir.box>> -! CHECK: %[[VAL_26:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: fir.call @_QMarrayctorPrank1() {{.*}}: () -> !fir.box>> -! CHECK: %[[VAL_31:.*]] = fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_10]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: %[[VAL_32:.*]] = arith.constant true ! CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_4]] : !fir.ref>>> ! CHECK: hlfir.as_expr %[[VAL_33]] move %[[VAL_32]] : (!fir.box>>, i1) -> !hlfir.expr diff --git a/flang/test/Lower/HLFIR/array-ctor-character.f90 b/flang/test/Lower/HLFIR/array-ctor-character.f90 index 7cbad5218f588..5538c6763c310 100644 --- a/flang/test/Lower/HLFIR/array-ctor-character.f90 +++ b/flang/test/Lower/HLFIR/array-ctor-character.f90 @@ -52,11 +52,11 @@ subroutine test_dynamic_length() ! CHECK: %[[VAL_15:.*]] = arith.constant true ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>>) -> !fir.ref> -! 
CHECK: %[[VAL_22:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_16]], %[[VAL_20]], %[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_16]], %[[VAL_20]], %[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: fir.call @_QMchararrayctorPchar_pointer( -! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_16]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_16]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: fir.call @_QMchararrayctorPchar_pointer( -! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_16]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorValue(%[[VAL_16]], %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box) -> () ! CHECK: %[[VAL_45:.*]] = arith.constant true ! CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_3]] : !fir.ref>>>> ! CHECK: %[[VAL_47:.*]] = hlfir.as_expr %[[VAL_46]] move %[[VAL_45]] : (!fir.box>>>, i1) -> !hlfir.expr<2x!fir.char<1,?>> diff --git a/flang/test/Lower/HLFIR/array-ctor-derived.f90 b/flang/test/Lower/HLFIR/array-ctor-derived.f90 index 22f7fbd72cb59..08e9abd1ec243 100644 --- a/flang/test/Lower/HLFIR/array-ctor-derived.f90 +++ b/flang/test/Lower/HLFIR/array-ctor-derived.f90 @@ -28,11 +28,11 @@ subroutine test_simple(s1, s2) ! CHECK: %[[VAL_11:.*]] = arith.constant false ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>>) -> !fir.ref> -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_12]], %[[VAL_16]], %[[VAL_11]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_12]], %[[VAL_16]], %[[VAL_11]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_4]]#1 : (!fir.ref>) -> !fir.llvm_ptr -! CHECK: %[[VAL_20:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_19]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_19]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_5]]#1 : (!fir.ref>) -> !fir.llvm_ptr -! CHECK: %[[VAL_22:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_21]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_21]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_23:.*]] = arith.constant true ! CHECK: %[[VAL_24:.*]] = hlfir.as_expr %[[VAL_9]]#0 move %[[VAL_23]] : (!fir.heap>>, i1) -> !hlfir.expr<2x!fir.type<_QMtypesTsimple{i:i32,j:i32}>> ! CHECK: fir.call @_QMderivedarrayctorPtakes_simple @@ -56,13 +56,13 @@ subroutine test_with_polymorphic(s1, s2) ! CHECK: %[[VAL_11:.*]] = arith.constant false ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>>) -> !fir.ref> -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_12]], %[[VAL_16]], %[[VAL_11]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_12]], %[[VAL_16]], %[[VAL_11]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! CHECK: %[[VAL_19A:.*]] = fir.box_addr %[[VAL_4]]#1 : (!fir.class>) -> !fir.ref> ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_19A]] : (!fir.ref>) -> !fir.llvm_ptr -! 
CHECK: %[[VAL_20:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_19]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_19]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_21A:.*]] = fir.box_addr %[[VAL_5]]#1 : (!fir.class>) -> !fir.ref> ! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_21A]] : (!fir.ref>) -> !fir.llvm_ptr -! CHECK: %[[VAL_22:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_21]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_12]], %[[VAL_21]]) {{.*}}: (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_23:.*]] = arith.constant true ! CHECK: %[[VAL_24:.*]] = hlfir.as_expr %[[VAL_9]]#0 move %[[VAL_23]] : (!fir.heap>>, i1) -> !hlfir.expr<2x!fir.type<_QMtypesTsimple{i:i32,j:i32}>> ! CHECK: fir.call @_QMderivedarrayctorPtakes_simple diff --git a/flang/test/Lower/HLFIR/assumed-rank-inquiries-3.f90 b/flang/test/Lower/HLFIR/assumed-rank-inquiries-3.f90 index af89cb833b337..aeb2c124d2628 100644 --- a/flang/test/Lower/HLFIR/assumed-rank-inquiries-3.f90 +++ b/flang/test/Lower/HLFIR/assumed-rank-inquiries-3.f90 @@ -10,7 +10,7 @@ subroutine test_shape(x) ! CHECK: %[[VAL_4:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAShape(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAShape(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -33,7 +33,7 @@ subroutine test_shape_kind(x) ! 
CHECK: %[[VAL_4:.*]] = arith.constant 8 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAShape(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAShape(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -49,7 +49,7 @@ subroutine test_shape_2(x) ! CHECK: %[[VAL_5:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>) -> !fir.box -! CHECK: %[[VAL_11:.*]] = fir.call @_FortranAShape(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAShape(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.box_rank %[[VAL_4]] : (!fir.box>>) -> index ! CHECK: %[[VAL_14:.*]] = fir.shape %[[VAL_13]] : (index) -> !fir.shape<1> @@ -65,7 +65,7 @@ subroutine test_lbound(x) ! CHECK: %[[VAL_4:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranALbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranALbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! 
CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -88,7 +88,7 @@ subroutine test_lbound_kind(x) ! CHECK: %[[VAL_4:.*]] = arith.constant 8 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranALbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranALbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -104,7 +104,7 @@ subroutine test_lbound_2(x) ! CHECK: %[[VAL_5:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>) -> !fir.box -! CHECK: %[[VAL_11:.*]] = fir.call @_FortranALbound(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranALbound(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.box_rank %[[VAL_4]] : (!fir.box>>) -> index ! CHECK: %[[VAL_14:.*]] = fir.shape %[[VAL_13]] : (index) -> !fir.shape<1> @@ -119,7 +119,7 @@ subroutine test_ubound(x) ! CHECK: %[[VAL_4:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAUbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAUbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! 
CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -142,7 +142,7 @@ subroutine test_ubound_kind(x) ! CHECK: %[[VAL_4:.*]] = arith.constant 8 : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3:.*]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAUbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAUbound(%[[VAL_7]], %[[VAL_8]], %[[VAL_4]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.box_rank %[[VAL_3]] : (!fir.box>) -> index ! CHECK: %[[VAL_13:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -158,7 +158,7 @@ subroutine test_ubound_2(x) ! CHECK: %[[VAL_5:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.llvm_ptr ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>) -> !fir.box -! CHECK: %[[VAL_11:.*]] = fir.call @_FortranAUbound(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) +! CHECK: fir.call @_FortranAUbound(%[[VAL_8]], %[[VAL_9]], %[[VAL_5]], %{{.*}}, %{{.*}}) ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.box_rank %[[VAL_4]] : (!fir.box>>) -> index ! CHECK: %[[VAL_14:.*]] = fir.shape %[[VAL_13]] : (index) -> !fir.shape<1> @@ -171,7 +171,7 @@ subroutine test_lbound_dim(x) ! CHECK-LABEL: func.func @_QPtest_lbound_dim( ! CHECK: %[[VAL_3:.*]] = arith.constant 2 : i32 ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_2:.*]]#0 : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranALboundDim(%[[VAL_6]], %[[VAL_3]], +! CHECK: fir.call @_FortranALboundDim(%[[VAL_6]], %[[VAL_3]], ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i64) -> i32 ! 
CHECK: %[[VAL_10:.*]]:3 = hlfir.associate %[[VAL_9]] @@ -186,7 +186,7 @@ subroutine test_ubound_dim(x) ! CHECK: %[[VAL_8:.*]] = fir.call @_FortranASizeDim(%[[VAL_6]], %[[VAL_3]], ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i64) -> i32 ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranALboundDim(%[[VAL_12]], %[[VAL_3]], +! CHECK: fir.call @_FortranALboundDim(%[[VAL_12]], %[[VAL_3]], ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (i64) -> i32 ! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_15]], %[[VAL_16]] : i32 diff --git a/flang/test/Lower/HLFIR/assumed-rank-inquiries.f90 b/flang/test/Lower/HLFIR/assumed-rank-inquiries.f90 index a94ae7da36593..6a44cbd86e80d 100644 --- a/flang/test/Lower/HLFIR/assumed-rank-inquiries.f90 +++ b/flang/test/Lower/HLFIR/assumed-rank-inquiries.f90 @@ -210,7 +210,7 @@ subroutine c_loc_2(x) ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64 ! CHECK: %[[VAL_7:.*]] = arith.cmpi eq, %[[VAL_5]], %[[VAL_6]] : i64 ! CHECK: fir.if %[[VAL_7]] { -! CHECK: %[[VAL_13:.*]] = fir.call @_FortranAReportFatalUserError +! CHECK: fir.call @_FortranAReportFatalUserError ! CHECK: } ! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_2]]#0 : !fir.ref>>> ! CHECK: %[[VAL_15:.*]] = fir.box_elesize %[[VAL_14]] : (!fir.class>>) -> i32 diff --git a/flang/test/Lower/HLFIR/cray-pointers.f90 b/flang/test/Lower/HLFIR/cray-pointers.f90 index bb49977dd2227..29b4f7b52ac09 100644 --- a/flang/test/Lower/HLFIR/cray-pointers.f90 +++ b/flang/test/Lower/HLFIR/cray-pointers.f90 @@ -21,7 +21,7 @@ end subroutine test1 ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref> ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_12]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_16]] : (!fir.ptr) -> !fir.llvm_ptr -! 
CHECK: %[[VAL_19:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_17]], %[[VAL_18]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_17]], %[[VAL_18]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref>>> ! CHECK: %[[VAL_21:.*]] = arith.constant 7 : index ! CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_20]] (%[[VAL_21]]) : (!fir.box>>, index) -> !fir.ref @@ -47,7 +47,7 @@ end subroutine test2 ! CHECK: %[[VAL_28:.*]] = fir.load %[[VAL_27]] : !fir.ref> ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_24]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_28]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_31:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_29]], %[[VAL_30]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_29]], %[[VAL_30]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_32:.*]] = fir.load %[[VAL_24]]#0 : !fir.ref>>> ! CHECK: %[[VAL_33:.*]] = arith.constant 7 : index ! CHECK: %[[VAL_34:.*]] = hlfir.designate %[[VAL_32]] (%[[VAL_33]]) : (!fir.box>>, index) -> !fir.ref @@ -75,7 +75,7 @@ end subroutine test3 ! CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_32]] : !fir.ref> ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_29]]#0 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_33]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_36:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_34]], %[[VAL_35]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_34]], %[[VAL_35]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_37:.*]] = fir.load %[[VAL_29]]#0 : !fir.ref>>>> ! CHECK: %[[VAL_38:.*]] = arith.constant 7 : index ! 
CHECK: %[[VAL_39:.*]] = hlfir.designate %[[VAL_37]] (%[[VAL_38]]) typeparams %[[VAL_8]] : (!fir.box>>>, index, index) -> !fir.ref> @@ -103,7 +103,7 @@ end subroutine test4 ! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_22]] : !fir.ref> ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_13]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_23]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_26:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_24]], %[[VAL_25]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_24]], %[[VAL_25]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_27:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref>>> ! CHECK: %[[VAL_28:.*]] = fir.box_addr %[[VAL_27]] : (!fir.box>>) -> !fir.ptr> ! CHECK: %[[VAL_29:.*]] = fir.emboxchar %[[VAL_28]], %[[VAL_8]] : (!fir.ptr>, i32) -> !fir.boxchar<1> @@ -134,7 +134,7 @@ end subroutine test5 ! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_16]] : !fir.ref> ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_13]]#0 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_20:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_18]], %[[VAL_19]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_18]], %[[VAL_19]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref>>>> ! CHECK: %[[VAL_22:.*]] = arith.constant 7 : index ! CHECK: %[[VAL_23:.*]] = hlfir.designate %[[VAL_21]] (%[[VAL_22]]) : (!fir.box>>>, index) -> !fir.ref> @@ -178,7 +178,7 @@ end subroutine test6 ! CHECK: %[[VAL_50:.*]] = fir.load %[[VAL_49]] : !fir.ref> ! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_20]]#0 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_52:.*]] = fir.convert %[[VAL_50]] : (!fir.ptr) -> !fir.llvm_ptr -! 
CHECK: %[[VAL_53:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_51]], %[[VAL_52]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_51]], %[[VAL_52]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_54:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref>>>> ! CHECK: %[[VAL_55:.*]] = arith.constant 9 : index ! CHECK: %[[VAL_56:.*]] = hlfir.designate %[[VAL_54]] (%[[VAL_55]]) typeparams %[[VAL_11]] : (!fir.box>>>, index, i32) -> !fir.boxchar<1> @@ -187,7 +187,7 @@ end subroutine test6 ! CHECK: %[[VAL_58:.*]] = fir.load %[[VAL_57]] : !fir.ref> ! CHECK: %[[VAL_59:.*]] = fir.convert %[[VAL_46]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_60:.*]] = fir.convert %[[VAL_58]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_61:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_59]], %[[VAL_60]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_59]], %[[VAL_60]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_62:.*]] = fir.load %[[VAL_46]]#0 : !fir.ref>>> ! CHECK: %[[VAL_63:.*]] = arith.constant 5 : index ! CHECK: %[[VAL_64:.*]] = hlfir.designate %[[VAL_62]] (%[[VAL_63]]) : (!fir.box>>, index) -> !fir.ref @@ -216,7 +216,7 @@ end subroutine test7 ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_14]] : !fir.ref> ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_9]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_15]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref>>> subroutine test8() @@ -238,7 +238,7 @@ end subroutine test8 ! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref> ! 
CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_5]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_11]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_12]], %[[VAL_13]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_12]], %[[VAL_13]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_5]]#0 : !fir.ref>>> ! CHECK: %[[VAL_16:.*]] = fir.box_addr %[[VAL_15]] : (!fir.box>>) -> !fir.ptr> ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (!fir.ptr>) -> !fir.ref> @@ -268,7 +268,7 @@ end subroutine test9 ! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref> ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_5]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_11]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_12]], %[[VAL_13]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_12]], %[[VAL_13]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_5]]#0 : !fir.ref>>> ! CHECK: %[[VAL_16:.*]] = hlfir.as_expr %[[VAL_15]] : (!fir.box>>) -> !hlfir.expr ! CHECK: %[[VAL_17:.*]] = arith.constant 0 : index @@ -297,7 +297,7 @@ end subroutine test10 ! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_8]] : !fir.ref> ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_9]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_12:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_10]], %[[VAL_11]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_10]], %[[VAL_11]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref>> ! 
CHECK: %[[VAL_14:.*]] = fir.box_addr %[[VAL_13]] : (!fir.box>) -> !fir.ptr ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (!fir.ptr) -> !fir.ref @@ -325,7 +325,7 @@ end subroutine test11 ! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_8]] : !fir.ref> ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_9]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_12:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_10]], %[[VAL_11]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_10]], %[[VAL_11]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref>> ! CHECK: %[[VAL_14:.*]] = fir.box_addr %[[VAL_13]] : (!fir.box>) -> !fir.ptr ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_14]] : !fir.ptr @@ -355,7 +355,7 @@ subroutine test_hidden_pointer ! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_6]] : !fir.ref> ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_7]] : (!fir.ptr) -> !fir.llvm_ptr -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_8]], %[[VAL_9]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_8]], %[[VAL_9]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref>> ! CHECK: %[[VAL_12:.*]] = fir.box_addr %[[VAL_11]] : (!fir.box>) -> !fir.ptr ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (!fir.ptr) -> !fir.ref @@ -417,7 +417,7 @@ subroutine internal() ! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_11]] : !fir.ref> ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_6]]#0 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_12]] : (!fir.ptr) -> !fir.llvm_ptr -! 
CHECK: %[[VAL_15:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_13]], %[[VAL_14]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPointerAssociateScalar(%[[VAL_13]], %[[VAL_14]]) fastmath : (!fir.ref>, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref>>> ! CHECK: %[[VAL_17:.*]] = fir.box_addr %[[VAL_16]] : (!fir.box>>) -> !fir.ptr> ! CHECK: %[[VAL_18:.*]] = fir.emboxchar %[[VAL_17]], %[[VAL_5]] : (!fir.ptr>, index) -> !fir.boxchar<1> diff --git a/flang/test/Lower/HLFIR/function-return-as-expr.f90 b/flang/test/Lower/HLFIR/function-return-as-expr.f90 index 95a0c090ef043..41c489decf158 100644 --- a/flang/test/Lower/HLFIR/function-return-as-expr.f90 +++ b/flang/test/Lower/HLFIR/function-return-as-expr.f90 @@ -70,7 +70,7 @@ end subroutine test4 ! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = ".tmp.func_result"} : (!fir.class>) -> (!fir.class>, !fir.class>) ! CHECK: hlfir.assign %[[VAL_7]]#0 to %{{.*}}#0 realloc : !fir.class>, !fir.ref>> ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>) -> !fir.box -! CHECK: fir.call @_FortranADestroy(%[[VAL_10]]) fastmath : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[VAL_10]]) fastmath : (!fir.box) -> () subroutine test4b class(*), allocatable :: p(:, :) @@ -85,7 +85,7 @@ end subroutine test4b ! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = ".tmp.func_result"} : (!fir.class>>) -> (!fir.class>>, !fir.class>>) ! CHECK: hlfir.assign %[[VAL_7]]#0 to %{{.*}}#0 realloc : !fir.class>>, !fir.ref>>> ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_0]] : (!fir.ref>>>) -> !fir.box -! CHECK: fir.call @_FortranADestroy(%[[VAL_10]]) fastmath : (!fir.box) -> none +! 
CHECK: fir.call @_FortranADestroy(%[[VAL_10]]) fastmath : (!fir.box) -> () subroutine test5 use types diff --git a/flang/test/Lower/HLFIR/ignore-rank-unlimited-polymorphic.f90 b/flang/test/Lower/HLFIR/ignore-rank-unlimited-polymorphic.f90 index c2118432a9813..bb3ce49059027 100644 --- a/flang/test/Lower/HLFIR/ignore-rank-unlimited-polymorphic.f90 +++ b/flang/test/Lower/HLFIR/ignore-rank-unlimited-polymorphic.f90 @@ -100,7 +100,7 @@ end subroutine test_derived_explicit_shape_array ! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_0]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_5:.*]] = fir.embox %[[VAL_3]]#1(%[[VAL_4]]) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_5]] : (!fir.box>}>>>) -> !fir.box -! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAInitialize(%[[VAL_8]], %{{.*}}, %{{.*}}) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_8]], %{{.*}}, %{{.*}}) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_3]]#0(%[[VAL_2]]) : (!fir.ref>}>>>, !fir.shape<1>) -> !fir.box>}>>> ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (!fir.box>}>>>) -> !fir.class> ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (!fir.class>) -> !fir.class diff --git a/flang/test/Lower/HLFIR/intentout-allocatable-components.f90 b/flang/test/Lower/HLFIR/intentout-allocatable-components.f90 index 9d4bedbd9be60..647f1c26c8dad 100644 --- a/flang/test/Lower/HLFIR/intentout-allocatable-components.f90 +++ b/flang/test/Lower/HLFIR/intentout-allocatable-components.f90 @@ -13,7 +13,7 @@ subroutine test_intentout_component_deallocate(a) ! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest_intentout_component_deallocateEa"} ! CHECK: %[[VAL_2:.*]] = fir.embox %[[VAL_1]]#1 : (!fir.ref>}>>) -> !fir.box>}>> ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (!fir.box>}>>) -> !fir.box -! 
CHECK: %[[VAL_4:.*]] = fir.call @_FortranADestroy(%[[VAL_3]]) fastmath : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[VAL_3]]) fastmath : (!fir.box) -> () subroutine test_intentout_optional_component_deallocate(a) type :: t @@ -28,5 +28,5 @@ subroutine test_intentout_optional_component_deallocate(a) ! CHECK: fir.if %[[VAL_2]] { ! CHECK: %[[VAL_3:.*]] = fir.embox %[[VAL_1]]#1 : (!fir.ref>}>>) -> !fir.box>}>> ! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (!fir.box>}>>) -> !fir.box -! CHECK: %[[VAL_5:.*]] = fir.call @_FortranADestroy(%[[VAL_4]]) fastmath : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[VAL_4]]) fastmath : (!fir.box) -> () ! CHECK: } diff --git a/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90 b/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90 index 5763d84cfd605..ed1d0a954d82f 100644 --- a/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90 +++ b/flang/test/Lower/HLFIR/internal-procedures-polymorphic.f90 @@ -50,7 +50,7 @@ subroutine internal() ! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_18]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[VAL_21:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32 -! CHECK: %[[VAL_23:.*]] = fir.call @_FortranAPointerNullifyDerived(%[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]]) fastmath : (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]]) fastmath : (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: } ! CHECK: fir.call @_QMcaptured_optional_polymorphicFtestPinternal(%[[VAL_7]]) diff --git a/flang/test/Lower/HLFIR/intrinsic-subroutines.f90 b/flang/test/Lower/HLFIR/intrinsic-subroutines.f90 index 216e044ec9cab..2812d124c4b7a 100644 --- a/flang/test/Lower/HLFIR/intrinsic-subroutines.f90 +++ b/flang/test/Lower/HLFIR/intrinsic-subroutines.f90 @@ -81,7 +81,7 @@ program main ! CHECK: %[[VAL_45:.*]] = arith.constant 1 : i32 ! 
CHECK: %[[VAL_46:.*]] = arith.constant false ! CHECK: %[[VAL_47:.*]] = arith.constant false -! CHECK: %[[VAL_48:.*]] = fir.call @_FortranAStopStatement(%[[VAL_45]], %[[VAL_46]], %[[VAL_47]]) fastmath : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_45]], %[[VAL_46]], %[[VAL_47]]) fastmath : (i32, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: ^bb2: ! CHECK: return diff --git a/flang/test/Lower/HLFIR/local-end-of-scope-component-dealloc.f90 b/flang/test/Lower/HLFIR/local-end-of-scope-component-dealloc.f90 index 06884138d28c3..246d18b243974 100644 --- a/flang/test/Lower/HLFIR/local-end-of-scope-component-dealloc.f90 +++ b/flang/test/Lower/HLFIR/local-end-of-scope-component-dealloc.f90 @@ -25,7 +25,7 @@ subroutine test1() type(t1) :: x1 end subroutine test1 ! CHECK-LABEL: func.func @_QPtest1() { -! CHECK-DAG: %[[VAL_10:.*]] = fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_9]] = fir.convert %{{.*}} : (!fir.box>}>>) -> !fir.box subroutine test1b() @@ -35,7 +35,7 @@ subroutine test1b() end block end subroutine test1b ! CHECK-LABEL: func.func @_QPtest1b() { -! CHECK-DAG: %[[VAL_11:.*]] = fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_10]] = fir.convert %{{.*}} : (!fir.box>}>>) -> !fir.box subroutine test2() @@ -43,7 +43,7 @@ subroutine test2() type(t2) :: x2 end subroutine test2 ! CHECK-LABEL: func.func @_QPtest2() { -! CHECK-DAG: %[[VAL_10:.*]] = fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_9]] = fir.convert %{{.*}} : (!fir.box>}>}>>) -> !fir.box subroutine test2b() @@ -53,7 +53,7 @@ subroutine test2b() end block end subroutine test2b ! 
CHECK-LABEL: func.func @_QPtest2b() { -! CHECK-DAG: %[[VAL_11:.*]] = fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_10]] = fir.convert %{{.*}} : (!fir.box>}>}>>) -> !fir.box subroutine test3() @@ -61,7 +61,7 @@ subroutine test3() type(t3) :: x3 end subroutine test3 ! CHECK-LABEL: func.func @_QPtest3() { -! CHECK-DAG: %[[VAL_10:.*]] = fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_9]] = fir.convert %{{.*}} : (!fir.box>}>}>>) -> !fir.box subroutine test3b() @@ -71,7 +71,7 @@ subroutine test3b() end block end subroutine test3b ! CHECK-LABEL: func.func @_QPtest3b() { -! CHECK-DAG: %[[VAL_11:.*]] = fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_10]] = fir.convert %{{.*}} : (!fir.box>}>}>>) -> !fir.box subroutine test4() @@ -79,7 +79,7 @@ subroutine test4() type(t4) :: x4 end subroutine test4 ! CHECK-LABEL: func.func @_QPtest4() { -! CHECK-DAG: %[[VAL_10:.*]] = fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_9]] = fir.convert %{{.*}} : (!fir.box>}>}>}>>) -> !fir.box subroutine test4b() @@ -89,7 +89,7 @@ subroutine test4b() end block end subroutine test4b ! CHECK-LABEL: func.func @_QPtest4b() { -! CHECK-DAG: %[[VAL_11:.*]] = fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_10]] = fir.convert %{{.*}} : (!fir.box>}>}>}>>) -> !fir.box subroutine test5() @@ -97,7 +97,7 @@ subroutine test5() type(t5) :: x5 end subroutine test5 ! 
CHECK-LABEL: func.func @_QPtest5() { -! CHECK-DAG: %[[VAL_10:.*]] = fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_9:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_9]] = fir.convert %{{.*}} : (!fir.box>}>}>}>>) -> !fir.box subroutine test5b() @@ -107,5 +107,5 @@ subroutine test5b() end block end subroutine test5b ! CHECK-LABEL: func.func @_QPtest5b() { -! CHECK-DAG: %[[VAL_11:.*]] = fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> none +! CHECK-DAG: fir.call @_FortranADestroy(%[[VAL_10:.*]]) fastmath : (!fir.box) -> () ! CHECK-DAG: %[[VAL_10]] = fir.convert %{{.*}} : (!fir.box>}>}>}>>) -> !fir.box diff --git a/flang/test/Lower/HLFIR/structure-constructor.f90 b/flang/test/Lower/HLFIR/structure-constructor.f90 index ed9ee5d0ac363..3a82145ddf4f6 100644 --- a/flang/test/Lower/HLFIR/structure-constructor.f90 +++ b/flang/test/Lower/HLFIR/structure-constructor.f90 @@ -50,7 +50,7 @@ end subroutine test1 ! CHECK: %[[VAL_11:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_9]] : (!fir.box}>>) -> !fir.box ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_10]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAInitialize(%[[VAL_12]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_12]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_15:.*]] = arith.constant 4 : index ! CHECK: %[[VAL_16:.*]] = hlfir.designate %[[VAL_8]]#0{"c"} typeparams %[[VAL_15]] : (!fir.ref}>>, index) -> !fir.ref> ! CHECK: hlfir.assign %[[VAL_7]]#0 to %[[VAL_16]] temporary_lhs : !fir.ref>, !fir.ref> @@ -78,7 +78,7 @@ end subroutine test2 ! CHECK: %[[VAL_10:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_8]] : (!fir.box}>>) -> !fir.box ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_9]] : (!fir.ref>) -> !fir.ref -! 
CHECK: %[[VAL_13:.*]] = fir.call @_FortranAInitialize(%[[VAL_11]], %[[VAL_12]], %[[VAL_10]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_11]], %[[VAL_12]], %[[VAL_10]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_14:.*]] = arith.constant 10 : index ! CHECK: %[[VAL_15:.*]] = fir.shape %[[VAL_14]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_16:.*]] = hlfir.designate %[[VAL_7]]#0{"i"} <%[[VAL_15]]> shape %[[VAL_15]] : (!fir.ref}>>, !fir.shape<1>, !fir.shape<1>) -> !fir.ref> @@ -103,7 +103,7 @@ end subroutine test3 ! CHECK: %[[VAL_6:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_9:.*]] = fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest3Ex"} : (!fir.ref>>>, !fir.dscope) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "ctor.temp"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) ! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]]#0 : (!fir.ref>>}>>) -> !fir.box>>}>> @@ -111,7 +111,7 @@ end subroutine test3 ! CHECK: %[[VAL_14:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_17:.*]] = fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! 
CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_11]]#0{"r"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>) -> !fir.ref>>> ! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_10]]#0 : !fir.ref>>> ! CHECK: %[[VAL_20:.*]] = arith.constant 0 : index @@ -139,7 +139,7 @@ end subroutine test4 ! CHECK: %[[VAL_6:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>>}>>) -> !fir.box ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_9:.*]] = fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_10:.*]] = arith.constant 2 : index ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_0]] typeparams %[[VAL_10]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest4Ex"} : (!fir.ref>>>>, index, !fir.dscope) -> (!fir.ref>>>>, !fir.ref>>>>) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "ctor.temp"} : (!fir.ref>>>}>>) -> (!fir.ref>>>}>>, !fir.ref>>>}>>) @@ -148,7 +148,7 @@ end subroutine test4 ! CHECK: %[[VAL_15:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.box>>>}>>) -> !fir.box ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_14]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_19:.*]] = arith.constant 2 : index ! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_12]]#0{"c"} typeparams %[[VAL_19]] {fortran_attrs = #fir.var_attrs} : (!fir.ref>>>}>>, index) -> !fir.ref>>>> ! CHECK: %[[VAL_21:.*]] = fir.load %[[VAL_11]]#1 : !fir.ref>>>> @@ -183,7 +183,7 @@ end subroutine test5 ! 
CHECK: %[[VAL_6:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.box>>>}>>>>}>>) -> !fir.box ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_5]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_9:.*]] = fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_7]], %[[VAL_8]], %[[VAL_6]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest5Ex"} : (!fir.ref>>>}>>>>>, !fir.dscope) -> (!fir.ref>>>}>>>>>, !fir.ref>>>}>>>>>) ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "ctor.temp"} : (!fir.ref>>>}>>>>}>>) -> (!fir.ref>>>}>>>>}>>, !fir.ref>>>}>>>>}>>) ! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]]#0 : (!fir.ref>>>}>>>>}>>) -> !fir.box>>>}>>>>}>> @@ -191,7 +191,7 @@ end subroutine test5 ! CHECK: %[[VAL_14:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.box>>>}>>>>}>>) -> !fir.box ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_17:.*]] = fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_11]]#0{"t5m"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>>}>>>>}>>) -> !fir.ref>>>}>>>>> ! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_10]]#1 : !fir.ref>>>}>>>>> ! CHECK: %[[VAL_20:.*]] = fir.box_addr %[[VAL_19]] : (!fir.box>>>}>>>>) -> !fir.heap>>>}>>> @@ -234,7 +234,7 @@ end subroutine test6 ! CHECK: %[[VAL_15:.*]] = arith.constant {{[0-9]*}} : i32 ! 
CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.box>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) -> !fir.box ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_14]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest6Ex"} : (!fir.ref>>>}>>>>>, !fir.dscope) -> (!fir.ref>>>}>>>>>, !fir.ref>>>}>>>>>) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "ctor.temp"} : (!fir.ref>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) -> (!fir.ref>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>, !fir.ref>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) ! CHECK: %[[VAL_21:.*]] = fir.embox %[[VAL_20]]#0 : (!fir.ref>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) -> !fir.box>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>> @@ -242,7 +242,7 @@ end subroutine test6 ! CHECK: %[[VAL_23:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_21]] : (!fir.box>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) -> !fir.box ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_22]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_26:.*]] = fir.call @_FortranAInitialize(%[[VAL_24]], %[[VAL_25]], %[[VAL_23]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_24]], %[[VAL_25]], %[[VAL_23]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! 
CHECK: %[[VAL_27:.*]] = hlfir.designate %[[VAL_20]]#0{"t5"} : (!fir.ref>>>}>>>>}>,t6m:!fir.array<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>}>>) -> !fir.ref>>>}>>>>}>> ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "ctor.temp"} : (!fir.ref>>>}>>>>}>>) -> (!fir.ref>>>}>>>>}>>, !fir.ref>>>}>>>>}>>) ! CHECK: %[[VAL_29:.*]] = fir.embox %[[VAL_28]]#0 : (!fir.ref>>>}>>>>}>>) -> !fir.box>>>}>>>>}>> @@ -250,7 +250,7 @@ end subroutine test6 ! CHECK: %[[VAL_31:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_29]] : (!fir.box>>>}>>>>}>>) -> !fir.box ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_30]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_34:.*]] = fir.call @_FortranAInitialize(%[[VAL_32]], %[[VAL_33]], %[[VAL_31]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_32]], %[[VAL_33]], %[[VAL_31]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_35:.*]] = hlfir.designate %[[VAL_28]]#0{"t5m"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>>}>>>>}>>) -> !fir.ref>>>}>>>>> ! CHECK: %[[VAL_36:.*]] = fir.load %[[VAL_19]]#1 : !fir.ref>>>}>>>>> ! CHECK: %[[VAL_37:.*]] = fir.box_addr %[[VAL_36]] : (!fir.box>>>}>>>>) -> !fir.heap>>>}>>> @@ -277,19 +277,19 @@ end subroutine test6 ! CHECK: %[[VAL_54:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_4]] : (!fir.ref}>>>>>) -> !fir.ref> ! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_53]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_57:.*]] = fir.call @_FortranAInitArrayConstructorVector(%[[VAL_51]], %[[VAL_55]], %[[VAL_50]], %[[VAL_56]], %[[VAL_54]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitArrayConstructorVector(%[[VAL_51]], %[[VAL_55]], %[[VAL_50]], %[[VAL_56]], %[[VAL_54]]) fastmath : (!fir.llvm_ptr, !fir.ref>, i1, !fir.ref, i32) -> () ! 
CHECK: %[[VAL_58:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "ctor.temp"} : (!fir.ref}>>) -> (!fir.ref}>>, !fir.ref}>>) ! CHECK: %[[VAL_59:.*]] = fir.embox %[[VAL_58]]#0 : (!fir.ref}>>) -> !fir.box}>> ! CHECK: %[[VAL_60:.*]] = fir.address_of(@_QQclX{{.*}}) : !fir.ref> ! CHECK: %[[VAL_61:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_62:.*]] = fir.convert %[[VAL_59]] : (!fir.box}>>) -> !fir.box ! CHECK: %[[VAL_63:.*]] = fir.convert %[[VAL_60]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_64:.*]] = fir.call @_FortranAInitialize(%[[VAL_62]], %[[VAL_63]], %[[VAL_61]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_62]], %[[VAL_63]], %[[VAL_61]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_65:.*]] = arith.constant 4 : index ! CHECK: %[[VAL_66:.*]] = hlfir.designate %[[VAL_58]]#0{"c"} typeparams %[[VAL_65]] : (!fir.ref}>>, index) -> !fir.ref> ! CHECK: hlfir.assign %[[VAL_10]]#0 to %[[VAL_66]] temporary_lhs : !fir.ref>, !fir.ref> ! CHECK: %[[VAL_67:.*]] = fir.convert %[[VAL_58]]#1 : (!fir.ref}>>) -> !fir.llvm_ptr -! CHECK: %[[VAL_68:.*]] = fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_51]], %[[VAL_67]]) fastmath : (!fir.llvm_ptr, !fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranAPushArrayConstructorSimpleScalar(%[[VAL_51]], %[[VAL_67]]) fastmath : (!fir.llvm_ptr, !fir.llvm_ptr) -> () ! CHECK: %[[VAL_69:.*]] = arith.constant true ! CHECK: %[[VAL_70:.*]] = hlfir.as_expr %[[VAL_48]]#0 move %[[VAL_69]] : (!fir.heap}>>>, i1) -> !hlfir.expr<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>> ! CHECK: hlfir.assign %[[VAL_70]] to %[[VAL_44]] temporary_lhs : !hlfir.expr<1x!fir.type<_QMtypesTt1{c:!fir.char<1,4>}>>, !fir.ref}>>> @@ -323,14 +323,14 @@ end subroutine test7 ! CHECK: %[[VAL_7:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_5]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_6]] : (!fir.ref>) -> !fir.ref -! 
CHECK: %[[VAL_10:.*]] = fir.call @_FortranAInitialize(%[[VAL_8]], %[[VAL_9]], %[[VAL_7]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_8]], %[[VAL_9]], %[[VAL_7]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "ctor.temp"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) ! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]]#0 : (!fir.ref>>}>>) -> !fir.box>>}>> ! CHECK: %[[VAL_13:.*]] = fir.address_of(@_QQclX{{.*}}) : !fir.ref> ! CHECK: %[[VAL_14:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_17:.*]] = fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %[[VAL_16]], %[[VAL_14]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_11]]#0{"c1"} : (!fir.ref>>}>>) -> !fir.ref ! CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_2]]#0 : !fir.ref ! CHECK: hlfir.assign %[[VAL_19]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref @@ -355,7 +355,7 @@ end subroutine test8 ! CHECK: %[[VAL_5:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_3]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_9:.*]] = fir.alloca !fir.box>> {bindc_name = "x", uniq_name = "_QFtest8Ex"} ! CHECK: %[[VAL_10:.*]] = arith.constant 12 : index ! CHECK: %[[VAL_11:.*]] = fir.zero_bits !fir.heap> @@ -368,7 +368,7 @@ end subroutine test8 ! 
CHECK: %[[VAL_17:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_15]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_16]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_20:.*]] = fir.call @_FortranAInitialize(%[[VAL_18]], %[[VAL_19]], %[[VAL_17]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_18]], %[[VAL_19]], %[[VAL_17]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_21:.*]] = arith.constant 11 : index ! CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0{"c"} typeparams %[[VAL_21]] {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>, index) -> !fir.ref>>> ! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_13]]#1 : !fir.ref>>> @@ -404,7 +404,7 @@ end subroutine test9 ! CHECK: %[[VAL_5:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_3]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_9:.*]] = arith.constant 12 : index ! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.char<1,12> {bindc_name = "x", uniq_name = "_QFtest9Ex"} ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] typeparams %[[VAL_9]] {uniq_name = "_QFtest9Ex"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) @@ -414,7 +414,7 @@ end subroutine test9 ! CHECK: %[[VAL_15:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_13]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_14]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_19:.*]] = arith.constant 11 : index ! CHECK: %[[VAL_20:.*]] = hlfir.designate %[[VAL_12]]#0{"c"} typeparams %[[VAL_19]] {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>, index) -> !fir.ref>>> ! CHECK: hlfir.assign %[[VAL_11]]#0 to %[[VAL_20]] realloc keep_lhs_len temporary_lhs : !fir.ref>, !fir.ref>>> @@ -449,7 +449,7 @@ end subroutine test10 ! CHECK: %[[VAL_11:.*]] = arith.constant {{[0-9]*}} : i32 ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_9]] : (!fir.box}>>) -> !fir.box ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_10]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAInitialize(%[[VAL_12]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_12]], %[[VAL_13]], %[[VAL_11]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_15:.*]] = arith.constant 4 : index ! CHECK: %[[VAL_16:.*]] = hlfir.designate %[[VAL_8]]#0{"c"} typeparams %[[VAL_15]] : (!fir.ref}>>, index) -> !fir.ref> ! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_7]]#0 : !fir.ref>>> diff --git a/flang/test/Lower/Intrinsics/abort.f90 b/flang/test/Lower/Intrinsics/abort.f90 index 942d3c8cd9af6..1b51708cbf7e7 100644 --- a/flang/test/Lower/Intrinsics/abort.f90 +++ b/flang/test/Lower/Intrinsics/abort.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-fir %s -o - | FileCheck %s ! CHECK-LABEL: func.func @_QPabort_test() { -! CHECK: %[[VAL_0:.*]] = fir.call @_FortranAAbort() {{.*}}: () -> none +! CHECK: fir.call @_FortranAAbort() {{.*}}: () -> () ! CHECK: return ! CHECK: } diff --git a/flang/test/Lower/Intrinsics/adjustl.f90 b/flang/test/Lower/Intrinsics/adjustl.f90 index a8d004cd52665..56c93996015f4 100644 --- a/flang/test/Lower/Intrinsics/adjustl.f90 +++ b/flang/test/Lower/Intrinsics/adjustl.f90 @@ -13,7 +13,7 @@ subroutine adjustl_test ! 
CHECK: %[[r3:.*]] = fir.convert %[[strBox]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[r4:.*]] = fir.convert %[[eBox]] : (!fir.box>) -> !fir.box ! CHECK: %[[r5:.*]] = fir.convert %[[r2]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[r6:.*]] = fir.call @_FortranAAdjustl(%[[r3]], %[[r4]], %[[r5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAdjustl(%[[r3]], %[[r4]], %[[r5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () adjust_str = adjustl(adjust_str) end subroutine diff --git a/flang/test/Lower/Intrinsics/adjustr.f90 b/flang/test/Lower/Intrinsics/adjustr.f90 index 07aa08c994586..17c2a1647bb8d 100644 --- a/flang/test/Lower/Intrinsics/adjustr.f90 +++ b/flang/test/Lower/Intrinsics/adjustr.f90 @@ -13,7 +13,7 @@ subroutine adjustr_test ! CHECK: %[[r3:.*]] = fir.convert %[[strBox]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[r4:.*]] = fir.convert %[[eBox]] : (!fir.box>) -> !fir.box ! CHECK: %[[r5:.*]] = fir.convert %[[r2]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[r6:.*]] = fir.call @_FortranAAdjustr(%[[r3]], %[[r4]], %[[r5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAdjustr(%[[r3]], %[[r4]], %[[r5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () adjust_str = adjustr(adjust_str) end subroutine diff --git a/flang/test/Lower/Intrinsics/all.f90 b/flang/test/Lower/Intrinsics/all.f90 index 3eb7ea70dfb16..343169f0b10ea 100644 --- a/flang/test/Lower/Intrinsics/all.f90 +++ b/flang/test/Lower/Intrinsics/all.f90 @@ -24,7 +24,7 @@ subroutine all_test2(mask, d, rslt) ! CHECK: %[[a6:.*]] = fir.convert %[[a0:.*]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[a7:.*]] = fir.convert %[[arg0:.*]]: (!fir.box>>) -> !fir.box rslt = all(mask, d) -! CHECK: %[[r1:.*]] = fir.call @_FortranAAllDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranAAllDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[a10:.*]] = fir.load %[[a0:.*]] : !fir.ref>>>> ! CHECK: %[[a12:.*]] = fir.box_addr %[[a10:.*]] : (!fir.box>>>) -> !fir.heap>> ! CHECK: fir.freemem %[[a12:.*]] diff --git a/flang/test/Lower/Intrinsics/any.f90 b/flang/test/Lower/Intrinsics/any.f90 index 1ea22d9706744..e4dc20e3de2f3 100644 --- a/flang/test/Lower/Intrinsics/any.f90 +++ b/flang/test/Lower/Intrinsics/any.f90 @@ -24,7 +24,7 @@ subroutine any_test2(mask, d, rslt) ! CHECK-DAG: %[[a6:.*]] = fir.convert %[[a0:.*]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0:.*]]: (!fir.box>>) -> !fir.box rslt = any(mask, d) -! CHECK: %[[r1:.*]] = fir.call @_FortranAAnyDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAnyDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK-DAG: %[[a10:.*]] = fir.load %[[a0:.*]] : !fir.ref>>>> ! CHECK-DAG: %[[a12:.*]] = fir.box_addr %[[a10:.*]] : (!fir.box>>>) -> !fir.heap>> ! CHECK-DAG fir.freemem %[[a12:.*]] diff --git a/flang/test/Lower/Intrinsics/bessel_jn.f90 b/flang/test/Lower/Intrinsics/bessel_jn.f90 index 428733d547d7b..f6ea8d296d576 100644 --- a/flang/test/Lower/Intrinsics/bessel_jn.f90 +++ b/flang/test/Lower/Intrinsics/bessel_jn.f90 @@ -47,22 +47,22 @@ subroutine test_transformational_real4(x, n1, n2, r) ! ALL-DAG: %[[n1eqn2:.*]] = arith.cmpi eq, %[[n1]], %[[n2]] : i32 ! ALL: fir.if %[[xeq0]] { ! ALL: %[[resxeq0:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJnX0_4(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJnX0_4(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> () ! 
ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1ltn2]] { ! ALL-DAG: %[[n2_1:.*]] = arith.subi %[[n2]], %[[one]] : i32 ! ALL-DAG: %[[bn2:.*]] = fir.call @jnf(%[[n2]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[bn2_1:.*]] = fir.call @jnf(%[[n2_1]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[resn1ltn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_4(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[bn2_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJn_4(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[bn2_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1eqn2]] { ! ALL-DAG: %[[bn2:.*]] = fir.call @jnf(%[[n2]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[resn1eqn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_4(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJn_4(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-DAG: %[[resn1gtn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_4(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJn_4(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } ! ALL-NEXT: } ! ALL-NEXT: } @@ -90,22 +90,22 @@ subroutine test_transformational_real8(x, n1, n2, r) ! ALL-DAG: %[[n1eqn2:.*]] = arith.cmpi eq, %[[n1]], %[[n2]] : i32 ! ALL: fir.if %[[xeq0]] { ! ALL: %[[resxeq0:.*]] = fir.convert %[[r]] {{.*}} - ! 
ALL: fir.call @_FortranABesselJnX0_8(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJnX0_8(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1ltn2]] { ! ALL-DAG: %[[n2_1:.*]] = arith.subi %[[n2]], %[[one]] : i32 ! ALL-DAG: %[[bn2:.*]] = fir.call @jn(%[[n2]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[bn2_1:.*]] = fir.call @jn(%[[n2_1]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[resn1ltn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_8(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[bn2_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJn_8(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[bn2_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1eqn2]] { ! ALL-DAG: %[[bn2:.*]] = fir.call @jn(%[[n2]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[resn1eqn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_8(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselJn_8(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn2]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-DAG: %[[resn1gtn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselJn_8(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! 
ALL: fir.call @_FortranABesselJn_8(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } ! ALL-NEXT: } ! ALL-NEXT: } diff --git a/flang/test/Lower/Intrinsics/bessel_yn.f90 b/flang/test/Lower/Intrinsics/bessel_yn.f90 index ac77e4db5614d..2097fb9006ff5 100644 --- a/flang/test/Lower/Intrinsics/bessel_yn.f90 +++ b/flang/test/Lower/Intrinsics/bessel_yn.f90 @@ -47,22 +47,22 @@ subroutine test_transformational_real4(x, n1, n2, r) ! ALL-DAG: %[[n1eqn2:.*]] = arith.cmpi eq, %[[n1]], %[[n2]] : i32 ! ALL: fir.if %[[xeq0]] { ! ALL: %[[resxeq0:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYnX0_4(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYnX0_4(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1ltn2]] { ! ALL-DAG: %[[n1_1:.*]] = arith.addi %[[n1]], %[[one]] : i32 ! ALL-DAG: %[[bn1:.*]] = fir.call @ynf(%[[n1]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[bn1_1:.*]] = fir.call @ynf(%[[n1_1]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[resn1ltn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYn_4(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[bn1_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYn_4(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[bn1_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1eqn2]] { ! ALL-DAG: %[[bn1:.*]] = fir.call @ynf(%[[n1]], %[[x]]) {{.*}} : (i32, f32) -> f32 ! ALL-DAG: %[[resn1eqn2:.*]] = fir.convert %[[r]] {{.*}} - ! 
ALL: fir.call @_FortranABesselYn_4(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYn_4(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-DAG: %[[resn1gtn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYn_4(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYn_4(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f32, f32, f32, !fir.ref, i32) -> () ! ALL-NEXT: } ! ALL-NEXT: } ! ALL-NEXT: } @@ -90,22 +90,22 @@ subroutine test_transformational_real8(x, n1, n2, r) ! ALL-DAG: %[[n1eqn2:.*]] = arith.cmpi eq, %[[n1]], %[[n2]] : i32 ! ALL: fir.if %[[xeq0]] { ! ALL: %[[resxeq0:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYnX0_8(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYnX0_8(%[[resxeq0]], %[[n1]], %[[n2]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1ltn2]] { ! ALL-DAG: %[[n1_1:.*]] = arith.addi %[[n1]], %[[one]] : i32 ! ALL-DAG: %[[bn1:.*]] = fir.call @yn(%[[n1]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[bn1_1:.*]] = fir.call @yn(%[[n1_1]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[resn1ltn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYn_8(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[bn1_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! 
ALL: fir.call @_FortranABesselYn_8(%[[resn1ltn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[bn1_1]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-NEXT: fir.if %[[n1eqn2]] { ! ALL-DAG: %[[bn1:.*]] = fir.call @yn(%[[n1]], %[[x]]) {{.*}} : (i32, f64) -> f64 ! ALL-DAG: %[[resn1eqn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYn_8(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYn_8(%[[resn1eqn2]], %[[n1]], %[[n2]], %[[x]], %[[bn1]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } else { ! ALL-DAG: %[[resn1gtn2:.*]] = fir.convert %[[r]] {{.*}} - ! ALL: fir.call @_FortranABesselYn_8(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> none + ! ALL: fir.call @_FortranABesselYn_8(%[[resn1gtn2]], %[[n1]], %[[n2]], %[[x]], %[[zero]], %[[zero]], {{.*}}, {{.*}}) {{.*}} : (!fir.ref>, i32, i32, f64, f64, f64, !fir.ref, i32) -> () ! ALL-NEXT: } ! ALL-NEXT: } ! ALL-NEXT: } diff --git a/flang/test/Lower/Intrinsics/count.f90 b/flang/test/Lower/Intrinsics/count.f90 index 1eef676e79244..c3efe6b4bf077 100644 --- a/flang/test/Lower/Intrinsics/count.f90 +++ b/flang/test/Lower/Intrinsics/count.f90 @@ -24,7 +24,7 @@ subroutine test_count2(rslt, mask) ! CHECK: %[[a6:.*]] = fir.convert %[[arg1]] : (!fir.box>>) -> !fir.box ! CHECK: %[[a7:.*]] = fir.convert %[[c4]] : (index) -> i32 rslt = count(mask, dim=1) - ! CHECK: %{{.*}} = fir.call @_FortranACountDim(%[[a5]], %[[a6]], %[[c1_i32]], %[[a7]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none + ! 
CHECK: fir.call @_FortranACountDim(%[[a5]], %[[a6]], %[[c1_i32]], %[[a7]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () ! CHECK: %[[a10:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK: %[[a12:.*]] = fir.box_addr %[[a10]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a12]] diff --git a/flang/test/Lower/Intrinsics/date_and_time.f90 b/flang/test/Lower/Intrinsics/date_and_time.f90 index 900880f778447..55b1383766cb8 100644 --- a/flang/test/Lower/Intrinsics/date_and_time.f90 +++ b/flang/test/Lower/Intrinsics/date_and_time.f90 @@ -15,7 +15,7 @@ subroutine date_and_time_test(date, time, zone, values) ! CHECK: %[[zoneBuffer:.*]] = fir.convert %[[zoneUnbox]]#0 : (!fir.ref>) -> !fir.ref ! CHECK: %[[zoneLen:.*]] = fir.convert %[[zoneUnbox]]#1 : (index) -> i64 ! CHECK: %[[valuesCast:.*]] = fir.convert %[[values]] : (!fir.box>) -> !fir.box - ! CHECK: fir.call @_FortranADateAndTime(%[[dateBuffer]], %[[dateLen]], %[[timeBuffer]], %[[timeLen]], %[[zoneBuffer]], %[[zoneLen]], %{{.*}}, %{{.*}}, %[[valuesCast]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> none + ! CHECK: fir.call @_FortranADateAndTime(%[[dateBuffer]], %[[dateLen]], %[[timeBuffer]], %[[timeLen]], %[[zoneBuffer]], %[[zoneLen]], %{{.*}}, %{{.*}}, %[[valuesCast]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> () call date_and_time(date, time, zone, values) end subroutine @@ -31,7 +31,7 @@ subroutine date_and_time_test2(date) ! CHECK: %[[timeLen:.*]] = fir.convert %c0{{.*}} : (index) -> i64 ! CHECK: %[[zoneBuffer:.*]] = fir.convert %c0{{.*}} : (index) -> !fir.ref ! CHECK: %[[zoneLen:.*]] = fir.convert %c0{{.*}} : (index) -> i64 - ! CHECK: fir.call @_FortranADateAndTime(%[[dateBuffer]], %[[dateLen]], %[[timeBuffer]], %[[timeLen]], %[[zoneBuffer]], %[[zoneLen]], %{{.*}}, %{{.*}}, %[[values]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> none + ! 
CHECK: fir.call @_FortranADateAndTime(%[[dateBuffer]], %[[dateLen]], %[[timeBuffer]], %[[timeLen]], %[[zoneBuffer]], %[[zoneLen]], %{{.*}}, %{{.*}}, %[[values]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> () call date_and_time(date) end subroutine @@ -69,5 +69,5 @@ subroutine date_and_time_dynamic_optional(date, time, zone, values) ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_5]]#0 : (!fir.ref>) -> !fir.ref ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_5]]#1 : (index) -> i64 ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_16]] : (!fir.box>>) -> !fir.box - ! CHECK: %[[VAL_28:.*]] = fir.call @_FortranADateAndTime(%[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %{{.*}}, %{{.*}}, %[[VAL_26]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> none + ! CHECK: fir.call @_FortranADateAndTime(%[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %{{.*}}, %{{.*}}, %[[VAL_26]]) {{.*}}: (!fir.ref, i64, !fir.ref, i64, !fir.ref, i64, !fir.ref, i32, !fir.box) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/dot_product.f90 b/flang/test/Lower/Intrinsics/dot_product.f90 index e67e9d598cd84..9a825c4b9acf1 100644 --- a/flang/test/Lower/Intrinsics/dot_product.f90 +++ b/flang/test/Lower/Intrinsics/dot_product.f90 @@ -168,7 +168,7 @@ subroutine dot_prod_complex_default (x, y, z) ! CHECK-DAG: %[[res:.*]] = fir.alloca complex ! CHECK-DAG: %[[x_conv:.*]] = fir.convert %[[x]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[y_conv:.*]] = fir.convert %[[y]] : (!fir.box>>) -> !fir.box - ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! 
CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () z = dot_product(x,y) end subroutine @@ -182,7 +182,7 @@ subroutine dot_prod_complex_kind_4 (x, y, z) ! CHECK-DAG: %[[res:.*]] = fir.alloca complex ! CHECK-DAG: %[[x_conv:.*]] = fir.convert %[[x]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[y_conv:.*]] = fir.convert %[[y]] : (!fir.box>>) -> !fir.box - ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () z = dot_product(x,y) end subroutine @@ -196,7 +196,7 @@ subroutine dot_prod_complex_kind_8 (x, y, z) ! CHECK-DAG: %[[res:.*]] = fir.alloca complex ! CHECK-DAG: %[[x_conv:.*]] = fir.convert %[[x]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[y_conv:.*]] = fir.convert %[[y]] : (!fir.box>>) -> !fir.box - ! CHECK-DAG: fir.call @_FortranACppDotProductComplex8(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK-DAG: fir.call @_FortranACppDotProductComplex8(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () z = dot_product(x,y) end subroutine @@ -264,7 +264,7 @@ subroutine dot_product_mixed_int_complex(x, y, z) ! CHECK-DAG: %[[res:.*]] = fir.alloca complex ! CHECK-DAG: %[[x_conv:.*]] = fir.convert %[[x]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[y_conv:.*]] = fir.convert %[[y]] : (!fir.box>>) -> !fir.box - ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! 
CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () z = dot_product(x,y) end subroutine @@ -278,6 +278,6 @@ subroutine dot_product_mixed_real_complex(x, y, z) ! CHECK-DAG: %[[res:.*]] = fir.alloca complex ! CHECK-DAG: %[[x_conv:.*]] = fir.convert %[[x]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[y_conv:.*]] = fir.convert %[[y]] : (!fir.box>>) -> !fir.box - ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK-DAG: fir.call @_FortranACppDotProductComplex4(%[[res]], %[[x_conv]], %[[y_conv]], %{{[0-9]+}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () z = dot_product(x,y) end subroutine diff --git a/flang/test/Lower/Intrinsics/eoshift.f90 b/flang/test/Lower/Intrinsics/eoshift.f90 index 5d916dcdb56c2..9cd0b86fadc52 100644 --- a/flang/test/Lower/Intrinsics/eoshift.f90 +++ b/flang/test/Lower/Intrinsics/eoshift.f90 @@ -19,7 +19,7 @@ subroutine eoshift_test1(arr, shift) ! CHECK: %[[resIRBox:.*]] = fir.convert %[[resBox]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[arrBox:.*]] = fir.convert %[[arr]] : (!fir.box>>) -> !fir.box ! CHECK: %[[shiftBox:.*]] = fir.convert %[[shift]] : (i32) -> i64 - ! CHECK: %[[tmp:.*]] = fir.call @_FortranAEoshiftVector(%[[resIRBox]], %[[arrBox]], %[[shiftBox]], %[[boundBox]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEoshiftVector(%[[resIRBox]], %[[arrBox]], %[[shiftBox]], %[[boundBox]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.array_merge_store %[[resLoad]], {{.*}} to %[[res]] : !fir.array<3x!fir.logical<4>>, !fir.array<3x!fir.logical<4>>, !fir.ref>> end subroutine eoshift_test1 @@ -43,7 +43,7 @@ subroutine eoshift_test2(arr, shift, bound, dim) ! 
CHECK: %[[shiftBoxNone:.*]] = fir.convert %[[shiftBox]] : (!fir.box>) -> !fir.box ! CHECK: %[[boundBoxNone:.*]] = fir.convert %[[boundBox]] : (!fir.box) -> !fir.box - ! CHECK: %[[tmp:.*]] = fir.call @_FortranAEoshift(%[[resIRBox]], %[[arrBox]], %[[shiftBoxNone]], %[[boundBoxNone]], %[[dim]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEoshift(%[[resIRBox]], %[[arrBox]], %[[shiftBoxNone]], %[[boundBoxNone]], %[[dim]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: fir.array_merge_store %[[resLoad]], {{.*}} to %[[res]] : !fir.array<3x3xi32>, !fir.array<3x3xi32>, !fir.ref> end subroutine eoshift_test2 @@ -67,7 +67,7 @@ subroutine eoshift_test3(arr, shift, dim) ! CHECK: %[[resIRBox:.*]] = fir.convert %[[resBox]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[arrayBoxNone:.*]] = fir.convert %[[arrayBox]] : (!fir.box>>) -> !fir.box ! CHECK: %[[shiftBoxNone:.*]] = fir.convert %[[shiftBox]] : (!fir.box) -> !fir.box - ! CHECK: %[[tmp:.*]] = fir.call @_FortranAEoshift(%[[resIRBox]], %[[arrayBoxNone]], %[[shiftBoxNone]], %[[boundBox]], %[[dim]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEoshift(%[[resIRBox]], %[[arrayBoxNone]], %[[shiftBoxNone]], %[[boundBox]], %[[dim]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: fir.array_merge_store %[[resLoad]], {{.*}} to %[[res]] : !fir.array<3x3x!fir.char<1,4>>, !fir.array<3x3x!fir.char<1,4>>, !fir.ref>> end subroutine eoshift_test3 @@ -90,5 +90,5 @@ subroutine eoshift_test_dynamic_optional(array, shift, boundary) ! CHECK: %[[VAL_8:.*]] = fir.absent !fir.box> ! CHECK: %[[VAL_9:.*]] = arith.select %[[VAL_5]], %[[VAL_7]], %[[VAL_8]] : !fir.box> ! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_9]] : (!fir.box>) -> !fir.box - ! 
CHECK: fir.call @_FortranAEoshift(%{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_21]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEoshift(%{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_21]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/etime-function.f90 b/flang/test/Lower/Intrinsics/etime-function.f90 index e3279189c7523..f4594cee7525d 100644 --- a/flang/test/Lower/Intrinsics/etime-function.f90 +++ b/flang/test/Lower/Intrinsics/etime-function.f90 @@ -18,8 +18,8 @@ subroutine etime_test(values, time) ! CHECK-NEXT: %[[timeTmpBox:.*]] = fir.embox %[[timeTmpAddr]] : (!fir.ref) -> !fir.box ! CHECK: %[[values:.*]] = fir.convert %[[valuesBox]] : (!fir.box>) -> !fir.box ! CHECK: %[[timeTmp:.*]] = fir.convert %[[timeTmpBox]] : (!fir.box) -> !fir.box - ! CHECK: %[[VAL_9:.*]] = fir.call @_FortranAEtime(%[[values]], %[[timeTmp]], %[[VAL_7:.*]], %[[c9]]) fastmath : (!fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEtime(%[[values]], %[[timeTmp]], %[[VAL_7:.*]], %[[c9]]) fastmath : (!fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: %[[timeValue:.*]] = fir.load %[[timeTmpAddr]] : !fir.ref ! CHECK-NEXT: fir.store %[[timeValue]] to %[[timeDeclare]] : !fir.ref ! CHECK-NEXT: return -end subroutine etime_test \ No newline at end of file +end subroutine etime_test diff --git a/flang/test/Lower/Intrinsics/etime.f90 b/flang/test/Lower/Intrinsics/etime.f90 index 3e7ae0e9a406d..fe5d16b64cd0c 100644 --- a/flang/test/Lower/Intrinsics/etime.f90 +++ b/flang/test/Lower/Intrinsics/etime.f90 @@ -17,6 +17,6 @@ subroutine etime_test(values, time) ! CHECK-NEXT: %[[timeBox:.*]] = fir.embox %[[timeDeclare]] : (!fir.ref) -> !fir.box ! CHECK: %[[values:.*]] = fir.convert %[[valuesBox]] : (!fir.box>) -> !fir.box ! CHECK: %[[time:.*]] = fir.convert %[[timeBox]] : (!fir.box) -> !fir.box - ! 
CHECK: %[[VAL_9:.*]] = fir.call @_FortranAEtime(%[[values]], %[[time]], %[[VAL_7:.*]], %[[c9]]) fastmath : (!fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAEtime(%[[values]], %[[time]], %[[VAL_7:.*]], %[[c9]]) fastmath : (!fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return -end subroutine etime_test \ No newline at end of file +end subroutine etime_test diff --git a/flang/test/Lower/Intrinsics/execute_command_line-optional.f90 b/flang/test/Lower/Intrinsics/execute_command_line-optional.f90 index e4f9a241197c8..00a3258c9a647 100644 --- a/flang/test/Lower/Intrinsics/execute_command_line-optional.f90 +++ b/flang/test/Lower/Intrinsics/execute_command_line-optional.f90 @@ -50,6 +50,6 @@ subroutine all_args_optional(command, isWait, exitVal, cmdVal, msg) ! CHECK-NEXT: %[[exitstat:.*]] = fir.convert %[[exitstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdmsg:.*]] = fir.convert %[[cmdmsgBox]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_30:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[wait]], %[[exitstat]], %[[cmdstat]], %[[cmdmsg]], %[[VAL_29:.*]], %[[c14]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[wait]], %[[exitstat]], %[[cmdstat]], %[[cmdmsg]], %[[VAL_29:.*]], %[[c14]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return end subroutine all_args_optional diff --git a/flang/test/Lower/Intrinsics/execute_command_line.f90 b/flang/test/Lower/Intrinsics/execute_command_line.f90 index 6bde50e807b28..77f1750c504bd 100644 --- a/flang/test/Lower/Intrinsics/execute_command_line.f90 +++ b/flang/test/Lower/Intrinsics/execute_command_line.f90 @@ -42,7 +42,7 @@ subroutine all_args(command, isWait, exitVal, cmdVal, msg) ! 
CHECK-NEXT: %[[exitstat:.*]] = fir.convert %[[exitstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdmsg:.*]] = fir.convert %[[cmdmsgBox]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_22:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[wait]], %[[exitstat]], %[[cmdstat]], %[[cmdmsg]], %[[VAL_20:.*]], %[[c13]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[wait]], %[[exitstat]], %[[cmdstat]], %[[cmdmsg]], %[[VAL_20:.*]], %[[c13]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return end subroutine all_args @@ -61,6 +61,6 @@ subroutine only_command_default_wait_true(command) ! CHECK-NEXT: %[[commandBox:.*]] = fir.embox %[[commandDeclare]] : (!fir.ref>) -> !fir.box> ! CHECK-NEXT: %[[absent:.*]] = fir.absent !fir.box ! CHECK: %[[command:.*]] = fir.convert %[[commandBox]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %true, %[[absent]], %[[absent]], %[[absent]], %[[VAL_7:.*]], %[[c52]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %true, %[[absent]], %[[absent]], %[[absent]], %[[VAL_7:.*]], %[[c52]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return end subroutine only_command_default_wait_true diff --git a/flang/test/Lower/Intrinsics/exit-2.f90 b/flang/test/Lower/Intrinsics/exit-2.f90 index 7158eeb70db81..458d2a83cf76b 100644 --- a/flang/test/Lower/Intrinsics/exit-2.f90 +++ b/flang/test/Lower/Intrinsics/exit-2.f90 @@ -14,7 +14,7 @@ subroutine exit_opt_dummy(status) ! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 ! CHECK: fir.result %[[VAL_4]] : i32 ! CHECK: } -! 
CHECK: %[[VAL_5:.*]] = fir.call @_FortranAExit(%[[VAL_6:.*]]) {{.*}}: (i32) -> none +! CHECK: fir.call @_FortranAExit(%[[VAL_6:.*]]) {{.*}}: (i32) -> () end subroutine ! CHECK-LABEL: func @_QPexit_pointer( @@ -36,5 +36,5 @@ subroutine exit_pointer(status) ! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32 ! CHECK: fir.result %[[VAL_10]] : i32 ! CHECK: } -! CHECK: %[[VAL_11:.*]] = fir.call @_FortranAExit(%[[VAL_12:.*]]) {{.*}}: (i32) -> none +! CHECK: fir.call @_FortranAExit(%[[VAL_12:.*]]) {{.*}}: (i32) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/exit.f90 b/flang/test/Lower/Intrinsics/exit.f90 index bd551f7318a84..d80efc556f95e 100644 --- a/flang/test/Lower/Intrinsics/exit.f90 +++ b/flang/test/Lower/Intrinsics/exit.f90 @@ -7,8 +7,8 @@ subroutine exit_test1 call exit() ! CHECK: %[[status:.*]] = arith.constant 0 : i[[DEFAULT_INTEGER_SIZE]] ! CHECK-64: %[[statusConvert:.*]] = fir.convert %[[status]] : (i64) -> i32 - ! CHECK-32: %{{[0-9]+}} = fir.call @_FortranAExit(%[[status]]) {{.*}}: (i32) -> none - ! CHECK-64: %{{[0-9]+}} = fir.call @_FortranAExit(%[[statusConvert]]) {{.*}}: (i32) -> none + ! CHECK-32: fir.call @_FortranAExit(%[[status]]) {{.*}}: (i32) -> () + ! CHECK-64: fir.call @_FortranAExit(%[[statusConvert]]) {{.*}}: (i32) -> () end subroutine exit_test1 ! CHECK-LABEL: func @_QPexit_test2( @@ -18,6 +18,6 @@ subroutine exit_test2(status) call exit(status) ! CHECK: %[[status:.*]] = fir.load %[[statusArg]] : !fir.ref ! CHECK-64: %[[statusConv:.*]] = fir.convert %[[status]] : (i64) -> i32 - ! CHECK-32: %{{[0-9]+}} = fir.call @_FortranAExit(%[[status]]) {{.*}}: (i32) -> none - ! CHECK-64: %{{[0-9]+}} = fir.call @_FortranAExit(%[[statusConv]]) {{.*}}: (i32) -> none + ! CHECK-32: fir.call @_FortranAExit(%[[status]]) {{.*}}: (i32) -> () + ! 
CHECK-64: fir.call @_FortranAExit(%[[statusConv]]) {{.*}}: (i32) -> () end subroutine exit_test2 diff --git a/flang/test/Lower/Intrinsics/findloc.f90 b/flang/test/Lower/Intrinsics/findloc.f90 index b8b337e8f085f..a82a5277753ac 100644 --- a/flang/test/Lower/Intrinsics/findloc.f90 +++ b/flang/test/Lower/Intrinsics/findloc.f90 @@ -18,7 +18,7 @@ function findloc_test_1d(a, v) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 findloc_test_1d = findloc(a, v) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -41,7 +41,7 @@ function findloc_test_2d(a, v) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 findloc_test_2d = findloc(a, v) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -66,7 +66,7 @@ function findloc_test_byval(a, v) ! 
CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 findloc_test_byval = findloc(a, v) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -89,7 +89,7 @@ function findloc_test_back_true(a, v) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 findloc_test_back_true = findloc(a, v, back=.true.) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %true) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %true) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -114,7 +114,7 @@ function findloc_test_back(a, v, back) ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 ! CHECK-DAG: %[[back:.*]] = fir.convert %[[b]] : (!fir.logical<4>) -> i1 findloc_test_back = findloc(a, v, back=back) - ! 
CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %[[back]]) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %[[back]]) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -138,7 +138,7 @@ subroutine findloc_test_dim(a, v, res) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 res = findloc(a, v, dim=1) - ! CHECK: %{{.*}} = fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[c1]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[c1]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -163,7 +163,7 @@ subroutine findloc_test_dim_unknown(a, v, dim, res) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 res = findloc(a, v, dim=dim) - ! CHECK: %{{.*}} = fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[dim]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + ! 
CHECK: fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[dim]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -185,7 +185,7 @@ subroutine findloc_test_kind(a, v, res) ! CHECK-DAG: %[[val:.*]] = fir.convert %[[v]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box) -> !fir.box res = findloc(a, v, kind=8) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -208,7 +208,7 @@ subroutine findloc_test_non_scalar_mask(a, v, mask, res) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[arg2]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 res = findloc(a, v, mask=mask) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! 
CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -232,7 +232,7 @@ subroutine findloc_test_scalar_mask(a, v, mask, res) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[m]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[kind:.*]] = fir.convert %[[c4]] : (index) -> i32 res = findloc(a, v, mask=mask) - ! CHECK: %{{.*}} = fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindloc(%[[res]], %[[arr]], %[[val]], %[[kind]], %{{.*}}, %{{.*}}, %[[mask]], %false) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[addr]] : !fir.heap> @@ -258,7 +258,7 @@ subroutine findloc_test_all(a, v, dim, mask, back, res) ! CHECK-DAG: %[[mask:.*]] = fir.convert %[[arg3]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[back:.*]] = fir.convert %[[b]] : (!fir.logical<4>) -> i1 res = findloc(a, v, dim=dim, mask=mask, kind=8, back=back) - ! CHECK: %{{.*}} = fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[dim]], %{{.*}}, %{{.*}}, %[[mask]], %[[back]]) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAFindlocDim(%[[res]], %[[arr]], %[[val]], %[[kind]], %[[dim]], %{{.*}}, %{{.*}}, %[[mask]], %[[back]]) fastmath : (!fir.ref>, !fir.box, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! 
CHECK: fir.freemem %[[addr]] : !fir.heap> diff --git a/flang/test/Lower/Intrinsics/free.f90 b/flang/test/Lower/Intrinsics/free.f90 index bb8d38e737aa7..1bfe48f550754 100644 --- a/flang/test/Lower/Intrinsics/free.f90 +++ b/flang/test/Lower/Intrinsics/free.f90 @@ -10,7 +10,7 @@ subroutine free_ptr() ! CHECK: %[[X_PTR_DECL:.*]]:2 = hlfir.declare %[[X_PTR]] {uniq_name = "_QFfree_ptrEptr_x"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X]] {fortran_attrs = #fir.var_attrs, uniq_name = "_QFfree_ptrEx"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: %[[X_LD:.*]] = fir.load %[[X_PTR_DECL]]#0 : !fir.ref - ! CHECK: %[[VOID:.*]] = fir.call @_FortranAFree(%[[X_LD]]) fastmath : (i64) -> none + ! CHECK: fir.call @_FortranAFree(%[[X_LD]]) fastmath : (i64) -> () ! CHECK: return call free(ptr_x) end subroutine @@ -24,7 +24,7 @@ subroutine free_i8 ! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X]] {uniq_name = "_QFfree_i8Ex"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[X_LD:.*]] = fir.load %[[X_DECL]]#0 : !fir.ref ! CHECK: %[[X_I64:.*]] = fir.convert %[[X_LD]] : (i8) -> i64 - ! CHECK: %[[VOID:.*]] = fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> none + ! CHECK: fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> () ! CHECK: return call free(x) end subroutine @@ -37,7 +37,7 @@ subroutine free_i16 ! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X]] {uniq_name = "_QFfree_i16Ex"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[X_LD:.*]] = fir.load %[[X_DECL]]#0 : !fir.ref ! CHECK: %[[X_I64:.*]] = fir.convert %[[X_LD]] : (i16) -> i64 - ! CHECK: %[[VOID:.*]] = fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> none + ! CHECK: fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> () ! CHECK: return call free(x) end subroutine @@ -49,7 +49,7 @@ subroutine free_i32 ! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X]] {uniq_name = "_QFfree_i32Ex"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[X_LD:.*]] = fir.load %[[X_DECL]]#0 : !fir.ref ! CHECK: %[[X_I64:.*]] = fir.convert %[[X_LD]] : (i32) -> i64 - ! CHECK: %[[VOID:.*]] = fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> none + ! CHECK: fir.call @_FortranAFree(%[[X_I64]]) fastmath : (i64) -> () ! CHECK: return call free(x) end subroutine @@ -60,7 +60,7 @@ subroutine free_i64 ! CHECK: %[[X:.*]] = fir.alloca i64 {bindc_name = "x", uniq_name = "_QFfree_i64Ex"} ! CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X]] {uniq_name = "_QFfree_i64Ex"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[X_LD:.*]] = fir.load %[[X_DECL]]#0 : !fir.ref - ! CHECK: %[[VOID:.*]] = fir.call @_FortranAFree(%[[X_LD]]) fastmath : (i64) -> none + ! CHECK: fir.call @_FortranAFree(%[[X_LD]]) fastmath : (i64) -> () ! CHECK: return call free(x) end subroutine diff --git a/flang/test/Lower/Intrinsics/iall.f90 b/flang/test/Lower/Intrinsics/iall.f90 index 119cb90a52da2..a1d320ea8b1a2 100644 --- a/flang/test/Lower/Intrinsics/iall.f90 +++ b/flang/test/Lower/Intrinsics/iall.f90 @@ -77,7 +77,7 @@ subroutine iall_test2(a,r) ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = iall(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranAIAllDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAIAllDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK-DAG: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! 
CHECK-DAG: fir.freemem %[[a13]] diff --git a/flang/test/Lower/Intrinsics/iand.f90 b/flang/test/Lower/Intrinsics/iand.f90 index a6e4fbcdfe9f4..0954948a62af4 100644 --- a/flang/test/Lower/Intrinsics/iand.f90 +++ b/flang/test/Lower/Intrinsics/iand.f90 @@ -74,6 +74,6 @@ subroutine iand_test6(s1, s2) ! CHECK-DAG: %[[S2_VAL:.*]] = fir.load %[[S2]] : !fir.ref stop iand(s1,s2) ! CHECK-DAG: %[[ANDI:.*]] = arith.andi %[[S1_VAL]], %[[S2_VAL]] : i32 -! CHECK: fir.call @_FortranAStopStatement(%[[ANDI]], {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[ANDI]], {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> () ! CHECK-NEXT: fir.unreachable end subroutine iand_test6 diff --git a/flang/test/Lower/Intrinsics/iany.f90 b/flang/test/Lower/Intrinsics/iany.f90 index 1f33a7a5c5cfb..3b9036bb670fe 100644 --- a/flang/test/Lower/Intrinsics/iany.f90 +++ b/flang/test/Lower/Intrinsics/iany.f90 @@ -77,7 +77,7 @@ subroutine iany_test2(a,r) ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = iany(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranAIAnyDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAIAnyDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK-DAG: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] diff --git a/flang/test/Lower/Intrinsics/index.f90 b/flang/test/Lower/Intrinsics/index.f90 index f1204458f7a40..0ec8cfad83adf 100644 --- a/flang/test/Lower/Intrinsics/index.f90 +++ b/flang/test/Lower/Intrinsics/index.f90 @@ -31,7 +31,7 @@ integer function index_test2(s1, s2) ! CHECK: %[[a2:.*]] = fir.convert %[[ssb]] : (!fir.box>) -> !fir.box ! 
CHECK: %[[a3:.*]] = fir.convert %[[back]] : (!fir.box>) -> !fir.box ! CHECK: %[[a5:.*]] = fir.convert %{{.*}} : (!fir.ref>) -> !fir.ref - ! CHECK: fir.call @_FortranAIndex(%[[a0]], %[[a1]], %[[a2]], %[[a3]], %{{.*}}, %[[a5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAIndex(%[[a0]], %[[a1]], %[[a2]], %[[a3]], %{{.*}}, %[[a5]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () index_test2 = index(s1, s2, .true., 4) ! CHECK: %[[ld1:.*]] = fir.load %[[mut]] : !fir.ref>> ! CHECK: %[[ad1:.*]] = fir.box_addr %[[ld1]] : (!fir.box>) -> !fir.heap diff --git a/flang/test/Lower/Intrinsics/iparity.f90 b/flang/test/Lower/Intrinsics/iparity.f90 index 46b9ca5fc86fa..fab2b07e859c0 100644 --- a/flang/test/Lower/Intrinsics/iparity.f90 +++ b/flang/test/Lower/Intrinsics/iparity.f90 @@ -77,7 +77,7 @@ subroutine iparity_test2(a,r) ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = iparity(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranAIParityDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAIParityDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK-DAG: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] diff --git a/flang/test/Lower/Intrinsics/matmul.f90 b/flang/test/Lower/Intrinsics/matmul.f90 index db60963320144..77e7fa213717e 100644 --- a/flang/test/Lower/Intrinsics/matmul.f90 +++ b/flang/test/Lower/Intrinsics/matmul.f90 @@ -23,7 +23,7 @@ ! CHECK: %[[RESULT_BOX_ADDR_RUNTIME:.*]] = fir.convert %[[RESULT_BOX_ADDR]] : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %[[X_BOX_RUNTIME:.*]] = fir.convert %[[X_BOX]] : (!fir.box>) -> !fir.box ! CHECK: %[[Y_BOX_RUNTIME:.*]] = fir.convert %[[Y_BOX]] : (!fir.box>) -> !fir.box -! CHECK: {{.*}}fir.call @_FortranAMatmulReal4Real4(%[[RESULT_BOX_ADDR_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}} {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: {{.*}}fir.call @_FortranAMatmulReal4Real4(%[[RESULT_BOX_ADDR_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}} {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[RESULT_BOX:.*]] = fir.load %[[RESULT_BOX_ADDR]] : !fir.ref>>> ! CHECK: %[[RESULT_TMP:.*]] = fir.box_addr %[[RESULT_BOX]] : (!fir.box>>) -> !fir.heap> ! CHECK: %[[Z_COPY_FROM_RESULT:.*]] = fir.do_loop @@ -50,7 +50,7 @@ subroutine matmul_test(x,y,z) !CHECK: %[[RESULT_BOX_RUNTIME:.*]] = fir.convert %[[RESULT_BOX_ADDR]] : (!fir.ref>>>>) -> !fir.ref> !CHECK: %[[X_BOX_RUNTIME:.*]] = fir.convert %[[X_BOX]] : (!fir.box>>) -> !fir.box !CHECK: %[[Y_BOX_RUNTIME:.*]] = fir.convert %[[Y_BOX]] : (!fir.box>>) -> !fir.box -!CHECK: {{.*}}fir.call @_FortranAMatmulLogical4Logical4(%[[RESULT_BOX_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +!CHECK: {{.*}}fir.call @_FortranAMatmulLogical4Logical4(%[[RESULT_BOX_RUNTIME]], %[[X_BOX_RUNTIME]], %[[Y_BOX_RUNTIME]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () !CHECK: %[[RESULT_BOX:.*]] = fir.load %[[RESULT_BOX_ADDR]] : !fir.ref>>>> !CHECK: %[[RESULT_TMP:.*]] = fir.box_addr %[[RESULT_BOX]] : (!fir.box>>>) -> !fir.heap>> !CHECK: %[[Z_COPY_FROM_RESULT:.*]] = fir.do_loop diff --git a/flang/test/Lower/Intrinsics/maxloc.f90 b/flang/test/Lower/Intrinsics/maxloc.f90 index e299e5ab63e64..87f17881e0476 100644 --- a/flang/test/Lower/Intrinsics/maxloc.f90 +++ b/flang/test/Lower/Intrinsics/maxloc.f90 @@ -13,7 +13,7 @@ subroutine maxloc_test(arr,res) ! 
CHECK-DAG: %[[a8:.*]] = fir.convert %[[c4]] : (index) -> i32 ! CHECK-DAG: %[[a10:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box res = maxloc(arr) - ! CHECK: %{{.*}} = fir.call @_FortranAMaxlocInteger4(%[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMaxlocInteger4(%[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK-DAG: %[[a12:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a14:.*]] = fir.box_addr %[[a12]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a14]] @@ -34,7 +34,7 @@ subroutine maxloc_test2(arr,res,d) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[c4]] : (index) -> i32 ! CHECK-DAG: %[[a10:.*]] = fir.convert %[[a2]] : (!fir.box) -> !fir.box res = maxloc(arr, dim=d) - ! CHECK: %{{.*}} = fir.call @_FortranAMaxlocDim(%[[a6]], %[[a7]], %[[a8]], %[[a1]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMaxlocDim(%[[a6]], %[[a7]], %[[a8]], %[[a1]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[a12:.*]] = fir.load %0 : !fir.ref>> ! CHECK: %[[a13:.*]] = fir.box_addr %[[a12]] : (!fir.box>) -> !fir.heap ! CHECK: fir.freemem %[[a13]] @@ -63,7 +63,7 @@ subroutine test_maxloc_optional_scalar_mask(mask, back, array) ! CHECK: } ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_12]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_14]] : (!fir.logical<4>) -> i1 - ! CHECK: fir.call @_FortranAMaxlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_29]], %[[VAL_30]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! 
CHECK: fir.call @_FortranAMaxlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_29]], %[[VAL_30]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () end subroutine ! CHECK-LABEL: func @_QPtest_maxloc_optional_array_mask( @@ -85,5 +85,5 @@ subroutine test_maxloc_optional_array_mask(mask, back, array) ! CHECK: } ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_0]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_10]] : (!fir.logical<4>) -> i1 - ! CHECK: fir.call @_FortranAMaxlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMaxlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/maxval.f90 b/flang/test/Lower/Intrinsics/maxval.f90 index 9e8b6e04cd684..92c868bd1fd01 100644 --- a/flang/test/Lower/Intrinsics/maxval.f90 +++ b/flang/test/Lower/Intrinsics/maxval.f90 @@ -23,7 +23,7 @@ integer function maxval_test(a) ! CHECK: %[[a6:.*]] = fir.convert %[[arg2]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box maxval_test2 = maxval(a) -! CHECK: %{{.*}} = fir.call @_FortranAMaxvalCharacter(%[[a5]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAMaxvalCharacter(%[[a5]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> () end function ! CHECK-LABEL: func @_QPmaxval_test3( @@ -38,7 +38,7 @@ subroutine maxval_test3(a,r) ! CHECK: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = maxval(a,dim=2) -! 
CHECK: %{{.*}} = fir.call @_FortranAMaxvalDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAMaxvalDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] diff --git a/flang/test/Lower/Intrinsics/minloc.f90 b/flang/test/Lower/Intrinsics/minloc.f90 index 2a361cc94639f..caab36d0f8138 100644 --- a/flang/test/Lower/Intrinsics/minloc.f90 +++ b/flang/test/Lower/Intrinsics/minloc.f90 @@ -13,7 +13,7 @@ subroutine minloc_test(arr,res) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[c4]] : (index) -> i32 ! CHECK-DAG: %[[a10:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box res = minloc(arr) - ! CHECK: %{{.*}} = fir.call @_FortranAMinlocInteger4(%[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMinlocInteger4(%[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK-DAG: %[[a12:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a14:.*]] = fir.box_addr %[[a12]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a14]] @@ -34,7 +34,7 @@ subroutine minloc_test2(arr,res,d) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[c4]] : (index) -> i32 ! CHECK-DAG: %[[a10:.*]] = fir.convert %[[a2]] : (!fir.box) -> !fir.box res = minloc(arr, dim=d) - ! CHECK: %{{.*}} = fir.call @_FortranAMinlocDim(%[[a6]], %[[a7]], %[[a8]], %[[a1]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + ! 
CHECK: fir.call @_FortranAMinlocDim(%[[a6]], %[[a7]], %[[a8]], %[[a1]], %{{.*}}, %{{.*}}, %[[a10]], %false) {{.*}}: (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () ! CHECK: %[[a12:.*]] = fir.load %0 : !fir.ref>> ! CHECK: %[[a13:.*]] = fir.box_addr %[[a12]] : (!fir.box>) -> !fir.heap ! CHECK: fir.freemem %[[a13]] @@ -63,7 +63,7 @@ subroutine test_minloc_optional_scalar_mask(mask, back, array) ! CHECK: } ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_12]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_14]] : (!fir.logical<4>) -> i1 - ! CHECK: fir.call @_FortranAMinlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_29]], %[[VAL_30]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMinlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_29]], %[[VAL_30]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () end subroutine ! CHECK-LABEL: func @_QPtest_minloc_optional_array_mask( @@ -85,5 +85,5 @@ subroutine test_minloc_optional_array_mask(mask, back, array) ! CHECK: } ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_0]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_10]] : (!fir.logical<4>) -> i1 - ! CHECK: fir.call @_FortranAMinlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + ! CHECK: fir.call @_FortranAMinlocInteger4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/minval.f90 b/flang/test/Lower/Intrinsics/minval.f90 index cff34a4f1e7e6..59132f1813673 100644 --- a/flang/test/Lower/Intrinsics/minval.f90 +++ b/flang/test/Lower/Intrinsics/minval.f90 @@ -23,7 +23,7 @@ integer function minval_test(a) ! 
CHECK: %[[a6:.*]] = fir.convert %[[arg2]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box minval_test2 = minval(a) -! CHECK: %{{.*}} = fir.call @_FortranAMinvalCharacter(%[[a5]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAMinvalCharacter(%[[a5]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, !fir.box) -> () end function ! CHECK-LABEL: func @_QPminval_test3( @@ -38,7 +38,7 @@ subroutine minval_test3(a,r) ! CHECK: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = minval(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranAMinvalDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAMinvalDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] diff --git a/flang/test/Lower/Intrinsics/norm2.f90 b/flang/test/Lower/Intrinsics/norm2.f90 index 0d125e36f6650..ac761ae3f5381 100644 --- a/flang/test/Lower/Intrinsics/norm2.f90 +++ b/flang/test/Lower/Intrinsics/norm2.f90 @@ -55,7 +55,7 @@ subroutine norm2_test_dim_2(a,r) ! CHECK-DAG: %[[res:.*]] = fir.convert %[[r]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[arr:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box r = norm2(a,dim=1) - ! CHECK: %{{.*}} = fir.call @_FortranANorm2Dim(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranANorm2Dim(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! 
CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[addr]] @@ -71,7 +71,7 @@ subroutine norm2_test_dim_3(a,r) ! CHECK-DAG: %[[res:.*]] = fir.convert %[[r]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[arr:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box r = norm2(a,dim=3) - ! CHECK: %{{.*}} = fir.call @_FortranANorm2Dim(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranANorm2Dim(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[addr]] @@ -87,7 +87,7 @@ subroutine norm2_test_real16(a,r) ! CHECK-DAG: %[[res:.*]] = fir.convert %[[r]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[arr:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box r = norm2(a,dim=3) - ! CHECK: %{{.*}} = fir.call @_FortranANorm2DimReal16(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranANorm2DimReal16(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref>>> ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[addr]] diff --git a/flang/test/Lower/Intrinsics/pack.f90 b/flang/test/Lower/Intrinsics/pack.f90 index 37e3170316b4c..a00c10dc2e959 100644 --- a/flang/test/Lower/Intrinsics/pack.f90 +++ b/flang/test/Lower/Intrinsics/pack.f90 @@ -16,7 +16,7 @@ subroutine pack_test(a,m,v,r) ! CHECK: %[[a7:.*]] = fir.convert %[[arg1]] : (!fir.box>>) -> !fir.box ! CHECK: %[[a8:.*]] = fir.convert %[[arg2]] : (!fir.box>) -> !fir.box r = pack(a,m,v) - ! 
CHECK: %{{.*}} = fir.call @_FortranAPack(%[[a5]], %[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAPack(%[[a5]], %[[a6]], %[[a7]], %[[a8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a13]] @@ -38,5 +38,5 @@ subroutine test_pack_optional(vector, array, mask) ! CHECK: %[[VAL_15:.*]] = fir.absent !fir.box>> ! CHECK: %[[VAL_16:.*]] = arith.select %[[VAL_13]], %[[VAL_14]], %[[VAL_15]] : !fir.box>> ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_16]] : (!fir.box>>) -> !fir.box - ! CHECK: fir.call @_FortranAPack(%{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_26]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAPack(%{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_26]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/parity.f90 b/flang/test/Lower/Intrinsics/parity.f90 index 6771b7d703275..91b168ee5662d 100644 --- a/flang/test/Lower/Intrinsics/parity.f90 +++ b/flang/test/Lower/Intrinsics/parity.f90 @@ -25,7 +25,7 @@ subroutine parity_test2(mask, d, rslt) ! CHECK-DAG: %[[a6:.*]] = fir.convert %[[a0:.*]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0:.*]]: (!fir.box>>) -> !fir.box rslt = parity(mask, d) - ! CHECK: %[[r1:.*]] = fir.call @_FortranAParityDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAParityDim(%[[a6:.*]], %[[a7:.*]], %[[a1:.*]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK-DAG: %[[a10:.*]] = fir.load %[[a0:.*]] : !fir.ref>>>> ! 
CHECK-DAG: %[[a12:.*]] = fir.box_addr %[[a10:.*]] : (!fir.box>>>) -> !fir.heap>> ! CHECK-DAG fir.freemem %[[a12:.*]] diff --git a/flang/test/Lower/Intrinsics/product.f90 b/flang/test/Lower/Intrinsics/product.f90 index 77b8ab8e7f5a9..b2fc809b15ef3 100644 --- a/flang/test/Lower/Intrinsics/product.f90 +++ b/flang/test/Lower/Intrinsics/product.f90 @@ -25,7 +25,7 @@ subroutine product_test2(a,r) ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = product(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranAProductDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranAProductDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK-DAG: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] @@ -42,7 +42,7 @@ subroutine product_test2(a,r) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[c0]] : (index) -> i32 ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a3]] : (!fir.box) -> !fir.box product_test3 = product(a) -! CHECK: %{{.*}} = fir.call @_FortranACppProductComplex4(%[[a0]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]], %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> none +! CHECK: fir.call @_FortranACppProductComplex4(%[[a0]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]], %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> () end function ! CHECK-LABEL: func @_QPproduct_test4( diff --git a/flang/test/Lower/Intrinsics/random.f90 b/flang/test/Lower/Intrinsics/random.f90 index 4fb1a9a5da27a..5f7d6414e606d 100644 --- a/flang/test/Lower/Intrinsics/random.f90 +++ b/flang/test/Lower/Intrinsics/random.f90 @@ -8,14 +8,14 @@ subroutine random_test_1 ! 
CHECK-DAG: [[rr:%[0-9]+]] = fir.alloca {{.*}}random_test_1Err ! CHECK-DAG: [[aa:%[0-9]+]] = fir.alloca {{.*}}random_test_1Eaa real rr, aa(5) - ! CHECK: fir.call @_FortranARandomInit(%true{{.*}}, %false{{.*}}) {{.*}}: (i1, i1) -> none + ! CHECK: fir.call @_FortranARandomInit(%true{{.*}}, %false{{.*}}) {{.*}}: (i1, i1) -> () call random_init(.true., .false.) ! CHECK: [[box:%[0-9]+]] = fir.embox [[ss]] ! CHECK: [[argbox:%[0-9]+]] = fir.convert [[box]] ! CHECK: fir.call @_FortranARandomSeedSize([[argbox]] call random_seed(size=ss) print*, 'size: ', ss - ! CHECK: fir.call @_FortranARandomSeedDefaultPut() {{.*}}: () -> none + ! CHECK: fir.call @_FortranARandomSeedDefaultPut() {{.*}}: () -> () call random_seed() ! CHECK: [[box:%[0-9]+]] = fir.embox [[rr]] ! CHECK: [[argbox:%[0-9]+]] = fir.convert [[box]] diff --git a/flang/test/Lower/Intrinsics/random_number_real16.f90 b/flang/test/Lower/Intrinsics/random_number_real16.f90 index 76fed258d8afc..060574d5b3b3f 100644 --- a/flang/test/Lower/Intrinsics/random_number_real16.f90 +++ b/flang/test/Lower/Intrinsics/random_number_real16.f90 @@ -2,14 +2,14 @@ ! RUN: %flang_fc1 -emit-fir %s -o - | FileCheck %s ! CHECK-LABEL: func @_QPtest_scalar -! CHECK: fir.call @_FortranARandomNumber16({{.*}}){{.*}}: (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranARandomNumber16({{.*}}){{.*}}: (!fir.box, !fir.ref, i32) -> () subroutine test_scalar real(16) :: r call random_number(r) end ! CHECK-LABEL: func @_QPtest_array -! CHECK: fir.call @_FortranARandomNumber16({{.*}}){{.*}}: (!fir.box, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranARandomNumber16({{.*}}){{.*}}: (!fir.box, !fir.ref, i32) -> () subroutine test_array(r) real(16) :: r(:) call random_number(r) diff --git a/flang/test/Lower/Intrinsics/rename.f90 b/flang/test/Lower/Intrinsics/rename.f90 index 75042217c6202..66fab9efae9f6 100644 --- a/flang/test/Lower/Intrinsics/rename.f90 +++ b/flang/test/Lower/Intrinsics/rename.f90 @@ -20,7 +20,7 @@ subroutine test_rename(src, dst) !CHECK-NEXT: %[[src:.*]] = fir.convert %[[srcBox]] : (!fir.box>) -> !fir.box !CHECK-NEXT: %[[dst:.*]] = fir.convert %[[dstBox]] : (!fir.box>) -> !fir.box !CHECK-NEXT: %[[loc:.*]] = fir.convert %[[sourceFileConv:.*]]: (!fir.ref>) -> !fir.ref - !CHECK-NEXT: %[[result:.*]] = fir.call @_FortranARename(%[[src]], %[[dst]], %[[statusBox]], %[[loc]], %[[c10_i32]]) fastmath : (!fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + !CHECK-NEXT: fir.call @_FortranARename(%[[src]], %[[dst]], %[[statusBox]], %[[loc]], %[[c10_i32]]) fastmath : (!fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () end subroutine test_rename !CHECK-LABEL: func.func @_QPtest_rename_status @@ -47,5 +47,5 @@ subroutine test_rename_status(src, dst) !CHECK-NEXT: %[[dst:.*]] = fir.convert %[[dstBox]] : (!fir.box>) -> !fir.box !CHECK-NEXT: %[[status:.*]] = fir.convert %[[statusBox]] : (!fir.box) -> !fir.box !CHECK-NEXT: %[[loc:.*]] = fir.convert %[[sourceFileConv:.*]]: (!fir.ref>) -> !fir.ref - !CHECK-NEXT: %[[result:.*]] = fir.call @_FortranARename(%[[src]], %[[dst]], %[[status]], %[[loc]], %[[c10_i32]]) fastmath : (!fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + !CHECK-NEXT: fir.call @_FortranARename(%[[src]], %[[dst]], %[[status]], %[[loc]], %[[c10_i32]]) fastmath : (!fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () end subroutine test_rename_status diff --git a/flang/test/Lower/Intrinsics/repeat.f90 b/flang/test/Lower/Intrinsics/repeat.f90 index dd37fbbc8c54c..e95221405b664 100644 --- a/flang/test/Lower/Intrinsics/repeat.f90 +++ b/flang/test/Lower/Intrinsics/repeat.f90 @@ 
-13,7 +13,7 @@ subroutine repeat_test(c, n) ! CHECK-DAG: %[[cBox:.*]] = fir.embox %[[c]]#0 typeparams %[[c]]#1 : (!fir.ref>, index) -> !fir.box> ! CHECK-DAG: %[[cBoxNone:.*]] = fir.convert %[[cBox]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[resBox:.*]] = fir.convert %[[tmpBox]] : (!fir.ref>>>) -> !fir.ref> - ! CHECK: fir.call @{{.*}}Repeat(%[[resBox]], %[[cBoxNone]], %[[n]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> none + ! CHECK: fir.call @{{.*}}Repeat(%[[resBox]], %[[cBoxNone]], %[[n]], {{.*}}, {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> () ! CHECK-DAG: %[[tmpAddr:.*]] = fir.box_addr ! CHECK-DAG: fir.box_elesize ! CHECK: fir.call @{{.*}}bar_repeat_test diff --git a/flang/test/Lower/Intrinsics/reshape.f90 b/flang/test/Lower/Intrinsics/reshape.f90 index 6fe95963b7acc..4f4f50965dd1b 100644 --- a/flang/test/Lower/Intrinsics/reshape.f90 +++ b/flang/test/Lower/Intrinsics/reshape.f90 @@ -19,7 +19,7 @@ subroutine reshape_test(x, source, pd, sh, ord) ! CHECK-DAG: %[[a11:.*]] = fir.convert %[[arg2]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a12:.*]] = fir.convert %[[a3]] : (!fir.box>) -> !fir.box x = reshape(source, sh, pd, ord) - ! CHECK: %{{.*}} = fir.call @_FortranAReshape(%[[a8]], %[[a9]], %[[a10]], %[[a11]], %[[a12]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAReshape(%[[a8]], %[[a9]], %[[a10]], %[[a11]], %[[a12]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-DAG: %[[a15:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a18:.*]] = fir.box_addr %[[a15]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a18]] @@ -52,7 +52,7 @@ subroutine test_reshape_optional(pad, order, source, shape) ! CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_25]], %[[VAL_26]], %[[VAL_27]] : !fir.box>> ! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_20]] : (!fir.box>>) -> !fir.box ! 
CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_28]] : (!fir.box>>) -> !fir.box - ! CHECK: %[[VAL_41:.*]] = fir.call @_FortranAReshape({{.*}}, {{.*}}, %{{.*}}, %[[VAL_38]], %[[VAL_39]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAReshape({{.*}}, {{.*}}, %{{.*}}, %[[VAL_38]], %[[VAL_39]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () end subroutine ! CHECK-LABEL: func.func @_QPtest_reshape_shape_slice() { @@ -73,7 +73,7 @@ subroutine test_reshape_optional(pad, order, source, shape) ! CHECK: %[[VAL_15:.*]] = fir.embox %[[VAL_1]](%[[VAL_13]]) [%[[VAL_14]]] : (!fir.ref>, !fir.shape<1>, !fir.slice<1>) -> !fir.box> ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_6]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_15]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_30:.*]] = fir.call @_FortranAReshape(%{{.*}}, %[[VAL_25]], %[[VAL_26]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAReshape(%{{.*}}, %[[VAL_25]], %[[VAL_26]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () subroutine test_reshape_shape_slice() integer, parameter :: i = 1 real :: tmp(4) = [1,2,3,4] diff --git a/flang/test/Lower/Intrinsics/scan.f90 b/flang/test/Lower/Intrinsics/scan.f90 index 2dd6933bc46fa..1773a395f1b24 100644 --- a/flang/test/Lower/Intrinsics/scan.f90 +++ b/flang/test/Lower/Intrinsics/scan.f90 @@ -15,7 +15,7 @@ integer function scan_test(s1, s2) ! CHECK-DAG: %[[backBox:.*]] = fir.convert %[[backOptBox]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[kindConstant:.*]] = arith.constant 4 : i32 ! CHECK-DAG: %[[resBox:.*]] = fir.convert %[[tmpBox:.*]] : (!fir.ref>>) -> !fir.ref> -! 
CHECK: fir.call @{{.*}}Scan(%[[resBox]], %[[cBoxNone]], %[[cBoxNone2]], %[[backBox]], %[[kindConstant]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +! CHECK: fir.call @{{.*}}Scan(%[[resBox]], %[[cBoxNone]], %[[cBoxNone2]], %[[backBox]], %[[kindConstant]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () scan_test = scan(s1, s2, kind=4) ! CHECK-DAG: %[[tmpAddr:.*]] = fir.box_addr ! CHECK: fir.freemem %[[tmpAddr]] diff --git a/flang/test/Lower/Intrinsics/sleep.f90 b/flang/test/Lower/Intrinsics/sleep.f90 index c4a7b381602ca..0b7d11a803650 100644 --- a/flang/test/Lower/Intrinsics/sleep.f90 +++ b/flang/test/Lower/Intrinsics/sleep.f90 @@ -6,22 +6,22 @@ subroutine test_sleep() call sleep(1_2) ! CHECK: %[[VAL_0:.*]] = arith.constant 1 : i16 ! CHECK: %[[VAL_1:.*]] = fir.convert %[[VAL_0]] : (i16) -> i64 -! CHECK: %[[VAL_2:.*]] = fir.call @_FortranASleep(%[[VAL_1]]) fastmath : (i64) -> none +! CHECK: fir.call @_FortranASleep(%[[VAL_1]]) fastmath : (i64) -> () call sleep(1_4) ! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i32) -> i64 -! CHECK: %[[VAL_5:.*]] = fir.call @_FortranASleep(%[[VAL_4]]) fastmath : (i64) -> none +! CHECK: fir.call @_FortranASleep(%[[VAL_4]]) fastmath : (i64) -> () call sleep(1_8) ! CHECK: %[[VAL_6:.*]] = arith.constant 1 : i64 ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i64) -> i64 -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranASleep(%[[VAL_7]]) fastmath : (i64) -> none +! CHECK: fir.call @_FortranASleep(%[[VAL_7]]) fastmath : (i64) -> () call sleep(1_16) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i128 ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i128) -> i64 -! CHECK: %[[VAL_11:.*]] = fir.call @_FortranASleep(%[[VAL_10]]) fastmath : (i64) -> none +! CHECK: fir.call @_FortranASleep(%[[VAL_10]]) fastmath : (i64) -> () end ! CHECK: return ! 
CHECK: } diff --git a/flang/test/Lower/Intrinsics/spread.f90 b/flang/test/Lower/Intrinsics/spread.f90 index d58725aba6987..3c20ec29ebc11 100644 --- a/flang/test/Lower/Intrinsics/spread.f90 +++ b/flang/test/Lower/Intrinsics/spread.f90 @@ -25,7 +25,7 @@ subroutine spread_test(s,d,n,r) ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a3]] : (!fir.box) -> !fir.box ! CHECK-DAG: %[[a10:.*]] = fir.convert %[[a2]] : (i32) -> i64 r = spread(s,d,n) - ! CHECK: %{{.*}} = fir.call @_FortranASpread(%[[a8]], %[[a9]], %[[a1]], %[[a10]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranASpread(%[[a8]], %[[a9]], %[[a1]], %[[a10]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> () ! CHECK-DAG: %[[a13:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a15:.*]] = fir.box_addr %[[a13]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a15]] @@ -43,7 +43,7 @@ subroutine spread_test2(s,d,n,r) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a2]] : (i32) -> i64 r = spread(s,d,n) - ! CHECK: %{{.*}} = fir.call @_FortranASpread(%[[a7]], %[[a8]], %[[a1]], %[[a9]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranASpread(%[[a7]], %[[a8]], %[[a1]], %[[a9]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> () ! CHECK-DAG: %[[a12:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a15:.*]] = fir.box_addr %[[a12]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a15:.*]] @@ -62,7 +62,7 @@ subroutine spread_test_polymorphic_source(p) ! CHECK: fir.store %[[embox]] to %[[res]] : !fir.ref>>> ! CHECK: %[[res_box_none:.*]] = fir.convert %[[res]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[source_box_none:.*]] = fir.convert %[[source_box]] : (!fir.class>) -> !fir.box -! 
CHECK: %{{.*}} = fir.call @_FortranASpread(%[[res_box_none]], %[[source_box_none]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranASpread(%[[res_box_none]], %[[source_box_none]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> () end subroutine diff --git a/flang/test/Lower/Intrinsics/storage_size.f90 b/flang/test/Lower/Intrinsics/storage_size.f90 index b0c9d51f95328..3dc135bbf6fbc 100644 --- a/flang/test/Lower/Intrinsics/storage_size.f90 +++ b/flang/test/Lower/Intrinsics/storage_size.f90 @@ -29,7 +29,7 @@ integer function unlimited_polymorphic_pointer(p) result(size) ! CHECK: %[[C0:.*]] = arith.constant 0 : i64 ! CHECK: %[[IS_NULL_ADDR:.*]] = arith.cmpi eq, %[[P_ADDR_I64]], %[[C0]] : i64 ! CHECK: fir.if %[[IS_NULL_ADDR]] { -! CHECK: %{{.*}} = fir.call @_FortranAReportFatalUserError(%{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAReportFatalUserError(%{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref, !fir.ref, i32) -> () ! CHECK: } ! CHECK: %[[LOAD_P:.*]] = fir.load %[[P]] : !fir.ref>> ! CHECK: %[[ELE_SIZE:.*]] = fir.box_elesize %[[LOAD_P]] : (!fir.class>) -> i32 @@ -53,7 +53,7 @@ integer function unlimited_polymorphic_allocatable(p) result(size) ! CHECK: %[[C0:.*]] = arith.constant 0 : i64 ! CHECK: %[[IS_NULL_ADDR:.*]] = arith.cmpi eq, %[[P_ADDR_I64]], %[[C0]] : i64 ! CHECK: fir.if %[[IS_NULL_ADDR]] { -! CHECK: %{{.*}} = fir.call @_FortranAReportFatalUserError(%{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAReportFatalUserError(%{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref, !fir.ref, i32) -> () ! CHECK: } ! CHECK: %[[LOAD_P:.*]] = fir.load %[[P]] : !fir.ref>> ! 
CHECK: %[[ELE_SIZE:.*]] = fir.box_elesize %[[LOAD_P]] : (!fir.class>) -> i32 diff --git a/flang/test/Lower/Intrinsics/sum.f90 b/flang/test/Lower/Intrinsics/sum.f90 index ab5da34b3c7bb..785f20b861f13 100644 --- a/flang/test/Lower/Intrinsics/sum.f90 +++ b/flang/test/Lower/Intrinsics/sum.f90 @@ -25,7 +25,7 @@ subroutine sum_test2(a,r) ! CHECK-DAG: %[[a7:.*]] = fir.convert %[[arg0]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a1]] : (!fir.box) -> !fir.box r = sum(a,dim=2) -! CHECK: %{{.*}} = fir.call @_FortranASumDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none +! CHECK: fir.call @_FortranASumDim(%[[a6]], %[[a7]], %[[c2_i32]], %{{.*}}, %{{.*}}, %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () ! CHECK-DAG: %[[a11:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK-DAG: %[[a13:.*]] = fir.box_addr %[[a11]] : (!fir.box>>) -> !fir.heap> ! CHECK-DAG: fir.freemem %[[a13]] @@ -42,7 +42,7 @@ subroutine sum_test2(a,r) ! CHECK-DAG: %[[a8:.*]] = fir.convert %[[c0]] : (index) -> i32 ! CHECK-DAG: %[[a9:.*]] = fir.convert %[[a3]] : (!fir.box) -> !fir.box sum_test3 = sum(a) -! CHECK: %{{.*}} = fir.call @_FortranACppSumComplex4(%[[a0]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]], %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> none +! CHECK: fir.call @_FortranACppSumComplex4(%[[a0]], %[[a6]], %{{.*}}, %{{.*}}, %[[a8]], %[[a9]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> () end function ! CHECK-LABEL: func @_QPsum_test4( diff --git a/flang/test/Lower/Intrinsics/system-optional.f90 b/flang/test/Lower/Intrinsics/system-optional.f90 index 8001e76fb93bd..55f63a913a532 100644 --- a/flang/test/Lower/Intrinsics/system-optional.f90 +++ b/flang/test/Lower/Intrinsics/system-optional.f90 @@ -27,7 +27,7 @@ subroutine all_args(command, exitstat) ! CHECK-NEXT: %[[command:.*]] = fir.convert %[[commandBox]] : (!fir.box>) -> !fir.box ! 
CHECK-NEXT: %[[exitstat:.*]] = fir.convert %[[exitstatRealBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box -! CHECK: %[[VAL_16:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_15:.*]], %[[c9_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_15:.*]], %[[c9_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return ! CHECK-NEXT: } diff --git a/flang/test/Lower/Intrinsics/system.f90 b/flang/test/Lower/Intrinsics/system.f90 index 87ac8d9c7e6f9..d2a95acf2b120 100644 --- a/flang/test/Lower/Intrinsics/system.f90 +++ b/flang/test/Lower/Intrinsics/system.f90 @@ -23,7 +23,7 @@ subroutine all_args(command, exitstat) ! CHECK-NEXT: %[[command:.*]] = fir.convert %[[commandBox]] : (!fir.box>) -> !fir.box ! CHECK-NEXT: %[[exitstat:.*]] = fir.convert %[[exitstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box -! CHECK: %[[VAL_13:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_12:.*]], %[[c9_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_12:.*]], %[[c9_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return ! CHECK-NEXT: } end subroutine all_args @@ -47,7 +47,7 @@ subroutine only_command(command) ! CHECK: %[[c35_i32:.*]] = arith.constant {{[0-9]+}} : i32 ! CHECK-NEXT: %[[command:.*]] = fir.convert %[[commandBox]] : (!fir.box>) -> !fir.box ! 
CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box -! CHECK: %[[VAL_12:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[absentBox]], %[[cmdstat]], %[[absentBox2]], %[[VAL_11:.*]], %[[c35_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[absentBox]], %[[cmdstat]], %[[absentBox2]], %[[VAL_11:.*]], %[[c35_i32]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: return ! CHECK-NEXT: } end subroutine only_command @@ -77,7 +77,7 @@ subroutine as_function(command) ! CHECK-NEXT: %[[command:.*]] = fir.convert %[[commandBox]] : (!fir.box>) -> !fir.box ! CHECK-NEXT: %[[exitstat:.*]] = fir.convert %[[exitstatBox]] : (!fir.box) -> !fir.box ! CHECK-NEXT: %[[cmdstat:.*]] = fir.convert %[[cmdstatBox]] : (!fir.box) -> !fir.box -! CHECK: %[[VAL_13:.*]] = fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_12:.*]], %[[LINE_NO]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAExecuteCommandLine(%[[command]], %[[true]], %[[exitstat]], %[[cmdstat]], %[[absentBox]], %[[VAL_12:.*]], %[[LINE_NO]]) fastmath : (!fir.box, i1, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: %[[RET_ADDR:.*]] = fir.box_addr %[[exitstatBox]] : (!fir.box) -> !fir.ref ! CHECK-NEXT: %[[RET:.*]] = fir.load %[[RET_ADDR]] : !fir.ref ! CHECK-NEXT: hlfir.assign %[[RET]] to %[[exitstatDeclare]]#0 : i32, !fir.ref diff --git a/flang/test/Lower/Intrinsics/transfer.f90 b/flang/test/Lower/Intrinsics/transfer.f90 index 812946f106476..b75fe2e826561 100644 --- a/flang/test/Lower/Intrinsics/transfer.f90 +++ b/flang/test/Lower/Intrinsics/transfer.f90 @@ -15,7 +15,7 @@ subroutine trans_test(store, word) ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]] : (!fir.box) -> !fir.box ! 
CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_4]] : (!fir.box) -> !fir.box ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_7]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[VAL_13:.*]] = fir.call @_FortranATransfer(%[[VAL_9]], %[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %[[VAL_8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranATransfer(%[[VAL_9]], %[[VAL_10]], %[[VAL_11]], %[[VAL_12]], %[[VAL_8]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_2]] : !fir.ref>> ! CHECK: %[[VAL_15:.*]] = fir.box_addr %[[VAL_14]] : (!fir.box>) -> !fir.heap ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.heap @@ -50,7 +50,7 @@ subroutine trans_test(store, word) ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_9]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_14]] : (!fir.ref>) -> !fir.ref ! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_6]] : (i32) -> i64 - ! CHECK: %[[VAL_21:.*]] = fir.call @_FortranATransferSize(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_15]], %[[VAL_20]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32, i64) -> none + ! CHECK: fir.call @_FortranATransferSize(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_15]], %[[VAL_20]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32, i64) -> () ! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> ! CHECK: %[[VAL_23:.*]] = arith.constant 0 : index ! CHECK: %[[VAL_24:.*]]:3 = fir.box_dims %[[VAL_22]], %[[VAL_23]] : (!fir.box>>, index) -> (index, index, index) @@ -94,7 +94,7 @@ integer function trans_test3(p) ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_5]] : (!fir.box) -> !fir.box ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_6]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_9]] : (!fir.ref>) -> !fir.ref - ! 
CHECK: %[[VAL_15:.*]] = fir.call @_FortranATransfer(%[[VAL_11]], %[[VAL_12]], %[[VAL_13]], %[[VAL_14]], %[[VAL_10]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranATransfer(%[[VAL_11]], %[[VAL_12]], %[[VAL_13]], %[[VAL_14]], %[[VAL_10]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> ! CHECK: %[[VAL_17:.*]] = fir.box_addr %[[VAL_16]] : (!fir.box>>) -> !fir.heap> ! CHECK: %[[VAL_18:.*]] = fir.embox %[[VAL_3]] : (!fir.ref>) -> !fir.box> @@ -104,7 +104,7 @@ integer function trans_test3(p) ! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_16]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_19]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[VAL_24:.*]] = fir.call @_FortranAAssign(%[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_20]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAssign(%[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_20]]) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.freemem %[[VAL_17]] ! CHECK: %[[VAL_25:.*]] = fir.field_index x, !fir.type<_QFtrans_test3Tobj{x:i32}> ! CHECK: %[[VAL_26:.*]] = fir.coordinate_of %[[VAL_3]], %[[VAL_25]] : (!fir.ref>, !fir.field) -> !fir.ref diff --git a/flang/test/Lower/Intrinsics/transpose.f90 b/flang/test/Lower/Intrinsics/transpose.f90 index 41c94edb77e7b..cf2c2ba5bde83 100644 --- a/flang/test/Lower/Intrinsics/transpose.f90 +++ b/flang/test/Lower/Intrinsics/transpose.f90 @@ -15,7 +15,7 @@ subroutine transpose_test(mat) ! CHECK: fir.store %[[resultBox]] to %[[resultDescr]] : !fir.ref>>> ! CHECK: %[[resultOpaque:.*]] = fir.convert %[[resultDescr]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[sourceOpaque:.*]] = fir.convert %[[sourceBox]] : (!fir.box>) -> !fir.box -! 
CHECK: %{{.*}} = fir.call @_FortranATranspose(%[[resultOpaque]], %[[sourceOpaque]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranATranspose(%[[resultOpaque]], %[[sourceOpaque]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[tmp1:.*]] = fir.load %[[resultDescr]] : !fir.ref>>> ! CHECK: %[[tmp2:.*]] = fir.box_addr %[[tmp1]] : (!fir.box>>) -> !fir.heap> ! CHECK: %[[tmp3:.*]] = fir.convert %[[tmp2]] : (!fir.heap>) -> !fir.ref> diff --git a/flang/test/Lower/Intrinsics/trim.f90 b/flang/test/Lower/Intrinsics/trim.f90 index 9ecb7945097be..c88e07f2a4e25 100644 --- a/flang/test/Lower/Intrinsics/trim.f90 +++ b/flang/test/Lower/Intrinsics/trim.f90 @@ -10,7 +10,7 @@ subroutine trim_test(c) ! CHECK-DAG: %[[cBox:.*]] = fir.embox %[[c]]#0 typeparams %[[c]]#1 : (!fir.ref>, index) -> !fir.box> ! CHECK-DAG: %[[cBoxNone:.*]] = fir.convert %[[cBox]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[resBox:.*]] = fir.convert %[[tmpBox]] : (!fir.ref>>>) -> !fir.ref> - ! CHECK: fir.call @{{.*}}Trim(%[[resBox]], %[[cBoxNone]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @{{.*}}Trim(%[[resBox]], %[[cBoxNone]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK-DAG: %[[tmpAddr:.*]] = fir.box_addr ! CHECK-DAG: fir.box_elesize ! CHECK: fir.call @{{.*}}bar_trim_test diff --git a/flang/test/Lower/Intrinsics/ubound01.f90 b/flang/test/Lower/Intrinsics/ubound01.f90 index e933075cc0bf2..bd7961553da83 100644 --- a/flang/test/Lower/Intrinsics/ubound01.f90 +++ b/flang/test/Lower/Intrinsics/ubound01.f90 @@ -20,4 +20,4 @@ subroutine s2(a,n,n2) ! CHECK-SAME: %[[ARG0:.*]]: !fir.box> ! CHECK: %[[BOX:.*]] = fir.rebox %[[ARG0]](%{{.*}}) : (!fir.box>, !fir.shift<2>) -> !fir.box> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[BOX]] : (!fir.box>) -> !fir.box -! 
CHECK: %{{.*}} = fir.call @_FortranAUbound(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box, i32, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAUbound(%{{.*}}, %[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.llvm_ptr, !fir.box, i32, !fir.ref, i32) -> () diff --git a/flang/test/Lower/Intrinsics/verify.f90 b/flang/test/Lower/Intrinsics/verify.f90 index eb1454c001f70..7d0f97023e63f 100644 --- a/flang/test/Lower/Intrinsics/verify.f90 +++ b/flang/test/Lower/Intrinsics/verify.f90 @@ -21,7 +21,7 @@ integer function verify_test(s1, s2) ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_9]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_7]] : (!fir.box) -> !fir.box ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_12]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_19:.*]] = fir.call @_FortranAVerify(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]], %[[VAL_6]], %[[VAL_18]], %[[VAL_13]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAVerify(%[[VAL_14]], %[[VAL_15]], %[[VAL_16]], %[[VAL_17]], %[[VAL_6]], %[[VAL_18]], %[[VAL_13]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_2]] : !fir.ref>> ! CHECK: %[[VAL_21:.*]] = fir.box_addr %[[VAL_20]] : (!fir.box>) -> !fir.heap ! CHECK: %[[VAL_22:.*]] = fir.load %[[VAL_21]] : !fir.heap diff --git a/flang/test/Lower/OpenACC/stop-stmt-in-region.f90 b/flang/test/Lower/OpenACC/stop-stmt-in-region.f90 index 2694a1531d169..89d0d4a484cc1 100644 --- a/flang/test/Lower/OpenACC/stop-stmt-in-region.f90 +++ b/flang/test/Lower/OpenACC/stop-stmt-in-region.f90 @@ -8,7 +8,7 @@ ! CHECK: %[[VAL_0:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_1:.*]] = arith.constant false ! CHECK: %[[VAL_2:.*]] = arith.constant false -! CHECK: %[[VAL_3:.*]] = fir.call @_FortranAStopStatement(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) {{.*}} : (i32, i1, i1) -> none +! 
CHECK: fir.call @_FortranAStopStatement(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) {{.*}} : (i32, i1, i1) -> () ! CHECK: acc.yield ! CHECK: } ! CHECK: return @@ -26,7 +26,7 @@ subroutine test_stop_in_region1() ! CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_2:.*]] = arith.constant false ! CHECK: %[[VAL_3:.*]] = arith.constant false -! CHECK: %[[VAL_4:.*]] = fir.call @_FortranAStopStatement(%[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) {{.*}} : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) {{.*}} : (i32, i1, i1) -> () ! CHECK: acc.yield ! CHECK: } ! CHECK: return diff --git a/flang/test/Lower/OpenMP/DelayedPrivatization/wsloop.f90 b/flang/test/Lower/OpenMP/DelayedPrivatization/wsloop.f90 index 66fd120085c78..c98850b8000d3 100644 --- a/flang/test/Lower/OpenMP/DelayedPrivatization/wsloop.f90 +++ b/flang/test/Lower/OpenMP/DelayedPrivatization/wsloop.f90 @@ -1,6 +1,6 @@ -! RUN: %flang_fc1 -emit-hlfir -fopenmp -mmlir --openmp-enable-delayed-privatization-staging \ +! RUN: %flang_fc1 -emit-hlfir -fopenmp \ ! RUN: -o - %s 2>&1 | FileCheck %s -! RUN: bbc -emit-hlfir -fopenmp --openmp-enable-delayed-privatization-staging -o - %s 2>&1 \ +! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 \ ! RUN: | FileCheck %s subroutine wsloop_private diff --git a/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90 b/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90 index 77a1304f39a48..10879c53dc0c5 100644 --- a/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90 +++ b/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90 @@ -9,11 +9,10 @@ ! The string "EXPECTED" denotes the expected FIR ! CHECK: omp.parallel private(@{{.*}} %{{.*}} -> %[[PRIVATE_Y:.*]], @{{.*}} %{{.*}} -> %[[PRIVATE_Y:.*]] : !fir.ref, !fir.ref) { -! CHECK: %[[TEMP:.*]] = fir.alloca i32 {bindc_name = "x", pinned, {{.*}}} ! CHECK: %[[const_1:.*]] = arith.constant 1 : i32 ! CHECK: %[[const_2:.*]] = arith.constant 10 : i32 ! 
CHECK: %[[const_3:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[TEMP:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG:.*]]) : i32 = (%[[const_1]]) to (%[[const_2]]) inclusive step (%[[const_3]]) { ! CHECK: fir.store %[[ARG]] to %[[TEMP]] : !fir.ref ! EXPECTED: %[[temp_1:.*]] = fir.load %[[PRIVATE_Z]] : !fir.ref diff --git a/flang/test/Lower/OpenMP/associate.f90 b/flang/test/Lower/OpenMP/associate.f90 index 4964890a6842c..d497b4ade782e 100644 --- a/flang/test/Lower/OpenMP/associate.f90 +++ b/flang/test/Lower/OpenMP/associate.f90 @@ -6,12 +6,12 @@ !CHECK: omp.parallel { !CHECK-NOT: hlfir.declare {{.*}} {uniq_name = "_QFtest_parallel_assocEa"} !CHECK-NOT: hlfir.declare {{.*}} {uniq_name = "_QFtest_parallel_assocEb"} -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK: } !CHECK: } !CHECK: omp.parallel {{.*}} { !CHECK-NOT: hlfir.declare {{.*}} {uniq_name = "_QFtest_parallel_assocEb"} -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK: } !CHECK: } subroutine test_parallel_assoc() diff --git a/flang/test/Lower/OpenMP/copyin.f90 b/flang/test/Lower/OpenMP/copyin.f90 index 9e9ccf8e3d914..5ad45f1f5ba6f 100644 --- a/flang/test/Lower/OpenMP/copyin.f90 +++ b/flang/test/Lower/OpenMP/copyin.f90 @@ -154,14 +154,13 @@ subroutine copyin_derived_type() ! CHECK: omp.barrier -! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFcombined_parallel_worksharing_loopEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[VAL_6:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_14:.*]]) : i32 = (%[[VAL_11]]) to (%[[VAL_12]]) inclusive step (%[[VAL_13]]) { +! 
CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFcombined_parallel_worksharing_loopEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_14]] to %[[VAL_7]]#1 : !fir.ref ! CHECK: fir.call @_QPsub4(%[[VAL_9]]#1) fastmath : (!fir.ref) -> () ! CHECK: omp.yield @@ -321,15 +320,12 @@ subroutine common_1() ! CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref ! CHECK: hlfir.assign %[[VAL_33]] to %[[VAL_31]]#0 : i32, !fir.ref ! CHECK: omp.barrier - -! CHECK: %[[VAL_19:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFcommon_2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_26]]#0 : !fir.ref ! CHECK: %[[VAL_36:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[VAL_19:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_37:.*]]) : i32 = (%[[VAL_34]]) to (%[[VAL_35]]) inclusive step (%[[VAL_36]]) { +! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFcommon_2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_37]] to %[[VAL_20]]#1 : !fir.ref ! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref ! 
CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_20]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/critical.f90 b/flang/test/Lower/OpenMP/critical.f90 index 051d378210646..99a4426ab0453 100644 --- a/flang/test/Lower/OpenMP/critical.f90 +++ b/flang/test/Lower/OpenMP/critical.f90 @@ -38,11 +38,10 @@ subroutine predetermined_privatization() !CHECK: omp.parallel !$omp parallel do - !CHECK: %[[PRIV_I_ALLOC:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - !CHECK: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I_ALLOC]] do i = 2, 10 - !CHECK: omp.wsloop + !CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[PRIV_I_ALLOC:.*]] : !fir.ref) !CHECK: omp.loop_nest (%[[IV:[^[:space:]]+]]) + !CHECK: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I_ALLOC]] !CHECK: fir.store %[[IV]] to %[[PRIV_I_DECL]]#1 !CHECK: omp.critical !$omp critical diff --git a/flang/test/Lower/OpenMP/default-clause-byref.f90 b/flang/test/Lower/OpenMP/default-clause-byref.f90 index 654c13ada9e39..10e62005f42ba 100644 --- a/flang/test/Lower/OpenMP/default-clause-byref.f90 +++ b/flang/test/Lower/OpenMP/default-clause-byref.f90 @@ -346,7 +346,7 @@ subroutine skipped_default_clause_checks() type(it)::iii !CHECK: omp.parallel {{.*}} { -!CHECK: omp.wsloop reduction(byref @min_byref_i32 %[[VAL_Z_DECLARE]]#0 -> %[[PRV:.+]] : !fir.ref) { +!CHECK: omp.wsloop private({{.*}}) reduction(byref @min_byref_i32 %[[VAL_Z_DECLARE]]#0 -> %[[PRV:.+]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[ARG:.*]]) {{.*}} { !CHECK: omp.yield !CHECK: } diff --git a/flang/test/Lower/OpenMP/default-clause.f90 b/flang/test/Lower/OpenMP/default-clause.f90 index c004813a911f7..fcc8d033eea0f 100644 --- a/flang/test/Lower/OpenMP/default-clause.f90 +++ b/flang/test/Lower/OpenMP/default-clause.f90 @@ -284,16 +284,13 @@ subroutine nested_default_clause_test4 !CHECK-LABEL: func @_QPnested_default_clause_test5 !CHECK: omp.parallel { -!CHECK: %[[X_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "x", pinned, uniq_name = "_QFnested_default_clause_test5Ex"} 
-!CHECK: %[[X_DECLARE:.*]]:2 = hlfir.declare %[[X_ALLOCA]] {{.*}} - -!CHECK: %[[LOOP_VAR_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR_ALLOCA]] {{.*}} - !CHECK: %[[CONST_LB:.*]] = arith.constant 1 : i32 !CHECK: %[[CONST_UB:.*]] = arith.constant 50 : i32 !CHECK: %[[CONST_STEP:.*]] = arith.constant 1 : i32 +! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[X_ALLOCA:.*]], @{{.*}} %{{.*}} -> %[[LOOP_VAR_ALLOCA:.*]] : !fir.ref, !fir.ref) { !CHECK: omp.loop_nest (%[[ARG:.*]]) : i32 = (%[[CONST_LB]]) to (%[[CONST_UB]]) inclusive step (%[[CONST_STEP]]) { +!CHECK: %[[X_DECLARE:.*]]:2 = hlfir.declare %[[X_ALLOCA]] {{.*}} +!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR_ALLOCA]] {{.*}} !CHECK: fir.store %[[ARG]] to %[[LOOP_VAR_DECLARE]]#1 : !fir.ref !CHECK: %[[LOADED_X:.*]] = fir.load %[[X_DECLARE]]#0 : !fir.ref !CHECK: %[[CONST:.*]] = arith.constant 1 : i32 @@ -321,13 +318,12 @@ subroutine nested_default_clause_test5 !CHECK: %[[Z_VAR_DECLARE:.*]]:2 = hlfir.declare %[[Z_VAR]] {{.*}} -!CHECK: %[[LOOP_VAR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR]] {{.*}} - !CHECK: %[[CONST_LB:.*]] = arith.constant 1 : i32 !CHECK: %[[CONST_UB:.*]] = arith.constant 10 : i32 !CHECK: %[[CONST_STEP:.*]] = arith.constant 1 : i32 +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[LOOP_VAR:.*]] : !fir.ref) { !CHECK: omp.loop_nest (%[[ARG:.*]]) : i32 = (%[[CONST_LB]]) to (%[[CONST_UB]]) inclusive step (%[[CONST_STEP]]) { +!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR]] {{.*}} !CHECK: fir.store %[[ARG]] to %[[LOOP_VAR_DECLARE]]#1 : !fir.ref !CHECK: %[[LOADED_X:.*]] = fir.load %[[X_VAR_DECLARE]]#0 : !fir.ref !CHECK: %[[CONST:.*]] = arith.constant 1 : i32 @@ -386,7 +382,7 @@ subroutine skipped_default_clause_checks() type(it)::iii !CHECK: omp.parallel {{.*}} { -!CHECK: omp.wsloop reduction(@min_i32 %[[VAL_Z_DECLARE]]#0 -> %[[PRV:.+]] : !fir.ref) { +!CHECK: omp.wsloop private({{.*}}) reduction(@min_i32 %[[VAL_Z_DECLARE]]#0 -> %[[PRV:.+]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[ARG:.*]]) {{.*}} { !CHECK: omp.yield !CHECK: } diff --git a/flang/test/Lower/OpenMP/delayed-privatization-default-init.f90 b/flang/test/Lower/OpenMP/delayed-privatization-default-init.f90 index 0eeebe0afea54..022b592db74b8 100644 --- a/flang/test/Lower/OpenMP/delayed-privatization-default-init.f90 +++ b/flang/test/Lower/OpenMP/delayed-privatization-default-init.f90 @@ -41,7 +41,7 @@ subroutine delayed_privatization_default_init_firstprivate ! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.type<_QFdelayed_privatization_default_initTt{i:i32}> {bindc_name = "a", pinned, uniq_name = "_QFdelayed_privatization_default_initEa"} ! CHECK: %[[VAL_2:.*]] = fir.embox %[[VAL_1]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_2]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAInitialize(%[[VAL_6]],{{.*}} +! CHECK: fir.call @_FortranAInitialize(%[[VAL_6]],{{.*}} ! CHECK-NEXT: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFdelayed_privatization_default_initEa"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: omp.yield(%[[VAL_9]]#0 : !fir.ref>) ! 
CHECK: } diff --git a/flang/test/Lower/OpenMP/hlfir-wsloop.f90 b/flang/test/Lower/OpenMP/hlfir-wsloop.f90 index f7b0ba681efeb..786ab916d000c 100644 --- a/flang/test/Lower/OpenMP/hlfir-wsloop.f90 +++ b/flang/test/Lower/OpenMP/hlfir-wsloop.f90 @@ -10,12 +10,11 @@ subroutine simple_loop ! CHECK-DAG: %[[WS_END:.*]] = arith.constant 9 : i32 ! CHECK: omp.parallel !$OMP PARALLEL - ! CHECK-DAG: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! CHECK: %[[IV:.*]] = fir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref) -> !fir.ref - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[ALLOCA_IV:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_ST]]) to (%[[WS_END]]) inclusive step (%[[WS_ST]]) { !$OMP DO do i=1, 9 + ! CHECK: %[[IV:.*]] = fir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref) -> !fir.ref ! CHECK: fir.store %[[I]] to %[[IV:.*]] : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[IV]] : !fir.ref ! CHECK: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 diff --git a/flang/test/Lower/OpenMP/lastprivate-allocatable.f90 b/flang/test/Lower/OpenMP/lastprivate-allocatable.f90 index 6b7d849fde93c..fd8338393dd88 100644 --- a/flang/test/Lower/OpenMP/lastprivate-allocatable.f90 +++ b/flang/test/Lower/OpenMP/lastprivate-allocatable.f90 @@ -8,12 +8,11 @@ ! CHECK: fir.store %[[VAL_2]] to %[[VAL_0]] : !fir.ref>> ! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]] {fortran_attrs = {{.*}}, uniq_name = "_QFEa"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: omp.parallel { -! create original copy of private variable -! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %{{.*}} {fortran_attrs = {{.*}}, uniq_name = "_QFEa"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) -! CHECK: %[[VAL_17:.*]] = fir.alloca i32 {bindc_name = "i", pinned, uniq_name = "_QFEi"} -! 
CHECK: %[[VAL_18:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %{{.*}}, @{{.*}} %{{.*}} -> %[[VAL_17:.*]] : !fir.ref>>, !fir.ref) { ! CHECK: omp.loop_nest +! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %{{.*}} {fortran_attrs = {{.*}}, uniq_name = "_QFEa"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) +! CHECK: %[[VAL_18:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! [...] ! if this is the last iteration ! CHECK: fir.if %{{.*}} { diff --git a/flang/test/Lower/OpenMP/lastprivate-commonblock.f90 b/flang/test/Lower/OpenMP/lastprivate-commonblock.f90 index faa3d3e053f34..c059382bf634c 100644 --- a/flang/test/Lower/OpenMP/lastprivate-commonblock.f90 +++ b/flang/test/Lower/OpenMP/lastprivate-commonblock.f90 @@ -11,12 +11,10 @@ !CHECK: %[[CB_C_Y_COOR:.*]] = fir.coordinate_of %[[CB_C_REF_CVT]], %{{.*}} : (!fir.ref>, index) -> !fir.ref !CHECK: %[[CB_C_Y_ADDR:.*]] = fir.convert %[[CB_C_Y_COOR]] : (!fir.ref) -> !fir.ref !CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[CB_C_Y_ADDR]] {uniq_name = "_QFlastprivate_commonEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[PRIVATE_X_REF:.*]] = fir.alloca f32 {bindc_name = "x", pinned, uniq_name = "_QFlastprivate_commonEx"} +!CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[PRIVATE_X_REF:.*]], @{{.*}} %{{.*}} -> %[[PRIVATE_Y_REF:.*]], @{{.*}} %{{.*}} -> %{{.*}} : !{{.*}}, !{{.*}}, !{{.*}}) { +!CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) { !CHECK: %[[PRIVATE_X_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_X_REF]] {uniq_name = "_QFlastprivate_commonEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[PRIVATE_Y_REF:.*]] = fir.alloca f32 {bindc_name = "y", pinned, uniq_name = "_QFlastprivate_commonEy"} !CHECK: %[[PRIVATE_Y_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_Y_REF]] {uniq_name = "_QFlastprivate_commonEy"} : (!fir.ref) -> (!fir.ref, 
!fir.ref) -!CHECK: omp.wsloop { -!CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) { !CHECK: %[[V:.*]] = arith.addi %[[I]], %{{.*}} : i32 !CHECK: %[[C0:.*]] = arith.constant 0 : i32 !CHECK: %[[NEG_STEP:.*]] = arith.cmpi slt, %{{.*}}, %[[C0]] : i32 diff --git a/flang/test/Lower/OpenMP/lastprivate-iv.f90 b/flang/test/Lower/OpenMP/lastprivate-iv.f90 index 63a81e818bc8b..aacefd8b59c0f 100644 --- a/flang/test/Lower/OpenMP/lastprivate-iv.f90 +++ b/flang/test/Lower/OpenMP/lastprivate-iv.f90 @@ -6,14 +6,12 @@ !CHECK: %[[I2_MEM:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFlastprivate_iv_incEi"} !CHECK: %[[I2:.*]]:2 = hlfir.declare %[[I2_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - !CHECK: %[[LB:.*]] = arith.constant 4 : i32 !CHECK: %[[UB:.*]] = arith.constant 10 : i32 !CHECK: %[[STEP:.*]] = arith.constant 3 : i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[I_MEM:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[IV:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { +!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %[[IV]] to %[[I]]#1 : !fir.ref !CHECK: %[[V:.*]] = arith.addi %[[IV]], %[[STEP]] : i32 !CHECK: %[[C0:.*]] = arith.constant 0 : i32 @@ -42,15 +40,12 @@ subroutine lastprivate_iv_inc() !CHECK: %[[I2_MEM:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFlastprivate_iv_decEi"} !CHECK: %[[I2:.*]]:2 = hlfir.declare %[[I2_MEM]] {uniq_name = "_QFlastprivate_iv_decEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - -!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = 
"_QFlastprivate_iv_decEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - !CHECK: %[[LB:.*]] = arith.constant 10 : i32 !CHECK: %[[UB:.*]] = arith.constant 1 : i32 !CHECK: %[[STEP:.*]] = arith.constant -3 : i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}} -> %[[I_MEM:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[IV:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { +!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_decEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %[[IV]] to %[[I]]#1 : !fir.ref !CHECK: %[[V:.*]] = arith.addi %[[IV]], %[[STEP]] : i32 !CHECK: %[[C0:.*]] = arith.constant 0 : i32 @@ -80,7 +75,7 @@ subroutine lastprivate_iv_dec() subroutine lastprivate_iv_i1 integer*1 :: i1 i1=0 -!CHECK: omp.wsloop +!CHECK: omp.wsloop private({{.*}}) !CHECK: omp.loop_nest !CHECK: fir.if %{{.*}} { !CHECK: %[[I8_VAL:.*]] = fir.convert %{{.*}} : (i32) -> i8 diff --git a/flang/test/Lower/OpenMP/location.f90 b/flang/test/Lower/OpenMP/location.f90 index 2dab22a1c1f90..fc7dd43499863 100644 --- a/flang/test/Lower/OpenMP/location.f90 +++ b/flang/test/Lower/OpenMP/location.f90 @@ -28,7 +28,7 @@ subroutine sub_target() !CHECK-LABEL: sub_loop subroutine sub_loop() -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK-NEXT: omp.loop_nest {{.*}} { !$omp do do i=1,10 diff --git a/flang/test/Lower/OpenMP/order-clause.f90 b/flang/test/Lower/OpenMP/order-clause.f90 index a30d82979021d..1f678e02708da 100644 --- a/flang/test/Lower/OpenMP/order-clause.f90 +++ b/flang/test/Lower/OpenMP/order-clause.f90 @@ -20,15 +20,15 @@ end subroutine simd_order !CHECK-LABEL: func.func @_QPdo_order() { subroutine do_order - !CHECK: omp.wsloop order(reproducible:concurrent) { + !CHECK: omp.wsloop order(reproducible:concurrent) private({{.*}}) { !$omp do order(concurrent) do i = 1, 10 end do - !CHECK: omp.wsloop order(reproducible:concurrent) { + !CHECK: omp.wsloop order(reproducible:concurrent) private({{.*}}) { !$omp do 
order(reproducible:concurrent) do i = 1, 10 end do - !CHECK: omp.wsloop order(unconstrained:concurrent) { + !CHECK: omp.wsloop order(unconstrained:concurrent) private({{.*}}) { !$omp do order(unconstrained:concurrent) do i = 1, 10 end do diff --git a/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90 b/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90 index 86309a24f91a0..531413c124f81 100644 --- a/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90 +++ b/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90 @@ -10,12 +10,12 @@ !CHECK-DAG: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1_REF]] typeparams %[[FIVE]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFlastprivate_characterEarg1"} : (!fir.ref>, index, !fir.dscope) -> (!fir.ref>, !fir.ref>) !CHECK: omp.parallel { -!CHECK-DAG: %[[ARG1_PVT:.*]] = fir.alloca !fir.char<1,5> {bindc_name = "arg1", pinned, {{.*}}} -!CHECK-DAG: %[[ARG1_PVT_DECL:.*]]:2 = hlfir.declare %[[ARG1_PVT]] typeparams %[[FIVE]] {uniq_name = "_QFlastprivate_characterEarg1"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) ! 
Check that we are accessing the clone inside the loop -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ARG1_PVT:.*]], @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref>, !{{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { +!CHECK: %[[FIVE:.*]] = arith.constant 5 : index +!CHECK: %[[ARG1_PVT_DECL:.*]]:2 = hlfir.declare %[[ARG1_PVT]] typeparams %[[FIVE]] {uniq_name = "_QFlastprivate_characterEarg1"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) !CHECK: %[[UNIT:.*]] = arith.constant 6 : i32 !CHECK-NEXT: %[[ADDR:.*]] = fir.address_of(@_QQclX !CHECK-NEXT: %[[CVT0:.*]] = fir.convert %[[ADDR]] @@ -58,10 +58,9 @@ subroutine lastprivate_character(arg1) !CHECK: func @_QPlastprivate_int(%[[ARG1:.*]]: !fir.ref {fir.bindc_name = "arg1"}) { !CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFlastprivate_intEarg1"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK-DAG: omp.parallel { -!CHECK-DAG: %[[CLONE:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}} -!CHECK-DAG: %[[CLONE_DECL:.*]]:2 = hlfir.declare %[[CLONE]] {uniq_name = "_QFlastprivate_intEarg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[CLONE:.*]], @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref, !{{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { +!CHECK: %[[CLONE_DECL:.*]]:2 = hlfir.declare %[[CLONE]] {uniq_name = "_QFlastprivate_intEarg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
Testing last iteration check !CHECK: %[[V:.*]] = arith.addi %[[INDX_WS]], %{{.*}} : i32 @@ -98,12 +97,10 @@ subroutine lastprivate_int(arg1) !CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_intEarg1"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_intEarg2"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK: omp.parallel { -!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}} +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[CLONE1:.*]], @{{.*}} %{{.*}}#0 -> %[[CLONE2:.*]], @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref, !fir.ref, !{{.*}}) { +!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { !CHECK-DAG: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFmult_lastprivate_intEarg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}} !CHECK-DAG: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFmult_lastprivate_intEarg2"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop { -!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { ! 
Testing last iteration check !CHECK: %[[V:.*]] = arith.addi %[[INDX_WS]], %{{.*}} : i32 @@ -142,12 +139,10 @@ subroutine mult_lastprivate_int(arg1, arg2) !CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_int2Earg1"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_int2Earg2"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK: omp.parallel { -!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}} -!CHECK-DAG: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFmult_lastprivate_int2Earg2"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}} -!CHECK-DAG: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFmult_lastprivate_int2Earg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[CLONE1:.*]], @{{.*}} %{{.*}}#0 -> %[[CLONE2:.*]], @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref, !fir.ref, !{{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { +!CHECK-DAG: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFmult_lastprivate_int2Earg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK-DAG: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFmult_lastprivate_int2Earg2"} : (!fir.ref) -> (!fir.ref, !fir.ref) !Testing last iteration check !CHECK: %[[V:.*]] = arith.addi %[[INDX_WS]], %{{.*}} : i32 @@ -187,16 +182,11 @@ subroutine mult_lastprivate_int2(arg1, arg2) !CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFfirstpriv_lastpriv_intEarg2"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) !CHECK: omp.parallel { ! 
Firstprivate update -!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}} -!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[FPV_LD:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref -!CHECK: hlfir.assign %[[FPV_LD]] to %[[CLONE1_DECL]]#0 : i32, !fir.ref -! Lastprivate Allocation -!CHECK: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}} -!CHECK: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg2"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK-NOT: omp.barrier -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[CLONE1:.*]], @{{.*}} %{{.*}}#0 -> %[[CLONE2:.*]], @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref, !fir.ref, !{{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { +!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg2"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! Testing last iteration check !CHECK: %[[V:.*]] = arith.addi %[[INDX_WS]], %{{.*}} : i32 @@ -234,17 +224,14 @@ subroutine firstpriv_lastpriv_int(arg1, arg2) !CHECK: omp.parallel { ! 
Firstprivate update -!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}} -!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_int2Earg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK-NEXT: %[[FPV_LD:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref -!CHECK-NEXT: hlfir.assign %[[FPV_LD]] to %[[CLONE1_DECL]]#0 : i32, !fir.ref -!CHECK-NEXT: %[[IV:.*]] = fir.alloca i32 {bindc_name = "n", pinned, {{.*}}} -!CHECK-NEXT: hlfir.declare %[[IV]] !CHECK-NEXT: omp.barrier -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[CLONE1:.*]], @{{.*}} %{{.*}}#0 -> %[[IV:.*]] : !fir.ref, !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} { +!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_int2Earg1"} : (!fir.ref) -> (!fir.ref, !fir.ref) + +!CHECK-NEXT: hlfir.declare %[[IV]] ! Testing last iteration check !CHECK: %[[V:.*]] = arith.addi %[[INDX_WS]], %{{.*}} : i32 !CHECK: %[[C0:.*]] = arith.constant 0 : i32 diff --git a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 index 99323e69113bc..e8ac8e7f62122 100644 --- a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 +++ b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 @@ -56,17 +56,15 @@ ! CHECK-DAG: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_private_fixEx"} ! CHECK-DAG: %[[X_DECL:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_private_fixEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: omp.parallel { -! CHECK-DAG: %[[PRIV_I:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK-DAG: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFmultiple_private_fixEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK-DAG: %[[PRIV_J:.*]] = fir.alloca i32 {bindc_name = "j", pinned, uniq_name = "_QFmultiple_private_fixEj"} -! 
CHECK-DAG: %[[PRIV_J_DECL:.*]]:2 = hlfir.declare %[[PRIV_J]] {uniq_name = "_QFmultiple_private_fixEj"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK-DAG: %[[PRIV_X:.*]] = fir.alloca i32 {bindc_name = "x", pinned, {{.*}}} -! CHECK-DAG: %[[PRIV_X_DECL:.*]]:2 = hlfir.declare %[[PRIV_X]] {uniq_name = "_QFmultiple_private_fixEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[ONE:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_3:.*]] = fir.load %[[GAMA_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_5:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[PRIV_J:.*]], @{{.*}} %{{.*}}#0 -> %[[PRIV_X:.*]], @{{.*}} %{{.*}}#0 -> %[[PRIV_I:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_6:.*]]) : i32 = (%[[ONE]]) to (%[[VAL_3]]) inclusive step (%[[VAL_5]]) { +! CHECK-DAG: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFmultiple_private_fixEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK-DAG: %[[PRIV_J_DECL:.*]]:2 = hlfir.declare %[[PRIV_J]] {uniq_name = "_QFmultiple_private_fixEj"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK-DAG: %[[PRIV_X_DECL:.*]]:2 = hlfir.declare %[[PRIV_X]] {uniq_name = "_QFmultiple_private_fixEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_6]] to %[[PRIV_I_DECL]]#1 : !fir.ref ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]] : (i32) -> index diff --git a/flang/test/Lower/OpenMP/parallel-private-clause.f90 b/flang/test/Lower/OpenMP/parallel-private-clause.f90 index 7114314df05d3..f26b97b55d51a 100644 --- a/flang/test/Lower/OpenMP/parallel-private-clause.f90 +++ b/flang/test/Lower/OpenMP/parallel-private-clause.f90 @@ -271,17 +271,16 @@ subroutine simple_loop_1 !$OMP PARALLEL PRIVATE(r) ! FIRDialect: %[[R_DECL:.*]]:2 = hlfir.declare %[[R]] {fortran_attrs = #fir.var_attrs, uniq_name = "_QFsimple_loop_1Er"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) - ! 
FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32 ! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32 ! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! FIRDialect: omp.wsloop { + ! FIRDialect: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref) { ! FIRDialect-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP DO do i=1, 9 + ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! FIRDialect: fir.store %[[I]] to %[[ALLOCA_IV_DECL]]#1 : !fir.ref ! FIRDialect: %[[LOAD_IV:.*]] = fir.load %[[ALLOCA_IV_DECL]]#0 : !fir.ref ! FIRDialect: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}} : (!fir.ref, i32) -> i1 @@ -299,34 +298,23 @@ subroutine simple_loop_2 real, allocatable :: r; ! FIRDialect: omp.parallel !$OMP PARALLEL - ! FIRDialect: [[R:%.*]] = fir.alloca !fir.box> {bindc_name = "r", pinned, uniq_name = "{{.*}}Er"} - ! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref>> - ! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref>> - ! FIRDialect: %[[R_DECL:.*]]:2 = hlfir.declare [[R]] {fortran_attrs = #fir.var_attrs, uniq_name = "{{.*}}Er"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) - - ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32 ! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32 ! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! FIRDialect: omp.wsloop { + ! 
FIRDialect: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[R:.*]], @{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref>>, !fir.ref) { ! FIRDialect-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP DO PRIVATE(r) do i=1, 9 + ! FIRDialect: %[[R_DECL:.*]]:2 = hlfir.declare %[[R]] {fortran_attrs = #fir.var_attrs, uniq_name = "{{.*}}Er"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! FIRDialect: fir.store %[[I]] to %[[ALLOCA_IV_DECL]]#1 : !fir.ref ! FIRDialect: %[[LOAD_IV:.*]] = fir.load %[[ALLOCA_IV_DECL]]#0 : !fir.ref ! FIRDialect: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 print*, i end do ! FIRDialect: omp.yield - ! FIRDialect: {{%.*}} = fir.load %[[R_DECL]]#0 : !fir.ref>> - ! FIRDialect: fir.if {{%.*}} { - ! FIRDialect: [[LD:%.*]] = fir.load %[[R_DECL]]#0 : !fir.ref>> - ! FIRDialect: [[AD:%.*]] = fir.box_addr [[LD]] : (!fir.box>) -> !fir.heap - ! FIRDialect: fir.freemem [[AD]] : !fir.heap - ! FIRDialect: fir.store {{%.*}} to %[[R_DECL]]#0 : !fir.ref>> !$OMP END DO ! FIRDialect: omp.terminator !$OMP END PARALLEL @@ -337,35 +325,24 @@ subroutine simple_loop_3 integer :: i real, allocatable :: r; ! FIRDialect: omp.parallel - - ! FIRDialect: [[R:%.*]] = fir.alloca !fir.box> {bindc_name = "r", pinned, uniq_name = "{{.*}}Er"} - ! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref>> - ! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref>> - ! FIRDialect: [[R_DECL:%.*]]:2 = hlfir.declare [[R]] {fortran_attrs = #fir.var_attrs, uniq_name = "{{.*}}Er"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) - - ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! 
FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32 ! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32 ! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! FIRDialect: omp.wsloop { + ! FIRDialect: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[R:.*]], @{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref>>, !fir.ref) { ! FIRDialect-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP PARALLEL DO PRIVATE(r) do i=1, 9 + ! FIRDialect: [[R_DECL:%.*]]:2 = hlfir.declare %[[R]] {fortran_attrs = #fir.var_attrs, uniq_name = "{{.*}}Er"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + + ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! FIRDialect: fir.store %[[I]] to %[[ALLOCA_IV_DECL:.*]]#1 : !fir.ref ! FIRDialect: %[[LOAD_IV:.*]] = fir.load %[[ALLOCA_IV_DECL]]#0 : !fir.ref ! FIRDialect: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 print*, i end do ! FIRDialect: omp.yield - ! FIRDialect: {{%.*}} = fir.load [[R_DECL]]#0 : !fir.ref>> - ! FIRDialect: fir.if {{%.*}} { - ! FIRDialect: [[LD:%.*]] = fir.load [[R_DECL]]#0 : !fir.ref>> - ! FIRDialect: [[AD:%.*]] = fir.box_addr [[LD]] : (!fir.box>) -> !fir.heap - ! FIRDialect: fir.freemem [[AD]] : !fir.heap - ! FIRDialect: fir.store {{%.*}} to [[R_DECL]]#0 : !fir.ref>> !$OMP END PARALLEL DO ! FIRDialect: omp.terminator end subroutine diff --git a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 index dabd495d733b5..11d5682209676 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 @@ -92,13 +92,12 @@ program reduce ! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_9]](%[[VAL_10]]) : (!fir.heap>, !fir.shape<1>) -> !fir.box>> ! CHECK: fir.store %[[VAL_11]] to %[[VAL_3]]#1 : !fir.ref>>> ! 
CHECK: omp.parallel { -! CHECK: %[[VAL_12:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_heap_Uxi32 %[[VAL_3]]#0 -> %[[VAL_17:.*]] : !fir.ref>>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_12:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_heap_Uxi32 %[[VAL_3]]#0 -> %[[VAL_17:.*]] : !fir.ref>>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_18:.*]]) : i32 = (%[[VAL_14]]) to (%[[VAL_15]]) inclusive step (%[[VAL_16]]) { +! CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_17]] {fortran_attrs = {{.*}}, uniq_name = "_QFEr"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: fir.store %[[VAL_18]] to %[[VAL_13]]#1 : !fir.ref ! CHECK: %[[VAL_20:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 index 01d8dc33f40e6..54fe53b5d6f6a 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 @@ -99,18 +99,17 @@ program reduce ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_3]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (index) -> i64 ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_13]] : (i32) -> i64 -! CHECK: %[[VAL_18:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_15]], %[[VAL_14]], %[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref>, i32, i64, i64) -> none +! 
CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_15]], %[[VAL_14]], %[[VAL_16]], %[[VAL_17]]) fastmath : (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_3]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_6]] : (!fir.ref>) -> !fir.ref ! CHECK: %[[VAL_21:.*]] = fir.call @_FortranAPointerAllocate(%[[VAL_19]], %[[VAL_4]], %[[VAL_5]], %[[VAL_20]], %[[VAL_7]]) fastmath : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 ! CHECK: omp.parallel { -! CHECK: %[[VAL_22:.*]] = fir.alloca i32 {bindc_name = "i", pinned, uniq_name = "_QFEi"} -! CHECK: %[[VAL_23:.*]]:2 = hlfir.declare %[[VAL_22]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_24:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_25:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_26:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_ptr_Uxi32 %[[VAL_3]]#0 -> %[[VAL_27:.*]] : !fir.ref>>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_22:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_ptr_Uxi32 %[[VAL_3]]#0 -> %[[VAL_27:.*]] : !fir.ref>>>) { ! CHECK: omp.loop_nest (%[[VAL_28:.*]]) : i32 = (%[[VAL_24]]) to (%[[VAL_25]]) inclusive step (%[[VAL_26]]) { +! CHECK: %[[VAL_23:.*]]:2 = hlfir.declare %[[VAL_22]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_27]] {fortran_attrs = {{.*}}, uniq_name = "_QFEr"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: fir.store %[[VAL_28]] to %[[VAL_23]]#1 : !fir.ref ! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_23]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/parallel-reduction3.f90 b/flang/test/Lower/OpenMP/parallel-reduction3.f90 index 70b4f0f12820b..194b3fdd98201 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction3.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction3.f90 @@ -71,13 +71,12 @@ ! CHECK: omp.parallel { ! CHECK: %[[VAL_14:.*]] = fir.alloca !fir.box> ! 
CHECK: fir.store %[[VAL_12]]#0 to %[[VAL_14]] : !fir.ref>> -! CHECK: %[[VAL_15:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_18:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_19:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_Uxi32 %[[VAL_14]] -> %[[VAL_20:.*]] : !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_15:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_Uxi32 %[[VAL_14]] -> %[[VAL_20:.*]] : !fir.ref>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_21:.*]]) : i32 = (%[[VAL_17]]) to (%[[VAL_18]]) inclusive step (%[[VAL_19]]) { +! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFsEc"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_21]] to %[[VAL_16]]#1 : !fir.ref ! CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_22]]#0 : !fir.ref>> @@ -115,12 +114,12 @@ ! CHECK: %[[VAL_44:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_45:.*]] = arith.constant false ! CHECK: %[[VAL_46:.*]] = arith.constant false -! CHECK: %[[VAL_47:.*]] = fir.call @_FortranAStopStatement(%[[VAL_44]], %[[VAL_45]], %[[VAL_46]]) fastmath : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_44]], %[[VAL_45]], %[[VAL_46]]) fastmath : (i32, i1, i1) -> () ! CHECK: fir.unreachable ! CHECK: ^bb2: ! CHECK: return ! CHECK: } -! CHECK: func.func private @_FortranAStopStatement(i32, i1, i1) -> none attributes {fir.runtime} +! 
CHECK: func.func private @_FortranAStopStatement(i32, i1, i1) attributes {fir.runtime} subroutine s(x) integer :: x diff --git a/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90 b/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90 index 4e7c2c15df743..a0cdaaa4c7b09 100644 --- a/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90 +++ b/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90 @@ -11,20 +11,13 @@ subroutine omp_do_firstprivate(a) n = a+1 !$omp parallel do firstprivate(a) ! CHECK: omp.parallel { - - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_do_firstprivateEa"} - ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK-NEXT: %[[LD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref - ! CHECK-NEXT: hlfir.assign %[[LD]] to %[[A_PVT_DECL]]#0 : i32, !fir.ref - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[LB:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: omp.wsloop { + ! CHECK-NEXT: omp.wsloop private(@{{.*a_firstprivate_ref_i32.*}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*i_private_ref_i32.*}} %{{.*}}#0 -> %[[I_PVT_REF:.*]] : !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { + ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK-NEXT: fir.store %[[ARG1]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.call @_QPfoo(%[[I_PVT_DECL]]#1, %[[A_PVT_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! CHECK-NEXT: omp.yield @@ -47,25 +40,14 @@ subroutine omp_do_firstprivate2(a, n) n = a+1 !$omp parallel do firstprivate(a, n) ! CHECK: omp.parallel { - - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, {{.*}}} + ! CHECK: %[[LB:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref + ! CHECK: %[[UB:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref + ! CHECK: %[[STEP:.*]] = arith.constant 1 : i32 + ! CHECK: omp.wsloop private(@{{.*a_firstprivate_ref_i32}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*n_firstprivate_ref_i32}} %{{.*}}#0 -> %[[N_PVT_REF:.*]], @{{.*i_private_ref_i32}} %{{.*}}#0 -> %[[I_PVT_REF:.*]] : !fir.ref, !fir.ref, !fir.ref) { + ! CHECK-NEXT: omp.loop_nest (%[[ARG2:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[LD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref - ! CHECK: hlfir.assign %[[LD]] to %[[A_PVT_DECL]]#0 : i32, !fir.ref - - ! CHECK: %[[N_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "n", pinned, uniq_name = "_QFomp_do_firstprivate2En"} ! CHECK: %[[N_PVT_DECL:.*]]:2 = hlfir.declare %[[N_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2En"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[LD1:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref - ! CHECK: hlfir.assign %[[LD1]] to %[[N_PVT_DECL]]#0 : i32, !fir.ref - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[LB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref - ! CHECK: %[[UB:.*]] = fir.load %[[N_PVT_DECL]]#0 : !fir.ref - ! CHECK: %[[STEP:.*]] = arith.constant 1 : i32 - ! 
CHECK: omp.wsloop { - ! CHECK-NEXT: omp.loop_nest (%[[ARG2:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { ! CHECK: fir.store %[[ARG2]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK: fir.call @_QPfoo(%[[I_PVT_DECL]]#1, %[[A_PVT_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! CHECK: omp.yield diff --git a/flang/test/Lower/OpenMP/parallel-wsloop-lastpriv.f90 b/flang/test/Lower/OpenMP/parallel-wsloop-lastpriv.f90 index dbde5291c01c8..a7c0dc3b1b406 100644 --- a/flang/test/Lower/OpenMP/parallel-wsloop-lastpriv.f90 +++ b/flang/test/Lower/OpenMP/parallel-wsloop-lastpriv.f90 @@ -12,17 +12,15 @@ subroutine omp_do_lastprivate(a) !$omp parallel do lastprivate(a) ! CHECK: omp.parallel { - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_do_lastprivateEa"} - ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[LB:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: omp.wsloop { + ! CHECK-NEXT: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[I_PVT_REF:.*]] : !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { + ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! 
CHECK-NEXT: fir.store %[[ARG1]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.call @_QPfoo(%[[I_PVT_DECL]]#1, %[[A_PVT_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! CHECK: %[[NEXT_ARG1:.*]] = arith.addi %[[ARG1]], %[[STEP]] : i32 @@ -58,20 +56,15 @@ subroutine omp_do_lastprivate2(a, n) !$omp parallel do lastprivate(a, n) ! CHECK: omp.parallel { - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, {{.*}}} + ! CHECK: %[[LB:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref + ! CHECK: %[[UB:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref + ! CHECK: %[[STEP:.*]] = arith.constant 1 : i32 + ! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[N_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[I_PVT_REF:.*]] : !fir.ref, !fir.ref, !fir.ref) { + ! CHECK-NEXT: omp.loop_nest (%[[ARG2:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate2Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[N_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "n", pinned, uniq_name = "_QFomp_do_lastprivate2En"} ! CHECK: %[[N_PVT_DECL:.*]]:2 = hlfir.declare %[[N_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate2En"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[LB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref - ! CHECK: %[[UB:.*]] = fir.load %[[N_PVT_DECL]]#0 : !fir.ref - ! CHECK: %[[STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop { - ! CHECK-NEXT: omp.loop_nest (%[[ARG2:.*]]) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]]) { ! CHECK: fir.store %[[ARG2]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK: fir.call @_QPfoo(%[[I_PVT_DECL]]#1, %[[A_PVT_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! 
CHECK: %[[NEXT_ARG2:.*]] = arith.addi %[[ARG2]], %[[STEP]] : i32 @@ -104,23 +97,18 @@ subroutine omp_do_lastprivate_collapse2(a) !$omp parallel do lastprivate(a) collapse(2) ! CHECK: omp.parallel { - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_do_lastprivate_collapse2Ea"} - ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! - ! CHECK: %[[J_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}} - ! CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[LB1:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB1:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB1:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP1:.*]] = arith.constant 1 : i32 ! CHECK: %[[LB2:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB2:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB2:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP2:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: omp.wsloop { + ! CHECK-NEXT: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[I_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[J_PVT_REF:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]], %[[ARG2:.*]]) : i32 = (%[[LB1]], %[[LB2]]) to (%[[UB1]], %[[UB2]]) inclusive step (%[[STEP1]], %[[STEP2]]) { + ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! 
CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse2Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK-NEXT: fir.store %[[ARG1]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.store %[[ARG2]] to %[[J_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.call @_QPfoo(%[[I_PVT_DECL]]#1, %[[A_PVT_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () @@ -164,29 +152,23 @@ subroutine omp_do_lastprivate_collapse3(a) !$omp parallel do lastprivate(a) collapse(3) ! CHECK: omp.parallel { - ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_do_lastprivate_collapse3Ea"} - ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[J_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}} - ! CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) - - ! CHECK: %[[K_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "k", pinned, {{.*}}} - ! CHECK: %[[K_PVT_DECL:.*]]:2 = hlfir.declare %[[K_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ek"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[LB1:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB1:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB1:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP1:.*]] = arith.constant 1 : i32 ! CHECK: %[[LB2:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB2:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! 
CHECK-NEXT: %[[UB2:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP2:.*]] = arith.constant 1 : i32 ! CHECK: %[[LB3:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: %[[UB3:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref + ! CHECK-NEXT: %[[UB3:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref ! CHECK-NEXT: %[[STEP3:.*]] = arith.constant 1 : i32 - ! CHECK-NEXT: omp.wsloop { + ! CHECK-NEXT: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[A_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[I_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[J_PVT_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[K_PVT_REF:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) : i32 = (%[[LB1]], %[[LB2]], %[[LB3]]) to (%[[UB1]], %[[UB2]], %[[UB3]]) inclusive step (%[[STEP1]], %[[STEP2]], %[[STEP3]]) { + ! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ea"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: %[[K_PVT_DECL:.*]]:2 = hlfir.declare %[[K_PVT_REF]] {uniq_name = "_QFomp_do_lastprivate_collapse3Ek"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK-NEXT: fir.store %[[ARG1]] to %[[I_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.store %[[ARG2]] to %[[J_PVT_DECL]]#1 : !fir.ref ! CHECK-NEXT: fir.store %[[ARG3]] to %[[K_PVT_DECL]]#1 : !fir.ref diff --git a/flang/test/Lower/OpenMP/parallel-wsloop-reduction-byref.f90 b/flang/test/Lower/OpenMP/parallel-wsloop-reduction-byref.f90 index 99c521406a777..3f44f292cb6a0 100644 --- a/flang/test/Lower/OpenMP/parallel-wsloop-reduction-byref.f90 +++ b/flang/test/Lower/OpenMP/parallel-wsloop-reduction-byref.f90 @@ -4,7 +4,7 @@ ! 
RUN: flang -fc1 -fopenmp -mmlir --force-byref-reduction -emit-hlfir %s -o - | FileCheck %s ! CHECK: omp.parallel { -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_i32 +! CHECK: omp.wsloop private({{.*}}) reduction(byref @add_reduction_byref_i32 subroutine sb integer :: x x = 0 diff --git a/flang/test/Lower/OpenMP/parallel-wsloop-reduction.f90 b/flang/test/Lower/OpenMP/parallel-wsloop-reduction.f90 index cfeb5de83f4e8..a206eef52da5a 100644 --- a/flang/test/Lower/OpenMP/parallel-wsloop-reduction.f90 +++ b/flang/test/Lower/OpenMP/parallel-wsloop-reduction.f90 @@ -4,7 +4,7 @@ ! RUN: flang -fc1 -fopenmp -emit-hlfir %s -o - | FileCheck %s ! CHECK: omp.parallel { -! CHECK: omp.wsloop reduction(@add_reduction_i32 +! CHECK: omp.wsloop private({{.*}}) reduction(@add_reduction_i32 subroutine sb integer :: x x = 0 diff --git a/flang/test/Lower/OpenMP/parallel-wsloop.f90 b/flang/test/Lower/OpenMP/parallel-wsloop.f90 index cba5209f85989..7116069e8daa6 100644 --- a/flang/test/Lower/OpenMP/parallel-wsloop.f90 +++ b/flang/test/Lower/OpenMP/parallel-wsloop.f90 @@ -10,7 +10,7 @@ subroutine simple_parallel_do ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP PARALLEL DO do i=1, 9 @@ -39,7 +39,7 @@ subroutine parallel_do_with_parallel_clauses(cond, nt) ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP PARALLEL DO IF(cond) NUM_THREADS(nt) PROC_BIND(close) do i=1, 9 @@ -64,7 +64,7 @@ subroutine parallel_do_with_clauses(nt) ! 
CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop schedule(dynamic) { + ! CHECK: omp.wsloop schedule(dynamic) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP PARALLEL DO NUM_THREADS(nt) SCHEDULE(dynamic) do i=1, 9 @@ -92,19 +92,16 @@ subroutine parallel_do_with_privatisation_clauses(cond,nt) integer :: nt integer :: i ! CHECK: omp.parallel - ! CHECK: %[[PRIVATE_COND_REF:.*]] = fir.alloca !fir.logical<4> {bindc_name = "cond", pinned, uniq_name = "_QFparallel_do_with_privatisation_clausesEcond"} - ! CHECK: %[[PRIVATE_COND_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_COND_REF]] {uniq_name = "_QFparallel_do_with_privatisation_clausesEcond"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) - ! CHECK: %[[PRIVATE_NT_REF:.*]] = fir.alloca i32 {bindc_name = "nt", pinned, uniq_name = "_QFparallel_do_with_privatisation_clausesEnt"} - ! CHECK: %[[PRIVATE_NT_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_NT_REF]] {uniq_name = "_QFparallel_do_with_privatisation_clausesEnt"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[NT_VAL:.*]] = fir.load %[[NT_DECL]]#0 : !fir.ref - ! CHECK: hlfir.assign %[[NT_VAL]] to %[[PRIVATE_NT_DECL]]#0 : i32, !fir.ref + ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[PRIVATE_COND_REF:.*]], @{{.*}} %{{.*}}#0 -> %[[PRIVATE_NT_REF:.*]], @{{.*}} %3#0 -> %{{.*}} : !fir.ref>, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP PARALLEL DO PRIVATE(cond) FIRSTPRIVATE(nt) do i=1, 9 + ! 
CHECK: %[[PRIVATE_COND_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_COND_REF]] {uniq_name = "_QFparallel_do_with_privatisation_clausesEcond"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) + ! CHECK: %[[PRIVATE_NT_DECL:.*]]:2 = hlfir.declare %[[PRIVATE_NT_REF]] {uniq_name = "_QFparallel_do_with_privatisation_clausesEnt"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[IV_ADDR:.*]]#1 : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[IV_ADDR]]#0 : !fir.ref ! CHECK: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 @@ -149,14 +146,12 @@ end subroutine parallel_private_do ! CHECK: %[[NT_PRIV_DECL:.*]]:2 = hlfir.declare %[[NT_PRIV_ADDR]] {uniq_name = "_QFparallel_private_doEnt"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: %[[I_PRIV:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV]] {uniq_name = "_QFparallel_private_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 9 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[I_PRIV:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV]] {uniq_name = "_QFparallel_private_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[I_PRIV_DECL]]#1 : !fir.ref ! CHECK: fir.call @_QPfoo(%[[I_PRIV_DECL]]#1, %[[COND_DECL]]#1, %[[NT_PRIV_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref>, !fir.ref) -> () ! CHECK: omp.yield @@ -195,14 +190,13 @@ end subroutine omp_parallel_multiple_firstprivate_do ! CHECK: %[[B_PRIV_DECL:.*]]:2 = hlfir.declare %[[B_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEb"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! 
CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[I_PRIV_ADDR:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[VAL_8]]) to (%[[VAL_9]]) inclusive step (%[[VAL_10]]) { +! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[I_PRIV_DECL]]#1 : !fir.ref ! CHECK: fir.call @_QPbar(%[[I_PRIV_DECL]]#1, %[[A_PRIV_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! CHECK: omp.yield @@ -237,23 +231,15 @@ end subroutine parallel_do_private ! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref {fir.bindc_name = "nt"}) { ! CHECK: %[[NT_DECL:.*]]:2 = hlfir.declare %[[VAL_1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFparallel_do_privateEnt"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) ! CHECK: omp.parallel { - -! CHECK: %[[COND_PRIV_ADDR:.*]] = fir.alloca !fir.logical<4> {bindc_name = "cond", pinned, uniq_name = "_QFparallel_do_privateEcond"} -! CHECK: %[[COND_PRIV_DECL:.*]]:2 = hlfir.declare %[[COND_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEcond"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) - -! CHECK: %[[NT_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "nt", pinned, uniq_name = "_QFparallel_do_privateEnt"} -! CHECK: %[[NT_PRIV_DECL:.*]]:2 = hlfir.declare %[[NT_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEnt"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: %[[NT_VAL:.*]] = fir.load %[[NT_DECL]]#0 : !fir.ref -! CHECK: hlfir.assign %[[NT_VAL]] to %[[NT_PRIV_DECL]]#0 : i32, !fir.ref - -! 
CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 9 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[COND_PRIV_ADDR:.*]], @{{.*}} %{{.*}}#0 -> %[[NT_PRIV_ADDR:.*]], @{{.*}} %3#0 -> %[[I_PRIV_ADDR:.*]] : !fir.ref>, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[COND_PRIV_DECL:.*]]:2 = hlfir.declare %[[COND_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEcond"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) +! CHECK: %[[NT_PRIV_DECL:.*]]:2 = hlfir.declare %[[NT_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEnt"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: fir.store %[[I]] to %[[I_PRIV_DECL]]#1 : !fir.ref ! CHECK: fir.call @_QPfoo(%[[I_PRIV_DECL]]#1, %[[COND_PRIV_DECL]]#1, %[[NT_PRIV_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref>, !fir.ref) -> () ! CHECK: omp.yield @@ -287,25 +273,15 @@ end subroutine omp_parallel_do_multiple_firstprivate ! CHECK: %[[A_DECL:.*]]:2 = hlfir.declare %[[A_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) ! CHECK: %[[B_DECL:.*]]:2 = hlfir.declare %[[B_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref ! CHECK: omp.parallel { - -! CHECK: %[[A_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} -! 
CHECK: %[[A_PRIV_DECL:.*]]:2 = hlfir.declare %[[A_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: %[[A:.*]] = fir.load %[[A_DECL]]#0 : !fir.ref -! CHECK: hlfir.assign %[[A]] to %[[A_PRIV_DECL]]#0 : i32, !fir.ref - -! CHECK: %[[B_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "b", pinned, uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} -! CHECK: %[[B_PRIV_DECL:.*]]:2 = hlfir.declare %[[B_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: %[[B:.*]] = fir.load %[[B_DECL]]#0 : !fir.ref -! CHECK: hlfir.assign %[[B]] to %[[B_PRIV_DECL]]#0 : i32, !fir.ref - -! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - ! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[A_PRIV_ADDR:.*]], @{{.*}} %{{.}}#0 -> %[[B_PRIV_ADDR:.*]], @{{.*}} %{{.}}#0 -> %[[I_PRIV_ADDR:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[VAL_8]]) to (%[[VAL_9]]) inclusive step (%[[VAL_10]]) { +! CHECK: %[[A_PRIV_DECL:.*]]:2 = hlfir.declare %[[A_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: %[[B_PRIV_DECL:.*]]:2 = hlfir.declare %[[B_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} : (!fir.ref) -> (!fir.ref, !fir.ref) +! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) + ! CHECK: fir.store %[[I]] to %[[I_PRIV_DECL]]#1 : !fir.ref ! 
CHECK: fir.call @_QPbar(%[[I_PRIV_DECL]]#1, %[[A_PRIV_DECL]]#1) {{.*}}: (!fir.ref, !fir.ref) -> () ! CHECK: omp.yield diff --git a/flang/test/Lower/OpenMP/private-derived-type.f90 b/flang/test/Lower/OpenMP/private-derived-type.f90 index 9d680cd5d6114..7e0a3f14639f6 100644 --- a/flang/test/Lower/OpenMP/private-derived-type.f90 +++ b/flang/test/Lower/OpenMP/private-derived-type.f90 @@ -15,6 +15,17 @@ subroutine s4 !$omp end parallel end subroutine s4 +! CHECK: omp.private {type = private} @[[DERIVED_PRIV:.*]] : !fir.ref>}>> alloc { +! CHECK: %[[VAL_23:.*]] = fir.alloca !fir.type<_QFs4Ty3{x:!fir.box>}> {bindc_name = "v", pinned, uniq_name = "_QFs4Ev"} +! CHECK: %[[VAL_25:.*]] = fir.embox %[[VAL_23]] : (!fir.ref>}>>) -> !fir.box>}>> +! CHECK: %[[VAL_26:.*]] = fir.address_of +! CHECK: %[[VAL_27:.*]] = arith.constant 8 : i32 +! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_25]] : (!fir.box>}>>) -> !fir.box +! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_26]] : (!fir.ref>) -> !fir.ref +! Check we do call FortranAInitialize on the derived type +! CHECK: fir.call @_FortranAInitialize(%[[VAL_28]], %[[VAL_29]], %[[VAL_27]]) fastmath : (!fir.box, !fir.ref, i32) -> () +! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFs4Ev"} : (!fir.ref>}>>) -> (!fir.ref>}>>, !fir.ref>}>>) +! CHECK: } ! CHECK-LABEL: func.func @_QPs4() { ! Example of how the lowering for regular derived type variables: @@ -25,22 +36,13 @@ end subroutine s4 ! CHECK: %[[VAL_12:.*]] = arith.constant 4 : i32 ! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_10]] : (!fir.box>}>>) -> !fir.box ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_11]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_15:.*]] = fir.call @_FortranAInitialize(%[[VAL_13]], %[[VAL_14]], %[[VAL_12]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_13]], %[[VAL_14]], %[[VAL_12]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_23:.*]] = fir.alloca !fir.type<_QFs4Ty3{x:!fir.box>}> {bindc_name = "v", pinned, uniq_name = "_QFs4Ev"} -! CHECK: %[[VAL_25:.*]] = fir.embox %[[VAL_23]] : (!fir.ref>}>>) -> !fir.box>}>> -! CHECK: %[[VAL_26:.*]] = fir.address_of -! CHECK: %[[VAL_27:.*]] = arith.constant 8 : i32 -! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_25]] : (!fir.box>}>>) -> !fir.box -! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_26]] : (!fir.ref>) -> !fir.ref -! Check we do call FortranAInitialize on the derived type -! CHECK: %[[VAL_30:.*]] = fir.call @_FortranAInitialize(%[[VAL_28]], %[[VAL_29]], %[[VAL_27]]) fastmath : (!fir.box, !fir.ref, i32) -> none -! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFs4Ev"} : (!fir.ref>}>>) -> (!fir.ref>}>>, !fir.ref>}>>) -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@[[DERIVED_PRIV]] %{{.*}}#0 -> %{{.*}}, @{{.*}} %{{.*}}#0 -> %{{.*}} : !fir.ref>}>>, !fir.ref) { ! CHECK: } ! CHECK: %[[VAL_39:.*]] = fir.embox %[[VAL_9]]#1 : (!fir.ref>}>>) -> !fir.box>}>> ! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (!fir.box>}>>) -> !fir.box ! Check the derived type is destroyed -! CHECK: %[[VAL_41:.*]] = fir.call @_FortranADestroy(%[[VAL_40]]) fastmath : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[VAL_40]]) fastmath : (!fir.box) -> () ! CHECK: return ! CHECK: } diff --git a/flang/test/Lower/OpenMP/same_var_first_lastprivate.f90 b/flang/test/Lower/OpenMP/same_var_first_lastprivate.f90 new file mode 100644 index 0000000000000..c49a0908b721e --- /dev/null +++ b/flang/test/Lower/OpenMP/same_var_first_lastprivate.f90 @@ -0,0 +1,39 @@ +! RUN: %flang_fc1 -fopenmp -emit-hlfir %s -o - | FileCheck %s + +subroutine first_and_lastprivate + integer i + integer :: var = 1 + + !$omp parallel do firstprivate(var) lastprivate(var) + do i=1,1 + end do + !$omp end parallel do +end subroutine + +! CHECK: omp.private {type = firstprivate} @{{.*}}Evar_firstprivate_ref_i32 : {{.*}} alloc { +! 
CHECK: %[[ALLOC:.*]] = fir.alloca i32 {{.*}} +! CHECK: %[[ALLOC_DECL:.*]]:2 = hlfir.declare %[[ALLOC]] +! CHECK: omp.yield(%[[ALLOC_DECL]]#0 : !fir.ref) +! CHECK: } copy { +! CHECK: ^{{.*}}(%[[ORIG_REF:.*]]: {{.*}}, %[[PRIV_REF:.*]]: {{.*}}): +! CHECK: %[[ORIG_VAL:.*]] = fir.load %[[ORIG_REF]] +! CHECK: hlfir.assign %[[ORIG_VAL]] to %[[PRIV_REF]] +! CHECK: omp.yield(%[[PRIV_REF]] : !fir.ref) +! CHECK: } + +! CHECK: func.func @{{.*}}first_and_lastprivate() +! CHECK: %[[ORIG_VAR_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "{{.*}}Evar"} +! CHECK: omp.parallel { +! CHECK: omp.barrier +! CHECK: omp.wsloop private(@{{.*}}var_firstprivate_ref_i32 {{.*}}) { +! CHECK: omp.loop_nest {{.*}} { +! CHECK: %[[PRIV_VAR_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "{{.*}}Evar"} +! CHECK: fir.if %{{.*}} { +! CHECK: %[[PRIV_VAR_VAL:.*]] = fir.load %[[PRIV_VAR_DECL]]#0 : !fir.ref +! CHECK: hlfir.assign %[[PRIV_VAR_VAL]] to %[[ORIG_VAR_DECL]]#0 +! CHECK: } +! CHECK: omp.yield +! CHECK: } +! CHECK: } +! CHECK: omp.terminator +! CHECK: } diff --git a/flang/test/Lower/OpenMP/stop-stmt-in-region.f90 b/flang/test/Lower/OpenMP/stop-stmt-in-region.f90 index d119c2120c7c5..d817c4e771b31 100644 --- a/flang/test/Lower/OpenMP/stop-stmt-in-region.f90 +++ b/flang/test/Lower/OpenMP/stop-stmt-in-region.f90 @@ -9,7 +9,7 @@ ! CHECK: %[[VAL_0:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_1:.*]] = arith.constant false ! CHECK: %[[VAL_2:.*]] = arith.constant false -! CHECK: %[[VAL_3:.*]] = fir.call @_FortranAStopStatement(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) {{.*}} : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) {{.*}} : (i32, i1, i1) -> () ! CHECK-NOT: fir.unreachable ! CHECK: omp.terminator ! CHECK: } @@ -28,7 +28,7 @@ subroutine test_stop_in_region1() ! CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_2:.*]] = arith.constant false ! CHECK: %[[VAL_3:.*]] = arith.constant false -! 
CHECK: %[[VAL_4:.*]] = fir.call @_FortranAStopStatement(%[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) {{.*}} : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) {{.*}} : (i32, i1, i1) -> () ! CHECK: omp.terminator ! CHECK: } ! CHECK: return @@ -56,7 +56,7 @@ subroutine test_stop_in_region2() ! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_0_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_6:.*]] = arith.constant false ! CHECK: %[[VAL_7:.*]] = arith.constant false -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAStopStatement(%[[VAL_5]], %[[VAL_6]], %[[VAL_7]]) {{.*}} : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_5]], %[[VAL_6]], %[[VAL_7]]) {{.*}} : (i32, i1, i1) -> () ! CHECK: omp.terminator ! CHECK: ^bb2: ! CHECK: omp.terminator @@ -80,14 +80,13 @@ subroutine test_stop_in_region3() ! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFtest_stop_in_region4Ex"} ! CHECK: %[[VAL_2_DECL:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFtest_stop_in_region4Ex"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFtest_stop_in_region4Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_4:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_5:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_0:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_6:.*]]) : i32 = (%[[VAL_3]]) to (%[[VAL_4]]) inclusive step (%[[VAL_5]]) { +! CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFtest_stop_in_region4Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_6]] to %[[VAL_0_DECL]]#1 : !fir.ref ! CHECK: cf.br ^bb1 ! CHECK: ^bb1: @@ -101,7 +100,7 @@ subroutine test_stop_in_region3() ! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_2_DECL]]#0 : !fir.ref ! 
CHECK: %[[VAL_12:.*]] = arith.constant false ! CHECK: %[[VAL_13:.*]] = arith.constant false -! CHECK: %[[VAL_14:.*]] = fir.call @_FortranAStopStatement(%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]) {{.*}} : (i32, i1, i1) -> none +! CHECK: fir.call @_FortranAStopStatement(%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]) {{.*}} : (i32, i1, i1) -> () ! CHECK: omp.yield ! CHECK: ^bb3: ! CHECK: omp.yield @@ -125,7 +124,7 @@ subroutine test_stop_in_region4() !CHECK-LABEL: func.func @_QPtest_stop_in_region5 !CHECK: omp.parallel { -!CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> none +!CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> () !CHECK: omp.terminator !CHECK: } !CHECK: return @@ -142,10 +141,10 @@ subroutine test_stop_in_region5() !CHECK: omp.parallel { !CHECK: cf.cond_br %{{.*}}, ^[[BB1:.*]], ^[[BB2:.*]] !CHECK: ^[[BB1]]: -!CHECK: {{.*}}fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> none +!CHECK: {{.*}}fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> () !CHECK: omp.terminator !CHECK: ^[[BB2]]: -!CHECK: {{.*}}fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> none +!CHECK: {{.*}}fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) fastmath : (i32, i1, i1) -> () !CHECK: omp.terminator !CHECK: } !CHECK: return diff --git a/flang/test/Lower/OpenMP/target.f90 b/flang/test/Lower/OpenMP/target.f90 index 04764be2293c1..bf801e69405b9 100644 --- a/flang/test/Lower/OpenMP/target.f90 +++ b/flang/test/Lower/OpenMP/target.f90 @@ -586,11 +586,10 @@ subroutine omp_target_parallel_do !CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[ARG_0]](%{{.*}}) {uniq_name = "_QFomp_target_parallel_doEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) !CHECK: omp.parallel !$omp target parallel do map(tofrom: a) - !CHECK: %[[I_PVT_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} - 
!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ALLOCA]] {uniq_name = "_QFomp_target_parallel_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - !CHECK: omp.wsloop { + !CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[I_PVT_ALLOCA:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[I_VAL:.*]]) : i32 do i = 1, 1024 + !CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ALLOCA]] {uniq_name = "_QFomp_target_parallel_doEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#1 : !fir.ref !CHECK: %[[C10:.*]] = arith.constant 10 : i32 !CHECK: %[[I_PVT_VAL:.*]] = fir.load %[[I_PVT_DECL]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/unstructured.f90 b/flang/test/Lower/OpenMP/unstructured.f90 index a0955c8440c1c..a9925a484eb1c 100644 --- a/flang/test/Lower/OpenMP/unstructured.f90 +++ b/flang/test/Lower/OpenMP/unstructured.f90 @@ -69,10 +69,9 @@ subroutine ss2(n) ! unstructured OpenMP construct; loop exit inside construct ! CHECK: cond_br %{{[0-9]*}}, ^bb2, ^bb4 ! CHECK: ^bb2: // pred: ^bb1 -! CHECK: %[[ALLOCA_2:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} -! CHECK: %[[OMP_LOOP_K_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_2]] {uniq_name = "_QFss3Ek"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_2:.*]] : !fir.ref) { ! CHECK: omp.loop_nest (%[[ARG1:.*]]) : {{.*}} { +! CHECK: %[[OMP_LOOP_K_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_2]] {uniq_name = "_QFss3Ek"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[ARG1]] to %[[OMP_LOOP_K_DECL]]#1 : !fir.ref ! CHECK: @_FortranAioBeginExternalListOutput ! CHECK: %[[LOAD_1:.*]] = fir.load %[[OMP_LOOP_K_DECL]]#0 : !fir.ref @@ -81,11 +80,9 @@ subroutine ss2(n) ! unstructured OpenMP construct; loop exit inside construct ! CHECK: } ! CHECK: } -! CHECK: %[[ALLOCA_1:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} -! 
CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_1]] {uniq_name = "_QFss3Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) - -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_1:.*]] : !fir.ref) { ! CHECK: omp.loop_nest (%[[ARG2:.*]]) : {{.*}} { +! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_1]] {uniq_name = "_QFss3Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[ARG2]] to %[[OMP_LOOP_J_DECL]]#1 : !fir.ref ! CHECK: br ^bb1 ! CHECK: ^bb2: // 2 preds: ^bb1, ^bb5 @@ -128,10 +125,9 @@ subroutine ss3(n) ! nested unstructured OpenMP constructs ! CHECK-LABEL: func @_QPss4{{.*}} { ! CHECK: omp.parallel private(@{{.*}} %{{.*}}#0 -> %{{.*}} : {{.*}}) { -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 {{{.*}}, pinned, uniq_name = "_QFss4Ej"} -! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "_QFss4Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[ARG:.*]]) : {{.*}} { +! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "_QFss4Ej"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[ARG]] to %[[OMP_LOOP_J_DECL]]#1 : !fir.ref ! CHECK: %[[COND:.*]] = arith.cmpi eq, %{{.*}}, %{{.*}} ! CHECK: %[[COND_XOR:.*]] = arith.xori %[[COND]], %{{.*}} @@ -160,7 +156,7 @@ subroutine ss4(n) ! CYCLE in OpenMP wsloop constructs ! CHECK-LABEL: func @_QPss5() { ! CHECK: omp.parallel private(@{{.*}} %{{.*}}#0 -> %{{.*}} : {{.*}}) { -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private({{.*}}) { ! CHECK: omp.loop_nest {{.*}} { ! CHECK: br ^[[BB1:.*]] ! CHECK: ^[[BB1]]: @@ -202,7 +198,7 @@ subroutine ss5() ! EXIT inside OpenMP wsloop (inside parallel) ! CHECK: ^[[BB1_OUTER]]: ! CHECK: cond_br %{{.*}}, ^[[BB2_OUTER:.*]], ^[[BB3_OUTER:.*]] ! CHECK: ^[[BB2_OUTER]]: -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private({{.*}}) { ! CHECK: omp.loop_nest {{.*}} { ! 
CHECK: br ^[[BB1:.*]] ! CHECK: ^[[BB1]]: @@ -248,7 +244,7 @@ subroutine ss6() ! EXIT inside OpenMP wsloop in a do loop (inside parallel) ! CHECK: cond_br %{{.*}}, ^[[BB2_OUTER:.*]], ^[[BB3_OUTER:.*]] ! CHECK-NEXT: ^[[BB2_OUTER:.*]]: ! CHECK: omp.parallel { -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private({{.*}}) { ! CHECK: omp.loop_nest {{.*}} { ! CHECK: br ^[[BB1:.*]] ! CHECK-NEXT: ^[[BB1]]: @@ -288,7 +284,7 @@ subroutine ss7() ! EXIT inside OpenMP parallel do (inside do loop) ! CHECK-LABEL: func @_QPss8() { ! CHECK: omp.parallel { -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private({{.*}}) { ! CHECK: omp.loop_nest {{.*}} { ! CHECK: br ^[[BB1:.*]] ! CHECK-NEXT: ^[[BB1]]: diff --git a/flang/test/Lower/OpenMP/wsloop-chunks.f90 b/flang/test/Lower/OpenMP/wsloop-chunks.f90 index 0fb7d6f1b64fa..0a2b962761acb 100644 --- a/flang/test/Lower/OpenMP/wsloop-chunks.f90 +++ b/flang/test/Lower/OpenMP/wsloop-chunks.f90 @@ -20,7 +20,7 @@ program wsloop ! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_4:.*]] = arith.constant 9 : i32 ! CHECK: %[[VAL_5:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop nowait schedule(static = %[[VAL_2]] : i32) { +! CHECK: omp.wsloop nowait schedule(static = %[[VAL_2]] : i32) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[ARG0:.*]]) : i32 = (%[[VAL_3]]) to (%[[VAL_4]]) inclusive step (%[[VAL_5]]) { ! CHECK: fir.store %[[ARG0]] to %[[STORE_IV:.*]]#1 : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[STORE_IV]]#0 : !fir.ref @@ -40,7 +40,7 @@ program wsloop ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_16:.*]] = arith.constant 9 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop nowait schedule(static = %[[VAL_14]] : i32) { +! CHECK: omp.wsloop nowait schedule(static = %[[VAL_14]] : i32) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]]) : i32 = (%[[VAL_15]]) to (%[[VAL_16]]) inclusive step (%[[VAL_17]]) { ! CHECK: fir.store %[[ARG1]] to %[[STORE_IV1:.*]]#1 : !fir.ref ! 
CHECK: %[[VAL_24:.*]] = arith.constant 2 : i32 @@ -66,7 +66,7 @@ program wsloop ! CHECK: %[[VAL_30:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_31:.*]] = arith.constant 9 : i32 ! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop nowait schedule(static = %[[VAL_29]] : i32) { +! CHECK: omp.wsloop nowait schedule(static = %[[VAL_29]] : i32) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[ARG2:.*]]) : i32 = (%[[VAL_30]]) to (%[[VAL_31]]) inclusive step (%[[VAL_32]]) { ! CHECK: fir.store %[[ARG2]] to %[[STORE_IV2:.*]]#1 : !fir.ref ! CHECK: %[[VAL_39:.*]] = arith.constant 3 : i32 diff --git a/flang/test/Lower/OpenMP/wsloop-collapse.f90 b/flang/test/Lower/OpenMP/wsloop-collapse.f90 index 61ee76d589107..6d9862e625400 100644 --- a/flang/test/Lower/OpenMP/wsloop-collapse.f90 +++ b/flang/test/Lower/OpenMP/wsloop-collapse.f90 @@ -38,15 +38,6 @@ program wsloop_collapse !CHECK: %[[VAL_23:.*]] = arith.constant 0 : i32 !CHECK: hlfir.assign %[[VAL_23]] to %[[VAL_19]]#0 : i32, !fir.ref -!CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) - -!CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}} -!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEj"} : (!fir.ref) -> (!fir.ref, !fir.ref) - -!CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "k", pinned, {{.*}}} -!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFEk"} : (!fir.ref) -> (!fir.ref, !fir.ref) - integer :: i, j, k integer :: a, b, c integer :: x @@ -65,12 +56,17 @@ program wsloop_collapse !CHECK: %[[VAL_30:.*]] = arith.constant 1 : i32 !CHECK: %[[VAL_31:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref !CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_4:.*]], @{{.*}} %{{.*}}#0 -> %[[VAL_2:.*]], @{{.*}} %{{.*}}#0 -> %[[VAL_0:.*]] : !fir.ref, !fir.ref, 
!fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[VAL_33:.*]], %[[VAL_34:.*]], %[[VAL_35:.*]]) : i32 = (%[[VAL_24]], %[[VAL_27]], %[[VAL_30]]) to (%[[VAL_25]], %[[VAL_28]], %[[VAL_31]]) inclusive step (%[[VAL_26]], %[[VAL_29]], %[[VAL_32]]) { !$omp do collapse(3) do i = 1, a do j= 1, b do k = 1, c + +!CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEj"} : (!fir.ref) -> (!fir.ref, !fir.ref) +!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFEk"} : (!fir.ref) -> (!fir.ref, !fir.ref) + !CHECK: fir.store %[[VAL_33]] to %[[VAL_5]]#1 : !fir.ref !CHECK: fir.store %[[VAL_34]] to %[[VAL_3]]#1 : !fir.ref !CHECK: fir.store %[[VAL_35]] to %[[VAL_1]]#1 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-monotonic.f90 b/flang/test/Lower/OpenMP/wsloop-monotonic.f90 index 9659fff2d42e7..e21aa4c678f42 100644 --- a/flang/test/Lower/OpenMP/wsloop-monotonic.f90 +++ b/flang/test/Lower/OpenMP/wsloop-monotonic.f90 @@ -11,11 +11,10 @@ program wsloop_dynamic !CHECK: omp.parallel { !$OMP DO SCHEDULE(monotonic:dynamic) -!CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} !CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 !CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 !CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 -!CHECK: omp.wsloop nowait schedule(dynamic, monotonic) { +!CHECK: omp.wsloop nowait schedule(dynamic, monotonic) private({{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !CHECK: fir.store %[[I]] to %[[ALLOCA_IV:.*]]#1 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90 b/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90 index b1bea525ff489..23d3c49c00786 100644 --- a/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90 +++ b/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90 @@ -12,13 +12,12 @@ program wsloop_dynamic !CHECK: omp.parallel { !$OMP DO 
SCHEDULE(nonmonotonic:dynamic) -!CHECK: %[[I_REF:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} -!CHECK: %[[ALLOCA_IV:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 !CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 !CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 -!CHECK: omp.wsloop nowait schedule(dynamic, nonmonotonic) { +!CHECK: omp.wsloop nowait schedule(dynamic, nonmonotonic) private(@{{.*}} %{{.*}}#0 -> %[[I_REF:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { +!CHECK: %[[ALLOCA_IV:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %[[I]] to %[[ALLOCA_IV]]#1 : !fir.ref do i=1, 9 diff --git a/flang/test/Lower/OpenMP/wsloop-ordered.f90 b/flang/test/Lower/OpenMP/wsloop-ordered.f90 index 5fa53f7b28447..4862b7296a9bc 100644 --- a/flang/test/Lower/OpenMP/wsloop-ordered.f90 +++ b/flang/test/Lower/OpenMP/wsloop-ordered.f90 @@ -6,7 +6,7 @@ subroutine wsloop_ordered_no_para() integer :: a(10), i -! CHECK: omp.wsloop ordered(0) { +! CHECK: omp.wsloop ordered(0) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) { ! CHECK: omp.yield ! CHECK: } @@ -27,7 +27,7 @@ subroutine wsloop_ordered_with_para() integer :: a(10), i ! CHECK: func @_QPwsloop_ordered_with_para() { -! CHECK: omp.wsloop ordered(1) { +! CHECK: omp.wsloop ordered(1) private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) { ! CHECK: omp.yield ! CHECK: } diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90 index c38a79191bc4e..bc021e7a3b273 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90 @@ -85,13 +85,12 @@ ! 
CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -127,13 +126,12 @@ subroutine simple_int_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -170,13 +168,12 @@ subroutine simple_real_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! 
CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -211,13 +208,12 @@ subroutine simple_int_reduction_switch_order ! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -261,13 +257,12 @@ subroutine simple_real_reduction_switch_order ! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32 ! 
CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @add_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @add_reduction_byref_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(byref @add_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @add_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @add_reduction_byref_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -324,13 +319,12 @@ subroutine multiple_int_reductions_same_type ! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! 
CHECK: omp.wsloop reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @add_reduction_byref_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @add_reduction_byref_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(byref @add_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @add_reduction_byref_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @add_reduction_byref_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -394,13 +388,12 @@ subroutine multiple_real_reductions_same_type ! CHECK: %[[VAL_13:.*]] = arith.constant 0.000000e+00 : f64 ! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -! 
CHECK: omp.wsloop reduction(byref @add_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], byref @add_reduction_byref_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], byref @add_reduction_byref_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], byref @add_reduction_byref_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_14:.*]] : !fir.ref) reduction(byref @add_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], byref @add_reduction_byref_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], byref @add_reduction_byref_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], byref @add_reduction_byref_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) { +! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 index c5278e0ef8815..a355e968b4146 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 @@ -53,13 +53,12 @@ ! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -95,13 +94,12 @@ subroutine simple_int_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -138,13 +136,12 @@ subroutine simple_real_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -179,13 +176,12 @@ subroutine simple_int_reduction_switch_order ! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -229,13 +225,12 @@ subroutine simple_real_reduction_switch_order ! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @add_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @add_reduction_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(@add_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @add_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @add_reduction_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -292,13 +287,12 @@ subroutine multiple_int_reductions_same_type ! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @add_reduction_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @add_reduction_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(@add_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @add_reduction_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @add_reduction_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -362,13 +356,12 @@ subroutine multiple_real_reductions_same_type ! CHECK: %[[VAL_13:.*]] = arith.constant 0.000000e+00 : f64 ! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@add_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], @add_reduction_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], @add_reduction_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], @add_reduction_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_14:.*]] : !fir.ref) reduction(@add_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], @add_reduction_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], @add_reduction_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], @add_reduction_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) { +! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 index ce45d09d77a22..f09130152fb28 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 @@ -214,13 +214,12 @@ program reduce15 ! CHECK: } ! CHECK: fir.store %[[VAL_54:.*]]#1 to %[[VAL_3]]#1 : !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_55:.*]] = fir.alloca i32 {bindc_name = "i", pinned, uniq_name = "_QFEi"} -! CHECK: %[[VAL_56:.*]]:2 = hlfir.declare %[[VAL_55]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_57:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_58:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_59:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @max_byref_box_heap_Uxi32 %[[VAL_5]]#0 -> %[[VAL_60:.*]] : !fir.ref>>>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_55:.*]] : !fir.ref) reduction(byref @max_byref_box_heap_Uxi32 %[[VAL_5]]#0 -> %[[VAL_60:.*]] : !fir.ref>>>) { ! CHECK: omp.loop_nest (%[[VAL_61:.*]]) : i32 = (%[[VAL_57]]) to (%[[VAL_58]]) inclusive step (%[[VAL_59]]) { +! CHECK: %[[VAL_56:.*]]:2 = hlfir.declare %[[VAL_55]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_62:.*]]:2 = hlfir.declare %[[VAL_60]] {fortran_attrs = {{.*}}, uniq_name = "_QFEmaxes"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: fir.store %[[VAL_61]] to %[[VAL_56]]#1 : !fir.ref ! CHECK: %[[VAL_63:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> @@ -256,13 +255,12 @@ program reduce15 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.parallel { -! CHECK: %[[VAL_87:.*]] = fir.alloca i32 {bindc_name = "i", pinned, uniq_name = "_QFEi"} -! CHECK: %[[VAL_88:.*]]:2 = hlfir.declare %[[VAL_87]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_89:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_90:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_91:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @min_byref_box_heap_Uxi32 %[[VAL_7]]#0 -> %[[VAL_92:.*]] : !fir.ref>>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_87:.*]] : !fir.ref) reduction(byref @min_byref_box_heap_Uxi32 %[[VAL_7]]#0 -> %[[VAL_92:.*]] : !fir.ref>>>) { ! CHECK: omp.loop_nest (%[[VAL_93:.*]]) : i32 = (%[[VAL_89]]) to (%[[VAL_90]]) inclusive step (%[[VAL_91]]) { +! CHECK: %[[VAL_88:.*]]:2 = hlfir.declare %[[VAL_87]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_94:.*]]:2 = hlfir.declare %[[VAL_92]] {fortran_attrs = {{.*}}, uniq_name = "_QFEmins"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) ! CHECK: fir.store %[[VAL_93]] to %[[VAL_88]]#1 : !fir.ref ! 
CHECK: %[[VAL_95:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 index ba7aea0d96c5b..7e6d7fddff5a1 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 @@ -77,13 +77,12 @@ program reduce ! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_5]]#0 realloc : i32, !fir.ref>> ! CHECK: omp.parallel { -! CHECK: %[[VAL_9:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_9]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_11:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_12:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_heap_i32 %[[VAL_5]]#0 -> %[[VAL_14:.*]] : !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_9:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_heap_i32 %[[VAL_5]]#0 -> %[[VAL_14:.*]] : !fir.ref>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_15:.*]]) : i32 = (%[[VAL_11]]) to (%[[VAL_12]]) inclusive step (%[[VAL_13]]) { +! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_9]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %[[VAL_14]] {fortran_attrs = {{.*}}, uniq_name = "_QFEr"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_15]] to %[[VAL_10]]#1 : !fir.ref ! 
CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_10]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 index 9785f77c0e091..0e2fc3a24ee1b 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 @@ -81,13 +81,12 @@ subroutine reduce(r) ! CHECK: omp.parallel { ! CHECK: %[[VAL_4:.*]] = fir.alloca !fir.box> ! CHECK: fir.store %[[VAL_3]]#1 to %[[VAL_4]] : !fir.ref>> -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFFreduceEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_Uxf64 %[[VAL_4]] -> %[[VAL_10:.*]] : !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_Uxf64 %[[VAL_4]] -> %[[VAL_10:.*]] : !fir.ref>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFFreduceEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {fortran_attrs = {{.*}}, uniq_name = "_QFFreduceEr"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 index ea5df5a836972..07debb9f6b9e0 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 @@ -76,13 +76,12 @@ program reduce ! 
CHECK: %[[VAL_6:.*]] = fir.embox %[[VAL_5]]#0(%[[VAL_4]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.box> ! CHECK: fir.store %[[VAL_6]] to %[[VAL_7]] : !fir.ref>> -! CHECK: %[[VAL_8:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_2xi32 %[[VAL_7]] -> %[[VAL_13:.*]] : !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_8:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_2xi32 %[[VAL_7]] -> %[[VAL_13:.*]] : !fir.ref>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_14:.*]]) : i32 = (%[[VAL_10]]) to (%[[VAL_11]]) inclusive step (%[[VAL_12]]) { +! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_13]] {uniq_name = "_QFEr"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_14]] to %[[VAL_9]]#1 : !fir.ref ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 index 9815cfa9c3150..a25bedb359f4e 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 @@ -76,13 +76,12 @@ program reduce ! CHECK: %[[VAL_6:.*]] = fir.embox %[[VAL_5]]#0(%[[VAL_4]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.box> ! CHECK: fir.store %[[VAL_6]] to %[[VAL_7]] : !fir.ref>> -! CHECK: %[[VAL_8:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_2xi32 %[[VAL_7]] -> %[[VAL_13:.*]] : !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_8:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_2xi32 %[[VAL_7]] -> %[[VAL_13:.*]] : !fir.ref>>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_14:.*]]) : i32 = (%[[VAL_10]]) to (%[[VAL_11]]) inclusive step (%[[VAL_12]]) { +! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_13]] {uniq_name = "_QFEr"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_14]] to %[[VAL_9]]#1 : !fir.ref ! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90 index 829229807698a..18dcc3d722886 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90 @@ -32,13 +32,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @iand_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @iand_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! 
CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iandEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 index 6c060f2e5292a..eaf07f93c7474 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 @@ -24,13 +24,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@iand_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@iand_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iandEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! 
CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90 index 284ada404bd60..6be6913f91a33 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90 @@ -28,10 +28,9 @@ !CHECK: omp.parallel -!CHECK: %[[I_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop reduction(byref @ieor_byref_i32 %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref) +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[I_REF:.*]] : !fir.ref) reduction(byref @ieor_byref_i32 %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref) !CHECK-NEXT: omp.loop_nest +!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[PRV_DECL:.+]]:2 = hlfir.declare %[[PRV]] {{.*}} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %{{.*}} to %[[I_DECL]]#1 : !fir.ref !CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 index e67253a413ce2..632dbcf1348ec 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 @@ -17,10 +17,9 @@ !CHECK: omp.parallel -!CHECK: %[[I_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref) +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[I_REF:.*]] : !fir.ref) reduction(@[[IEOR_DECLARE_I]] %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref) !CHECK-NEXT: omp.loop_nest +!CHECK: %[[I_DECL:.*]]:2 = 
hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[PRV_DECL:.+]]:2 = hlfir.declare %[[PRV]] {{.*}} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: fir.store %{{.*}} to %[[I_DECL]]#1 : !fir.ref !CHECK: %[[I_32:.*]] = fir.load %[[I_DECL]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90 index 315121cc7beb7..90b9d2f61f930 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90 @@ -30,13 +30,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @ior_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @ior_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iorEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! 
CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 index 3da250da9703d..144bc17cf8b31 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 @@ -24,13 +24,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@ior_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@ior_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_iorEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90 index 30908b6bdd4ce..e73540a93a71b 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90 @@ -39,13 +39,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! 
CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -88,13 +87,12 @@ end subroutine simple_reduction ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -146,13 +144,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @and_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @and_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @and_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(byref @and_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @and_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @and_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! 
CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 index 367683de02080..c059dab5bff5a 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 @@ -31,13 +31,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -80,13 +79,12 @@ end subroutine simple_reduction ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@and_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -138,13 +136,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@and_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @and_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @and_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(@and_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @and_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @and_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90 index 9137dd8ff4454..5e24ad6f7bb63 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90 @@ -39,13 +39,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -87,13 +86,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! 
CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -145,13 +143,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @eqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @eqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @eqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(byref @eqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @eqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @eqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 index d1ef676c37407..ad9e869984eac 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 @@ -31,13 +31,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! 
CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -79,13 +78,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@eqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -137,13 +135,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! 
CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@eqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @eqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @eqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(@eqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @eqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @eqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90 index d1491a0f5561d..b5bf1d0d0b589 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90 @@ -39,13 +39,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! 
CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -88,13 +87,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -148,13 +146,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @neqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @neqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @neqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(byref @neqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @neqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @neqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! 
CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 index b4df699c49ffa..ac9fc7f051d88 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 @@ -31,13 +31,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -80,13 +79,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@neqv_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -140,13 +138,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@neqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @neqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @neqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(@neqv_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @neqv_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @neqv_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90 index 8f4a6c22c1d74..883064884b637 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90 @@ -38,13 +38,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -86,13 +85,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(byref @or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! 
CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -144,13 +142,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @or_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @or_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @or_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(byref @or_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], byref @or_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], byref @or_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! 
CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 index 9d367797ec216..312c08d17a14d 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 @@ -31,13 +31,12 @@ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reductionEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref> @@ -79,13 +78,12 @@ subroutine simple_reduction(y) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_10:.*]] : !fir.ref) reduction(@or_reduction %[[VAL_4]]#0 -> %[[VAL_15:.*]] : !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_16:.*]]) : i32 = (%[[VAL_12]]) to (%[[VAL_13]]) inclusive step (%[[VAL_14]]) { +! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_15]] {uniq_name = "_QFsimple_reduction_switch_orderEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: fir.store %[[VAL_16]] to %[[VAL_11]]#1 : !fir.ref ! CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_11]]#0 : !fir.ref @@ -137,13 +135,12 @@ subroutine simple_reduction_switch_order(y) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4> ! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref> ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@or_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @or_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @or_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(@or_reduction %[[VAL_7]]#0 -> %[[VAL_23:.*]], @or_reduction %[[VAL_9]]#0 -> %[[VAL_24:.*]], @or_reduction %[[VAL_11]]#0 -> %[[VAL_25:.*]] : !fir.ref>, !fir.ref>, !fir.ref>) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_26:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_27:.*]]:2 = hlfir.declare %[[VAL_23]] {uniq_name = "_QFmultiple_reductionsEx"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_28:.*]]:2 = hlfir.declare %[[VAL_24]] {uniq_name = "_QFmultiple_reductionsEy"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_29:.*]]:2 = hlfir.declare %[[VAL_25]] {uniq_name = "_QFmultiple_reductionsEz"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-2-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-2-byref.f90 index 28c70899e6ccc..0438e19f34391 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-max-2-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-2-byref.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp --force-byref-reduction -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -mmlir --force-byref-reduction -o - %s 2>&1 | FileCheck %s -! CHECK: omp.wsloop reduction(byref @max_byref_i32 +! CHECK: omp.wsloop private({{.*}}) reduction(byref @max_byref_i32 ! CHECK: arith.cmpi sgt ! CHECK: arith.select diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90 index abd7ca1ae555d..66c75bbe38f10 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-2.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s -! 
CHECK: omp.wsloop reduction(@max_i32 +! CHECK: omp.wsloop private({{.*}}) reduction(@max_i32 ! CHECK: arith.cmpi sgt ! CHECK: arith.select diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90 index 9abff8ccfa3b6..07c18f90480bf 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90 @@ -45,13 +45,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @max_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @max_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -75,13 +74,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @max_byref_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @max_byref_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -95,13 +93,12 @@ ! CHECK: omp.yield ! CHECK: omp.terminator ! CHECK: omp.parallel { -! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @max_byref_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_30:.*]] : !fir.ref) reduction(byref @max_byref_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) { +! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref ! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 index 7237d3f903b74..7bdfa0948c747 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 @@ -35,13 +35,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@max_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@max_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_intEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -65,13 +64,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@max_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@max_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -85,13 +83,12 @@ ! CHECK: omp.yield ! CHECK: omp.terminator ! CHECK: omp.parallel { -! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@max_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_30:.*]] : !fir.ref) reduction(@max_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) { +! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_max_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref ! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90 index a4bfbaa09d2fa..88a455f4b45ac 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90 @@ -45,13 +45,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @min_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @min_byref_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_intEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -75,13 +74,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @min_byref_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(byref @min_byref_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -97,13 +95,12 @@ ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.parallel { -! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @min_byref_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_30:.*]] : !fir.ref) reduction(byref @min_byref_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) { +! 
CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref ! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 index ce9e53a17523c..6d4dcf1ab68eb 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 @@ -35,13 +35,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@min_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@min_i32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_intEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -65,13 +64,12 @@ ! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 ! 
CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@min_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_7:.*]] : !fir.ref) reduction(@min_f32 %[[VAL_4]]#0 -> %[[VAL_12:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_13:.*]]) : i32 = (%[[VAL_9]]) to (%[[VAL_10]]) inclusive step (%[[VAL_11]]) { +! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_13]] to %[[VAL_8]]#1 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_8]]#0 : !fir.ref @@ -87,13 +85,12 @@ ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.parallel { -! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32 ! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@min_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_30:.*]] : !fir.ref) reduction(@min_f32 %[[VAL_4]]#0 -> %[[VAL_35:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_36:.*]]) : i32 = (%[[VAL_32]]) to (%[[VAL_33]]) inclusive step (%[[VAL_34]]) { +! 
CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_37:.*]]:2 = hlfir.declare %[[VAL_35]] {uniq_name = "_QFreduction_min_realEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_36]] to %[[VAL_31]]#1 : !fir.ref ! CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_31]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90 index d83ebb77af3eb..db8e59cb09dfa 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90 @@ -34,13 +34,12 @@ program reduce ! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QFEr) : !fir.ref ! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEr"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: omp.parallel { -! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@min_i32 %[[VAL_3]]#0 -> %[[VAL_9:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_4:.*]] : !fir.ref) reduction(@min_i32 %[[VAL_3]]#0 -> %[[VAL_9:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_10:.*]]) : i32 = (%[[VAL_6]]) to (%[[VAL_7]]) inclusive step (%[[VAL_8]]) { +! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_9]] {uniq_name = "_QFEr"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]]#1 : !fir.ref ! 
CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_5]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90 index 18554fbb72aee..85df29e83f75d 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90 @@ -88,13 +88,12 @@ ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -125,13 +124,12 @@ subroutine simple_int_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -163,13 +161,12 @@ subroutine simple_real_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! 
CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -200,13 +197,12 @@ subroutine simple_int_reduction_switch_order ! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -246,13 +242,12 @@ subroutine simple_real_reduction_switch_order ! 
CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @multiply_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @multiply_reduction_byref_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @multiply_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @multiply_reduction_byref_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -305,13 +300,12 @@ subroutine multiple_int_reductions_same_type ! CHECK: %[[VAL_10:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref ! 
CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @multiply_reduction_byref_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @multiply_reduction_byref_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], byref @multiply_reduction_byref_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], byref @multiply_reduction_byref_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -371,13 +365,12 @@ subroutine multiple_real_reductions_same_type ! CHECK: %[[VAL_13:.*]] = arith.constant 1.000000e+00 : f64 ! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @multiply_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], byref @multiply_reduction_byref_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], byref @multiply_reduction_byref_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], byref @multiply_reduction_byref_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_14:.*]] : !fir.ref) reduction(byref @multiply_reduction_byref_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], byref @multiply_reduction_byref_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], byref @multiply_reduction_byref_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], byref @multiply_reduction_byref_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) { +! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 index f5c12ccf61f76..09c44f187f4a2 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 @@ -55,13 +55,12 @@ ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -92,13 +91,12 @@ subroutine simple_int_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reductionEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref @@ -130,13 +128,12 @@ subroutine simple_real_reduction ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! 
CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_int_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -167,13 +164,12 @@ subroutine simple_int_reduction_switch_order ! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_5:.*]] : !fir.ref) reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_10:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_11:.*]]) : i32 = (%[[VAL_7]]) to (%[[VAL_8]]) inclusive step (%[[VAL_9]]) { +! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_real_reduction_switch_orderEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[VAL_11]] to %[[VAL_6]]#1 : !fir.ref ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref @@ -213,13 +209,12 @@ subroutine simple_real_reduction_switch_order ! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref ! CHECK: omp.parallel { -! 
CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @multiply_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @multiply_reduction_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(@multiply_reduction_i32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @multiply_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @multiply_reduction_i32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_int_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_int_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_int_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -272,13 +267,12 @@ subroutine multiple_int_reductions_same_type ! CHECK: %[[VAL_10:.*]] = arith.constant 1.000000e+00 : f32 ! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! 
CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @multiply_reduction_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @multiply_reduction_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_11:.*]] : !fir.ref) reduction(@multiply_reduction_f32 %[[VAL_3]]#0 -> %[[VAL_16:.*]], @multiply_reduction_f32 %[[VAL_5]]#0 -> %[[VAL_17:.*]], @multiply_reduction_f32 %[[VAL_7]]#0 -> %[[VAL_18:.*]] : !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_19:.*]]) : i32 = (%[[VAL_13]]) to (%[[VAL_14]]) inclusive step (%[[VAL_15]]) { +! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFmultiple_real_reductions_same_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFmultiple_real_reductions_same_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_real_reductions_same_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -338,13 +332,12 @@ subroutine multiple_real_reductions_same_type ! CHECK: %[[VAL_13:.*]] = arith.constant 1.000000e+00 : f64 ! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref ! CHECK: omp.parallel { -! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(@multiply_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], @multiply_reduction_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], @multiply_reduction_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], @multiply_reduction_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_14:.*]] : !fir.ref) reduction(@multiply_reduction_i32 %[[VAL_5]]#0 -> %[[VAL_19:.*]], @multiply_reduction_i64 %[[VAL_7]]#0 -> %[[VAL_20:.*]], @multiply_reduction_f32 %[[VAL_9]]#0 -> %[[VAL_21:.*]], @multiply_reduction_f64 %[[VAL_3]]#0 -> %[[VAL_22:.*]] : !fir.ref, !fir.ref, !fir.ref, !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_23:.*]]) : i32 = (%[[VAL_16]]) to (%[[VAL_17]]) inclusive step (%[[VAL_18]]) { +! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_24:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFmultiple_reductions_different_typeEx"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFmultiple_reductions_different_typeEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
CHECK: %[[VAL_26:.*]]:2 = hlfir.declare %[[VAL_21]] {uniq_name = "_QFmultiple_reductions_different_typeEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90 index 659ba06005670..66229259adf82 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90 @@ -41,7 +41,7 @@ !CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_REF]] {uniq_name = "_QFmultiple_reductionEy"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[Z_REF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_reductionEz"} !CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[Z_REF]] {uniq_name = "_QFmultiple_reductionEz"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: omp.wsloop reduction( +!CHECK: omp.wsloop private({{.*}}) reduction( !CHECK-SAME: @[[ADD_RED_I32_NAME]] %[[X_DECL]]#0 -> %[[PRV_X:[^,]+]], !CHECK-SAME: @[[ADD_RED_F32_NAME]] %[[Y_DECL]]#0 -> %[[PRV_Y:[^,]+]], !CHECK-SAME: @[[MIN_RED_I32_NAME]] %[[Z_DECL]]#0 -> %[[PRV_Z:.+]] : diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 index 5b6ab095b45b6..75773416e4840 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 @@ -112,13 +112,12 @@ program main ! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_4]]#0(%[[VAL_3]]) : (!fir.ref>, !fir.shape<2>) -> !fir.box> ! CHECK: %[[VAL_12:.*]] = fir.alloca !fir.box> ! CHECK: fir.store %[[VAL_11]] to %[[VAL_12]] : !fir.ref>> -! CHECK: %[[VAL_13:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}} -! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_16:.*]] = arith.constant 10 : i32 ! CHECK: %[[VAL_17:.*]] = arith.constant 1 : i32 -! 
CHECK: omp.wsloop reduction(@add_reduction_f64 %[[VAL_8]]#0 -> %[[VAL_18:.*]], byref @add_reduction_byref_box_3x3xf64 %[[VAL_12]] -> %[[VAL_19:.*]] : !fir.ref, !fir.ref>>) { +! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_13:.*]] : !fir.ref) reduction(@add_reduction_f64 %[[VAL_8]]#0 -> %[[VAL_18:.*]], byref @add_reduction_byref_box_3x3xf64 %[[VAL_12]] -> %[[VAL_19:.*]] : !fir.ref, !fir.ref>>) { ! CHECK: omp.loop_nest (%[[VAL_20:.*]]) : i32 = (%[[VAL_15]]) to (%[[VAL_16]]) inclusive step (%[[VAL_17]]) { +! CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFEscalar"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_22:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFEarray"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_20]] to %[[VAL_14]]#1 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 index 2c126bb8962c2..f706e48b8fda8 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 @@ -87,13 +87,12 @@ program reduce_pointer ! CHECK: %[[VAL_17:.*]] = fir.box_addr %[[VAL_16]] : (!fir.box>) -> !fir.ptr ! CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_17]] : i32, !fir.ptr ! CHECK: omp.parallel { -! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, uniq_name = "_QFEi"} -! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_21:.*]] = arith.constant 5 : i32 ! CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 -! CHECK: omp.wsloop reduction(byref @add_reduction_byref_box_ptr_i32 %[[VAL_5]]#0 -> %[[VAL_23:.*]] : !fir.ref>>) { +! 
CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_18:.*]] : !fir.ref) reduction(byref @add_reduction_byref_box_ptr_i32 %[[VAL_5]]#0 -> %[[VAL_23:.*]] : !fir.ref>>) { ! CHECK: omp.loop_nest (%[[VAL_24:.*]]) : i32 = (%[[VAL_20]]) to (%[[VAL_21]]) inclusive step (%[[VAL_22]]) { +! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_23]] {fortran_attrs = {{.*}}, uniq_name = "_QFEv"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) ! CHECK: fir.store %[[VAL_24]] to %[[VAL_19]]#1 : !fir.ref ! CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_25]]#0 : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-schedule.f90 b/flang/test/Lower/OpenMP/wsloop-schedule.f90 index ae854a2de0c9d..0ff4ce7c3ede3 100644 --- a/flang/test/Lower/OpenMP/wsloop-schedule.f90 +++ b/flang/test/Lower/OpenMP/wsloop-schedule.f90 @@ -14,7 +14,7 @@ program wsloop_dynamic !CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 !CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 !CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 -!CHECK: omp.wsloop nowait schedule(runtime, simd) { +!CHECK: omp.wsloop nowait schedule(runtime, simd) private({{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !CHECK: fir.store %[[I]] to %[[STORE:.*]]#1 : !fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop-unstructured.f90 b/flang/test/Lower/OpenMP/wsloop-unstructured.f90 index 8c89f863ab877..6174718c08758 100644 --- a/flang/test/Lower/OpenMP/wsloop-unstructured.f90 +++ b/flang/test/Lower/OpenMP/wsloop-unstructured.f90 @@ -29,7 +29,7 @@ end subroutine sub ! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref> {fir.bindc_name = "x"}, ! CHECK-SAME: %[[VAL_3:.*]]: !fir.ref> {fir.bindc_name = "y"}) { ! [...] -! CHECK: omp.wsloop { +! CHECK: omp.wsloop private({{.*}}) { ! CHECK-NEXT: omp.loop_nest (%[[VAL_53:.*]], %[[VAL_54:.*]]) : i32 = ({{.*}}) to ({{.*}}) inclusive step ({{.*}}) { ! [...] ! 
CHECK: cf.br ^bb1 diff --git a/flang/test/Lower/OpenMP/wsloop-variable.f90 b/flang/test/Lower/OpenMP/wsloop-variable.f90 index cc77ce754d97e..50b2b3a21ff1e 100644 --- a/flang/test/Lower/OpenMP/wsloop-variable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-variable.f90 @@ -22,7 +22,7 @@ program wsloop_variable !CHECK: %[[TMP5:.*]] = fir.convert %{{.*}} : (i128) -> i64 !CHECK: %[[TMP6:.*]] = fir.convert %[[TMP1]] : (i32) -> i64 !CHECK: %[[TMP7:.*]] = fir.convert %{{.*}} : (i32) -> i64 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[ARG0:.*]], %[[ARG1:.*]]) : i64 = (%[[TMP2]], %[[TMP5]]) to (%[[TMP3]], %[[TMP6]]) inclusive step (%[[TMP4]], %[[TMP7]]) { !CHECK: %[[ARG0_I16:.*]] = fir.convert %[[ARG0]] : (i64) -> i16 !CHECK: fir.store %[[ARG0_I16]] to %[[STORE_IV0:.*]]#1 : !fir.ref @@ -48,7 +48,7 @@ program wsloop_variable !CHECK: %[[TMP12:.*]] = arith.constant 1 : i32 !CHECK: %[[TMP13:.*]] = fir.convert %{{.*}} : (i8) -> i32 !CHECK: %[[TMP14:.*]] = fir.convert %{{.*}} : (i64) -> i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[ARG0:.*]]) : i32 = (%[[TMP12]]) to (%[[TMP13]]) inclusive step (%[[TMP14]]) { !CHECK: %[[ARG0_I16:.*]] = fir.convert %[[ARG0]] : (i32) -> i16 !CHECK: fir.store %[[ARG0_I16]] to %[[STORE3:.*]]#1 : !fir.ref @@ -68,7 +68,7 @@ program wsloop_variable !CHECK: %[[TMP17:.*]] = fir.convert %{{.*}} : (i8) -> i64 !CHECK: %[[TMP18:.*]] = fir.convert %{{.*}} : (i16) -> i64 !CHECK: %[[TMP19:.*]] = fir.convert %{{.*}} : (i32) -> i64 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private({{.*}}) { !CHECK-NEXT: omp.loop_nest (%[[ARG1:.*]]) : i64 = (%[[TMP17]]) to (%[[TMP18]]) inclusive step (%[[TMP19]]) { !CHECK: %[[ARG1_I128:.*]] = fir.convert %[[ARG1]] : (i64) -> i128 !CHECK: fir.store %[[ARG1_I128]] to %[[STORE4:.*]]#1 : !fir.ref @@ -123,16 +123,14 @@ subroutine wsloop_variable_sub integer(kind=16) :: i16_lb real :: x -!CHECK: %[[VAL_2:.*]] = fir.alloca i16 {bindc_name = "i2", 
pinned, {{.*}}} -!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFwsloop_variable_subEi2"} : (!fir.ref) -> (!fir.ref, !fir.ref) - !CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32 !CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref !CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref !CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_23]] : (i8) -> i32 !CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_24]] : (i16) -> i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_2:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[VAL_27:.*]]) : i32 = (%[[VAL_22]]) to (%[[VAL_25]]) inclusive step (%[[VAL_26]]) { +!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFwsloop_variable_subEi2"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_27]] : (i32) -> i16 !CHECK: fir.store %[[VAL_28]] to %[[VAL_3]]#1 : !fir.ref !CHECK: %[[VAL_29:.*]] = fir.load %[[VAL_7]]#0 : !fir.ref @@ -172,14 +170,13 @@ subroutine wsloop_variable_sub !CHECK: %[[VAL_49:.*]] = arith.constant 5 : i8 !CHECK: hlfir.assign %[[VAL_49]] to %[[VAL_19]]#0 : i8, !fir.ref -!CHECK: %[[VAL_0:.*]] = fir.alloca i8 {bindc_name = "i1", pinned, {{.*}}} -!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFwsloop_variable_subEi1"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[VAL_50:.*]] = arith.constant 1 : i32 !CHECK: %[[VAL_51:.*]] = arith.constant 10 : i32 !CHECK: %[[VAL_52:.*]] = arith.constant 1 : i32 -!CHECK: omp.wsloop { +!CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[VAL_0:.*]] : !fir.ref) { !CHECK-NEXT: omp.loop_nest (%[[VAL_53:.*]]) : i32 = (%[[VAL_50]]) to (%[[VAL_51]]) inclusive step (%[[VAL_52]]) { +!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFwsloop_variable_subEi1"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_53]] : (i32) -> i8 !CHECK: fir.store %[[VAL_54]] to %[[VAL_1]]#1 : !fir.ref !CHECK: %[[VAL_55:.*]] = fir.load %[[VAL_1]]#0 : 
!fir.ref diff --git a/flang/test/Lower/OpenMP/wsloop.f90 b/flang/test/Lower/OpenMP/wsloop.f90 index 4378233a622ed..44b2f585b3a67 100644 --- a/flang/test/Lower/OpenMP/wsloop.f90 +++ b/flang/test/Lower/OpenMP/wsloop.f90 @@ -7,15 +7,14 @@ subroutine simple_loop integer :: i ! CHECK: omp.parallel !$OMP PARALLEL - ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP DO do i=1, 9 + ! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[IV_DECL:.*]]#1 : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[IV_DECL]]#0 : !fir.ref ! CHECK: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 @@ -32,13 +31,12 @@ subroutine simple_loop_with_step integer :: i ! CHECK: omp.parallel !$OMP PARALLEL - ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_with_stepEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 2 : i32 - ! CHECK: omp.wsloop { + ! CHECK: omp.wsloop private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { + ! 
CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_with_stepEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[IV_DECL]]#1 : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[IV_DECL]]#0 : !fir.ref !$OMP DO @@ -57,15 +55,14 @@ subroutine loop_with_schedule_nowait integer :: i ! CHECK: omp.parallel !$OMP PARALLEL - ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}} - ! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFloop_with_schedule_nowaitEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32 ! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32 ! CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32 - ! CHECK: omp.wsloop nowait schedule(runtime) { + ! CHECK: omp.wsloop nowait schedule(runtime) private(@{{.*}} %{{.*}}#0 -> %[[ALLOCA_IV:.*]] : !fir.ref) { ! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_LB]]) to (%[[WS_UB]]) inclusive step (%[[WS_STEP]]) { !$OMP DO SCHEDULE(runtime) do i=1, 9 + ! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFloop_with_schedule_nowaitEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: fir.store %[[I]] to %[[IV_DECL]]#1 : !fir.ref ! CHECK: %[[LOAD_IV:.*]] = fir.load %[[IV_DECL]]#0 : !fir.ref ! CHECK: fir.call @_FortranAioOutputInteger32({{.*}}, %[[LOAD_IV]]) {{.*}}: (!fir.ref, i32) -> i1 diff --git a/flang/test/Lower/allocatable-assignment.f90 b/flang/test/Lower/allocatable-assignment.f90 index 5c9887c507b67..7fe7aa4ebae34 100644 --- a/flang/test/Lower/allocatable-assignment.f90 +++ b/flang/test/Lower/allocatable-assignment.f90 @@ -678,7 +678,7 @@ subroutine test_scalar_rhs(x, y) ! CHECK: } else { ! CHECK: %[[error_msg_addr:.*]] = fir.address_of(@[[error_message:.*]]) : !fir.ref> ! CHECK: %[[msg_addr_cast:.*]] = fir.convert %[[error_msg_addr]] : (!fir.ref>) -> !fir.ref - ! 
CHECK: %{{.*}} = fir.call @_FortranAReportFatalUserError(%[[msg_addr_cast]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAReportFatalUserError(%[[msg_addr_cast]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref, !fir.ref, i32) -> () ! CHECK-NOT: allocmem ! CHECK: } x = y @@ -1049,7 +1049,7 @@ subroutine test_derived_with_init(x, y) ! CHECK: %[[VAL_11:.*]] = fir.allocmem !fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box>>}> {uniq_name = ".auto.alloc"} ! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]] : (!fir.heap>>}>>) -> !fir.box>>}>>> ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.box>>}>>>) -> !fir.box -! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () ! CHECK: fir.result %[[VAL_11]] : !fir.heap>>}>> ! CHECK: } else { ! CHECK: fir.result %{{.*}} : !fir.heap>>}>> diff --git a/flang/test/Lower/allocatable-polymorphic.f90 b/flang/test/Lower/allocatable-polymorphic.f90 index bbc54754ca1ab..db518c541918a 100644 --- a/flang/test/Lower/allocatable-polymorphic.f90 +++ b/flang/test/Lower/allocatable-polymorphic.f90 @@ -102,7 +102,7 @@ subroutine test_pointer() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[P_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[P_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[P_DESC_CAST:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[P_DESC_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -115,7 +115,7 @@ subroutine test_pointer() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[C1_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[C1_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C1_DESC_CAST:.*]] = fir.convert %[[C1_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[C1_DESC_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -124,7 +124,7 @@ subroutine test_pointer() ! CHECK: %[[TYPE_DESC_P2_CAST:.*]] = fir.convert %[[TYPE_DESC_P2]] : (!fir.tdesc,c:i32}>>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[C2_DESC_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[C2_DESC_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C2_DESC_CAST:.*]] = fir.convert %[[C2_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[C2_DESC_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -151,9 +151,9 @@ subroutine test_pointer() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! 
CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[C3_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[C3_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAPointerSetBounds(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -162,9 +162,9 @@ subroutine test_pointer() ! CHECK: %[[TYPE_DESC_P2_CAST:.*]] = fir.convert %[[TYPE_DESC_P2]] : (!fir.tdesc,c:i32}>>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[C4_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[C4_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAPointerSetBounds(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! 
CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -265,7 +265,7 @@ subroutine test_allocatable() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[P_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[P_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[P_CAST:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[P_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -274,7 +274,7 @@ subroutine test_allocatable() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C1_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C1_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C1_CAST:.*]] = fir.convert %[[C1_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C1_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -283,7 +283,7 @@ subroutine test_allocatable() ! 
CHECK: %[[TYPE_DESC_P2_CAST:.*]] = fir.convert %[[TYPE_DESC_P2]] : (!fir.tdesc,c:i32}>>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C2_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C2_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C2_CAST:.*]] = fir.convert %[[C2_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C2_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -292,13 +292,13 @@ subroutine test_allocatable() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C3_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C3_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[C10:.*]] = arith.constant 10 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 ! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[C1_I64:.*]] = fir.convert %c1{{.*}} : (index) -> i64 ! CHECK: %[[C10_I64:.*]] = fir.convert %[[C10]] : (i32) -> i64 -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[C3_CAST]], %[[C0]], %[[C1_I64]], %[[C10_I64]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[C3_CAST]], %[[C0]], %[[C1_I64]], %[[C10_I64]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! 
CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -307,14 +307,14 @@ subroutine test_allocatable() ! CHECK: %[[TYPE_DESC_P2_CAST:.*]] = fir.convert %[[TYPE_DESC_P2]] : (!fir.tdesc,c:i32}>>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C4_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C4_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[CST1:.*]] = arith.constant 1 : index ! CHECK: %[[C20:.*]] = arith.constant 20 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 ! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[C1_I64:.*]] = fir.convert %[[CST1]] : (index) -> i64 ! CHECK: %[[C20_I64:.*]] = fir.convert %[[C20]] : (i32) -> i64 -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[C4_CAST]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[C4_CAST]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -388,7 +388,7 @@ subroutine test_unlimited_polymorphic_with_intrinsic_type_spec() ! CHECK: %[[KIND:.*]] = arith.constant 4 : i32 ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! 
CHECK: %{{.*}} = fir.call @_FortranAAllocatableInitIntrinsicForAllocate(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i32, i32, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitIntrinsicForAllocate(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i32, i32, i32, i32) -> () ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -397,7 +397,7 @@ subroutine test_unlimited_polymorphic_with_intrinsic_type_spec() ! CHECK: %[[KIND:.*]] = arith.constant 4 : i32 ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyIntrinsic(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i32, i32, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyIntrinsic(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i32, i32, i32, i32) -> () ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[PTR_DECL]]#1 : (!fir.ref>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -435,7 +435,7 @@ subroutine test_type_with_polymorphic_pointer_component() ! CHECK: %[[TYPE_DESC_P1_CAST:.*]] = fir.convert %[[TYPE_DESC_P1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[ELEMENT_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, !fir.ref, i32, i32) -> none +! 
CHECK: fir.call @_FortranAPointerNullifyDerived(%[[ELEMENT_DESC_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, !fir.ref, i32, i32) -> () ! CHECK: %[[ELEMENT_DESC_CAST:.*]] = fir.convert %[[ELEMENT]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[ELEMENT_DESC_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -463,14 +463,14 @@ subroutine test_allocate_with_mold() ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[P_BOX_NONE:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[X_BOX_NONE:.*]] = fir.convert %[[EMBOX_X]] : (!fir.box,c:i32}>>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerApplyMold(%[[P_BOX_NONE]], %[[X_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAPointerApplyMold(%[[P_BOX_NONE]], %[[X_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[P_BOX_NONE:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[P_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 ! CHECK: %[[EMBOX_I:.*]] = fir.embox %[[I_DECL]]#1(%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[UP_BOX_NONE:.*]] = fir.convert %[[UP_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[I_BOX_NONE:.*]] = fir.convert %[[EMBOX_I]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerApplyMold(%[[UP_BOX_NONE]], %[[I_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAPointerApplyMold(%[[UP_BOX_NONE]], %[[I_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[UP_BOX_NONE:.*]] = fir.convert %[[UP_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %{{.*}} = fir.call @_FortranAPointerAllocate(%[[UP_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -497,8 +497,8 @@ subroutine test_allocate_with_source() ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[P_BOX_NONE:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[X_BOX_NONE:.*]] = fir.convert %[[EMBOX_X]] : (!fir.box,c:i32}>>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerApplyMold(%[[P_BOX_NONE]], %[[X_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none -! CHECK: %{{.*}} = fir.call @_FortranAPointerSetBounds +! CHECK: fir.call @_FortranAPointerApplyMold(%[[P_BOX_NONE]], %[[X_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () +! CHECK: fir.call @_FortranAPointerSetBounds ! CHECK: %[[BOX_NONE_P:.*]] = fir.convert %[[P_DECL]]#1 : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[BOX_NONE_X:.*]] = fir.convert %[[EMBOX_X]] : (!fir.box,c:i32}>>>) -> !fir.box ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocateSource(%[[BOX_NONE_P]], %[[BOX_NONE_X]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -506,8 +506,8 @@ subroutine test_allocate_with_source() ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[UP_BOX_NONE:.*]] = fir.convert %[[UP_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[I_BOX_NONE:.*]] = fir.convert %[[EMBOX_I]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerApplyMold(%[[UP_BOX_NONE]], %[[I_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none -! CHECK: %{{.*}} = fir.call @_FortranAPointerSetBounds +! CHECK: fir.call @_FortranAPointerApplyMold(%[[UP_BOX_NONE]], %[[I_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () +! CHECK: fir.call @_FortranAPointerSetBounds ! CHECK: %[[UP_BOX_NONE:.*]] = fir.convert %[[UP_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %[[I_BOX_NONE:.*]] = fir.convert %[[EMBOX_I]] : (!fir.box>) -> !fir.box ! CHECK: %{{.*}} = fir.call @_FortranAPointerAllocateSource(%[[UP_BOX_NONE]], %[[I_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -526,7 +526,7 @@ subroutine test_allocatable_up_from_up_mold(a, b) ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[B_BOX_NONE:.*]] = fir.convert %[[LOAD_B]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[B_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[B_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[B_BOX_NONE:.*]] = fir.convert %[[LOAD_B]] : (!fir.class>) -> !fir.box ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocateSource(%[[A_BOX_NONE]], %[[B_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -546,14 +546,14 @@ subroutine test_allocatable_up_from_mold_rank(a) ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[BOX_NONE_10:.*]] = fir.convert %[[EMBOX_10]] : (!fir.box) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[BOX_NONE_10]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[BOX_NONE_10]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[C1:.*]] = arith.constant 1 : index ! CHECK: %[[C2:.*]] = arith.constant 20 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 ! 
CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[C1_I64:.*]] = fir.convert %[[C1]] : (index) -> i64 ! CHECK: %[[C20_I64:.*]] = fir.convert %[[C20]] : (i32) -> i64 -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[A_BOX_NONE]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}} : (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[A_BOX_NONE]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}} : (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[BOX_NONE_10:.*]] = fir.convert %[[EMBOX_10]] : (!fir.box) -> !fir.box ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocateSource(%[[A_BOX_NONE]], %[[BOX_NONE_10]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -571,7 +571,7 @@ subroutine test_allocatable_up_character() ! CHECK: %[[KIND:.*]] = arith.constant 1 : i32 ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[A_NONE]], %[[LEN]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i64, i32, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[A_NONE]], %[[LEN]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref>, i64, i32, i32, i32) -> () ! CHECK: %[[A_NONE:.*]] = fir.convert %[[A_DECL]]#1 : (!fir.ref>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -591,17 +591,17 @@ program test_alloc ! LLVM-LABEL: define void @_QMpolyPtest_allocatable() -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0) +! 
LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0) +! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 0, i32 0) +! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 0, i32 0) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 1, i32 0) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 10) +! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 1, i32 0) +! LLVM: call void @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 10) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 1, i32 0) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 20) +! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 1, i32 0) +! LLVM: call void @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 20) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) ! 
LLVM-COUNT-2: call void %{{[0-9]*}}() @@ -682,6 +682,6 @@ program test_alloc ! LLVM-LABEL: define void @_QMpolyPtest_deallocate() ! LLVM: store { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QMpolyTp1, ptr null, i32 1) to i64), i32 20240719, i8 0, i8 42, i8 2, i8 1, ptr @_QMpolyEXdtXp1, [1 x i64] zeroinitializer }, ptr %[[ALLOCA1:[0-9]*]] ! LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[ALLOCA2:[0-9]+]], ptr %[[ALLOCA1]], i32 40, i1 false) -! LLVM: %{{.*}} = call {} @_FortranAAllocatableInitDerivedForAllocate(ptr %[[ALLOCA2]], ptr @_QMpolyEXdtXp1, i32 0, i32 0) +! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %[[ALLOCA2]], ptr @_QMpolyEXdtXp1, i32 0, i32 0) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %[[ALLOCA2]], i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) ! LLVM: %{{.*}} = call i32 @_FortranAAllocatableDeallocatePolymorphic(ptr %[[ALLOCA2]], ptr {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}}) diff --git a/flang/test/Lower/allocatable-runtime.f90 b/flang/test/Lower/allocatable-runtime.f90 index 3f1f8a86b7d07..9670a1e0e716e 100644 --- a/flang/test/Lower/allocatable-runtime.f90 +++ b/flang/test/Lower/allocatable-runtime.f90 @@ -28,7 +28,7 @@ subroutine foo() ! CHECK-DAG: %[[xBoxCast2:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref>>>) -> !fir.ref> ! CHECK-DAG: %[[xlbCast:.*]] = fir.convert %[[xlb]] : (i32) -> i64 ! CHECK-DAG: %[[xubCast:.*]] = fir.convert %[[xub]] : (i32) -> i64 - ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none + ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK-DAG: %[[xBoxCast3:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref>>>) -> !fir.ref> ! CHECK-DAG: %[[sourceFile:.*]] = fir.convert %{{.*}} -> !fir.ref ! 
CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) {{.*}}: (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -178,6 +178,6 @@ subroutine mold_allocation() ! CHECK: %[[RANK:.*]] = arith.constant 1 : i32 ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[M_BOX_NONE:.*]] = fir.convert %[[EMBOX_M]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[M_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[M_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 diff --git a/flang/test/Lower/allocate-mold.f90 b/flang/test/Lower/allocate-mold.f90 index 0cc10fc9016de..e50861a4ce76b 100644 --- a/flang/test/Lower/allocate-mold.f90 +++ b/flang/test/Lower/allocate-mold.f90 @@ -14,7 +14,7 @@ subroutine scalar_mold_allocation() ! CHECK: %[[BOX_ADDR_A:.*]] = fir.embox %[[ADDR_A]] : (!fir.heap) -> !fir.box> ! CHECK: fir.store %[[BOX_ADDR_A]] to %[[A]] : !fir.ref>> ! CHECK: %[[A_REF_BOX_NONE1:.*]] = fir.convert %[[A]] : (!fir.ref>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_REF_BOX_NONE1]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_REF_BOX_NONE1]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[A_REF_BOX_NONE2:.*]] = fir.convert %[[A]] : (!fir.ref>>) -> !fir.ref> ! 
CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_REF_BOX_NONE2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 @@ -35,9 +35,9 @@ end subroutine array_scalar_mold_allocation ! CHECK: %[[BOX_SHAPESHIFT:.*]] = fir.embox %[[LOADED_A]](%[[SHAPESHIFT]]) : (!fir.heap>, !fir.shapeshift<1>) -> !fir.box>> ! CHECK: fir.store %[[BOX_SHAPESHIFT]] to %[[A]] : !fir.ref>>> ! CHECK: %[[REF_BOX_A0:.*]] = fir.convert %1 : (!fir.ref>>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[REF_BOX_A0]], {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[REF_BOX_A0]], {{.*}}, {{.*}}) fastmath : (!fir.ref>, !fir.box, i32) -> () ! CHECK: %[[C10:.*]] = arith.constant 10 : i32 ! CHECK: %[[REF_BOX_A1:.*]] = fir.convert %1 : (!fir.ref>>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableSetBounds(%[[REF_BOX_A1]], {{.*}},{{.*}}, {{.*}}) fastmath : (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[REF_BOX_A1]], {{.*}},{{.*}}, {{.*}}) fastmath : (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[REF_BOX_A2:.*]] = fir.convert %[[A]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[REF_BOX_A2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, i1, !fir.box, !fir.ref, i32) -> i32 diff --git a/flang/test/Lower/allocate-source-allocatables-2.f90 b/flang/test/Lower/allocate-source-allocatables-2.f90 index 39b9f04a5f67a..e84b367f29bc0 100644 --- a/flang/test/Lower/allocate-source-allocatables-2.f90 +++ b/flang/test/Lower/allocate-source-allocatables-2.f90 @@ -25,7 +25,7 @@ subroutine test() ! CHECK: %[[VAL_29:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_30:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_31:.*]] = arith.constant 0 : i32 -! 
CHECK: %[[VAL_32:.*]] = fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[VAL_27]], %[[VAL_28]], %[[VAL_29]], %[[VAL_30]], %[[VAL_31]] +! CHECK: fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[VAL_27]], %[[VAL_28]], %[[VAL_29]], %[[VAL_30]], %[[VAL_31]] ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_4]]#1 : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_22]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_36:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_33]], %[[VAL_34]], %[[VAL_18]], diff --git a/flang/test/Lower/allocate-source-allocatables.f90 b/flang/test/Lower/allocate-source-allocatables.f90 index f09612c3197da..29b00b79a69d4 100644 --- a/flang/test/Lower/allocate-source-allocatables.f90 +++ b/flang/test/Lower/allocate-source-allocatables.f90 @@ -72,7 +72,7 @@ subroutine test_allocatable_scalar(a) ! CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_49]] : (index) -> i64 ! CHECK: %[[VAL_57:.*]] = fir.convert %[[VAL_53]] : (index) -> i64 -! CHECK: %[[VAL_58:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_55]], %[[VAL_54]], %[[VAL_56]], %[[VAL_57]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_55]], %[[VAL_54]], %[[VAL_56]], %[[VAL_57]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_59:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_60:.*]]:3 = fir.box_dims %[[VAL_41]], %[[VAL_59]] : (!fir.box>, index) -> (index, index, index) ! CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_60]]#1, %[[VAL_49]] : index @@ -81,16 +81,16 @@ subroutine test_allocatable_scalar(a) ! CHECK: %[[VAL_64:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_65:.*]] = fir.convert %[[VAL_49]] : (index) -> i64 ! CHECK: %[[VAL_66:.*]] = fir.convert %[[VAL_62]] : (index) -> i64 -! 
CHECK: %[[VAL_67:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_64]], %[[VAL_63]], %[[VAL_65]], %[[VAL_66]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_64]], %[[VAL_63]], %[[VAL_65]], %[[VAL_66]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_68:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_69:.*]] = fir.convert %[[VAL_41]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_71:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_68]], %[[VAL_69]], %[[VAL_36]], %[[VAL_37]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 -! CHECK: %[[VAL_94:.*]] = fir.call @_FortranAAllocatableSetBounds( -! CHECK: %[[VAL_103:.*]] = fir.call @_FortranAAllocatableSetBounds( +! CHECK: fir.call @_FortranAAllocatableSetBounds( +! CHECK: fir.call @_FortranAAllocatableSetBounds( ! CHECK: %[[VAL_107:.*]] = fir.call @_FortranAAllocatableAllocateSource( ! CHECK: %[[VAL_114:.*]] = arith.constant true -! CHECK: %[[VAL_149:.*]] = fir.call @_FortranAAllocatableSetBounds( -! CHECK: %[[VAL_158:.*]] = fir.call @_FortranAAllocatableSetBounds( +! CHECK: fir.call @_FortranAAllocatableSetBounds( +! CHECK: fir.call @_FortranAAllocatableSetBounds( ! CHECK: %[[VAL_162:.*]] = fir.call @_FortranAAllocatableAllocateSource(%{{.*}}, %{{.*}}, %[[VAL_114]] subroutine test_allocatable_2d_array(n, a) @@ -139,7 +139,7 @@ subroutine test_allocatable_2d_array(n, a) ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_30]] : (i32) -> i64 ! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_31]] : (i32) -> i64 -! CHECK: %[[VAL_36:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_33]], %[[VAL_32]], %[[VAL_34]], %[[VAL_35]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_33]], %[[VAL_32]], %[[VAL_34]], %[[VAL_35]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! 
CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_24]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_40:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_37]], %[[VAL_38]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -162,7 +162,7 @@ subroutine test_allocatable_2d_array(n, a) ! CHECK: %[[VAL_53:.*]] = fir.convert %[[VAL_8]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_50]] : (index) -> i64 ! CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_51]] : (i32) -> i64 -! CHECK: %[[VAL_56:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_53]], %[[VAL_52]], %[[VAL_54]], %[[VAL_55]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_53]], %[[VAL_52]], %[[VAL_54]], %[[VAL_55]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_57:.*]] = fir.convert %[[VAL_8]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_58:.*]] = fir.convert %[[VAL_24]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_60:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_57]], %[[VAL_58]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -218,7 +218,7 @@ subroutine test_allocatable_with_shapespec(n, a, m) ! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_42:.*]] = fir.convert %[[VAL_35]] : (index) -> i64 ! CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_39]] : (index) -> i64 -! CHECK: %[[VAL_44:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_41]], %[[VAL_40]], %[[VAL_42]], %[[VAL_43]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_41]], %[[VAL_40]], %[[VAL_42]], %[[VAL_43]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %[[VAL_46:.*]] = fir.convert %[[VAL_29]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_48:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_45]], %[[VAL_46]], %[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -276,7 +276,7 @@ subroutine test_allocatable_from_const(n, a) ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_26]] : (index) -> i64 ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_30]] : (index) -> i64 -! CHECK: %[[VAL_35:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_32]], %[[VAL_31]], %[[VAL_33]], %[[VAL_34]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_32]], %[[VAL_31]], %[[VAL_33]], %[[VAL_34]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_20]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_39:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_36]], %[[VAL_37]], %[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -311,7 +311,7 @@ subroutine test_allocatable_chararray(n, a) ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_19:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_20:.*]] = arith.constant 0 : i32 -! CHECK: %[[VAL_21:.*]] = fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]]) {{.*}}: (!fir.ref>, i64, i32, i32, i32) -> none +! CHECK: fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]]) {{.*}}: (!fir.ref>, i64, i32, i32, i32) -> () ! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_11]] : (!fir.box>) -> !fir.box ! 
CHECK: %[[VAL_25:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_22]], %[[VAL_23]], %[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -354,7 +354,7 @@ subroutine test_allocatable_char(n, a) ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>}>>>>>) -> !fir.ref> ! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_12]]#0 : (index) -> i64 ! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_24]] : (index) -> i64 -! CHECK: %[[VAL_29:.*]] = fir.call @_FortranAAllocatableSetBounds(%[[VAL_26]], %[[VAL_25]], %[[VAL_27]], %[[VAL_28]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[VAL_26]], %[[VAL_25]], %[[VAL_27]], %[[VAL_28]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>}>>>>>) -> !fir.ref> ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_14]] : (!fir.box>>}>>>) -> !fir.box ! CHECK: %[[VAL_33:.*]] = fir.call @_FortranAAllocatableAllocateSource(%[[VAL_30]], %[[VAL_31]], %[[VAL_6]], %[[VAL_7]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 diff --git a/flang/test/Lower/allocate-source-pointers.f90 b/flang/test/Lower/allocate-source-pointers.f90 index 1beb420c53191..e6359dba81eb4 100644 --- a/flang/test/Lower/allocate-source-pointers.f90 +++ b/flang/test/Lower/allocate-source-pointers.f90 @@ -65,7 +65,7 @@ subroutine test_pointer_scalar(a) ! CHECK: %[[VAL_46:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_40]] : (index) -> i64 ! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_44]] : (index) -> i64 -! CHECK: %[[VAL_49:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_46]], %[[VAL_45]], %[[VAL_47]], %[[VAL_48]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_46]], %[[VAL_45]], %[[VAL_47]], %[[VAL_48]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! 
CHECK: %[[VAL_50:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_51:.*]]:3 = fir.box_dims %[[VAL_35]], %[[VAL_50]] : (!fir.box>, index) -> (index, index, index) ! CHECK: %[[VAL_52:.*]] = arith.addi %[[VAL_51]]#1, %[[VAL_40]] : index @@ -74,16 +74,16 @@ subroutine test_pointer_scalar(a) ! CHECK: %[[VAL_55:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_56:.*]] = fir.convert %[[VAL_40]] : (index) -> i64 ! CHECK: %[[VAL_57:.*]] = fir.convert %[[VAL_53]] : (index) -> i64 -! CHECK: %[[VAL_58:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_55]], %[[VAL_54]], %[[VAL_56]], %[[VAL_57]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_55]], %[[VAL_54]], %[[VAL_56]], %[[VAL_57]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_59:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_60:.*]] = fir.convert %[[VAL_35]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_62:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_59]], %[[VAL_60]], %[[VAL_30]], %[[VAL_31]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 -! CHECK: %[[VAL_76:.*]] = fir.call @_FortranAPointerSetBounds( -! CHECK: %[[VAL_85:.*]] = fir.call @_FortranAPointerSetBounds( +! CHECK: fir.call @_FortranAPointerSetBounds( +! CHECK: fir.call @_FortranAPointerSetBounds( ! CHECK: %[[VAL_89:.*]] = fir.call @_FortranAPointerAllocateSource( ! CHECK: %[[VAL_90:.*]] = arith.constant true -! CHECK: %[[VAL_122:.*]] = fir.call @_FortranAPointerSetBounds( -! CHECK: %[[VAL_131:.*]] = fir.call @_FortranAPointerSetBounds( +! CHECK: fir.call @_FortranAPointerSetBounds( +! CHECK: fir.call @_FortranAPointerSetBounds( ! CHECK: %[[VAL_135:.*]] = fir.call @_FortranAPointerAllocateSource(%{{.*}}, %{{.*}}, %[[VAL_90]] subroutine test_pointer_2d_array(n, a) @@ -131,7 +131,7 @@ subroutine test_pointer_2d_array(n, a) ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_29]] : (i32) -> i64 ! CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_30]] : (i32) -> i64 -! CHECK: %[[VAL_35:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_32]], %[[VAL_31]], %[[VAL_33]], %[[VAL_34]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_32]], %[[VAL_31]], %[[VAL_33]], %[[VAL_34]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_24]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_39:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_36]], %[[VAL_37]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -146,7 +146,7 @@ subroutine test_pointer_2d_array(n, a) ! CHECK: %[[VAL_47:.*]] = fir.convert %[[VAL_8]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_44]] : (index) -> i64 ! CHECK: %[[VAL_49:.*]] = fir.convert %[[VAL_45]] : (i32) -> i64 -! CHECK: %[[VAL_50:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_47]], %[[VAL_46]], %[[VAL_48]], %[[VAL_49]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_47]], %[[VAL_46]], %[[VAL_48]], %[[VAL_49]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_8]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_52:.*]] = fir.convert %[[VAL_24]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_54:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_51]], %[[VAL_52]], %[[VAL_19]], %[[VAL_20]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -203,7 +203,7 @@ subroutine test_pointer_with_shapespec(n, a, m) ! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_34]] : (index) -> i64 ! CHECK: %[[VAL_42:.*]] = fir.convert %[[VAL_38]] : (index) -> i64 -! 
CHECK: %[[VAL_43:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_40]], %[[VAL_39]], %[[VAL_41]], %[[VAL_42]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_40]], %[[VAL_39]], %[[VAL_41]], %[[VAL_42]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_44:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_29]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_47:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_44]], %[[VAL_45]], %[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -253,7 +253,7 @@ subroutine test_pointer_from_const(n, a) ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_25]] : (index) -> i64 ! CHECK: %[[VAL_33:.*]] = fir.convert %[[VAL_29]] : (index) -> i64 -! CHECK: %[[VAL_34:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_31]], %[[VAL_30]], %[[VAL_32]], %[[VAL_33]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_31]], %[[VAL_30]], %[[VAL_32]], %[[VAL_33]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_20]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_38:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_35]], %[[VAL_36]], %[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -290,7 +290,7 @@ subroutine test_pointer_chararray(n, a) ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_19:.*]] = arith.constant 0 : i32 ! CHECK: %[[VAL_20:.*]] = arith.constant 0 : i32 -! CHECK: %[[VAL_21:.*]] = fir.call @_FortranAPointerNullifyCharacter(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]]) {{.*}}: (!fir.ref>, i64, i32, i32, i32) -> none +! 
CHECK: fir.call @_FortranAPointerNullifyCharacter(%[[VAL_16]], %[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]]) {{.*}}: (!fir.ref>, i64, i32, i32, i32) -> () ! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_11]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_25:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_22]], %[[VAL_23]], %[[VAL_7]], %[[VAL_8]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 @@ -339,7 +339,7 @@ subroutine test_pointer_char(n, a) ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>}>>>>>) -> !fir.ref> ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_12]]#0 : (index) -> i64 ! CHECK: %[[VAL_27:.*]] = fir.convert %[[VAL_23]] : (index) -> i64 -! CHECK: %[[VAL_28:.*]] = fir.call @_FortranAPointerSetBounds(%[[VAL_25]], %[[VAL_24]], %[[VAL_26]], %[[VAL_27]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> none +! CHECK: fir.call @_FortranAPointerSetBounds(%[[VAL_25]], %[[VAL_24]], %[[VAL_26]], %[[VAL_27]]) {{.*}}: (!fir.ref>, i32, i64, i64) -> () ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>}>>>>>) -> !fir.ref> ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_14]] : (!fir.box>>}>>>) -> !fir.box ! CHECK: %[[VAL_32:.*]] = fir.call @_FortranAPointerAllocateSource(%[[VAL_29]], %[[VAL_30]], %[[VAL_6]], %[[VAL_7]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, i1, !fir.box, !fir.ref, i32) -> i32 diff --git a/flang/test/Lower/array-derived-assignments.f90 b/flang/test/Lower/array-derived-assignments.f90 index f4e51271d5936..3a66a0824666b 100644 --- a/flang/test/Lower/array-derived-assignments.f90 +++ b/flang/test/Lower/array-derived-assignments.f90 @@ -92,7 +92,7 @@ subroutine test_deep_copy(t1, t2) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_6]] : (!fir.ref>>}>>>) -> !fir.ref> ! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_15]] : (!fir.box>>}>>) -> !fir.box ! 
CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_16]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[VAL_20:.*]] = fir.call @_FortranAAssign(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAssign(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_9]], %[[VAL_5]] : index ! CHECK: br ^bb1(%[[VAL_11]], %[[VAL_21]] : index, index) type(deep_copy) :: t1(10), t2(10) diff --git a/flang/test/Lower/basic-function.f90 b/flang/test/Lower/basic-function.f90 index f5f81545c899d..5f2fabe1b325d 100644 --- a/flang/test/Lower/basic-function.f90 +++ b/flang/test/Lower/basic-function.f90 @@ -45,7 +45,7 @@ integer function fct_body() ! CHECK-LABEL: func @_QPfct_body() -> i32 ! CHECK: cf.br ^bb1 ! CHECK: ^bb1 -! CHECK: %{{.*}} = fir.call @_FortranAStopStatement +! CHECK: fir.call @_FortranAStopStatement ! CHECK: fir.unreachable function fct_iarr1() diff --git a/flang/test/Lower/call-by-value-attr.f90 b/flang/test/Lower/call-by-value-attr.f90 index 09fc32fbf71ae..97028edfb8d77 100644 --- a/flang/test/Lower/call-by-value-attr.f90 +++ b/flang/test/Lower/call-by-value-attr.f90 @@ -78,7 +78,7 @@ end subroutine subra !CHECK: fir.store %[[TEMP_BOX]] to %[[TEMP_BOX_LOC:.*]] : !fir.ref>> !CHECK: %[[TEMP_BOX_ADDR:.*]] = fir.convert %[[TEMP_BOX_LOC]] : (!fir.ref>>) -> !fir.ref> !CHECK: %[[BOX_ADDR:.*]] = fir.convert %[[BOX]] : (!fir.box>) -> !fir.box - !CHECK: fir.call @_FortranAAssignTemporary(%[[TEMP_BOX_ADDR]], %[[BOX_ADDR]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + !CHECK: fir.call @_FortranAAssignTemporary(%[[TEMP_BOX_ADDR]], %[[BOX_ADDR]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () !CHECK: fir.result %[[ARRAY_COPY_2]] : !fir.heap> !CHECK: %[[CONVERT_B:.*]] = fir.convert %[[ADDR]] : (!fir.heap>) -> !fir.ref> !CHECK: fir.call @_QPsubra(%[[CONVERT_B]]) diff --git 
a/flang/test/Lower/call-copy-in-out.f90 b/flang/test/Lower/call-copy-in-out.f90 index 253db7f05a6b6..fd3b5c342a48f 100644 --- a/flang/test/Lower/call-copy-in-out.f90 +++ b/flang/test/Lower/call-copy-in-out.f90 @@ -23,7 +23,7 @@ subroutine test_assumed_shape_to_array(x) ! CHECK-DAG: fir.store %[[temp_box]] to %[[temp_box_loc:.*]] : !fir.ref>> ! CHECK-DAG: %[[temp_box_addr:.*]] = fir.convert %[[temp_box_loc]] : (!fir.ref>>) -> !fir.ref> ! CHECK-DAG: %[[arg_box:.*]] = fir.convert %[[x]] : (!fir.box>) -> !fir.box -! CHECK-DAG: fir.call @_FortranAAssignTemporary(%[[temp_box_addr]], %[[arg_box]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK-DAG: fir.call @_FortranAAssignTemporary(%[[temp_box_addr]], %[[arg_box]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.result %[[temp]] : !fir.heap> ! CHECK: %[[dim:.*]]:3 = fir.box_dims %[[x]], %c0{{.*}} : (!fir.box>, index) -> (index, index, index) @@ -38,7 +38,7 @@ subroutine test_assumed_shape_to_array(x) ! CHECK-DAG: fir.store %[[x]] to %[[arg_box_loc:.*]] : !fir.ref>> ! CHECK-DAG: %[[arg_box_addr:.*]] = fir.convert %[[arg_box_loc]] : (!fir.ref>>) -> !fir.ref> ! CHECK-DAG: %[[temp_box_cast:.*]] = fir.convert %[[temp_box_ref]] : (!fir.ref>>>) -> !fir.ref> -! CHECK-DAG: fir.call @_FortranACopyOutAssign(%[[arg_box_addr]], %[[temp_box_cast]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.ref>, !fir.ref, i32) -> none +! CHECK-DAG: fir.call @_FortranACopyOutAssign(%[[arg_box_addr]], %[[temp_box_cast]], %{{.*}}, %{{.*}}){{.*}}: (!fir.ref>, !fir.ref>, !fir.ref, i32) -> () call bar(x) end subroutine @@ -204,7 +204,7 @@ subroutine test_char(x) ! CHECK: fir.store %[[VAL_12]] to %[[VAL_2]] : !fir.ref>>> ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_2]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_0]] : (!fir.box>>) -> !fir.box -! 
CHECK: %[[VAL_18:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_15]], %[[VAL_16]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.result %[[VAL_10]] : !fir.heap>> ! CHECK: } ! CHECK: %[[VAL_19:.*]] = arith.constant 0 : index @@ -222,7 +222,7 @@ subroutine test_char(x) ! CHECK: fir.store %[[VAL_0]] to %[[VAL_1]] : !fir.ref>>> ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_32:.*]] = fir.convert %[[TMP_BOX_REF]] : (!fir.ref>>>>) -> !fir.ref> -! CHECK: %[[VAL_34:.*]] = fir.call @_FortranACopyOutAssign(%[[VAL_31]], %[[VAL_32]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.ref>, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranACopyOutAssign(%[[VAL_31]], %[[VAL_32]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.ref>, !fir.ref, i32) -> () ! CHECK: } character(10) :: x(:) diff --git a/flang/test/Lower/default-initialization.f90 b/flang/test/Lower/default-initialization.f90 index 7a6133452b3a2..a3a36d5a1c3de 100644 --- a/flang/test/Lower/default-initialization.f90 +++ b/flang/test/Lower/default-initialization.f90 @@ -24,7 +24,7 @@ subroutine local ! CHECK: %[[x:.*]] = fir.alloca !fir.type<_QMtest_dinitTt{i:i32}> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () type(t) :: x print *, x%i end subroutine @@ -36,7 +36,7 @@ subroutine local_array() ! CHECK: %[[xshape:.*]] = fir.shape %c4{{.*}} : (index) -> !fir.shape<1> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]](%[[xshape]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> ! 
CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () type(t) :: x(4) print *, x(2)%i end subroutine @@ -48,7 +48,7 @@ subroutine local_alloc_comp ! CHECK: %[[x:.*]] = fir.alloca !fir.type<_QMtest_dinitTt_alloc_comp{i:!fir.box>>}> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ref>>}>>) -> !fir.box>>}>> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () type(t_alloc_comp) :: x end subroutine @@ -58,7 +58,7 @@ function result() ! CHECK: %[[x:.*]] = fir.alloca !fir.type<_QMtest_dinitTt{i:i32}> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () type(t) :: result end function @@ -68,7 +68,7 @@ function result() subroutine intent_out(x) ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () type(t), intent(out) :: x end subroutine @@ -81,7 +81,7 @@ subroutine intent_out_optional(x) ! CHECK: fir.if %[[isPresent]] { ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! 
CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () ! CHECK: } type(t), intent(out), optional :: x end subroutine @@ -96,7 +96,7 @@ subroutine local_eq() ! CHECK: %[[x:.*]] = fir.convert %[[xcoor]] : (!fir.ref) -> !fir.ptr> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ptr>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () equivalence (x, zi) print *, i end subroutine @@ -114,14 +114,14 @@ subroutine local_eq2() ! CHECK: %[[x:.*]] = fir.convert %[[xcoor]] : (!fir.ref) -> !fir.ptr> ! CHECK: %[[xbox:.*]] = fir.embox %[[x]] : (!fir.ptr>) -> !fir.box> ! CHECK: %[[xboxNone:.*]] = fir.convert %[[xbox]] - ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAInitialize(%[[xboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[ycoor:.*]] = fir.coordinate_of %[[equiv]], %c0{{.*}} : (!fir.ref>, index) -> !fir.ref ! CHECK: %[[y:.*]] = fir.convert %[[ycoor]] : (!fir.ref) -> !fir.ptr> ! CHECK: %[[ybox:.*]] = fir.embox %[[y]] : (!fir.ptr>) -> !fir.box> ! CHECK: %[[yboxNone:.*]] = fir.convert %[[ybox]] - ! CHECK: fir.call @_FortranAInitialize(%[[yboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none + ! 
CHECK: fir.call @_FortranAInitialize(%[[yboxNone]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () equivalence (x, y) print *, y%i end subroutine diff --git a/flang/test/Lower/derived-assignments.f90 b/flang/test/Lower/derived-assignments.f90 index 4465a7eecc886..1048e6199451a 100644 --- a/flang/test/Lower/derived-assignments.f90 +++ b/flang/test/Lower/derived-assignments.f90 @@ -170,7 +170,7 @@ subroutine test_box_assign(t1, t2) ! CHECK: fir.store %[[t1Load]] to %[[tmpBox]] : !fir.ref>>> ! CHECK: %[[lhs:.*]] = fir.convert %[[tmpBox]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[rhs:.*]] = fir.convert %[[t2Load]] : (!fir.box>>) -> !fir.box - ! CHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () t1 = t2 end subroutine @@ -190,7 +190,7 @@ subroutine test_alloc_comp(t1, t2) ! CHECK: fir.store %[[t1Box]] to %[[tmpBox]] : !fir.ref>> ! CHECK: %[[lhs:.*]] = fir.convert %[[tmpBox]] : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[rhs:.*]] = fir.convert %[[t2Box]] : (!fir.box>) -> !fir.box - ! CHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () t1 = t2 end subroutine @@ -230,7 +230,7 @@ subroutine test_alloc_comp(t1, t2) ! ! cHECK: fir.store %[[t1Box]] to %[[tmpBox]] : !fir.ref>> ! ! cHECK: %[[lhs:.*]] = fir.convert %[[tmpBox]] : (!fir.ref>>) -> !fir.ref> ! ! cHECK: %[[rhs:.*]] = fir.convert %[[t2Box]] : (!fir.box>) -> !fir.box -! ! cHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! ! cHECK: fir.call @_FortranAAssign(%[[lhs]], %[[rhs]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! 
t1 = t2 ! end subroutine !end module diff --git a/flang/test/Lower/derived-type-finalization.f90 b/flang/test/Lower/derived-type-finalization.f90 index e7ade0d8145bb..b38fcd8ba5766 100644 --- a/flang/test/Lower/derived-type-finalization.f90 +++ b/flang/test/Lower/derived-type-finalization.f90 @@ -60,7 +60,7 @@ subroutine test_lhs_allocatable() ! CHECK: %[[EMBOX:.*]] = fir.embox %[[LHS]] : (!fir.ref>) -> !fir.box> ! CHECK: fir.store %[[EMBOX]] to %[[BOXREF]] : !fir.ref>> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[BOXREF]] : (!fir.ref>>) -> !fir.ref> -! CHECK: %{{.*}} = fir.call @_FortranAAssign(%[[BOX_NONE]], {{.*}} +! CHECK: fir.call @_FortranAAssign(%[[BOX_NONE]], {{.*}} ! CHECK-LABEL: func.func @_QMderived_type_finalizationPtest_lhs_allocatable() { ! CHECK: %[[LHS:.*]] = fir.alloca !fir.box>> {bindc_name = "lhs", uniq_name = "_QMderived_type_finalizationFtest_lhs_allocatableElhs"} @@ -72,7 +72,7 @@ subroutine test_lhs_allocatable() ! CHECK: %[[IS_NULL:.*]] = arith.cmpi ne, %[[ADDR_I64]], %[[C0]] : i64 ! CHECK: fir.if %[[IS_NULL]] { ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[LHS]] : (!fir.ref>>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: } ! 7.5.6.3 point 2. Finalization on explicit deallocation. @@ -111,7 +111,7 @@ subroutine test_end_finalization() ! CHECK: %[[LOCAL_T:.*]] = fir.alloca !fir.type<_QMderived_type_finalizationTt1{a:i32}> {bindc_name = "t", uniq_name = "_QMderived_type_finalizationFtest_end_finalizationEt"} ! CHECK: %[[EMBOX:.*]] = fir.embox %[[LOCAL_T]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[EMBOX]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: return ! test with multiple return. 
@@ -139,7 +139,7 @@ subroutine test_end_finalization2(a) ! CHECK: ^bb3: ! CHECK: %[[EMBOX:.*]] = fir.embox %[[T]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[EMBOX]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: return ! CHECK: } @@ -159,7 +159,7 @@ subroutine test_fct_ref() ! CHECK: fir.save_result %[[CALL_RES]] to %[[RESULT]] : !fir.type<_QMderived_type_finalizationTt1{a:i32}>, !fir.ref> ! CHECK: %[[EMBOX:.*]] = fir.embox %[[RESULT]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[EMBOX]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: return subroutine test_finalize_intent_out(t) @@ -170,7 +170,7 @@ subroutine test_finalize_intent_out(t) ! CHECK-SAME: %[[T:.*]]: !fir.ref> {fir.bindc_name = "t"}) { ! CHECK: %[[EMBOX:.*]] = fir.embox %[[T]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[EMBOX]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}}: (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}}: (!fir.box) -> () ! CHECK: return function get_t1(i) @@ -189,7 +189,7 @@ subroutine test_nonpointer_function() ! CHECK: %[[RES:.*]] = fir.call @_QMderived_type_finalizationPget_t1(%{{.*}}) {{.*}} : (!fir.ref) -> !fir.box>> ! CHECK: fir.save_result %[[RES]] to %[[TMP]] : !fir.box>>, !fir.ref>>> ! CHECK: %{{.*}} = fir.call @_FortranAioOutputDerivedType -! CHECK-NOT: %{{.*}} = fir.call @_FortranADestroy +! CHECK-NOT: fir.call @_FortranADestroy ! CHECK: %{{.*}} = fir.call @_FortranAioEndIoStatement ! CHECK: return @@ -201,9 +201,9 @@ subroutine test_avoid_double_finalization(a) ! 
CHECK-LABEL: func.func @_QMderived_type_finalizationPtest_avoid_double_finalization( ! CHECK: fir.call @_FortranAInitialize( -! CHECK-NOT: %{{.*}} = fir.call @_FortranADestroy -! CHECK: %{{.*}} = fir.call @_FortranAAssign( -! CHECK: %{{.*}} = fir.call @_FortranADestroy( +! CHECK-NOT: fir.call @_FortranADestroy +! CHECK: fir.call @_FortranAAssign( +! CHECK: fir.call @_FortranADestroy( function no_func_ret_finalize() result(ty) type(t1) :: ty @@ -211,7 +211,7 @@ function no_func_ret_finalize() result(ty) end function ! CHECK-LABEL: func.func @_QMderived_type_finalizationPno_func_ret_finalize() -> !fir.type<_QMderived_type_finalizationTt1{a:i32}> { -! CHECK: %{{.*}} = fir.call @_FortranAAssign +! CHECK: fir.call @_FortranAAssign ! CHECK-NOT: fir.call @_FortranADestroy ! CHECK: return %{{.*}} : !fir.type<_QMderived_type_finalizationTt1{a:i32}> @@ -232,7 +232,7 @@ subroutine test_avoid_double_free() ! CHECK: fir.call @_FortranAAllocatableAllocateSource( ! CHECK-NOT: fir.freemem %{{.*}} : !fir.heap>> ! CHECK: %[[RES_CONV:.*]] = fir.convert %[[RES]] : (!fir.ref>>>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[RES_CONV]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[RES_CONV]]) {{.*}} : (!fir.box) -> () subroutine t4_final(this) type(t4) :: this @@ -243,7 +243,7 @@ subroutine local_t4() end subroutine ! CHECK-LABEL: func.func @_QMderived_type_finalizationPlocal_t4() -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%2) fastmath : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%2) fastmath : (!fir.box) -> () end module diff --git a/flang/test/Lower/derived-type-temp.f90 b/flang/test/Lower/derived-type-temp.f90 index 18bcacf10753c..4f1d4f2d51cf5 100644 --- a/flang/test/Lower/derived-type-temp.f90 +++ b/flang/test/Lower/derived-type-temp.f90 @@ -16,4 +16,4 @@ program derived_temp_init ! CHECK: %[[temp:.*]] = fir.alloca !fir.type<_QFTt1{i:!fir.box>}> {bindc_name = "x", uniq_name = "_QFEx"} ! 
CHECK: %[[box:.*]] = fir.embox %[[temp]] : (!fir.ref>}>>) -> !fir.box>}>> ! CHECK: %[[box_none:.*]] = fir.convert %[[box]] : (!fir.box>}>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAInitialize(%[[box_none]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[box_none]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.box, !fir.ref, i32) -> () diff --git a/flang/test/Lower/do_concurrent_local_default_init.f90 b/flang/test/Lower/do_concurrent_local_default_init.f90 index 1766e0a104ff6..7652e4fcd0402 100644 --- a/flang/test/Lower/do_concurrent_local_default_init.f90 +++ b/flang/test/Lower/do_concurrent_local_default_init.f90 @@ -47,6 +47,6 @@ subroutine test_default_init() ! CHECK: %[[VAL_26:.*]] = fir.alloca !fir.type<_QFtest_default_initTt{i:i32}> {bindc_name = "a", pinned, uniq_name = "_QFtest_default_initEa"} ! CHECK: %[[VAL_27:.*]] = fir.embox %[[VAL_26]] : (!fir.ref>) -> !fir.box> ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_27]] : (!fir.box>) -> !fir.box -! CHECK: %[[VAL_32:.*]] = fir.call @_FortranAInitialize(%[[VAL_30]], {{.*}} +! CHECK: fir.call @_FortranAInitialize(%[[VAL_30]], {{.*}} ! CHECK: %[[VAL_33:.*]]:2 = hlfir.declare %[[VAL_26]] {uniq_name = "_QFtest_default_initEa"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) ! CHECK: } diff --git a/flang/test/Lower/fail_image.f90 b/flang/test/Lower/fail_image.f90 index a4708cbbf47cc..08be8e19402d2 100644 --- a/flang/test/Lower/fail_image.f90 +++ b/flang/test/Lower/fail_image.f90 @@ -7,7 +7,7 @@ subroutine fail_image_test(fail) ! CHECK: cond_br {{.*}}, ^[[BB1:.*]], ^[[BB2:.*]] ! CHECK: ^[[BB1]]: if (fail) then -! CHECK: {{.*}} = fir.call @_FortranAFailImageStatement() {{.*}}: () -> none +! CHECK: fir.call @_FortranAFailImageStatement() {{.*}}: ! CHECK-NEXT: fir.unreachable FAIL IMAGE end if @@ -17,4 +17,4 @@ subroutine fail_image_test(fail) ! CHECK-NEXT: return return end subroutine -! 
CHECK-LABEL: func private @_FortranAFailImageStatement() -> none attributes {fir.runtime} +! CHECK-LABEL: func private @_FortranAFailImageStatement() attributes {fir.runtime} diff --git a/flang/test/Lower/forall/forall-allocatable-2.f90 b/flang/test/Lower/forall/forall-allocatable-2.f90 index 95bd290f27350..f7c46acf87275 100644 --- a/flang/test/Lower/forall/forall-allocatable-2.f90 +++ b/flang/test/Lower/forall/forall-allocatable-2.f90 @@ -23,7 +23,7 @@ end subroutine forall_with_allocatable2 ! CHECK: %[[VAL_5:.*]] = arith.constant {{.*}} : i32 ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_3]] : (!fir.box>>}>>) -> !fir.box ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_8:.*]] = fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) {{.*}}: (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[VAL_6]], %[[VAL_7]], %[[VAL_5]]) {{.*}}: (!fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_9:.*]] = arith.constant 5 : i32 ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i32) -> index ! CHECK: %[[VAL_11:.*]] = arith.constant 15 : i32 diff --git a/flang/test/Lower/forall/forall-where.f90 b/flang/test/Lower/forall/forall-where.f90 index af309e63535fa..b1dd72fdfb4f2 100644 --- a/flang/test/Lower/forall/forall-where.f90 +++ b/flang/test/Lower/forall/forall-where.f90 @@ -380,6 +380,6 @@ end subroutine test_nested_forall_where ! CHECK: } ! CHECK: fir.array_merge_store %[[VAL_248]], %[[VAL_340:.*]] to %[[VAL_0]] : !fir.array}>>, !fir.array}>>, !fir.box}>>> ! CHECK: %[[VAL_341:.*]] = fir.convert %[[VAL_8]] : (!fir.ref>, !fir.heap>>>) -> !fir.llvm_ptr -! CHECK: %[[VAL_342:.*]] = fir.call @_FortranARaggedArrayDeallocate(%[[VAL_341]]) {{.*}}: (!fir.llvm_ptr) -> none +! CHECK: fir.call @_FortranARaggedArrayDeallocate(%[[VAL_341]]) {{.*}}: (!fir.llvm_ptr) -> () ! CHECK: return ! 
CHECK: } diff --git a/flang/test/Lower/goto-statement.f90 b/flang/test/Lower/goto-statement.f90 index f69ed6ba656a2..5591c09da9122 100644 --- a/flang/test/Lower/goto-statement.f90 +++ b/flang/test/Lower/goto-statement.f90 @@ -8,7 +8,7 @@ subroutine sub1() ! CHECK-LABEL: sub1 ! CHECK: cf.br ^[[BB1:.*]] ! CHECK: ^[[BB1]]: -! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> none +! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> () ! CHECK: } ! Test multiple goto statements @@ -25,7 +25,7 @@ subroutine sub2() ! CHECK: ^[[BB2]]: ! CHECK: cf.br ^[[BB3:.*]] ! CHECK: ^[[BB3]]: -! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> none +! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> () ! CHECK: } ! Test goto which branches to a previous label @@ -36,10 +36,10 @@ subroutine sub3() 3 goto 2 end subroutine ! CHECK: sub3 -! CHECK: {{.*}} fir.call @_FortranAPauseStatement() {{.*}}: () -> none +! CHECK: {{.*}} fir.call @_FortranAPauseStatement() {{.*}}: () -> () ! CHECK: cf.br ^[[BB2:.*]] ! CHECK: ^[[BB1:.*]]: // -! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> none +! CHECK: {{.*}} fir.call @_FortranAStopStatement({{.*}}, {{.*}}, {{.*}}) {{.*}}: (i32, i1, i1) -> () ! CHECK: ^[[BB2]]: ! CHECK: cf.br ^[[BB1]] ! CHECK: } @@ -55,7 +55,7 @@ subroutine sub4() pause end subroutine ! CHECK-LABEL: sub4 -! CHECK: {{.*}} fir.call @_FortranAPauseStatement() {{.*}}: () -> none +! CHECK: {{.*}} fir.call @_FortranAPauseStatement() {{.*}}: () -> () ! CHECK-NEXT: cf.br ^[[BB1:.*]] ! CHECK-NEXT: ^[[BB1]]: ! 
CHECK-NEXT: cf.br ^[[BB2:.*]] diff --git a/flang/test/Lower/io-statement-big-unit-checks.f90 b/flang/test/Lower/io-statement-big-unit-checks.f90 index 471fe399aee01..2be658c1f76d2 100644 --- a/flang/test/Lower/io-statement-big-unit-checks.f90 +++ b/flang/test/Lower/io-statement-big-unit-checks.f90 @@ -177,7 +177,7 @@ subroutine open_8_error_recovery_1(n, ios) ! CHECK: %[[VAL_20:.*]] = arith.constant false ! CHECK: %[[VAL_21:.*]] = arith.constant false ! CHECK: %[[VAL_22:.*]] = arith.constant false -! CHECK: %[[VAL_23:.*]] = fir.call @_FortranAioEnableHandlers(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> none +! CHECK: fir.call @_FortranAioEnableHandlers(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> () ! CHECK: %[[VAL_24:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_17]]) {{.*}}: (!fir.ref) -> i32 ! CHECK: fir.result %[[VAL_24]] : i32 ! CHECK: } else { @@ -209,10 +209,10 @@ subroutine open_8_error_recovery_2(n, msg) ! CHECK: %[[VAL_21:.*]] = arith.constant false ! CHECK: %[[VAL_22:.*]] = arith.constant false ! CHECK: %[[VAL_23:.*]] = arith.constant true -! CHECK: %[[VAL_24:.*]] = fir.call @_FortranAioEnableHandlers(%[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> none +! CHECK: fir.call @_FortranAioEnableHandlers(%[[VAL_18]], %[[VAL_19]], %[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_23]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> () ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.ref>) -> !fir.ref ! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_2]]#1 : (index) -> i64 -! CHECK: %[[VAL_27:.*]] = fir.call @_FortranAioGetIoMsg(%[[VAL_18]], %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref, !fir.ref, i64) -> none +! CHECK: fir.call @_FortranAioGetIoMsg(%[[VAL_18]], %[[VAL_25]], %[[VAL_26]]) {{.*}}: (!fir.ref, !fir.ref, i64) -> () ! 
CHECK: %[[VAL_28:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_18]]) {{.*}}: (!fir.ref) -> i32 ! CHECK: fir.result %[[VAL_28]] : i32 ! CHECK: } else { diff --git a/flang/test/Lower/nested-where.f90 b/flang/test/Lower/nested-where.f90 index b1b6367174ebd..ab457280b80ce 100644 --- a/flang/test/Lower/nested-where.f90 +++ b/flang/test/Lower/nested-where.f90 @@ -310,9 +310,9 @@ program nested_where ! CHECK: } ! CHECK: fir.array_merge_store %[[VAL_35]], %[[VAL_277:.*]] to %[[VAL_5]] : !fir.array<3xi32>, !fir.array<3xi32>, !fir.ref> ! CHECK: %[[VAL_278:.*]] = fir.convert %[[VAL_3]] : (!fir.ref>, !fir.heap>>>) -> !fir.llvm_ptr - ! CHECK: %[[VAL_279:.*]] = fir.call @_FortranARaggedArrayDeallocate(%[[VAL_278]]) {{.*}}: (!fir.llvm_ptr) -> none + ! CHECK: fir.call @_FortranARaggedArrayDeallocate(%[[VAL_278]]) {{.*}}: (!fir.llvm_ptr) -> () ! CHECK: %[[VAL_280:.*]] = fir.convert %[[VAL_4]] : (!fir.ref>, !fir.heap>>>) -> !fir.llvm_ptr - ! CHECK: %[[VAL_281:.*]] = fir.call @_FortranARaggedArrayDeallocate(%[[VAL_280]]) {{.*}}: (!fir.llvm_ptr) -> none + ! CHECK: fir.call @_FortranARaggedArrayDeallocate(%[[VAL_280]]) {{.*}}: (!fir.llvm_ptr) -> () integer :: a(3) = 0 logical :: mask1(3) = (/ .true.,.false.,.true. /) diff --git a/flang/test/Lower/nullify-polymorphic.f90 b/flang/test/Lower/nullify-polymorphic.f90 index 5cb966810f1b9..99470ad48d272 100644 --- a/flang/test/Lower/nullify-polymorphic.f90 +++ b/flang/test/Lower/nullify-polymorphic.f90 @@ -50,4 +50,4 @@ program test ! CHECK: %[[TYPE_DESC_CAST:.*]] = fir.convert %[[DECLARED_TYPE_DESC]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[RANK:.*]] = arith.constant 0 : i32 ! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[C_DESC_CAST]], %[[TYPE_DESC_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! 
CHECK: fir.call @_FortranAPointerNullifyDerived(%[[C_DESC_CAST]], %[[TYPE_DESC_CAST]], %[[RANK]], %[[CORANK]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () diff --git a/flang/test/Lower/optional-value-caller.f90 b/flang/test/Lower/optional-value-caller.f90 index 31bf326dd1df1..d3ad5cf85e6b9 100644 --- a/flang/test/Lower/optional-value-caller.f90 +++ b/flang/test/Lower/optional-value-caller.f90 @@ -333,7 +333,7 @@ subroutine test_array_ptr(i) ! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_7]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_18]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_23:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_19]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_20]], %[[VAL_21]], %[[VAL_22]], %[[VAL_19]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.result %[[VAL_15]] : !fir.heap> ! CHECK: } ! CHECK: fir.result %[[VAL_24:.*]] : !fir.heap> @@ -440,7 +440,7 @@ subroutine test_char_array(c) ! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_9]] : (!fir.box>>) -> !fir.box ! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_21]] : (!fir.ref>) -> !fir.ref -! CHECK: %[[VAL_26:.*]] = fir.call @_FortranAAssignTemporary(%[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_22]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssignTemporary(%[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_22]]) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: fir.result %[[VAL_18]] : !fir.heap>> ! CHECK: } ! 
CHECK: fir.result %[[VAL_27:.*]] : !fir.heap>> diff --git a/flang/test/Lower/parent-component.f90 b/flang/test/Lower/parent-component.f90 index c6bc53340643f..3cb23f277c9a3 100644 --- a/flang/test/Lower/parent-component.f90 +++ b/flang/test/Lower/parent-component.f90 @@ -192,6 +192,6 @@ subroutine parent_comp_lhs() ! CHECK: fir.store %[[EMBOX_A]] to %[[BOX]] : !fir.ref>> ! CHECK: %[[A_NONE:.*]] = fir.convert %[[BOX]] : (!fir.ref>>) -> !fir.ref> ! CHECK: %[[B_NONE:.*]] = fir.convert %[[EMBOX_B]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAssign(%[[A_NONE]], %[[B_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssign(%[[A_NONE]], %[[B_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> () end diff --git a/flang/test/Lower/pointer-association-polymorphic.f90 b/flang/test/Lower/pointer-association-polymorphic.f90 index 6c56db892d1b8..7d166e1423cfa 100644 --- a/flang/test/Lower/pointer-association-polymorphic.f90 +++ b/flang/test/Lower/pointer-association-polymorphic.f90 @@ -87,7 +87,7 @@ subroutine test_pointer() ! CHECK: %[[C1_DESC_LOAD:.*]] = fir.load %[[C1_DESC]] : !fir.ref>>> ! CHECK: %[[P_CONV:.*]] = fir.convert %[[P_DESC]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[C1_DESC_CONV:.*]] = fir.convert %[[C1_DESC_LOAD]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C1_DESC_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C1_DESC_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK: %[[P_DESC_LOAD:.*]] = fir.load %[[P_DESC]] : !fir.ref>>> ! CHECK: %[[P_REBOX:.*]] = fir.rebox %[[P_DESC_LOAD]] : (!fir.class>>) -> !fir.class> ! CHECK: fir.dispatch "proc"(%[[P_DESC_LOAD]] : !fir.class>>) (%[[P_REBOX]] : !fir.class>) {pass_arg_pos = 0 : i32} @@ -95,7 +95,7 @@ subroutine test_pointer() ! CHECK: %[[C2_DESC_LOAD:.*]] = fir.load %[[C2_DESC]] : !fir.ref>>> ! 
CHECK: %[[P_CONV:.*]] = fir.convert %[[P_DESC]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[C2_DESC_CONV:.*]] = fir.convert %[[C2_DESC_LOAD]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C2_DESC_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C2_DESC_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK: %[[P_DESC_LOAD:.*]] = fir.load %[[P_DESC]] : !fir.ref>>> ! CHECK: %[[P_REBOX:.*]] = fir.rebox %[[P_DESC_LOAD]] : (!fir.class>>) -> !fir.class> ! CHECK: fir.dispatch "proc"(%[[P_DESC_LOAD]] : !fir.class>>) (%[[P_REBOX]] : !fir.class>) {pass_arg_pos = 0 : i32} @@ -110,7 +110,7 @@ subroutine test_pointer() ! CHECK: %[[C3_EMBOX:.*]] = fir.embox %[[C3_COORD]] source_box %[[C3_LOAD]] : (!fir.ref>, !fir.class>>>) -> !fir.class> ! CHECK: %[[P_CONV:.*]] = fir.convert %[[P_DESC]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[C3_EMBOX_CONV:.*]] = fir.convert %[[C3_EMBOX]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C3_EMBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C3_EMBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK: %[[P_DESC_LOAD:.*]] = fir.load %[[P_DESC]] : !fir.ref>>> ! CHECK: %[[P_REBOX:.*]] = fir.rebox %[[P_DESC_LOAD]] : (!fir.class>>) -> !fir.class> ! CHECK: fir.dispatch "proc"(%[[P_DESC_LOAD]] : !fir.class>>) (%[[P_REBOX]] : !fir.class>) {pass_arg_pos = 0 : i32} @@ -125,7 +125,7 @@ subroutine test_pointer() ! CHECK: %[[C4_EMBOX:.*]] = fir.embox %[[C4_COORD]] source_box %[[C4_LOAD]] : (!fir.ref>, !fir.class>>>) -> !fir.class> ! CHECK: %[[P_CONV:.*]] = fir.convert %[[P_DESC]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[C4_EMBOX_CONV:.*]] = fir.convert %[[C4_EMBOX]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C4_EMBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! 
CHECK: fir.call @_FortranAPointerAssociate(%[[P_CONV]], %[[C4_EMBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK: %[[P_DESC_LOAD:.*]] = fir.load %[[P_DESC]] : !fir.ref>>> ! CHECK: %[[P_REBOX:.*]] = fir.rebox %[[P_DESC_LOAD]] : (!fir.class>>) -> !fir.class> ! CHECK: fir.dispatch "proc"(%[[P_DESC_LOAD]] : !fir.class>>) (%[[P_REBOX]] : !fir.class>) {pass_arg_pos = 0 : i32} @@ -134,7 +134,7 @@ subroutine test_pointer() ! CHECK: %[[C3_REBOX:.*]] = fir.rebox %[[C3_LOAD]](%{{.*}}) : (!fir.class>>>, !fir.shift<1>) -> !fir.class>> ! CHECK: %[[PA_CONV:.*]] = fir.convert %[[PA_DESC]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[C3_REBOX_CONV:.*]] = fir.convert %[[C3_REBOX]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[C3_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[C3_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK-LABEL: fir.do_loop ! CHECK: %[[PA_LOAD:.*]] = fir.load %[[PA_DESC]] : !fir.ref>>>> ! CHECK: %[[PA_COORD:.*]] = fir.coordinate_of %[[PA_LOAD]], %{{.*}} : (!fir.class>>>, i64) -> !fir.ref> @@ -145,7 +145,7 @@ subroutine test_pointer() ! CHECK: %[[C4_REBOX:.*]] = fir.rebox %[[C4_LOAD]](%{{.*}}) : (!fir.class>>>, !fir.shift<1>) -> !fir.class>> ! CHECK: %[[PA_CONV:.*]] = fir.convert %[[PA_DESC]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[C4_REBOX_CONV:.*]] = fir.convert %[[C4_REBOX]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[C4_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[C4_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK-LABEL: fir.do_loop ! CHECK: %[[PA_LOAD:.*]] = fir.load %[[PA_DESC]] : !fir.ref>>>> ! CHECK: %[[PA_COORD:.*]] = fir.coordinate_of %[[PA_LOAD]], %{{.*}} : (!fir.class>>>, i64) -> !fir.ref> @@ -166,7 +166,7 @@ subroutine test_pointer() ! 
CHECK: %[[SLICE_REBOX:.*]] = fir.rebox %[[C4_LOAD]](%[[SHIFT]]) [%[[SLICE]]] : (!fir.class>>>, !fir.shift<1>, !fir.slice<1>) -> !fir.class>> ! CHECK: %[[PA_CONV:.*]] = fir.convert %[[PA_DESC]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[SLICE_REBOX_CONV:.*]] = fir.convert %[[SLICE_REBOX]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[SLICE_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[PA_CONV]], %[[SLICE_REBOX_CONV]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK-LABEL: fir.do_loop ! CHECK: %[[PA_LOAD:.*]] = fir.load %[[PA_DESC]] : !fir.ref>>>> ! CHECK: %[[PA_COORD:.*]] = fir.coordinate_of %[[PA_LOAD]], %{{.*}} : (!fir.class>>>, i64) -> !fir.ref> diff --git a/flang/test/Lower/pointer-disassociate.f90 b/flang/test/Lower/pointer-disassociate.f90 index e341bca5cd89b..fb70fd7795b2e 100644 --- a/flang/test/Lower/pointer-disassociate.f90 +++ b/flang/test/Lower/pointer-disassociate.f90 @@ -118,7 +118,7 @@ subroutine test_polymorphic_null(p) ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (!fir.tdesc>) -> !fir.ref ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32 -! CHECK: %[[VAL_6:.*]] = fir.call @_FortranAPointerNullifyDerived(%[[VAL_2]], %[[VAL_3]], %[[VAL_4]], %[[VAL_5]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[VAL_2]], %[[VAL_3]], %[[VAL_4]], %[[VAL_5]]) {{.*}}: (!fir.ref>, !fir.ref, i32, i32) -> () subroutine test_unlimited_polymorphic_null(p) class(*), pointer :: p(:) diff --git a/flang/test/Lower/polymorphic-temp.f90 b/flang/test/Lower/polymorphic-temp.f90 index 8633620e8430e..5e2937e1f5f65 100644 --- a/flang/test/Lower/polymorphic-temp.f90 +++ b/flang/test/Lower/polymorphic-temp.f90 @@ -46,7 +46,7 @@ subroutine test_temp_from_intrinsic_spread() ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TEMP_RES0]] : (!fir.ref>>>) -> !fir.ref> ! 
CHECK: %[[P_BOX_NONE:.*]] = fir.convert %[[LOAD_P]] : (!fir.class>) -> !fir.box ! CHECK: %[[C2_I64:.*]] = fir.convert %[[C2]] : (i32) -> i64 -! CHECK: %{{.*}} = fir.call @_FortranASpread(%[[RES_BOX_NONE]], %[[P_BOX_NONE]], %[[C1]], %[[C2_I64]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranASpread(%[[RES_BOX_NONE]], %[[P_BOX_NONE]], %[[C1]], %[[C2_I64]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> () ! CHECK: %[[LOAD_RES:.*]] = fir.load %[[TEMP_RES0]] : !fir.ref>>> ! CHECK: %[[RES_ADDR:.*]] = fir.box_addr %[[LOAD_RES]] : (!fir.class>>) -> !fir.heap> ! CHECK: %[[REBOX:.*]] = fir.rebox %[[LOAD_RES]] : (!fir.class>>) -> !fir.class> @@ -57,7 +57,7 @@ subroutine test_temp_from_intrinsic_spread() ! CHECK: %[[EMBOX_PA_1:.*]] = fir.embox %[[COORD_PA_1]] source_box %[[LOAD_PA]] : (!fir.ref, !fir.class>>) -> !fir.class ! CHECK: %[[RES1_BOX_NONE:.*]] = fir.convert %[[TEMP_RES1]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[PA1_BOX_NONE:.*]] = fir.convert %[[EMBOX_PA_1]] : (!fir.class) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranASpread(%[[RES1_BOX_NONE]], %[[PA1_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranASpread(%[[RES1_BOX_NONE]], %[[PA1_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i32, i64, !fir.ref, i32) -> () subroutine test_temp_from_intrinsic_reshape(i) class(*), allocatable :: a(:,:) @@ -73,12 +73,12 @@ subroutine test_temp_from_intrinsic_reshape(i) ! CHECK: fir.store %[[EMBOX_WITH_SOURCE]] to %[[TMP_RES]] : !fir.ref>>> ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TMP_RES]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[I_BOX_NONE:.*]] = fir.convert %[[I]] : (!fir.class>) -> !fir.box -! 
CHECK: %{{.*}} = fir.call @_FortranAReshape(%[[RES_BOX_NONE]], %[[I_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAReshape(%[[RES_BOX_NONE]], %[[I_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[LOAD_RES:.*]] = fir.load %[[TMP_RES]] : !fir.ref>>> ! CHECK: %[[RANK:.*]] = arith.constant 2 : i32 ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[LOAD_RES]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[RES_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> none +! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[RES_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref>, !fir.box, i32) -> () subroutine check(r) class(p1) :: r(:) @@ -100,7 +100,7 @@ subroutine test_temp_from_intrinsic_pack(i, mask) ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TMP_RES]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[I_BOX_NONE:.*]] = fir.convert %[[I]] : (!fir.class>>) -> !fir.box ! CHECK: %[[MASK_BOX_NONE:.*]] = fir.convert %[[EMBOXED_MASK]] : (!fir.box>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPack(%[[RES_BOX_NONE]], %[[I_BOX_NONE]], %[[MASK_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAPack(%[[RES_BOX_NONE]], %[[I_BOX_NONE]], %[[MASK_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () subroutine check_rank2(r) class(p1), intent(in) :: r(:,:) @@ -119,7 +119,7 @@ subroutine test_temp_from_unpack(v, m, f) ! CHECK: %[[V_BOX_NONE:.*]] = fir.convert %[[V]] : (!fir.class>>) -> !fir.box ! 
CHECK: %[[M_BOX_NONE:.*]] = fir.convert %[[M]] : (!fir.box>>) -> !fir.box ! CHECK: %[[F_BOX_NONE:.*]] = fir.convert %[[F]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAUnpack(%[[TMP_BOX_NONE]], %[[V_BOX_NONE]], %[[M_BOX_NONE]], %[[F_BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAUnpack(%[[TMP_BOX_NONE]], %[[V_BOX_NONE]], %[[M_BOX_NONE]], %[[F_BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () subroutine check_cshift(r) class(p1) :: r(:) @@ -139,7 +139,7 @@ subroutine test_temp_from_intrinsic_cshift(a, shift) ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TMP_RES]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[ARRAY_NONE:.*]] = fir.convert %[[ARRAY]] : (!fir.class>>) -> !fir.box ! CHECK: %[[SHIFT_I64:.*]] = fir.convert %[[LOAD_SHIFT]] : (i32) -> i64 -! CHECK: %{{.*}} = fir.call @_FortranACshiftVector(%[[RES_BOX_NONE]], %[[ARRAY_NONE]], %[[SHIFT_I64]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranACshiftVector(%[[RES_BOX_NONE]], %[[ARRAY_NONE]], %[[SHIFT_I64]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> () subroutine test_temp_from_intrinsic_eoshift(a, shift, b) class(p1), intent(in) :: a(20) @@ -157,7 +157,7 @@ subroutine test_temp_from_intrinsic_eoshift(a, shift, b) ! CHECK: %[[ARRAY_NONE:.*]] = fir.convert %[[ARRAY]] : (!fir.class>>) -> !fir.box ! CHECK: %[[SHIFT_I64:.*]] = fir.convert %[[LOAD_SHIFT]] : (i32) -> i64 ! CHECK: %[[BOUNDARY_NONE:.*]] = fir.convert %[[BOUNDARY]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAEoshiftVector(%[[RES_BOX_NONE]], %[[ARRAY_NONE]], %[[SHIFT_I64]], %[[BOUNDARY_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i64, !fir.box, !fir.ref, i32) -> none +! 
CHECK: fir.call @_FortranAEoshiftVector(%[[RES_BOX_NONE]], %[[ARRAY_NONE]], %[[SHIFT_I64]], %[[BOUNDARY_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, i64, !fir.box, !fir.ref, i32) -> () subroutine test_temp_from_intrinsic_transfer(source, mold) class(p1), intent(in) :: source(:) @@ -171,7 +171,7 @@ subroutine test_temp_from_intrinsic_transfer(source, mold) ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TMP_RES]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[SOURCE_NONE:.*]] = fir.convert %[[SOURCE]] : (!fir.class>>) -> !fir.box ! CHECK: %[[MOLD_NONE:.*]] = fir.convert %[[MOLD]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranATransfer(%[[RES_BOX_NONE]], %[[SOURCE_NONE]], %[[MOLD_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranATransfer(%[[RES_BOX_NONE]], %[[SOURCE_NONE]], %[[MOLD_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () subroutine test_temp_from_intrinsic_transpose(matrix) class(p1), intent(in) :: matrix(:,:) @@ -183,7 +183,7 @@ subroutine test_temp_from_intrinsic_transpose(matrix) ! CHECK: %[[TMP_RES:.*]] = fir.alloca !fir.class>>> ! CHECK: %[[RES_BOX_NONE:.*]] = fir.convert %[[TMP_RES]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[MATRIX_NONE:.*]] = fir.convert %[[MATRIX]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranATranspose(%[[RES_BOX_NONE]], %[[MATRIX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranATranspose(%[[RES_BOX_NONE]], %[[MATRIX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> () subroutine check_scalar(a) class(p1), intent(in) :: a diff --git a/flang/test/Lower/polymorphic.f90 b/flang/test/Lower/polymorphic.f90 index 8c40c91bc3baa..73603d7ee7bee 100644 --- a/flang/test/Lower/polymorphic.f90 +++ b/flang/test/Lower/polymorphic.f90 @@ -211,7 +211,7 @@ subroutine associate_up_pointer(r) ! 
CHECK: %[[REBOX_RP:.*]] = fir.rebox %[[LOAD_RP]](%{{.*}}) : (!fir.box>>, !fir.shift<1>) -> !fir.box> ! CHECK: %[[CONV_P:.*]] = fir.convert %[[P]] : (!fir.ref>>>) -> !fir.ref> ! CHECK: %[[RP_BOX_NONE:.*]] = fir.convert %[[REBOX_RP]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociate(%[[CONV_P]], %[[RP_BOX_NONE]]) {{.*}} : (!fir.ref>, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociate(%[[CONV_P]], %[[RP_BOX_NONE]]) {{.*}} : (!fir.ref>, !fir.box) -> () ! CHECK: return ! Test that the fir.dispatch operation is created with the correct pass object @@ -315,7 +315,7 @@ subroutine nullify_pointer_array(a) ! CHECK: %[[CONV_TDESC:.*]] = fir.convert %[[TYPE_DESC]] : (!fir.tdesc>>>}>>) -> !fir.ref ! CHECK: %[[C1:.*]] = arith.constant 1 : i32 ! CHECK: %[[C0:.*]] = arith.constant 0 : i32 -! CHECK: %{{.*}} = fir.call @_FortranAPointerNullifyDerived(%[[CONV_P]], %[[CONV_TDESC]], %[[C1]], %[[C0]]) {{.*}} : (!fir.ref>, !fir.ref, i32, i32) -> none +! CHECK: fir.call @_FortranAPointerNullifyDerived(%[[CONV_P]], %[[CONV_TDESC]], %[[C1]], %[[C0]]) {{.*}} : (!fir.ref>, !fir.ref, i32, i32) -> () subroutine up_input(a) class(*), intent(in) :: a @@ -400,7 +400,7 @@ subroutine assign_polymorphic_allocatable() ! CHECK: %[[BOXED_T:.*]] = fir.embox %[[T]](%[[SHAPE]]) : (!fir.ref>>, !fir.shape<2>) -> !fir.box>> ! CHECK: %[[CONV_C:.*]] = fir.convert %[[C]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[CONV_BOXED_T:.*]] = fir.convert %[[BOXED_T]] : (!fir.box>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAssignPolymorphic(%[[CONV_C]], %[[CONV_BOXED_T]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssignPolymorphic(%[[CONV_C]], %[[CONV_BOXED_T]], %{{.*}}, %{{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: return subroutine pointer_assign_remap() @@ -436,7 +436,7 @@ subroutine pointer_assign_remap() ! CHECK: %[[ARG0:.*]] = fir.convert %[[P]] : (!fir.ref>>>>) -> !fir.ref> ! 
CHECK: %[[ARG1:.*]] = fir.convert %[[REBOX_A]] : (!fir.class>>) -> !fir.box ! CHECK: %[[ARG2:.*]] = fir.convert %[[BOXED_BOUND_ARRAY]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociateRemapping(%[[ARG0]], %[[ARG1]], %[[ARG2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAPointerAssociateRemapping(%[[ARG0]], %[[ARG1]], %[[ARG2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[C0:.*]] = arith.constant 0 : i64 ! CHECK: %[[C99:.*]] = arith.constant 99 : i64 @@ -454,7 +454,7 @@ subroutine pointer_assign_remap() ! CHECK: %[[ARG0:.*]] = fir.convert %[[Q]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[ARG1:.*]] = fir.convert %[[REBOX_A]] : (!fir.class>>) -> !fir.box ! CHECK: %[[ARG2:.*]] = fir.convert %[[BOXED_BOUND_ARRAY]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociateRemapping(%[[ARG0]], %[[ARG1]], %[[ARG2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAPointerAssociateRemapping(%[[ARG0]], %[[ARG1]], %[[ARG2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.box, !fir.ref, i32) -> () subroutine pointer_assign_lower_bounds() class(p1), allocatable, target :: a(:) @@ -467,7 +467,7 @@ subroutine pointer_assign_lower_bounds() ! CHECK: %[[A:.*]] = fir.alloca !fir.class>>> {bindc_name = "a", fir.target, uniq_name = "_QMpolymorphic_testFpointer_assign_lower_boundsEa"} ! CHECK: %[[P:.*]] = fir.alloca !fir.class>>> {bindc_name = "p", uniq_name = "_QMpolymorphic_testFpointer_assign_lower_boundsEp"} ! CHECK: %[[LB:.*]] = arith.constant -50 : i64 -! CHECK: %[[REBOX_A:.*]] = fir.rebox %21(%23) : (!fir.class>>>, !fir.shift<1>) -> !fir.class>> +! CHECK: %[[REBOX_A:.*]] = fir.rebox %{{.*}}(%{{.*}}) : (!fir.class>>>, !fir.shift<1>) -> !fir.class>> ! CHECK: %[[LBOUND_ARRAY:.*]] = fir.alloca !fir.array<1xi64> ! 
CHECK: %[[ARRAY:.*]] = fir.undefined !fir.array<1xi64> ! CHECK: %[[ARRAY0:.*]] = fir.insert_value %[[ARRAY]], %[[LB]], [0 : index] : (!fir.array<1xi64>, i64) -> !fir.array<1xi64> @@ -478,7 +478,7 @@ subroutine pointer_assign_lower_bounds() ! CHECK: %[[P_BOX_NONE:.*]] = fir.convert %[[P]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[REBOX_A]] : (!fir.class>>) -> !fir.box ! CHECK: %[[LBOUNDS_BOX_NONE:.*]] = fir.convert %[[LBOUND_ARRAY_BOXED]] : (!fir.box>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAPointerAssociateLowerBounds(%[[P_BOX_NONE]], %[[A_BOX_NONE]], %[[LBOUNDS_BOX_NONE]]) {{.*}} : (!fir.ref>, !fir.box, !fir.box) -> none +! CHECK: fir.call @_FortranAPointerAssociateLowerBounds(%[[P_BOX_NONE]], %[[A_BOX_NONE]], %[[LBOUNDS_BOX_NONE]]) {{.*}} : (!fir.ref>, !fir.box, !fir.box) -> () subroutine test_elemental_assign() type(p1) :: pa(3) @@ -501,7 +501,7 @@ subroutine test_elemental_assign() ! CHECK: %[[DO_RES:.*]] = fir.do_loop %[[ARG0:.*]] = %[[C0]] to %[[UB]] step %[[C1]] unordered iter_args(%[[ARG1:.*]] = %[[LOAD_PA]]) -> (!fir.array<3x!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>) { ! CHECK: %[[FETCH_INT:.*]] = fir.array_fetch %[[LOAD_INT_ARRAY]], %[[ARG0]] : (!fir.array<3xi32>, index) -> i32 ! CHECK: %[[ARRAY_MOD:.*]]:2 = fir.array_modify %[[ARG1]], %[[ARG0]] : (!fir.array<3x!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>, index) -> (!fir.ref>, !fir.array<3x!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>) -! CHECK: %[[EMBOXED:.*]] = fir.embox %10#0 : (!fir.ref>) -> !fir.class> +! CHECK: %[[EMBOXED:.*]] = fir.embox %{{.*}}#0 : (!fir.ref>) -> !fir.class> ! CHECK: fir.store %[[FETCH_INT]] to %[[INT]] : !fir.ref ! CHECK: fir.call @_QMpolymorphic_testPassign_p1_int(%[[EMBOXED]], %[[INT]]) fastmath : (!fir.class>, !fir.ref) -> () ! CHECK: fir.result %[[ARRAY_MOD]]#1 : !fir.array<3x!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>> @@ -791,9 +791,9 @@ subroutine test_unlimited_polymorphic_intentout(a) ! 
CHECK-LABEL: func.func @_QMpolymorphic_testPtest_unlimited_polymorphic_intentout( ! CHECK-SAME: %[[ARG0:.*]]: !fir.class {fir.bindc_name = "a"}) { ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ARG0]] : (!fir.class) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ARG0]] : (!fir.class) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAInitialize(%[[BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.box, !fir.ref, i32) -> () subroutine test_polymorphic_intentout(a) class(p1), intent(out) :: a @@ -802,9 +802,9 @@ subroutine test_polymorphic_intentout(a) ! CHECK-LABEL: func.func @_QMpolymorphic_testPtest_polymorphic_intentout( ! CHECK-SAME: %[[ARG0:.*]]: !fir.class> {fir.bindc_name = "a"}) { ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ARG0]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> none +! CHECK: fir.call @_FortranADestroy(%[[BOX_NONE]]) {{.*}} : (!fir.box) -> () ! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ARG0]] : (!fir.class>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAInitialize(%[[BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAInitialize(%[[BOX_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.box, !fir.ref, i32) -> () subroutine rebox_up_to_record_type(p) class(*), allocatable, target :: p(:,:) @@ -944,7 +944,7 @@ subroutine test_rhs_assign(a) ! CHECK: %[[LOAD_RES:.*]] = fir.load %[[RES]] : !fir.ref>>> ! CHECK: %[[A_NONE:.*]] = fir.convert %[[A]] : (!fir.box>) -> !fir.ref> ! CHECK: %[[RES_NONE:.*]] = fir.convert %[[LOAD_RES]] : (!fir.class>>) -> !fir.box -! 
CHECK: %{{.*}} = fir.call @_FortranAAssign(%[[A_NONE]], %[[RES_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssign(%[[A_NONE]], %[[RES_NONE]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> () subroutine type_with_polymorphic_components(a, b) type(p4) :: a, b @@ -959,7 +959,7 @@ subroutine type_with_polymorphic_components(a, b) ! CHECK: fir.store %[[EMBOX_A]] to %[[ALLOCA]] : !fir.ref>>>}>>> ! CHECK: %[[BOX_NONE1:.*]] = fir.convert %[[ALLOCA]] : (!fir.ref>>>}>>>) -> !fir.ref> ! CHECK: %[[BOX_NONE2:.*]] = fir.convert %[[EMBOX_B]] : (!fir.box>>>}>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAssign(%[[BOX_NONE1]], %[[BOX_NONE2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssign(%[[BOX_NONE1]], %[[BOX_NONE2]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> () subroutine up_pointer(p) class(*), pointer, intent(in) :: p @@ -1015,7 +1015,7 @@ subroutine test_parent_comp_in_select_type(s) ! CHECK: %[[LOAD_P:.*]] = fir.load %[[P]] : !fir.ref>>> ! CHECK: %[[LHS_CONV:.*]] = fir.convert %[[REBOX_P1]] : (!fir.box>) -> !fir.ref> ! CHECK: %[[RHS_CONV:.*]] = fir.convert %[[LOAD_P]] : (!fir.class>>) -> !fir.box -! CHECK: %{{.*}} = fir.call @_FortranAAssign(%[[LHS_CONV]], %[[RHS_CONV]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssign(%[[LHS_CONV]], %[[RHS_CONV]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref>, !fir.box, !fir.ref, i32) -> () subroutine move_alloc_unlimited_poly(a, b) class(*), allocatable :: a, b @@ -1158,7 +1158,7 @@ program test ! CHECK: %[[O:.*]] = fir.load %[[ADDR_O]] : !fir.ref}>>>> ! CHECK: %[[FIELD_INNER:.*]] = fir.field_index inner, !fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}> ! CHECK: %[[COORD_INNER:.*]] = fir.coordinate_of %[[O]], %[[FIELD_INNER]] : (!fir.box}>>>, !fir.field) -> !fir.ref> -! 
CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} unordered iter_args(%arg1 = %9) -> (!fir.array<5x!fir.logical<4>>) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} unordered iter_args(%arg1 = %{{.*}}) -> (!fir.array<5x!fir.logical<4>>) { ! CHECK: %[[EMBOXED:.*]] = fir.embox %[[COORD_INNER]] : (!fir.ref>) -> !fir.class> -! CHECK: %{{.*}} = fir.call @_QMpolymorphic_testPlt(%17, %[[EMBOXED]]) {{.*}} : (!fir.ref, !fir.class>) -> !fir.logical<4> +! CHECK: %{{.*}} = fir.call @_QMpolymorphic_testPlt(%{{.*}}, %[[EMBOXED]]) {{.*}} : (!fir.ref, !fir.class>) -> !fir.logical<4> ! CHECK: } diff --git a/flang/test/Lower/select-type-2.fir b/flang/test/Lower/select-type-2.fir index fbc1eb07bbd2e..87b42611752f9 100644 --- a/flang/test/Lower/select-type-2.fir +++ b/flang/test/Lower/select-type-2.fir @@ -63,7 +63,7 @@ // CHECK: %[[VAL_19:.*]] = fir.address_of(@_QQclX6661696C2074797065) : !fir.ref> // CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (!fir.ref>) -> !fir.ref // CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_2]] : (index) -> i64 -// CHECK: %[[VAL_22:.*]] = fir.call @_FortranAStopStatementText(%[[VAL_20]], %[[VAL_21]], %[[VAL_1]], %[[VAL_1]]) fastmath : (!fir.ref, i64, i1, i1) -> none +// CHECK: fir.call @_FortranAStopStatementText(%[[VAL_20]], %[[VAL_21]], %[[VAL_1]], %[[VAL_1]]) fastmath : (!fir.ref, i64, i1, i1) -> () // CHECK: fir.unreachable // CHECK: ^bb4: // CHECK: cf.br ^bb3(%[[VAL_13]] : !fir.class>) @@ -96,7 +96,7 @@ func.func @_QPtest() { %9 = fir.address_of(@_QQclX6661696C2074797065) : !fir.ref> %10 = fir.convert %9 : (!fir.ref>) -> !fir.ref %11 = fir.convert %c9 : (index) -> i64 - %12 = fir.call @_FortranAStopStatementText(%10, %11, %false, %false) fastmath : (!fir.ref, i64, i1, i1) -> none + fir.call @_FortranAStopStatementText(%10, %11, %false, %false) fastmath : (!fir.ref, i64, i1, i1) -> () fir.unreachable ^bb3: // pred: ^bb1 %13 = fir.box_addr %6 : (!fir.class>) -> !fir.ptr diff --git 
a/flang/test/Lower/stop-statement.f90 b/flang/test/Lower/stop-statement.f90 index cf0665cf5dbd1..0cbb01dd8a742 100644 --- a/flang/test/Lower/stop-statement.f90 +++ b/flang/test/Lower/stop-statement.f90 @@ -75,5 +75,5 @@ subroutine stop_char_lit stop 'crash' end subroutine stop_char_lit -! CHECK-DAG: func private @_Fortran{{.*}}StopStatement(i32, i1, i1) -> none -! CHECK-DAG: func private @_Fortran{{.*}}StopStatementText(!fir.ref, i64, i1, i1) -> none +! CHECK-DAG: func private @_Fortran{{.*}}StopStatement(i32, i1, i1) +! CHECK-DAG: func private @_Fortran{{.*}}StopStatementText(!fir.ref, i64, i1, i1) diff --git a/flang/test/Lower/structure-constructors-alloc-comp.f90 b/flang/test/Lower/structure-constructors-alloc-comp.f90 index 5b1bca317c94f..8887ed4851045 100644 --- a/flang/test/Lower/structure-constructors-alloc-comp.f90 +++ b/flang/test/Lower/structure-constructors-alloc-comp.f90 @@ -31,7 +31,7 @@ subroutine test_alloc1(y) ! HLFIR: %[[CONS_6:.*]] = arith.constant {{.*}} : i32 ! HLFIR: %[[VAL_16:.*]] = fir.convert %[[VAL_14]] : (!fir.box>>}>>) -> !fir.box ! HLFIR: %[[VAL_17:.*]] = fir.convert %[[VAL_15]] : (!fir.ref>) -> !fir.ref -! HLFIR: %{{.*}} = fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[CONS_6]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! HLFIR: fir.call @_FortranAInitialize(%[[VAL_16]], %[[VAL_17]], %[[CONS_6]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! HLFIR: %[[VAL_18:.*]] = hlfir.designate %[[VAL_13]]#0{"x"} : (!fir.ref>>}>>) -> !fir.ref ! HLFIR: %[[VAL_19:.*]] = fir.load %[[VAL_12]]#0 : !fir.ref ! HLFIR: hlfir.assign %[[VAL_19]] to %[[VAL_18]] temporary_lhs : f32, !fir.ref @@ -57,7 +57,7 @@ subroutine test_alloc2(y, b) ! HLFIR: %[[CONS_7:.*]] = arith.constant {{.*}} : i32 ! HLFIR: %[[VAL_18:.*]] = fir.convert %[[VAL_16]] : (!fir.box>>}>>) -> !fir.box ! HLFIR: %[[VAL_19:.*]] = fir.convert %[[VAL_17]] : (!fir.ref>) -> !fir.ref -! 
HLFIR: {{.*}} = fir.call @_FortranAInitialize(%[[VAL_18]], %[[VAL_19]], %[[CONS_7]]) fastmath : (!fir.box, !fir.ref, i32) -> none +! HLFIR: fir.call @_FortranAInitialize(%[[VAL_18]], %[[VAL_19]], %[[CONS_7]]) fastmath : (!fir.box, !fir.ref, i32) -> () ! HLFIR: %[[VAL_20:.*]] = hlfir.designate %[[VAL_15]]#0{"x"} : (!fir.ref>>}>>) -> !fir.ref ! HLFIR: %[[VAL_21:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref ! HLFIR: hlfir.assign %[[VAL_21]] to %[[VAL_20]] temporary_lhs : f32, !fir.ref @@ -111,7 +111,7 @@ subroutine takes_ta_alloc_char(x) ! HLFIR: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "ctor.temp"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) ! HLFIR: %[[VAL_2:.*]] = fir.embox %[[VAL_1]]#0 : (!fir.ref>>}>>) -> !fir.box>>}>> ! HLFIR: %[[VAL_5:.*]] = fir.convert %[[VAL_2]] : (!fir.box>>}>>) -> !fir.box -! HLFIR: %[[VAL_7:.*]] = fir.call @_FortranAInitialize(%[[VAL_5]], +! HLFIR: fir.call @_FortranAInitialize(%[[VAL_5]], ! HLFIR: %[[VAL_8:.*]] = hlfir.designate %[[VAL_1]]#0{"a"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>) -> !fir.ref>>> ! HLFIR: %[[VAL_9:.*]] = fir.address_of(@_QQclX68656C6C6F) : !fir.ref> ! HLFIR: %[[VAL_10:.*]] = arith.constant 5 : index @@ -134,7 +134,7 @@ subroutine takes_ta_alloc_char_cst_len(x) ! HLFIR: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "ctor.temp"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) ! HLFIR: %[[VAL_2:.*]] = fir.embox %[[VAL_1]]#0 : (!fir.ref>>}>>) -> !fir.box>>}>> ! HLFIR: %[[VAL_5:.*]] = fir.convert %[[VAL_2]] : (!fir.box>>}>>) -> !fir.box -! HLFIR: %[[VAL_7:.*]] = fir.call @_FortranAInitialize(%[[VAL_5]], +! HLFIR: fir.call @_FortranAInitialize(%[[VAL_5]], ! HLFIR: %[[VAL_8:.*]] = arith.constant 2 : index ! HLFIR: %[[VAL_9:.*]] = hlfir.designate %[[VAL_1]]#0{"a"} typeparams %[[VAL_8]] {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>, index) -> !fir.ref>>> ! 
HLFIR: %[[VAL_10:.*]] = fir.address_of(@_QQclX68656C6C6F) : !fir.ref> diff --git a/flang/test/Lower/structure-constructors.f90 b/flang/test/Lower/structure-constructors.f90 index 14d8bfe04d1f0..86581ce51bf45 100644 --- a/flang/test/Lower/structure-constructors.f90 +++ b/flang/test/Lower/structure-constructors.f90 @@ -346,7 +346,7 @@ subroutine test_parent_component2() ! CHECK: fir.store %[[VAL_11]] to %[[VAL_1]] : !fir.ref>}>>> ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_1]] : (!fir.ref>}>>>) -> !fir.ref> ! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_9]] : (!fir.box>}>>>) -> !fir.box -! CHECK: %[[VAL_17:.*]] = fir.call @_FortranAAssign(%[[VAL_14]], %[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> none +! CHECK: fir.call @_FortranAAssign(%[[VAL_14]], %[[VAL_15]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[VAL_18:.*]] = fir.field_index mask, !fir.type<_QFtest_parent_component3Tmid{m:!fir.array<2x!fir.char<1,5>>,mask:!fir.logical<4>}> ! CHECK: %[[VAL_19:.*]] = fir.coordinate_of %[[VAL_2]], %[[VAL_18]] : (!fir.ref>,mask:!fir.logical<4>}>>, !fir.field) -> !fir.ref> ! CHECK: %[[VAL_20:.*]] = arith.constant true diff --git a/flang/test/Lower/transformational-intrinsics.f90 b/flang/test/Lower/transformational-intrinsics.f90 index 3dfb689f18d81..5e10f0f510720 100644 --- a/flang/test/Lower/transformational-intrinsics.f90 +++ b/flang/test/Lower/transformational-intrinsics.f90 @@ -24,7 +24,7 @@ subroutine in_io(x) ! CHECK: %[[res_desc:.]] = fir.alloca !fir.box>>> ! CHECK-DAG: %[[res_arg:.*]] = fir.convert %[[res_desc]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK-DAG: %[[x_arg:.*]] = fir.convert %[[arg0]] : (!fir.box>>) -> !fir.box - ! CHECK: fir.call @_Fortran{{.*}}AllDim(%[[res_arg]], %[[x_arg]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_Fortran{{.*}}AllDim(%[[res_arg]], %[[x_arg]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! 
CHECK: %[[res_desc_load:.*]] = fir.load %[[res_desc]] : !fir.ref>>>> ! CHECK-DAG: %[[dims:.*]]:3 = fir.box_dims %[[res_desc_load]], %c0{{.*}} : (!fir.box>>>, index) -> (index, index, index) ! CHECK-DAG: %[[res_addr:.*]] = fir.box_addr %[[res_desc_load]] : (!fir.box>>>) -> !fir.heap>> @@ -44,7 +44,7 @@ subroutine in_call(x) ! CHECK: %[[res_desc:.]] = fir.alloca !fir.box>>> ! CHECK-DAG: %[[res_arg:.*]] = fir.convert %[[res_desc]] : (!fir.ref>>>>) -> !fir.ref> ! CHECK-DAG: %[[x_arg:.*]] = fir.convert %[[arg0]] : (!fir.box>>) -> !fir.box - ! CHECK: fir.call @_Fortran{{.*}}AllDim(%[[res_arg]], %[[x_arg]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_Fortran{{.*}}AllDim(%[[res_arg]], %[[x_arg]], {{.*}}) {{.*}}: (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[res_desc_load:.*]] = fir.load %[[res_desc]] : !fir.ref>>>> ! CHECK-DAG: %[[dims:.*]]:3 = fir.box_dims %[[res_desc_load]], %c0{{.*}} : (!fir.box>>>, index) -> (index, index, index) ! CHECK-DAG: %[[res_addr:.*]] = fir.box_addr %[[res_desc_load]] : (!fir.box>>>) -> !fir.heap>> @@ -157,7 +157,7 @@ subroutine in_elem_expr(x, y, z) ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_19]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_25]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_26]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[VAL_32:.*]] = fir.call @_FortranACshift(%[[VAL_28]], %[[VAL_29]], %[[VAL_30]], %[[VAL_17]], %[[VAL_31]], %[[VAL_27]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranACshift(%[[VAL_28]], %[[VAL_29]], %[[VAL_30]], %[[VAL_17]], %[[VAL_31]], %[[VAL_27]]) {{.*}}: (!fir.ref>, !fir.box, !fir.box, i32, !fir.ref, i32) -> () ! CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_2]] : !fir.ref>>> ! CHECK: %[[VAL_34:.*]] = arith.constant 0 : index ! 
CHECK: %[[VAL_35:.*]]:3 = fir.box_dims %[[VAL_33]], %[[VAL_34]] : (!fir.box>>, index) -> (index, index, index) @@ -198,7 +198,7 @@ subroutine in_elem_expr(x, y, z) ! CHECK: %[[VAL_68:.*]] = fir.convert %[[VAL_59]] : (!fir.box>) -> !fir.box ! CHECK: %[[VAL_69:.*]] = fir.convert %[[VAL_64]] : (i32) -> i64 ! CHECK: %[[VAL_70:.*]] = fir.convert %[[VAL_65]] : (!fir.ref>) -> !fir.ref - ! CHECK: %[[VAL_71:.*]] = fir.call @_FortranACshiftVector(%[[VAL_67]], %[[VAL_68]], %[[VAL_69]], %[[VAL_70]], %[[VAL_66]]) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranACshiftVector(%[[VAL_67]], %[[VAL_68]], %[[VAL_69]], %[[VAL_70]], %[[VAL_66]]) {{.*}}: (!fir.ref>, !fir.box, i64, !fir.ref, i32) -> () ! CHECK: %[[VAL_72:.*]] = fir.load %[[VAL_0]] : !fir.ref>>> ! CHECK: %[[VAL_73:.*]] = arith.constant 0 : index ! CHECK: %[[VAL_74:.*]]:3 = fir.box_dims %[[VAL_72]], %[[VAL_73]] : (!fir.box>>, index) -> (index, index, index) @@ -260,7 +260,7 @@ subroutine unpack_test() ! CHECK-DAG: %[[a20:.*]] = fir.convert %[[a10]] : (!fir.box>) -> !fir.box ! CHECK-DAG: %[[a21:.*]] = fir.convert %[[a12]] : (!fir.box>>) -> !fir.box ! CHECK-DAG: %[[a22:.*]] = fir.convert %[[a14]] : (!fir.box>) -> !fir.box - ! CHECK: fir.call @_FortranAUnpack(%[[a19]], %[[a20]], %[[a21]], %[[a22]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAUnpack(%[[a19]], %[[a20]], %[[a21]], %[[a22]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK-NEXT: %[[a22:.*]] = fir.load %{{.*}} : !fir.ref>>> ! CHECK: %[[a25:.*]] = fir.box_addr %[[a22]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a25]] : !fir.heap> @@ -279,7 +279,7 @@ subroutine unpack_test() ! CHECK: %[[a49:.*]] = fir.convert %[[a41]] : (!fir.box>>) -> !fir.box ! CHECK: %[[a50:.*]] = fir.convert %[[a42]] : (!fir.box) -> !fir.box result = unpack(vector, mask, 343) - ! 
CHECK: fir.call @_FortranAUnpack(%[[a47]], %[[a48]], %[[a49]], %[[a50]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> none + ! CHECK: fir.call @_FortranAUnpack(%[[a47]], %[[a48]], %[[a49]], %[[a50]], %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref>, !fir.box, !fir.box, !fir.box, !fir.ref, i32) -> () ! CHECK: %[[a53:.*]] = fir.load %[[a0]] : !fir.ref>>> ! CHECK: %[[a56:.*]] = fir.box_addr %[[a53]] : (!fir.box>>) -> !fir.heap> ! CHECK: fir.freemem %[[a56]] : !fir.heap> diff --git a/flang/test/Lower/vector-subscript-io.f90 b/flang/test/Lower/vector-subscript-io.f90 index 9a041af16c88c..0f64e99e03a20 100644 --- a/flang/test/Lower/vector-subscript-io.f90 +++ b/flang/test/Lower/vector-subscript-io.f90 @@ -489,7 +489,7 @@ subroutine simple_iostat(x, y, j, stat) ! CHECK: %[[VAL_341:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref> ! CHECK: %[[VAL_342:.*]] = fir.convert %[[VAL_341]] : (!fir.ref>) -> !fir.ref ! CHECK: %[[VAL_343:.*]] = fir.call @_FortranAioBeginExternalListInput(%[[VAL_334]], %[[VAL_342]], %{{.*}}) {{.*}}: (i32, !fir.ref, i32) -> !fir.ref -! CHECK: %[[VAL_344:.*]] = fir.call @_FortranAioEnableHandlers(%[[VAL_343]], %[[VAL_337]], %[[VAL_336]], %[[VAL_336]], %[[VAL_336]], %[[VAL_336]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> none +! CHECK: fir.call @_FortranAioEnableHandlers(%[[VAL_343]], %[[VAL_337]], %[[VAL_336]], %[[VAL_336]], %[[VAL_336]], %[[VAL_336]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> () ! CHECK: %[[VAL_345:.*]]:3 = fir.box_dims %[[VAL_346]], %[[VAL_339]] : (!fir.box>, index) -> (index, index, index) ! CHECK: %[[VAL_347:.*]] = fir.slice %[[VAL_338]], %[[VAL_345]]#1, %[[VAL_338]] : (index, index, index) -> !fir.slice<1> ! CHECK: %[[VAL_348:.*]] = arith.subi %[[VAL_345]]#1, %[[VAL_338]] : index @@ -538,7 +538,7 @@ subroutine iostat_in_io_loop(k, j, stat) ! CHECK: %[[VAL_376:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref> ! CHECK: %[[VAL_377:.*]] = fir.convert %[[VAL_376]] : (!fir.ref>) -> !fir.ref ! 
CHECK: %[[VAL_378:.*]] = fir.call @_FortranAioBeginExternalListInput(%[[VAL_366]], %[[VAL_377]], %{{.*}}) {{.*}}: (i32, !fir.ref, i32) -> !fir.ref -! CHECK: %[[VAL_379:.*]] = fir.call @_FortranAioEnableHandlers(%[[VAL_378]], %[[VAL_369]], %[[VAL_370]], %[[VAL_370]], %[[VAL_370]], %[[VAL_370]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> none +! CHECK: fir.call @_FortranAioEnableHandlers(%[[VAL_378]], %[[VAL_369]], %[[VAL_370]], %[[VAL_370]], %[[VAL_370]], %[[VAL_370]]) {{.*}}: (!fir.ref, i1, i1, i1, i1, i1) -> () ! CHECK: cf.br ^bb1(%[[VAL_371]], %[[VAL_369]] : index, i1) ! CHECK: ^bb1(%[[VAL_380:.*]]: index, %[[VAL_381:.*]]: i1): ! CHECK: %[[VAL_382:.*]] = arith.cmpi sle, %[[VAL_380]], %[[VAL_368]] : index diff --git a/flang/test/Preprocessing/bug129131.F b/flang/test/Preprocessing/bug129131.F index 00aba5da2c7cb..43bbfdc232f92 100644 --- a/flang/test/Preprocessing/bug129131.F +++ b/flang/test/Preprocessing/bug129131.F @@ -1,4 +1,4 @@ -! RUN: %flang -fc1 -fdebug-unparse %s 2>&1 | FileCheck %s +! RUN: %flang_fc1 -fdebug-unparse %s 2>&1 | FileCheck %s ! CHECK: PRINT *, 2_4 ! 
CHECK: PRINT *, 1_4 #define a ,3 diff --git a/flang/test/Transforms/omp-reduction-cfg-conversion.fir b/flang/test/Transforms/omp-reduction-cfg-conversion.fir index 707e665132afb..cbe599f0da5d2 100644 --- a/flang/test/Transforms/omp-reduction-cfg-conversion.fir +++ b/flang/test/Transforms/omp-reduction-cfg-conversion.fir @@ -18,7 +18,7 @@ omp.declare_reduction @add_reduction_i_32_box_3_byref : !fir.ref>>) -> !fir.ref> %9 = fir.convert %6 : (!fir.box) -> !fir.box %10 = fir.convert %7 : (!fir.ref>) -> !fir.ref - %11 = fir.call @_FortranAAssign(%8, %9, %10, %c4_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> none + fir.call @_FortranAAssign(%8, %9, %10, %c4_i32) : (!fir.ref>, !fir.box, !fir.ref, i32) -> () %12 = fir.alloca !fir.box> fir.store %4 to %12 : !fir.ref>> omp.yield(%12 : !fir.ref>>) diff --git a/flang/test/Transforms/simplifyintrinsics.fir b/flang/test/Transforms/simplifyintrinsics.fir index a3930566035b3..b2af152e5a913 100644 --- a/flang/test/Transforms/simplifyintrinsics.fir +++ b/flang/test/Transforms/simplifyintrinsics.fir @@ -262,13 +262,13 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.targ %8 = fir.convert %5 : (!fir.ref>) -> !fir.ref %9 = fir.convert %c0 : (index) -> i32 %10 = fir.convert %4 : (!fir.box) -> !fir.box - %11 = fir.call @_FortranACppSumComplex4(%6, %7, %8, %c5_i32, %9, %10) : (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> none + fir.call @_FortranACppSumComplex4(%6, %7, %8, %c5_i32, %9, %10) : (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> () %12 = fir.load %0 : !fir.ref> fir.store %12 to %1 : !fir.ref> %13 = fir.load %1 : !fir.ref> return %13 : complex } - func.func private @_FortranACppSumComplex4(!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> none attributes {fir.runtime} + func.func private @_FortranACppSumComplex4(!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> () attributes {fir.runtime} fir.global linkonce @_QQclX2E2F6973756D5F362E66393000 constant : !fir.char<1,13> { %0 = 
fir.string_lit "./isum_6.f90\00"(13) : !fir.char<1,13> fir.has_value %0 : !fir.char<1,13> @@ -277,7 +277,7 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.targ // CHECK-LABEL: func.func @sum_1d_complex(%{{.*}}: !fir.ref>> {fir.bindc_name = "a"}) -> complex { // CHECK-NOT: fir.call @_FortranACppSumComplex4x1_simplified({{.*}}) -// CHECK: fir.call @_FortranACppSumComplex4({{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> none +// CHECK: fir.call @_FortranACppSumComplex4({{.*}}) : (!fir.ref>, !fir.box, !fir.ref, i32, i32, !fir.box) -> () // CHECK-NOT: fir.call @_FortranACppSumComplex4x1_simplified({{.*}}) // ----- @@ -422,7 +422,7 @@ module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.targ %file = fir.address_of(@filename) : !fir.ref> %file_ref = fir.convert %file : (!fir.ref>) -> !fir.ref %absent_none = fir.convert %absent : (!fir.box) -> !fir.box - %res = fir.call @_FortranASumDim(%box_none, %box_none2, %c1_i32, %file_ref, %lineno, %absent_none) : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> none + fir.call @_FortranASumDim(%box_none, %box_none2, %c1_i32, %file_ref, %lineno, %absent_none) : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box) -> () func.return } } @@ -1236,7 +1236,7 @@ func.func @_QMtestPcount_generate_mask(%arg0: !fir.ref>>) -> !fir.box %12 = fir.convert %c4 : (index) -> i32 %13 = fir.convert %9 : (!fir.ref>) -> !fir.ref - %14 = fir.call @_FortranACountDim(%10, %11, %c2_i32, %12, %13, %c11_i32) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none + fir.call @_FortranACountDim(%10, %11, %c2_i32, %12, %13, %c11_i32) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () %15 = fir.load %0 : !fir.ref>>> %c0_2 = arith.constant 0 : index %16:3 = fir.box_dims %15, %c0_2 : (!fir.box>>, index) -> (index, index, index) @@ -1256,12 +1256,12 @@ func.func @_QMtestPcount_generate_mask(%arg0: !fir.ref> return %22 : !fir.array<10xi32> } -func.func private 
@_FortranACountDim(!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranACountDim(!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () attributes {fir.runtime} // CHECK-LABEL: func.func @_QMtestPcount_generate_mask( // CHECK-SAME: %[[A:.*]]: !fir.ref>> {fir.bindc_name = "mask"}) -> !fir.array<10xi32> { // CHECK-NOT fir.call @_FortranACountDimLogical4_simplified({{.*}}) -// CHECK: %[[RES:.*]] = fir.call @_FortranACountDim({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranACountDim({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32) -> () // CHECK-NOT fir.call @_FortranACountDimLogical4_simplified({{.*}}) // ----- @@ -1419,7 +1419,7 @@ func.func @_QPtestAny_DimArg(%arg0: !fir.ref>> %10 = fir.convert %0 : (!fir.ref>>>>) -> !fir.ref> %11 = fir.convert %5 : (!fir.box>>) -> !fir.box %12 = fir.convert %9 : (!fir.ref>) -> !fir.ref - %13 = fir.call @_FortranAAnyDim(%10, %11, %c2_i32, %12, %c3_i32) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + fir.call @_FortranAAnyDim(%10, %11, %c2_i32, %12, %c3_i32) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () %14 = fir.load %0 : !fir.ref>>>> %c0_2 = arith.constant 0 : index %15:3 = fir.box_dims %14, %c0_2 : (!fir.box>>>, index) -> (index, index, index) @@ -1439,12 +1439,12 @@ func.func @_QPtestAny_DimArg(%arg0: !fir.ref>> %21 = fir.load %1 : !fir.ref>> return %21 : !fir.array<10x!fir.logical<4>> } -func.func private @_FortranAAnyDim(!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranAAnyDim(!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () attributes {fir.runtime} // CHECK-LABEL: func.func @_QPtestAny_DimArg( // CHECK-SAME: %[[ARR:.*]]: !fir.ref>> {fir.bindc_name = "a"}) -> !fir.array<10x!fir.logical<4>> { // CHECK-NOT fir.call @_FortranAAnyDimLogical4x1_simplified({{.*}}) -// CHECK: fir.call @_FortranAAnyDim({{.*}}) fastmath : (!fir.ref>, 
!fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAnyDim({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK-NOT fir.call @_FortranAAnyDimLogical4x1_simplified({{.*}}) // ----- @@ -1658,7 +1658,7 @@ func.func @_QPtestAll_DimArg(%arg0: !fir.ref>> %10 = fir.convert %0 : (!fir.ref>>>>) -> !fir.ref> %11 = fir.convert %5 : (!fir.box>>) -> !fir.box %12 = fir.convert %9 : (!fir.ref>) -> !fir.ref - %13 = fir.call @_FortranAAllDim(%10, %11, %c1_i32, %12, %c3_i32) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none + fir.call @_FortranAAllDim(%10, %11, %c1_i32, %12, %c3_i32) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () %14 = fir.load %0 : !fir.ref>>>> %c0_2 = arith.constant 0 : index %15:3 = fir.box_dims %14, %c0_2 : (!fir.box>>>, index) -> (index, index, index) @@ -1678,12 +1678,12 @@ func.func @_QPtestAll_DimArg(%arg0: !fir.ref>> %21 = fir.load %1 : !fir.ref>> return %21 : !fir.array<10x!fir.logical<4>> } -func.func private @_FortranAAllDim(!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none attributes {fir.runtime} +func.func private @_FortranAAllDim(!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () attributes {fir.runtime} // CHECK-LABEL: func.func @_QPtestAll_DimArg( // CHECK-SAME: %[[ARR:.*]]: !fir.ref>> {fir.bindc_name = "a"}) -> !fir.array<10x!fir.logical<4>> { // CHECK-NOT fir.call @_FortranAAllDimLogical4x1_simplified({{.*}}) -// CHECK: fir.call @_FortranAAllDim({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> none +// CHECK: fir.call @_FortranAAllDim({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32) -> () // CHECK-NOT fir.call @_FortranAAllDimLogical4x1_simplified({{.*}}) // ----- @@ -1714,7 +1714,7 @@ func.func @_QPtestminloc_works1d(%arg0: !fir.ref> {fir.bindc_ %14 = fir.convert %c4 : (index) -> i32 %15 = fir.convert %11 : (!fir.ref>) -> !fir.ref %16 = fir.convert %7 : (!fir.box>>) -> !fir.box - %17 = fir.call @_FortranAMinlocInteger4(%12, %13, %14, %15, %c5_i32, %16, %false) 
fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocInteger4(%12, %13, %14, %15, %c5_i32, %16, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %18 = fir.load %0 : !fir.ref>>> %c0_1 = arith.constant 0 : index %19:3 = fir.box_dims %18, %c0_1 : (!fir.box>>, index) -> (index, index, index) @@ -1837,7 +1837,7 @@ func.func @_QPtestminloc_works2d_nomask(%arg0: !fir.ref> { %12 = fir.convert %5 : (!fir.box>) -> !fir.box %13 = fir.convert %10 : (!fir.ref>) -> !fir.ref %14 = fir.convert %6 : (!fir.box) -> !fir.box - %15 = fir.call @_FortranAMinlocInteger4(%11, %12, %c8_i32, %13, %c4_i32, %14, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocInteger4(%11, %12, %c8_i32, %13, %c4_i32, %14, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %16 = fir.load %0 : !fir.ref>>> %c0_1 = arith.constant 0 : index %17:3 = fir.box_dims %16, %c0_1 : (!fir.box>>, index) -> (index, index, index) @@ -1957,7 +1957,7 @@ func.func @_QPtestminloc_works1d_scalarmask_f64(%arg0: !fir.ref i32 %14 = fir.convert %10 : (!fir.ref>) -> !fir.ref %15 = fir.convert %6 : (!fir.box>) -> !fir.box - %16 = fir.call @_FortranAMinlocReal8(%11, %12, %13, %14, %c6_i32, %15, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocReal8(%11, %12, %13, %14, %c6_i32, %15, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %17 = fir.load %0 : !fir.ref>>> %c0_0 = arith.constant 0 : index %18:3 = fir.box_dims %17, %c0_0 : (!fir.box>>, index) -> (index, index, index) @@ -2070,7 +2070,7 @@ func.func @_QPtestminloc_doesntwork1d_back(%arg0: !fir.ref> { %13 = fir.convert %c4 : (index) -> i32 %14 = fir.convert %10 : (!fir.ref>) -> !fir.ref %15 = fir.convert %6 : (!fir.box) -> !fir.box - %16 = fir.call @_FortranAMinlocInteger4(%11, %12, %13, %14, %c4_i32, %15, %true) fastmath : 
(!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocInteger4(%11, %12, %13, %14, %c4_i32, %15, %true) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %17 = fir.load %0 : !fir.ref>>> %c0_0 = arith.constant 0 : index %18:3 = fir.box_dims %17, %c0_0 : (!fir.box>>, index) -> (index, index, index) @@ -2094,7 +2094,7 @@ func.func @_QPtestminloc_doesntwork1d_back(%arg0: !fir.ref> { // CHECK-LABEL: func.func @_QPtestminloc_doesntwork1d_back( // CHECK-SAME: %[[ARR:.*]]: !fir.ref> {fir.bindc_name = "a"}) -> !fir.array<1xi32> { // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () -// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () // ----- @@ -2123,7 +2123,7 @@ func.func @_QPtestminloc_1d_dim(%arg0: !fir.ref> {fir.bindc_n %12 = fir.convert %c4 : (index) -> i32 %13 = fir.convert %9 : (!fir.ref>) -> !fir.ref %14 = fir.convert %6 : (!fir.box) -> !fir.box - %15 = fir.call @_FortranAMinlocDim(%10, %11, %12, %c1_i32, %13, %c4_i32, %14, %false) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocDim(%10, %11, %12, %c1_i32, %13, %c4_i32, %14, %false) fastmath : (!fir.ref>, !fir.box, i32, i32, !fir.ref, i32, !fir.box, i1) -> () %16 = fir.load %0 : !fir.ref>> %17 = fir.box_addr %16 : (!fir.box>) -> !fir.heap %18 = fir.load %17 : !fir.heap @@ -2220,7 +2220,7 @@ func.func @_QPtestminloc_doesntwork1d_unknownsize(%arg0: !fir.box i32 %12 = fir.convert %8 : (!fir.ref>) -> !fir.ref %13 = fir.convert %4 : (!fir.box) -> !fir.box - %14 = fir.call @_FortranAMinlocInteger4(%9, 
%10, %11, %12, %c4_i32, %13, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocInteger4(%9, %10, %11, %12, %c4_i32, %13, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %15 = fir.load %0 : !fir.ref>>> %c0_0 = arith.constant 0 : index %16:3 = fir.box_dims %15, %c0_0 : (!fir.box>>, index) -> (index, index, index) @@ -2243,7 +2243,7 @@ func.func @_QPtestminloc_doesntwork1d_unknownsize(%arg0: !fir.box> {fir.bindc_name = "a"}) -> !fir.array<1xi32> { // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () -// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () // ----- @@ -2275,7 +2275,7 @@ func.func @_QPtestminloc_doesntwork1d_chars(%arg0: !fir.boxchar<1> {fir.bindc_na %15 = fir.convert %c4 : (index) -> i32 %16 = fir.convert %12 : (!fir.ref>) -> !fir.ref %17 = fir.convert %8 : (!fir.box) -> !fir.box - %18 = fir.call @_FortranAMinlocCharacter(%13, %14, %15, %16, %c4_i32, %17, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocCharacter(%13, %14, %15, %16, %c4_i32, %17, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %19 = fir.load %0 : !fir.ref>>> %c0_0 = arith.constant 0 : index %20:3 = fir.box_dims %19, %c0_0 : (!fir.box>>, index) -> (index, index, index) @@ -2299,7 +2299,7 @@ func.func @_QPtestminloc_doesntwork1d_chars(%arg0: !fir.boxchar<1> {fir.bindc_na // CHECK-LABEL: func.func @_QPtestminloc_doesntwork1d_chars( // CHECK-SAME: %[[ARR:.*]]: !fir.boxchar<1> {fir.bindc_name = "a"}) -> 
!fir.array<1xi32> { // CHECK-NOT: fir.call @_FortranAMinlocCharacterx1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () -// CHECK: fir.call @_FortranAMinlocCharacter({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK: fir.call @_FortranAMinlocCharacter({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NOT: fir.call @_FortranAMinlocCharacterx1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () // ----- @@ -2356,7 +2356,7 @@ func.func @_QPtestminloc_doesntwork1d_unknownmask(%arg0: !fir.ref i32 %34 = fir.convert %30 : (!fir.ref>) -> !fir.ref %35 = fir.convert %26 : (!fir.box>>) -> !fir.box - %36 = fir.call @_FortranAMinlocInteger4(%31, %32, %33, %34, %c7_i32, %35, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMinlocInteger4(%31, %32, %33, %34, %c7_i32, %35, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %37 = fir.load %0 : !fir.ref>>> %c0_2 = arith.constant 0 : index %38:3 = fir.box_dims %37, %c0_2 : (!fir.box>>, index) -> (index, index, index) @@ -2380,7 +2380,7 @@ func.func @_QPtestminloc_doesntwork1d_unknownmask(%arg0: !fir.ref> {fir.bindc_name = "a"}) -> !fir.array<1xi32> { // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () -// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none +// CHECK: fir.call @_FortranAMinlocInteger4({{.*}}) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () // CHECK-NOT: fir.call @_FortranAMinlocInteger4x1_i32_contract_simplified({{.*}}) fastmath : (!fir.ref>, !fir.box, !fir.box) -> () // ----- @@ -2411,7 +2411,7 @@ func.func @_QPtestmaxloc_works1d(%arg0: !fir.ref> {fir.bindc_ %14 = fir.convert %c4 : (index) -> i32 %15 = fir.convert %11 : 
(!fir.ref>) -> !fir.ref %16 = fir.convert %7 : (!fir.box>>) -> !fir.box - %17 = fir.call @_FortranAMaxlocInteger4(%12, %13, %14, %15, %c5_i32, %16, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMaxlocInteger4(%12, %13, %14, %15, %c5_i32, %16, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %18 = fir.load %0 : !fir.ref>>> %c0_1 = arith.constant 0 : index %19:3 = fir.box_dims %18, %c0_1 : (!fir.box>>, index) -> (index, index, index) @@ -2534,7 +2534,7 @@ func.func @_QPtestmaxloc_works1d_scalarmask_f64(%arg0: !fir.ref i32 %14 = fir.convert %10 : (!fir.ref>) -> !fir.ref %15 = fir.convert %6 : (!fir.box>) -> !fir.box - %16 = fir.call @_FortranAMaxlocReal8(%11, %12, %13, %14, %c6_i32, %15, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> none + fir.call @_FortranAMaxlocReal8(%11, %12, %13, %14, %c6_i32, %15, %false) fastmath : (!fir.ref>, !fir.box, i32, !fir.ref, i32, !fir.box, i1) -> () %17 = fir.load %0 : !fir.ref>>> %c0_0 = arith.constant 0 : index %18:3 = fir.box_dims %17, %c0_0 : (!fir.box>>, index) -> (index, index, index) diff --git a/flang/test/Transforms/stack-arrays.fir b/flang/test/Transforms/stack-arrays.fir index a784cea9bc3a4..4a417ed981ab1 100644 --- a/flang/test/Transforms/stack-arrays.fir +++ b/flang/test/Transforms/stack-arrays.fir @@ -356,14 +356,14 @@ func.func @stop_terminator() { fir.freemem %0 : !fir.heap> %c0_i32 = arith.constant 0 : i32 %false = arith.constant false - %none = fir.call @_FortranAStopStatement(%c0_i32, %false, %false) : (i32, i1, i1) -> none + fir.call @_FortranAStopStatement(%c0_i32, %false, %false) : (i32, i1, i1) -> () fir.unreachable } // CHECK: func.func @stop_terminator() { // CHECK-NEXT: fir.alloca !fir.array<42xi32> // CHECK-NEXT: %[[ZERO:.*]] = arith.constant 0 : i32 // CHECK-NEXT: %[[FALSE:.*]] = arith.constant false -// CHECK-NEXT: %[[NONE:.*]] = fir.call @_FortranAStopStatement(%[[ZERO]], %[[FALSE]], 
%[[FALSE]]) : (i32, i1, i1) -> none +// CHECK-NEXT: fir.call @_FortranAStopStatement(%[[ZERO]], %[[FALSE]], %[[FALSE]]) : (i32, i1, i1) -> () // CHECK-NEXT: fir.unreachable // CHECK-NEXT: } diff --git a/flang/test/lib/Analysis/AliasAnalysis/CMakeLists.txt b/flang/test/lib/Analysis/AliasAnalysis/CMakeLists.txt index c4b3838c9a23e..cba47a4114517 100644 --- a/flang/test/lib/Analysis/AliasAnalysis/CMakeLists.txt +++ b/flang/test/lib/Analysis/AliasAnalysis/CMakeLists.txt @@ -16,14 +16,16 @@ add_flang_library(FIRTestAnalysis FIRSupport FIRTransforms FIRAnalysis + MLIRTestAnalysis + + MLIR_LIBS ${dialect_libs} MLIRFuncDialect MLIRLLVMDialect MLIRAnalysis - MLIRTestAnalysis ) target_include_directories(FIRTestAnalysis PRIVATE ${MLIR_MAIN_SRC_DIR}/.. - ) \ No newline at end of file + ) diff --git a/flang/test/lib/OpenACC/CMakeLists.txt b/flang/test/lib/OpenACC/CMakeLists.txt index 8aa3c7689af4e..e296827ef53be 100644 --- a/flang/test/lib/OpenACC/CMakeLists.txt +++ b/flang/test/lib/OpenACC/CMakeLists.txt @@ -14,6 +14,8 @@ add_flang_library(FIRTestOpenACCInterfaces FIRDialect FIROpenACCSupport FIRSupport + + MLIR_LIBS MLIRIR MLIROpenACCDialect MLIRPass diff --git a/flang/tools/bbc/CMakeLists.txt b/flang/tools/bbc/CMakeLists.txt index 3d92abdc60114..85aeb85e0c530 100644 --- a/flang/tools/bbc/CMakeLists.txt +++ b/flang/tools/bbc/CMakeLists.txt @@ -29,6 +29,9 @@ target_link_libraries(bbc PRIVATE flangFrontend flangPasses FlangOpenMPTransforms +) + +mlir_target_link_libraries(bbc PRIVATE ${dialect_libs} ${extension_libs} MLIRAffineToStandard diff --git a/flang/tools/fir-lsp-server/CMakeLists.txt b/flang/tools/fir-lsp-server/CMakeLists.txt index d5445d8f8e99b..6f095e24524b7 100644 --- a/flang/tools/fir-lsp-server/CMakeLists.txt +++ b/flang/tools/fir-lsp-server/CMakeLists.txt @@ -12,7 +12,9 @@ target_link_libraries(fir-lsp-server PRIVATE CUFDialect FIRDialect FIROpenACCSupport - HLFIRDialect + HLFIRDialect) + +mlir_target_link_libraries(fir-lsp-server PRIVATE MLIRLspServerLib 
${dialect_libs} ${extension_libs}) diff --git a/flang/tools/fir-opt/CMakeLists.txt b/flang/tools/fir-opt/CMakeLists.txt index f0741ca282169..efbde329b8b8c 100644 --- a/flang/tools/fir-opt/CMakeLists.txt +++ b/flang/tools/fir-opt/CMakeLists.txt @@ -24,6 +24,9 @@ target_link_libraries(fir-opt PRIVATE FlangOpenMPTransforms FIRAnalysis ${test_libs} +) + +mlir_target_link_libraries(fir-opt PRIVATE ${dialect_libs} ${extension_libs} diff --git a/flang/tools/tco/CMakeLists.txt b/flang/tools/tco/CMakeLists.txt index 0ac18734be2ce..220f908d2f108 100644 --- a/flang/tools/tco/CMakeLists.txt +++ b/flang/tools/tco/CMakeLists.txt @@ -21,6 +21,9 @@ target_link_libraries(tco PRIVATE FIROpenACCSupport FlangOpenMPTransforms FortranCommon +) + +mlir_target_link_libraries(tco PRIVATE ${dialect_libs} ${extension_libs} MLIRIR diff --git a/flang/unittests/Frontend/CMakeLists.txt b/flang/unittests/Frontend/CMakeLists.txt index 22c568af3d121..9177997f41f53 100644 --- a/flang/unittests/Frontend/CMakeLists.txt +++ b/flang/unittests/Frontend/CMakeLists.txt @@ -20,5 +20,9 @@ target_link_libraries(FlangFrontendTests FortranSemantics FortranCommon FortranEvaluate +) + +mlir_target_link_libraries(FlangFrontendTests + PRIVATE MLIRIR ) diff --git a/flang/unittests/Optimizer/Builder/ComplexTest.cpp b/flang/unittests/Optimizer/Builder/ComplexTest.cpp index eefab118e255a..689af4642b0b6 100644 --- a/flang/unittests/Optimizer/Builder/ComplexTest.cpp +++ b/flang/unittests/Optimizer/Builder/ComplexTest.cpp @@ -34,7 +34,7 @@ struct ComplexTest : public testing::Test { helper = std::make_unique(*firBuilder, loc); // Init commonly used types - realTy1 = mlir::FloatType::getF32(&context); + realTy1 = mlir::Float32Type::get(&context); complexTy1 = mlir::ComplexType::get(realTy1); integerTy1 = mlir::IntegerType::get(&context, 32); diff --git a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp index 05407d96998a2..3e2af24c47b96 100644 --- 
a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp +++ b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp @@ -146,7 +146,7 @@ TEST_F(FIRBuilderTest, createRealZeroConstant) { auto builder = getBuilder(); auto ctx = builder.getContext(); auto loc = builder.getUnknownLoc(); - auto realTy = mlir::FloatType::getF64(ctx); + auto realTy = mlir::Float64Type::get(ctx); auto cst = builder.createRealZeroConstant(loc, realTy); EXPECT_TRUE(mlir::isa(cst.getDefiningOp())); auto cstOp = dyn_cast(cst.getDefiningOp()); @@ -434,7 +434,7 @@ TEST_F(FIRBuilderTest, createZeroValue) { auto intAttr = mlir::dyn_cast(cst.getValue()); EXPECT_TRUE(intAttr && intAttr.getInt() == 0); - mlir::Type f32Ty = mlir::FloatType::getF32(builder.getContext()); + mlir::Type f32Ty = mlir::Float32Type::get(builder.getContext()); mlir::Value zeroFloat = fir::factory::createZeroValue(builder, loc, f32Ty); EXPECT_TRUE(zeroFloat.getType() == f32Ty); auto cst2 = mlir::dyn_cast_or_null( @@ -494,7 +494,7 @@ TEST_F(FIRBuilderTest, getBaseTypeOf) { return {scalars, arrays}; }; - auto f32Ty = mlir::FloatType::getF32(builder.getContext()); + auto f32Ty = mlir::Float32Type::get(builder.getContext()); mlir::Type f32SeqTy = builder.getVarLenSeqTy(f32Ty); auto [f32Scalars, f32Arrays] = makeExv(f32Ty, f32SeqTy); for (const auto &scalar : f32Scalars) { @@ -537,7 +537,7 @@ TEST_F(FIRBuilderTest, genArithFastMath) { auto ctx = builder.getContext(); auto loc = builder.getUnknownLoc(); - auto realTy = mlir::FloatType::getF32(ctx); + auto realTy = mlir::Float32Type::get(ctx); auto arg = builder.create(loc, realTy); // Test that FastMathFlags is 'none' by default. 
diff --git a/flang/unittests/Optimizer/Builder/HLFIRToolsTest.cpp b/flang/unittests/Optimizer/Builder/HLFIRToolsTest.cpp index 640b7ecc1e565..29700d2d3dbff 100644 --- a/flang/unittests/Optimizer/Builder/HLFIRToolsTest.cpp +++ b/flang/unittests/Optimizer/Builder/HLFIRToolsTest.cpp @@ -59,7 +59,7 @@ struct HLFIRToolsTest : public testing::Test { TEST_F(HLFIRToolsTest, testScalarRoundTrip) { auto &builder = getBuilder(); mlir::Location loc = getLoc(); - mlir::Type f32Type = mlir::FloatType::getF32(&context); + mlir::Type f32Type = mlir::Float32Type::get(&context); mlir::Type scalarf32Type = builder.getRefType(f32Type); mlir::Value scalarf32Addr = builder.create(loc, scalarf32Type); fir::ExtendedValue scalarf32{scalarf32Addr}; @@ -82,7 +82,7 @@ TEST_F(HLFIRToolsTest, testArrayRoundTrip) { llvm::SmallVector lbounds{ createConstant(-1), createConstant(-2)}; - mlir::Type f32Type = mlir::FloatType::getF32(&context); + mlir::Type f32Type = mlir::Float32Type::get(&context); mlir::Type seqf32Type = builder.getVarLenSeqTy(f32Type, 2); mlir::Type arrayf32Type = builder.getRefType(seqf32Type); mlir::Value arrayf32Addr = builder.create(loc, arrayf32Type); diff --git a/flang/unittests/Optimizer/FIRTypesTest.cpp b/flang/unittests/Optimizer/FIRTypesTest.cpp index a07c018a8afd5..b3151b4aa7efb 100644 --- a/flang/unittests/Optimizer/FIRTypesTest.cpp +++ b/flang/unittests/Optimizer/FIRTypesTest.cpp @@ -227,7 +227,7 @@ TEST_F(FIRTypesTest, updateTypeForUnlimitedPolymorphic) { mlir::Type ptrArrNone = fir::PointerType::get(arrNone); mlir::Type i32Ty = mlir::IntegerType::get(&context, 32); - mlir::Type f32Ty = mlir::FloatType::getF32(&context); + mlir::Type f32Ty = mlir::Float32Type::get(&context); mlir::Type l1Ty = fir::LogicalType::get(&context, 1); mlir::Type cplx32Ty = mlir::ComplexType::get(f32Ty); mlir::Type char1Ty = fir::CharacterType::get(&context, 1, 10); @@ -268,12 +268,12 @@ TEST_F(FIRTypesTest, getTypeAsString) { fir::ReferenceType::get(mlir::IntegerType::get(&context, 32)), 
*kindMap)); EXPECT_EQ( - "f64", fir::getTypeAsString(mlir::FloatType::getF64(&context), *kindMap)); + "f64", fir::getTypeAsString(mlir::Float64Type::get(&context), *kindMap)); EXPECT_EQ( "l8", fir::getTypeAsString(fir::LogicalType::get(&context, 1), *kindMap)); EXPECT_EQ("z32", fir::getTypeAsString( - mlir::ComplexType::get(mlir::FloatType::getF32(&context)), *kindMap)); + mlir::ComplexType::get(mlir::Float32Type::get(&context)), *kindMap)); EXPECT_EQ("c8", fir::getTypeAsString(fir::CharacterType::get(&context, 1, 1), *kindMap)); EXPECT_EQ("c8x10", diff --git a/flang/unittests/Optimizer/FortranVariableTest.cpp b/flang/unittests/Optimizer/FortranVariableTest.cpp index 4ba9359a07e4d..30c23b63b4d56 100644 --- a/flang/unittests/Optimizer/FortranVariableTest.cpp +++ b/flang/unittests/Optimizer/FortranVariableTest.cpp @@ -45,7 +45,7 @@ struct FortranVariableTest : public testing::Test { TEST_F(FortranVariableTest, SimpleScalar) { mlir::Location loc = getLoc(); - mlir::Type eleType = mlir::FloatType::getF32(&context); + mlir::Type eleType = mlir::Float32Type::get(&context); mlir::Value addr = builder->create(loc, eleType); auto name = mlir::StringAttr::get(&context, "x"); auto declare = builder->create(loc, addr.getType(), addr, @@ -96,7 +96,7 @@ TEST_F(FortranVariableTest, CharacterScalar) { TEST_F(FortranVariableTest, SimpleArray) { mlir::Location loc = getLoc(); - mlir::Type eleType = mlir::FloatType::getF32(&context); + mlir::Type eleType = mlir::Float32Type::get(&context); llvm::SmallVector extents{ createConstant(10), createConstant(20), createConstant(30)}; fir::SequenceType::Shape typeShape( diff --git a/flang/unittests/Optimizer/RTBuilder.cpp b/flang/unittests/Optimizer/RTBuilder.cpp index 35b9f1a6d5dcb..00960801928f7 100644 --- a/flang/unittests/Optimizer/RTBuilder.cpp +++ b/flang/unittests/Optimizer/RTBuilder.cpp @@ -31,7 +31,7 @@ TEST(RTBuilderTest, ComplexRuntimeInterface) { auto c99_cacosf_funcTy = mlir::cast(c99_cacosf_signature); 
EXPECT_EQ(c99_cacosf_funcTy.getNumInputs(), 1u); EXPECT_EQ(c99_cacosf_funcTy.getNumResults(), 1u); - auto cplx_ty = mlir::ComplexType::get(mlir::FloatType::getF32(&ctx)); + auto cplx_ty = mlir::ComplexType::get(mlir::Float32Type::get(&ctx)); EXPECT_EQ(c99_cacosf_funcTy.getInput(0), cplx_ty); EXPECT_EQ(c99_cacosf_funcTy.getResult(0), cplx_ty); } diff --git a/flang/unittests/Runtime/Support.cpp b/flang/unittests/Runtime/Support.cpp index 9d1a417fdbf42..8c8de73b5b979 100644 --- a/flang/unittests/Runtime/Support.cpp +++ b/flang/unittests/Runtime/Support.cpp @@ -67,3 +67,14 @@ TEST(IsAssumedSize, Basic) { std::vector{}, std::vector{0})}; EXPECT_FALSE(RTNAME(IsAssumedSize)(*scalar)); } + +TEST(DescriptorBytesFor, Basic) { + for (size_t i = 0; i < Fortran::common::TypeCategory_enumSize; ++i) { + auto tc{static_cast(i)}; + if (tc == TypeCategory::Derived) + continue; + + auto b{Descriptor::BytesFor(tc, 4)}; + EXPECT_GT(b, 0U); + } +} diff --git a/libc/include/__llvm-libc-common.h b/libc/include/__llvm-libc-common.h index c63eb134a5e5d..b5a23c5765f4d 100644 --- a/libc/include/__llvm-libc-common.h +++ b/libc/include/__llvm-libc-common.h @@ -39,7 +39,11 @@ #define _Thread_local thread_local #undef __NOEXCEPT +#if __cplusplus >= 201103L #define __NOEXCEPT noexcept +#else +#define __NOEXCEPT throw() +#endif #else // not __cplusplus diff --git a/libc/include/llvm-libc-types/sigset_t.h b/libc/include/llvm-libc-types/sigset_t.h index 1f601488db4c3..8c4d3b49533db 100644 --- a/libc/include/llvm-libc-types/sigset_t.h +++ b/libc/include/llvm-libc-types/sigset_t.h @@ -13,8 +13,8 @@ // This definition can be adjusted/specialized for different targets and // platforms as necessary. This definition works for Linux on most targets. 
-struct sigset_t { +typedef struct { unsigned long __signals[__NSIGSET_WORDS]; -}; +} sigset_t; #endif // LLVM_LIBC_TYPES_SIGSET_T_H diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h index 4257cfecd643a..b4d0c965a4c63 100644 --- a/libc/include/llvm-libc-types/struct_sigaction.h +++ b/libc/include/llvm-libc-types/struct_sigaction.h @@ -17,7 +17,7 @@ struct sigaction { void (*sa_handler)(int); void (*sa_sigaction)(int, siginfo_t *, void *); }; - struct sigset_t sa_mask; + sigset_t sa_mask; int sa_flags; #ifdef __linux__ // This field is present on linux for most targets. diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt index 5090dc218cda4..148484052dcad 100644 --- a/libc/src/__support/CMakeLists.txt +++ b/libc/src/__support/CMakeLists.txt @@ -12,6 +12,7 @@ add_header_library( libc.src.__support.CPP.optional libc.src.__support.CPP.span libc.src.__support.CPP.type_traits + libc.src.__support.math_extras ) add_object_library( diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h index 9ca3f11530c4b..50a745326eac3 100644 --- a/libc/src/__support/block.h +++ b/libc/src/__support/block.h @@ -18,46 +18,22 @@ #include "src/__support/CPP/type_traits.h" #include "src/__support/libc_assert.h" #include "src/__support/macros/config.h" +#include "src/__support/math_extras.h" #include namespace LIBC_NAMESPACE_DECL { -namespace internal { -// Types of corrupted blocks, and functions to crash with an error message -// corresponding to each type. -enum class BlockStatus { - VALID, - MISALIGNED, - PREV_MISMATCHED, - NEXT_MISMATCHED, -}; -} // namespace internal - /// Returns the value rounded down to the nearest multiple of alignment. LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) { // Note this shouldn't overflow since the result will always be <= value. 
return (value / alignment) * alignment; } -/// Returns the value rounded down to the nearest multiple of alignment. -template -LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) { - return reinterpret_cast( - align_down(reinterpret_cast(value), alignment)); -} - -/// Returns the value rounded up to the nearest multiple of alignment. +/// Returns the value rounded up to the nearest multiple of alignment. May wrap +/// around. LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) { - __builtin_add_overflow(value, alignment - 1, &value); - return align_down(value, alignment); -} - -/// Returns the value rounded up to the nearest multiple of alignment. -template -LIBC_INLINE constexpr T *align_up(T *value, size_t alignment) { - return reinterpret_cast( - align_up(reinterpret_cast(value), alignment)); + return align_down(value + alignment - 1, alignment); } using ByteSpan = cpp::span; @@ -68,8 +44,8 @@ using cpp::optional; /// The blocks store their offsets to the previous and next blocks. The latter /// is also the block's size. /// -/// Blocks will always be aligned to a `ALIGNMENT` boundary. Block sizes will -/// always be rounded up to a multiple of `ALIGNMENT`. +/// All blocks have their usable space aligned to some multiple of max_align_t. +/// This also implies that block outer sizes are aligned to max_align_t. /// /// As an example, the diagram below represents two contiguous `Block`s. The /// indices indicate byte offsets: @@ -122,15 +98,13 @@ class Block { static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK); public: - static constexpr size_t ALIGNMENT = cpp::max(alignof(max_align_t), size_t{4}); - static const size_t BLOCK_OVERHEAD; - // No copy or move. Block(const Block &other) = delete; Block &operator=(const Block &other) = delete; - /// Creates the first block for a given memory region, followed by a sentinel - /// last block. Returns the first block. 
+ /// Initializes a given memory region into a first block and a sentinel last + /// block. Returns the first block, which has its usable space aligned to + /// max_align_t. static optional init(ByteSpan region); /// @returns A pointer to a `Block`, given a pointer to the start of the @@ -142,11 +116,11 @@ class Block { /// pointer will return a non-null pointer. LIBC_INLINE static Block *from_usable_space(void *usable_space) { auto *bytes = reinterpret_cast(usable_space); - return reinterpret_cast(bytes - BLOCK_OVERHEAD); + return reinterpret_cast(bytes - sizeof(Block)); } LIBC_INLINE static const Block *from_usable_space(const void *usable_space) { const auto *bytes = reinterpret_cast(usable_space); - return reinterpret_cast(bytes - BLOCK_OVERHEAD); + return reinterpret_cast(bytes - sizeof(Block)); } /// @returns The total size of the block in bytes, including the header. @@ -154,7 +128,7 @@ class Block { LIBC_INLINE static size_t outer_size(size_t inner_size) { // The usable region includes the prev_ field of the next block. - return inner_size - sizeof(prev_) + BLOCK_OVERHEAD; + return inner_size - sizeof(prev_) + sizeof(Block); } /// @returns The number of usable bytes inside the block were it to be @@ -182,15 +156,23 @@ class Block { /// @returns The number of usable bytes inside a block with the given outer /// size if it remains free. LIBC_INLINE static size_t inner_size_free(size_t outer_size) { - return outer_size - BLOCK_OVERHEAD; + return outer_size - sizeof(Block); } /// @returns A pointer to the usable space inside this block. + /// + /// Aligned to some multiple of max_align_t. 
LIBC_INLINE cpp::byte *usable_space() { - return reinterpret_cast(this) + BLOCK_OVERHEAD; + auto *s = reinterpret_cast(this) + sizeof(Block); + LIBC_ASSERT(reinterpret_cast(s) % alignof(max_align_t) == 0 && + "usable space must be aligned to a multiple of max_align_t"); + return s; } LIBC_INLINE const cpp::byte *usable_space() const { - return reinterpret_cast(this) + BLOCK_OVERHEAD; + const auto *s = reinterpret_cast(this) + sizeof(Block); + LIBC_ASSERT(reinterpret_cast(s) % alignof(max_align_t) == 0 && + "usable space must be aligned to a multiple of max_align_t"); + return s; } // @returns The region of memory the block manages, including the header. @@ -201,11 +183,12 @@ class Block { /// Attempts to split this block. /// /// If successful, the block will have an inner size of at least - /// `new_inner_size`, rounded to ensure that the split point is on an - /// ALIGNMENT boundary. The remaining space will be returned as a new block. - /// Note that the prev_ field of the next block counts as part of the inner - /// size of the returnd block. - optional split(size_t new_inner_size); + /// `new_inner_size`. The remaining space will be returned as a new block, + /// with usable space aligned to `usable_space_alignment`. Note that the prev_ + /// field of the next block counts as part of the inner size of the block. + /// `usable_space_alignment` must be a multiple of max_align_t. + optional split(size_t new_inner_size, + size_t usable_space_alignment = alignof(max_align_t)); /// Merges this block with the one that comes after it. bool merge_next(); @@ -248,46 +231,57 @@ class Block { /// nullptr. 
LIBC_INLINE void mark_last() { next_ |= LAST_MASK; } - LIBC_INLINE constexpr Block(size_t outer_size) : next_(outer_size) { - LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned"); + LIBC_INLINE Block(size_t outer_size) : next_(outer_size) { + LIBC_ASSERT(outer_size % alignof(max_align_t) == 0 && + "block sizes must be aligned"); + LIBC_ASSERT(is_usable_space_aligned(alignof(max_align_t)) && + "usable space must be aligned to a multiple of max_align_t"); } LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const { return reinterpret_cast(usable_space()) % alignment == 0; } - /// @returns The new inner size of this block that would give the usable - /// space of the next block the given alignment. - LIBC_INLINE size_t padding_for_alignment(size_t alignment) const { - if (is_usable_space_aligned(alignment)) + // Returns the minimum inner size necessary for a block of that size to + // always be able to allocate at the given size and alignment. + // + // Returns 0 if there is no such size. + LIBC_INLINE static size_t min_size_for_allocation(size_t alignment, + size_t size) { + LIBC_ASSERT(alignment >= alignof(max_align_t) && + alignment % alignof(max_align_t) == 0 && + "alignment must be multiple of max_align_t"); + + if (alignment == alignof(max_align_t)) + return size; + + // We must create a new block inside this one (splitting). This requires a + // block header in addition to the requested size. + if (add_overflow(size, sizeof(Block), size)) return 0; - // We need to ensure we can always split this block into a "padding" block - // and the aligned block. To do this, we need enough extra space for at - // least one block. - // - // |block |usable_space | - // |........|......................................| - // ^ - // Alignment requirement + // Beyond that, padding space may need to remain in this block to ensure + // that the usable space of the next block is aligned. 
// + // Consider a position P of some lesser alignment, L, with maximal distance + // to the next position of some greater alignment, G, where G is a multiple + // of L. P must be one L unit past a G-aligned point. If it were one L-unit + // earlier, its distance would be zero. If it were one L-unit later, its + // distance would not be maximal. If it were not some integral number of L + // units away, it would not be L-aligned. // - // |block |space |block |usable_space | - // |........|........|........|....................| - // ^ - // Alignment requirement + // So the maximum distance would be G - L. As a special case, if L is 1 + // (unaligned), the max distance is G - 1. // - alignment = cpp::max(alignment, ALIGNMENT); - uintptr_t start = reinterpret_cast(usable_space()); - uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment); - uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD; - return next_block - start + sizeof(prev_); + // This block's usable space is aligned to max_align_t >= Block. With zero + // padding, the next block's usable space is sizeof(Block) past it, which is + // a point aligned to Block. Thus the max padding needed is alignment - + // alignof(Block). + if (add_overflow(size, alignment - alignof(Block), size)) + return 0; + return size; } - // Check that we can `allocate` a block with a given alignment and size from - // this existing block. - bool can_allocate(size_t alignment, size_t size) const; - // This is the return type for `allocate` which can split one block into up to // three blocks. struct BlockInfo { @@ -309,21 +303,31 @@ class Block { Block *next; }; - // Divide a block into up to 3 blocks according to `BlockInfo`. This should - // only be called if `can_allocate` returns true. + // Divide a block into up to 3 blocks according to `BlockInfo`. Behavior is + // undefined if allocation is not possible for the given size and alignment. 
static BlockInfo allocate(Block *block, size_t alignment, size_t size); + // These two functions may wrap around. + LIBC_INLINE static uintptr_t next_possible_block_start( + uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) { + return align_up(ptr + sizeof(Block), usable_space_alignment) - + sizeof(Block); + } + LIBC_INLINE static uintptr_t prev_possible_block_start( + uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) { + return align_down(ptr, usable_space_alignment) - sizeof(Block); + } + private: /// Construct a block to represent a span of bytes. Overwrites only enough /// memory for the block header; the rest of the span is left alone. LIBC_INLINE static Block *as_block(ByteSpan bytes) { + LIBC_ASSERT(reinterpret_cast(bytes.data()) % alignof(Block) == + 0 && + "block start must be suitably aligned"); return ::new (bytes.data()) Block(bytes.size()); } - /// Like `split`, but assumes the caller has already checked to parameters to - /// ensure the split will succeed. - Block *split_impl(size_t new_inner_size); - /// Offset from this block to the previous block. 0 if this is the first /// block. This field is only alive when the previous block is free; /// otherwise, its memory is reused as part of the previous block's usable @@ -343,81 +347,58 @@ class Block { /// previous block is free. /// * If the `last` flag is set, the block is the sentinel last block. It is /// summarily considered used and has no next block. 
-} __attribute__((packed, aligned(cpp::max(alignof(max_align_t), size_t{4})))); - -inline constexpr size_t Block::BLOCK_OVERHEAD = - align_up(sizeof(Block), ALIGNMENT); - -LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) { - if (bytes.data() == nullptr) - return ByteSpan(); - - auto unaligned_start = reinterpret_cast(bytes.data()); - auto aligned_start = align_up(unaligned_start, alignment); - auto unaligned_end = unaligned_start + bytes.size(); - auto aligned_end = align_down(unaligned_end, alignment); - if (aligned_end <= aligned_start) - return ByteSpan(); +public: + /// Only for testing. + static constexpr size_t PREV_FIELD_SIZE = sizeof(prev_); +}; - return bytes.subspan(aligned_start - unaligned_start, - aligned_end - aligned_start); -} +static_assert(alignof(max_align_t) >= 4, + "at least 2 bits must be available in block sizes for flags"); LIBC_INLINE optional Block::init(ByteSpan region) { - optional result = get_aligned_subspan(region, ALIGNMENT); - if (!result) + if (!region.data()) + return {}; + + uintptr_t start = reinterpret_cast(region.data()); + uintptr_t end = start + region.size(); + if (end < start) return {}; - region = result.value(); - // Two blocks are allocated: a free block and a sentinel last block. 
- if (region.size() < 2 * BLOCK_OVERHEAD) + uintptr_t block_start = next_possible_block_start(start); + if (block_start < start) return {}; - if (cpp::numeric_limits::max() < region.size()) + uintptr_t last_start = prev_possible_block_start(end); + if (last_start >= end) return {}; - Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD)); - Block *last = as_block(region.last(BLOCK_OVERHEAD)); + if (block_start + sizeof(Block) > last_start) + return {}; + + auto *last_start_ptr = reinterpret_cast(last_start); + Block *block = + as_block({reinterpret_cast(block_start), last_start_ptr}); + Block *last = as_block({last_start_ptr, sizeof(Block)}); block->mark_free(); last->mark_last(); return block; } -LIBC_INLINE -bool Block::can_allocate(size_t alignment, size_t size) const { - if (inner_size() < size) - return false; - if (is_usable_space_aligned(alignment)) - return true; - - // Alignment isn't met, so a padding block is needed. Determine amount of - // inner_size() consumed by the padding block. - size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_); - - // Check that there is room for the allocation in the following aligned block. - size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD; - return size <= aligned_inner_size; -} - LIBC_INLINE Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) { - LIBC_ASSERT( - block->can_allocate(alignment, size) && - "Calls to this function for a given alignment and size should only be " - "done if `can_allocate` for these parameters returns true."); + LIBC_ASSERT(alignment % alignof(max_align_t) == 0 && + "alignment must be a multiple of max_align_t"); BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr}; if (!info.block->is_usable_space_aligned(alignment)) { Block *original = info.block; - optional maybe_aligned_block = - original->split(info.block->padding_for_alignment(alignment)); + // The padding block has no minimum size requirement. 
+ optional maybe_aligned_block = original->split(0, alignment); LIBC_ASSERT(maybe_aligned_block.has_value() && - "This split should always result in a new block. The check in " - "`can_allocate` ensures that we have enough space here to make " - "two blocks."); + "it should always be possible to split for alignment"); if (Block *prev = original->prev_free()) { // If there is a free block before this, we can merge the current one with @@ -441,37 +422,40 @@ Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) { } LIBC_INLINE -optional Block::split(size_t new_inner_size) { +optional Block::split(size_t new_inner_size, + size_t usable_space_alignment) { + LIBC_ASSERT(usable_space_alignment % alignof(max_align_t) == 0 && + "alignment must be a multiple of max_align_t"); if (used()) return {}; - // The prev_ field of the next block is always available, so there is a - // minimum size to a block created through splitting. - if (new_inner_size < sizeof(prev_)) - new_inner_size = sizeof(prev_); - - size_t old_inner_size = inner_size(); - new_inner_size = - align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_); - if (old_inner_size < new_inner_size) - return {}; - if (old_inner_size - new_inner_size < BLOCK_OVERHEAD) + // Compute the minimum outer size that produces a block of at least + // `new_inner_size`. 
+ size_t min_outer_size = outer_size(cpp::max(new_inner_size, sizeof(prev_))); + + uintptr_t start = reinterpret_cast(this); + uintptr_t next_block_start = + next_possible_block_start(start + min_outer_size, usable_space_alignment); + if (next_block_start < start) return {}; + size_t new_outer_size = next_block_start - start; + LIBC_ASSERT(new_outer_size % alignof(max_align_t) == 0 && + "new size must be aligned to max_align_t"); - return split_impl(new_inner_size); -} + if (outer_size() < new_outer_size || + outer_size() - new_outer_size < sizeof(Block)) + return {}; -LIBC_INLINE -Block *Block::split_impl(size_t new_inner_size) { - size_t outer_size1 = outer_size(new_inner_size); - LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned"); - ByteSpan new_region = region().subspan(outer_size1); + ByteSpan new_region = region().subspan(new_outer_size); next_ &= ~SIZE_MASK; - next_ |= outer_size1; + next_ |= new_outer_size; Block *new_block = as_block(new_region); mark_free(); // Free status for this block is now stored in new_block. new_block->next()->prev_ = new_region.size(); + + LIBC_ASSERT(new_block->is_usable_space_aligned(usable_space_alignment) && + "usable space must have requested alignment"); return new_block; } diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h index 8fa36257cb91a..d58685194aeb8 100644 --- a/libc/src/__support/freelist_heap.h +++ b/libc/src/__support/freelist_heap.h @@ -89,28 +89,14 @@ LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) { if (!is_initialized) init(); - size_t request_size = size; - - // TODO: usable_space should always be aligned to max_align_t. - if (alignment > alignof(max_align_t) || - (Block::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) { - // TODO: This bound isn't precisely calculated yet. It assumes one extra - // Block::ALIGNMENT to accomodate the possibility for padding block - // overhead. 
(alignment - 1) ensures that there is an aligned point - // somewhere in usable_space, but this isn't tight either, since - // usable_space is also already somewhat aligned. - if (add_overflow(size, (alignment - 1) + Block::ALIGNMENT, request_size)) - return nullptr; - } + size_t request_size = Block::min_size_for_allocation(alignment, size); + if (!request_size) + return nullptr; Block *block = free_store.remove_best_fit(request_size); if (!block) return nullptr; - LIBC_ASSERT(block->can_allocate(alignment, size) && - "block should always be large enough to allocate at the correct " - "alignment"); - auto block_info = Block::allocate(block, alignment, size); if (block_info.next) free_store.insert(block_info.next); @@ -135,6 +121,9 @@ LIBC_INLINE void *FreeListHeap::aligned_allocate(size_t alignment, if (size % alignment != 0) return nullptr; + // The minimum alignment supported by Block is max_align_t. + alignment = cpp::max(alignment, alignof(max_align_t)); + return allocate_impl(alignment, size); } diff --git a/libc/src/__support/freestore.h b/libc/src/__support/freestore.h index 97197dda4b546..09f2479debb36 100644 --- a/libc/src/__support/freestore.h +++ b/libc/src/__support/freestore.h @@ -40,13 +40,12 @@ class FreeStore { Block *remove_best_fit(size_t size); private: - static constexpr size_t ALIGNMENT = alignof(max_align_t); static constexpr size_t MIN_OUTER_SIZE = - align_up(Block::BLOCK_OVERHEAD + sizeof(FreeList::Node), ALIGNMENT); + align_up(sizeof(Block) + sizeof(FreeList::Node), alignof(max_align_t)); static constexpr size_t MIN_LARGE_OUTER_SIZE = - align_up(Block::BLOCK_OVERHEAD + sizeof(FreeTrie::Node), ALIGNMENT); + align_up(sizeof(Block) + sizeof(FreeTrie::Node), alignof(max_align_t)); static constexpr size_t NUM_SMALL_SIZES = - (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / ALIGNMENT; + (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / alignof(max_align_t); LIBC_INLINE static bool too_small(Block *block) { return block->outer_size() < MIN_OUTER_SIZE; @@ -99,7 
+98,8 @@ LIBC_INLINE Block *FreeStore::remove_best_fit(size_t size) { LIBC_INLINE FreeList &FreeStore::small_list(Block *block) { LIBC_ASSERT(is_small(block) && "only legal for small blocks"); - return small_lists[(block->outer_size() - MIN_OUTER_SIZE) / ALIGNMENT]; + return small_lists[(block->outer_size() - MIN_OUTER_SIZE) / + alignof(max_align_t)]; } LIBC_INLINE FreeList *FreeStore::find_best_small_fit(size_t size) { diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp index 5e437db51b609..c2d9833fb9439 100644 --- a/libc/test/src/__support/block_test.cpp +++ b/libc/test/src/__support/block_test.cpp @@ -22,23 +22,28 @@ using LIBC_NAMESPACE::cpp::span; TEST(LlvmLibcBlockTest, CanCreateSingleAlignedBlock) { constexpr size_t kN = 1024; - alignas(Block::ALIGNMENT) array bytes; + alignas(max_align_t) array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; + EXPECT_EQ(reinterpret_cast(block) % alignof(Block), size_t{0}); + EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t))); + Block *last = block->next(); ASSERT_NE(last, static_cast(nullptr)); - constexpr size_t last_outer_size = Block::BLOCK_OVERHEAD; - EXPECT_EQ(last->outer_size(), last_outer_size); + EXPECT_EQ(reinterpret_cast(last) % alignof(Block), size_t{0}); + + EXPECT_EQ(last->outer_size(), sizeof(Block)); EXPECT_EQ(last->prev_free(), block); EXPECT_TRUE(last->used()); - EXPECT_EQ(block->outer_size(), kN - last_outer_size); - constexpr size_t last_prev_field_size = sizeof(size_t); - EXPECT_EQ(block->inner_size(), kN - last_outer_size - Block::BLOCK_OVERHEAD + - last_prev_field_size); + size_t block_outer_size = + reinterpret_cast(last) - reinterpret_cast(block); + EXPECT_EQ(block->outer_size(), block_outer_size); + EXPECT_EQ(block->inner_size(), + block_outer_size - sizeof(Block) + Block::PREV_FIELD_SIZE); EXPECT_EQ(block->prev_free(), static_cast(nullptr)); EXPECT_FALSE(block->used()); } @@ -47,11 +52,19 @@ 
TEST(LlvmLibcBlockTest, CanCreateUnalignedSingleBlock) { constexpr size_t kN = 1024; // Force alignment, so we can un-force it below - alignas(Block::ALIGNMENT) array bytes; + alignas(max_align_t) array bytes; span aligned(bytes); auto result = Block::init(aligned.subspan(1)); EXPECT_TRUE(result.has_value()); + + Block *block = *result; + EXPECT_EQ(reinterpret_cast(block) % alignof(Block), size_t{0}); + EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t))); + + Block *last = block->next(); + ASSERT_NE(last, static_cast(nullptr)); + EXPECT_EQ(reinterpret_cast(last) % alignof(Block), size_t{0}); } TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) { @@ -62,11 +75,10 @@ TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) { TEST(LlvmLibcBlockTest, CanSplitBlock) { constexpr size_t kN = 1024; - constexpr size_t prev_field_size = sizeof(size_t); // Give the split position a large alignment. - constexpr size_t kSplitN = 512 + prev_field_size; + constexpr size_t kSplitN = 512 + Block::PREV_FIELD_SIZE; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); auto *block1 = *result; @@ -78,10 +90,12 @@ TEST(LlvmLibcBlockTest, CanSplitBlock) { EXPECT_EQ(block1->inner_size(), kSplitN); EXPECT_EQ(block1->outer_size(), - kSplitN - prev_field_size + Block::BLOCK_OVERHEAD); + kSplitN - Block::PREV_FIELD_SIZE + sizeof(Block)); EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size()); EXPECT_FALSE(block2->used()); + EXPECT_EQ(reinterpret_cast(block2) % alignof(Block), size_t{0}); + EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t))); EXPECT_EQ(block1->next(), block2); EXPECT_EQ(block2->prev_free(), block1); @@ -90,28 +104,24 @@ TEST(LlvmLibcBlockTest, CanSplitBlock) { TEST(LlvmLibcBlockTest, CanSplitBlockUnaligned) { constexpr size_t kN = 1024; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = 
*result; size_t orig_size = block1->outer_size(); constexpr size_t kSplitN = 513; - constexpr size_t prev_field_size = sizeof(size_t); - uintptr_t split_addr = - reinterpret_cast(block1) + (kSplitN - prev_field_size); - // Round split_addr up to a multiple of the alignment. - split_addr += alignof(Block) - (split_addr % alignof(Block)); - uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size; result = block1->split(kSplitN); ASSERT_TRUE(result.has_value()); Block *block2 = *result; - EXPECT_EQ(block1->inner_size(), split_len); + EXPECT_GE(block1->inner_size(), kSplitN); EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size()); EXPECT_FALSE(block2->used()); + EXPECT_EQ(reinterpret_cast(block2) % alignof(Block), size_t{0}); + EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t))); EXPECT_EQ(block1->next(), block2); EXPECT_EQ(block2->prev_free(), block1); @@ -131,7 +141,7 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) { constexpr size_t kSplit1 = 512; constexpr size_t kSplit2 = 256; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = *result; @@ -152,27 +162,25 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) { TEST(LlvmLibcBlockTest, CannotSplitTooSmallBlock) { constexpr size_t kN = 64; - constexpr size_t kSplitN = kN + 1; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; - result = block->split(kSplitN); + result = block->split(block->inner_size() + 1); ASSERT_FALSE(result.has_value()); } TEST(LlvmLibcBlockTest, CannotSplitBlockWithoutHeaderSpace) { constexpr size_t kN = 1024; - constexpr size_t kSplitN = kN - 2 * Block::BLOCK_OVERHEAD - 1; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; - result = block->split(kSplitN); + result = 
block->split(block->inner_size() - sizeof(Block) + 1); ASSERT_FALSE(result.has_value()); } @@ -180,7 +188,7 @@ TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) { // Ensure that we can't ask for more space than the block actually has... constexpr size_t kN = 1024; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; @@ -189,55 +197,41 @@ TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) { ASSERT_FALSE(result.has_value()); } -TEST(LlvmLibcBlockTest, CannotMakeSecondBlockLargerInSplit) { - // Ensure that the second block in split is at least of the size of header. - constexpr size_t kN = 1024; - - alignas(Block::ALIGNMENT) array bytes; - auto result = Block::init(bytes); - ASSERT_TRUE(result.has_value()); - Block *block = *result; - - result = block->split(block->inner_size() - Block::BLOCK_OVERHEAD + 1); - ASSERT_FALSE(result.has_value()); -} - TEST(LlvmLibcBlockTest, CanMakeMinimalSizeFirstBlock) { // This block does support splitting with minimal payload size. constexpr size_t kN = 1024; - constexpr size_t minimal_size = sizeof(size_t); - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; - result = block->split(minimal_size); + result = block->split(0); ASSERT_TRUE(result.has_value()); - EXPECT_EQ(block->inner_size(), minimal_size); + EXPECT_LE(block->outer_size(), sizeof(Block) + alignof(max_align_t)); } TEST(LlvmLibcBlockTest, CanMakeMinimalSizeSecondBlock) { // Likewise, the split block can be minimal-width. 
constexpr size_t kN = 1024; - constexpr size_t minimal_size = sizeof(size_t); - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = *result; - result = block1->split(block1->inner_size() - Block::BLOCK_OVERHEAD); + result = block1->split(Block::prev_possible_block_start( + reinterpret_cast(block1->next())) - + reinterpret_cast(block1->usable_space()) + + Block::PREV_FIELD_SIZE); ASSERT_TRUE(result.has_value()); - Block *block2 = *result; - - EXPECT_EQ(block2->inner_size(), minimal_size); + EXPECT_LE((*result)->outer_size(), sizeof(Block) + alignof(max_align_t)); } TEST(LlvmLibcBlockTest, CanMarkBlockUsed) { constexpr size_t kN = 1024; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; @@ -255,7 +249,7 @@ TEST(LlvmLibcBlockTest, CannotSplitUsedBlock) { constexpr size_t kN = 1024; constexpr size_t kSplitN = 512; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; @@ -269,21 +263,19 @@ TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) { // Do the three way merge from "CanSplitMidBlock", and let's // merge block 3 and 2 constexpr size_t kN = 1024; - // Give the split positions large alignments. 
- constexpr size_t prev_field_size = sizeof(size_t); - constexpr size_t kSplit1 = 512 + prev_field_size; - constexpr size_t kSplit2 = 256 + prev_field_size; - - alignas(Block::ALIGNMENT) array bytes; + constexpr size_t kSplit1 = 512; + constexpr size_t kSplit2 = 256; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = *result; - size_t orig_size = block1->outer_size(); + size_t total_size = block1->outer_size(); result = block1->split(kSplit1); ASSERT_TRUE(result.has_value()); result = block1->split(kSplit2); + size_t block1_size = block1->outer_size(); ASSERT_TRUE(result.has_value()); Block *block3 = *result; @@ -291,15 +283,15 @@ TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) { EXPECT_EQ(block1->next(), block3); EXPECT_EQ(block3->prev_free(), block1); - EXPECT_EQ(block1->inner_size(), kSplit2); - EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size()); + EXPECT_EQ(block1->outer_size(), block1_size); + EXPECT_EQ(block3->outer_size(), total_size - block1->outer_size()); } TEST(LlvmLibcBlockTest, CannotMergeWithFirstOrLastBlock) { constexpr size_t kN = 1024; constexpr size_t kSplitN = 512; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = *result; @@ -316,7 +308,7 @@ TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) { constexpr size_t kN = 1024; constexpr size_t kSplitN = 512; - alignas(Block::ALIGNMENT) array bytes; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; @@ -330,9 +322,7 @@ TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) { } TEST(LlvmLibcBlockTest, CanGetBlockFromUsableSpace) { - constexpr size_t kN = 1024; - - array bytes{}; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block1 = *result; @@ -355,93 +345,85 @@ TEST(LlvmLibcBlockTest, CanGetConstBlockFromUsableSpace) { EXPECT_EQ(block1, block2); } -TEST(LlvmLibcBlockTest, 
CanAllocate) { - constexpr size_t kN = 1024 + Block::BLOCK_OVERHEAD; +TEST(LlvmLibcBlockTest, Allocate) { + constexpr size_t kN = 1024; // Ensure we can allocate everything up to the block size within this block. - for (size_t i = 0; i < kN - 2 * Block::BLOCK_OVERHEAD; ++i) { - alignas(Block::ALIGNMENT) array bytes{}; + for (size_t i = 0; i < kN; ++i) { + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; - constexpr size_t ALIGN = 1; // Effectively ignores alignment. - EXPECT_TRUE(block->can_allocate(ALIGN, i)); + if (i > block->inner_size()) + continue; - // For each can_allocate, we should be able to do a successful call to - // allocate. - auto info = Block::allocate(block, ALIGN, i); + auto info = Block::allocate(block, alignof(max_align_t), i); EXPECT_NE(info.block, static_cast(nullptr)); } - alignas(Block::ALIGNMENT) array bytes{}; - auto result = Block::init(bytes); - ASSERT_TRUE(result.has_value()); - Block *block = *result; + // Ensure we can allocate a byte at every guaranteeable alignment. + for (size_t i = 1; i < kN / alignof(max_align_t); ++i) { + array bytes; + auto result = Block::init(bytes); + ASSERT_TRUE(result.has_value()); + Block *block = *result; - // Given a block of size N (assuming it's also a power of two), we should be - // able to allocate a block within it that's aligned to N/2. This is - // because regardless of where the buffer is located, we can always find a - // starting location within it that meets this alignment. 
- EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1)); - auto info = Block::allocate(block, block->outer_size() / 2, 1); - EXPECT_NE(info.block, static_cast(nullptr)); + size_t alignment = i * alignof(max_align_t); + if (Block::min_size_for_allocation(alignment, 1) > block->inner_size()) + continue; + + auto info = Block::allocate(block, alignment, 1); + EXPECT_NE(info.block, static_cast(nullptr)); + } } TEST(LlvmLibcBlockTest, AllocateAlreadyAligned) { constexpr size_t kN = 1024; - alignas(Block::ALIGNMENT) array bytes{}; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; + uintptr_t orig_end = reinterpret_cast(block) + block->outer_size(); - // This should result in no new blocks. - constexpr size_t kAlignment = Block::ALIGNMENT; - constexpr size_t prev_field_size = sizeof(size_t); - constexpr size_t kExpectedSize = Block::ALIGNMENT + prev_field_size; - EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize)); + constexpr size_t SIZE = Block::PREV_FIELD_SIZE + 1; auto [aligned_block, prev, next] = - Block::allocate(block, Block::ALIGNMENT, kExpectedSize); + Block::allocate(block, alignof(max_align_t), SIZE); // Since this is already aligned, there should be no previous block. EXPECT_EQ(prev, static_cast(nullptr)); - // Ensure we the block is aligned and the size we expect. + // Ensure we the block is aligned and large enough. EXPECT_NE(aligned_block, static_cast(nullptr)); - EXPECT_TRUE(aligned_block->is_usable_space_aligned(Block::ALIGNMENT)); - EXPECT_EQ(aligned_block->inner_size(), kExpectedSize); + EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignof(max_align_t))); + EXPECT_GE(aligned_block->inner_size(), SIZE); // Check the next block. 
EXPECT_NE(next, static_cast(nullptr)); EXPECT_EQ(aligned_block->next(), next); - EXPECT_EQ(reinterpret_cast(next) + next->outer_size(), - bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD); + EXPECT_EQ(reinterpret_cast(next) + next->outer_size(), orig_end); } TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) { constexpr size_t kN = 1024; - alignas(kN) array bytes{}; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; - // Ensure first the usable_data is only aligned to the block alignment. - ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD); - ASSERT_EQ(block->prev_free(), static_cast(nullptr)); + uintptr_t orig_end = reinterpret_cast(block) + block->outer_size(); // Now pick an alignment such that the usable space is not already aligned to // it. We want to explicitly test that the block will split into one before // it. - constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8; - ASSERT_FALSE(block->is_usable_space_aligned(kAlignment)); - - constexpr size_t kSize = 10; - EXPECT_TRUE(block->can_allocate(kAlignment, kSize)); + size_t alignment = alignof(max_align_t); + while (block->is_usable_space_aligned(alignment)) + alignment += alignof(max_align_t); - auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize); + auto [aligned_block, prev, next] = Block::allocate(block, alignment, 10); // Check the previous block was created appropriately. Since this block is the // first block, a new one should be made before this. @@ -453,19 +435,18 @@ TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) { // Ensure we the block is aligned and the size we expect. EXPECT_NE(next, static_cast(nullptr)); - EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment)); + EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignment)); // Check the next block. 
EXPECT_NE(next, static_cast(nullptr)); EXPECT_EQ(aligned_block->next(), next); - EXPECT_EQ(reinterpret_cast(next) + next->outer_size(), - bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD); + EXPECT_EQ(reinterpret_cast(next) + next->outer_size(), orig_end); } TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) { constexpr size_t kN = 1024; - alignas(kN) array bytes{}; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; @@ -480,15 +461,12 @@ TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) { // Now pick an alignment such that the usable space is not already aligned to // it. We want to explicitly test that the block will split into one before // it. - constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8; - ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment)); + size_t alignment = alignof(max_align_t); + while (newblock->is_usable_space_aligned(alignment)) + alignment += alignof(max_align_t); // Ensure we can allocate in the new block. - constexpr size_t kSize = Block::ALIGNMENT; - EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize)); - - auto [aligned_block, prev, next] = - Block::allocate(newblock, kAlignment, kSize); + auto [aligned_block, prev, next] = Block::allocate(newblock, alignment, 1); // Now there should be no new previous block. Instead, the padding we did // create should be merged into the original previous block. @@ -505,26 +483,26 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) { // This is the same setup as with the `AllocateNeedsAlignment` test case. constexpr size_t kN = 1024; - alignas(kN) array bytes{}; + array bytes; auto result = Block::init(bytes); ASSERT_TRUE(result.has_value()); Block *block = *result; + + Block *orig_block = block; + size_t orig_size = orig_block->outer_size(); + Block *last = block->next(); - // Ensure first the usable_data is only aligned to the block alignment. 
- ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD); ASSERT_EQ(block->prev_free(), static_cast(nullptr)); // Now pick an alignment such that the usable space is not already aligned to // it. We want to explicitly test that the block will split into one before // it. - constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8; - ASSERT_FALSE(block->is_usable_space_aligned(kAlignment)); - - constexpr size_t kSize = Block::ALIGNMENT; - EXPECT_TRUE(block->can_allocate(kAlignment, kSize)); + size_t alignment = alignof(max_align_t); + while (block->is_usable_space_aligned(alignment)) + alignment += alignof(max_align_t); - auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize); + auto [aligned_block, prev, next] = Block::allocate(block, alignment, 1); // Check we have the appropriate blocks. ASSERT_NE(prev, static_cast(nullptr)); @@ -540,8 +518,6 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) { EXPECT_EQ(prev->next(), last); // We should have the original buffer. 
- EXPECT_EQ(reinterpret_cast(prev), &*bytes.begin()); - EXPECT_EQ(prev->outer_size(), bytes.size() - Block::BLOCK_OVERHEAD); - EXPECT_EQ(reinterpret_cast(prev) + prev->outer_size(), - &*bytes.end() - Block::BLOCK_OVERHEAD); + EXPECT_EQ(prev, orig_block); + EXPECT_EQ(prev->outer_size(), orig_size); } diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp index 991c158825a88..0623272dd5b9f 100644 --- a/libc/test/src/__support/freelist_heap_test.cpp +++ b/libc/test/src/__support/freelist_heap_test.cpp @@ -42,7 +42,7 @@ using LIBC_NAMESPACE::cpp::span; void RunTest(FreeListHeap &allocator, [[maybe_unused]] size_t N); \ }; \ TEST_F(LlvmLibcFreeListHeapTest##TestCase, TestCase) { \ - alignas(Block) byte buf[BufferSize] = {byte(0)}; \ + byte buf[BufferSize] = {byte(0)}; \ FreeListHeap allocator(buf); \ RunTest(allocator, BufferSize); \ RunTest(*freelist_heap, freelist_heap->region().size()); \ @@ -95,30 +95,31 @@ TEST_FOR_EACH_ALLOCATOR(ReturnsNullWhenAllocationTooLarge, 2048) { // is used for other test cases and we don't explicitly free them. TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) { constexpr size_t N = 2048; - alignas(Block) byte buf[N] = {byte(0)}; + byte buf[N]; FreeListHeap allocator(buf); - // Use aligned_allocate so we don't need to worry about ensuring the `buf` - // being aligned to max_align_t. 
- EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block::BLOCK_OVERHEAD), - static_cast(nullptr)); + bool went_null = false; + for (size_t i = 0; i < N; i++) { + if (!allocator.allocate(1)) { + went_null = true; + break; + } + } + EXPECT_TRUE(went_null); EXPECT_EQ(allocator.allocate(1), static_cast(nullptr)); } TEST_FOR_EACH_ALLOCATOR(ReturnedPointersAreAligned, 2048) { void *ptr1 = allocator.allocate(1); - // Should be aligned to native pointer alignment uintptr_t ptr1_start = reinterpret_cast(ptr1); - size_t alignment = alignof(void *); - - EXPECT_EQ(ptr1_start % alignment, static_cast(0)); + EXPECT_EQ(ptr1_start % alignof(max_align_t), static_cast(0)); void *ptr2 = allocator.allocate(1); uintptr_t ptr2_start = reinterpret_cast(ptr2); - EXPECT_EQ(ptr2_start % alignment, static_cast(0)); + EXPECT_EQ(ptr2_start % alignof(max_align_t), static_cast(0)); } TEST_FOR_EACH_ALLOCATOR(CanRealloc, 2048) { @@ -241,16 +242,14 @@ TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) { // This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to // explicitly ensure that the buffer can still return aligned allocations even -// if the underlying buffer is at most aligned to the Block alignment. This -// is so we can check that we can still get aligned allocations even if the -// underlying buffer is not aligned to the alignments we request. -TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockAligned) { - constexpr size_t BUFFER_SIZE = 4096; - constexpr size_t BUFFER_ALIGNMENT = alignof(Block) * 2; - alignas(BUFFER_ALIGNMENT) byte buf[BUFFER_SIZE] = {byte(0)}; - - // Ensure the underlying buffer is at most aligned to the block type. - FreeListHeap allocator(span(buf).subspan(alignof(Block))); +// if the underlying buffer is unaligned. This is so we can check that we can +// still get aligned allocations even if the underlying buffer is not aligned to +// the alignments we request. 
+TEST(LlvmLibcFreeListHeap, AlignedAllocUnalignedBuffer) { + byte buf[4096] = {byte(0)}; + + // Ensure the underlying buffer is poorly aligned. + FreeListHeap allocator(span(buf).subspan(1)); constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256}; constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5}; diff --git a/libc/test/src/__support/freestore_test.cpp b/libc/test/src/__support/freestore_test.cpp index 7960d32c8bbf0..a32badb39b1e6 100644 --- a/libc/test/src/__support/freestore_test.cpp +++ b/libc/test/src/__support/freestore_test.cpp @@ -24,7 +24,7 @@ TEST(LlvmLibcFreeStore, TooSmall) { optional maybeBlock = Block::init(mem); ASSERT_TRUE(maybeBlock.has_value()); Block *too_small = *maybeBlock; - maybeBlock = too_small->split(sizeof(size_t)); + maybeBlock = too_small->split(Block::PREV_FIELD_SIZE); ASSERT_TRUE(maybeBlock.has_value()); Block *remainder = *maybeBlock; @@ -43,12 +43,12 @@ TEST(LlvmLibcFreeStore, RemoveBestFit) { ASSERT_TRUE(maybeBlock.has_value()); Block *smallest = *maybeBlock; - maybeBlock = smallest->split(sizeof(FreeList::Node) + sizeof(size_t)); + maybeBlock = smallest->split(sizeof(FreeList::Node) + Block::PREV_FIELD_SIZE); ASSERT_TRUE(maybeBlock.has_value()); Block *largest_small = *maybeBlock; - maybeBlock = largest_small->split(sizeof(FreeTrie::Node) + sizeof(size_t) - - alignof(max_align_t)); + maybeBlock = largest_small->split( + sizeof(FreeTrie::Node) + Block::PREV_FIELD_SIZE - alignof(max_align_t)); ASSERT_TRUE(maybeBlock.has_value()); if (largest_small->inner_size() == smallest->inner_size()) largest_small = smallest; @@ -86,7 +86,7 @@ TEST(LlvmLibcFreeStore, Remove) { ASSERT_TRUE(maybeBlock.has_value()); Block *small = *maybeBlock; - maybeBlock = small->split(sizeof(FreeList::Node) + sizeof(size_t)); + maybeBlock = small->split(sizeof(FreeList::Node) + Block::PREV_FIELD_SIZE); ASSERT_TRUE(maybeBlock.has_value()); Block *remainder = *maybeBlock; diff --git a/libc/test/src/unistd/getopt_test.cpp 
b/libc/test/src/unistd/getopt_test.cpp index 8217f7bb6e731..1a31094e98fc8 100644 --- a/libc/test/src/unistd/getopt_test.cpp +++ b/libc/test/src/unistd/getopt_test.cpp @@ -79,7 +79,7 @@ struct LlvmLibcGetoptTest : public LIBC_NAMESPACE::testing::Test { // This is safe because getopt doesn't currently permute argv like GNU's getopt // does so this just helps silence warnings. -char *operator"" _c(const char *c, size_t) { return const_cast(c); } +char *operator""_c(const char *c, size_t) { return const_cast(c); } TEST_F(LlvmLibcGetoptTest, NoMatch) { array argv{"prog"_c, "arg1"_c, nullptr}; diff --git a/libc/utils/hdrgen/function.py b/libc/utils/hdrgen/function.py index d97df7f8a50ec..8ae47e574785f 100644 --- a/libc/utils/hdrgen/function.py +++ b/libc/utils/hdrgen/function.py @@ -22,7 +22,7 @@ def __init__( def __str__(self): attributes_str = " ".join(self.attributes) - arguments_str = ", ".join(self.arguments) + arguments_str = ", ".join(self.arguments) if self.arguments else "void" if attributes_str == "": result = f"{self.return_type} {self.name}({arguments_str})" else: diff --git a/libc/utils/hdrgen/tests/expected_output/test_header.h b/libc/utils/hdrgen/tests/expected_output/test_header.h index a777976134b04..d730078fba064 100644 --- a/libc/utils/hdrgen/tests/expected_output/test_header.h +++ b/libc/utils/hdrgen/tests/expected_output/test_header.h @@ -28,10 +28,10 @@ enum { __BEGIN_C_DECLS -CONST_FUNC_A void func_a() __NOEXCEPT; +CONST_FUNC_A void func_a(void) __NOEXCEPT; #ifdef LIBC_TYPES_HAS_FLOAT128 -float128 func_b() __NOEXCEPT; +float128 func_b(void) __NOEXCEPT; #endif // LIBC_TYPES_HAS_FLOAT128 #ifdef LIBC_TYPES_HAS_FLOAT16 diff --git a/libclc/clc/include/clc/clcmacro.h b/libclc/clc/include/clc/clcmacro.h index c6583749eca66..3c3a69f4f848b 100644 --- a/libclc/clc/include/clc/clcmacro.h +++ b/libclc/clc/include/clc/clcmacro.h @@ -102,29 +102,6 @@ FUNCTION(x.hi, y.hi, z.hi)); \ } -#define _CLC_V_S_S_V_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ - 
ARG2_TYPE, ARG3_TYPE) \ - DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##2 z) { \ - return (RET_TYPE##2)(FUNCTION(x, y, z.lo), FUNCTION(x, y, z.hi)); \ - } \ - \ - DECLSPEC RET_TYPE##3 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##3 z) { \ - return (RET_TYPE##3)(FUNCTION(x, y, z.x), FUNCTION(x, y, z.y), \ - FUNCTION(x, y, z.z)); \ - } \ - \ - DECLSPEC RET_TYPE##4 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##4 z) { \ - return (RET_TYPE##4)(FUNCTION(x, y, z.lo), FUNCTION(x, y, z.hi)); \ - } \ - \ - DECLSPEC RET_TYPE##8 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##8 z) { \ - return (RET_TYPE##8)(FUNCTION(x, y, z.lo), FUNCTION(x, y, z.hi)); \ - } \ - \ - DECLSPEC RET_TYPE##16 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##16 z) { \ - return (RET_TYPE##16)(FUNCTION(x, y, z.lo), FUNCTION(x, y, z.hi)); \ - } - #define _CLC_V_V_VP_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ ADDR_SPACE, ARG2_TYPE) \ DECLSPEC __CLC_XCONCAT(RET_TYPE, 2) \ diff --git a/libclc/clc/include/clc/common/clc_smoothstep.h b/libclc/clc/include/clc/common/clc_smoothstep.h new file mode 100644 index 0000000000000..fa212245e0794 --- /dev/null +++ b/libclc/clc/include/clc/common/clc_smoothstep.h @@ -0,0 +1,11 @@ +#ifndef __CLC_COMMON_CLC_SMOOTHSTEP_H__ +#define __CLC_COMMON_CLC_SMOOTHSTEP_H__ + +// note: Unlike OpenCL __clc_smoothstep is only defined for three matching +// argument types. 
+ +#define __CLC_BODY +#include +#undef __CLC_BODY + +#endif // __CLC_COMMON_CLC_SMOOTHSTEP_H__ diff --git a/libclc/clc/include/clc/common/clc_smoothstep.inc b/libclc/clc/include/clc/common/clc_smoothstep.inc new file mode 100644 index 0000000000000..3ce33c5573f6c --- /dev/null +++ b/libclc/clc/include/clc/common/clc_smoothstep.inc @@ -0,0 +1,3 @@ +_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE __clc_smoothstep(__CLC_GENTYPE edge0, + __CLC_GENTYPE edge1, + __CLC_GENTYPE x); diff --git a/libclc/clc/include/clc/shared/clc_clamp.h b/libclc/clc/include/clc/shared/clc_clamp.h index d9d39413c5618..7fd22771c09c0 100644 --- a/libclc/clc/include/clc/shared/clc_clamp.h +++ b/libclc/clc/include/clc/shared/clc_clamp.h @@ -1,17 +1,10 @@ #ifndef __CLC_SHARED_CLC_CLAMP_H__ #define __CLC_SHARED_CLC_CLAMP_H__ -#if defined(CLC_CLSPV) || defined(CLC_SPIRV) -// clspv and spir-v targets provide their own OpenCL-compatible clamp -#define __clc_clamp clamp -#else - #define __CLC_BODY #include #define __CLC_BODY #include -#endif - #endif // __CLC_SHARED_CLC_CLAMP_H__ diff --git a/libclc/clc/lib/clspv/SOURCES b/libclc/clc/lib/clspv/SOURCES index 393e8d773cda0..e6573f586080c 100644 --- a/libclc/clc/lib/clspv/SOURCES +++ b/libclc/clc/lib/clspv/SOURCES @@ -3,3 +3,4 @@ ../generic/math/clc_floor.cl ../generic/math/clc_rint.cl ../generic/math/clc_trunc.cl +../generic/shared/clc_clamp.cl diff --git a/libclc/clc/lib/generic/SOURCES b/libclc/clc/lib/generic/SOURCES index 3916ea15f5c45..f3097de694422 100644 --- a/libclc/clc/lib/generic/SOURCES +++ b/libclc/clc/lib/generic/SOURCES @@ -1,3 +1,4 @@ +common/clc_smoothstep.cl geometric/clc_dot.cl integer/clc_abs.cl integer/clc_abs_diff.cl diff --git a/libclc/clc/lib/generic/common/clc_smoothstep.cl b/libclc/clc/lib/generic/common/clc_smoothstep.cl new file mode 100644 index 0000000000000..ea0e9ed3bb19c --- /dev/null +++ b/libclc/clc/lib/generic/common/clc_smoothstep.cl @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014,2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include +#include + +#define SMOOTHSTEP_SINGLE_DEF(edge_type, x_type, lit_suff) \ + _CLC_OVERLOAD _CLC_DEF x_type __clc_smoothstep(edge_type edge0, \ + edge_type edge1, x_type x) { \ + x_type t = __clc_clamp((x - edge0) / (edge1 - edge0), 0.0##lit_suff, \ + 1.0##lit_suff); \ + return t * t * (3.0##lit_suff - 2.0##lit_suff * t); \ + } + +#define SMOOTHSTEP_DEF(type, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type, type, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type##2, type##2, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type##3, type##3, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type##4, type##4, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type##8, type##8, lit_suffix) \ + SMOOTHSTEP_SINGLE_DEF(type##16, type##16, lit_suffix) + +SMOOTHSTEP_DEF(float, F) + +#ifdef cl_khr_fp64 +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +SMOOTHSTEP_DEF(double, ); +#endif + +#ifdef cl_khr_fp16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +SMOOTHSTEP_DEF(half, H); +#endif diff --git a/libclc/clc/lib/spirv/SOURCES b/libclc/clc/lib/spirv/SOURCES index 3b29fa0a91624..02784b8def682 100644 --- a/libclc/clc/lib/spirv/SOURCES +++ b/libclc/clc/lib/spirv/SOURCES @@ -1,6 +1,8 @@ +../generic/common/clc_smoothstep.cl ../generic/geometric/clc_dot.cl ../generic/math/clc_ceil.cl ../generic/math/clc_fabs.cl ../generic/math/clc_floor.cl ../generic/math/clc_rint.cl ../generic/math/clc_trunc.cl +../generic/shared/clc_clamp.cl diff --git a/libclc/clc/lib/spirv64/SOURCES b/libclc/clc/lib/spirv64/SOURCES index 3b29fa0a91624..02784b8def682 100644 --- a/libclc/clc/lib/spirv64/SOURCES +++ b/libclc/clc/lib/spirv64/SOURCES @@ -1,6 +1,8 @@ +../generic/common/clc_smoothstep.cl ../generic/geometric/clc_dot.cl ../generic/math/clc_ceil.cl ../generic/math/clc_fabs.cl ../generic/math/clc_floor.cl ../generic/math/clc_rint.cl ../generic/math/clc_trunc.cl +../generic/shared/clc_clamp.cl diff --git a/libclc/generic/lib/common/smoothstep.cl b/libclc/generic/lib/common/smoothstep.cl index 4cdecfc4abe26..78d62044f439b 100644 
--- a/libclc/generic/lib/common/smoothstep.cl +++ b/libclc/generic/lib/common/smoothstep.cl @@ -22,35 +22,61 @@ #include #include +#include -_CLC_OVERLOAD _CLC_DEF float smoothstep(float edge0, float edge1, float x) { - float t = clamp((x - edge0) / (edge1 - edge0), 0.0f, 1.0f); - return t * t * (3.0f - 2.0f * t); -} +#define SMOOTHSTEP_SINGLE_DEF(X_TYPE) \ + _CLC_OVERLOAD _CLC_DEF X_TYPE smoothstep(X_TYPE edge0, X_TYPE edge1, \ + X_TYPE x) { \ + return __clc_smoothstep(edge0, edge1, x); \ + } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, smoothstep, float, float, float); +#define SMOOTHSTEP_S_S_V_DEFS(X_TYPE) \ + _CLC_OVERLOAD _CLC_DEF X_TYPE##2 smoothstep(X_TYPE x, X_TYPE y, \ + X_TYPE##2 z) { \ + return __clc_smoothstep((X_TYPE##2)x, (X_TYPE##2)y, z); \ + } \ + \ + _CLC_OVERLOAD _CLC_DEF X_TYPE##3 smoothstep(X_TYPE x, X_TYPE y, \ + X_TYPE##3 z) { \ + return __clc_smoothstep((X_TYPE##3)x, (X_TYPE##3)y, z); \ + } \ + \ + _CLC_OVERLOAD _CLC_DEF X_TYPE##4 smoothstep(X_TYPE x, X_TYPE y, \ + X_TYPE##4 z) { \ + return __clc_smoothstep((X_TYPE##4)x, (X_TYPE##4)y, z); \ + } \ + \ + _CLC_OVERLOAD _CLC_DEF X_TYPE##8 smoothstep(X_TYPE x, X_TYPE y, \ + X_TYPE##8 z) { \ + return __clc_smoothstep((X_TYPE##8)x, (X_TYPE##8)y, z); \ + } \ + \ + _CLC_OVERLOAD _CLC_DEF X_TYPE##16 smoothstep(X_TYPE x, X_TYPE y, \ + X_TYPE##16 z) { \ + return __clc_smoothstep((X_TYPE##16)x, (X_TYPE##16)y, z); \ + } -_CLC_V_S_S_V_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, smoothstep, float, float, float); +#define SMOOTHSTEP_DEF(type) \ + SMOOTHSTEP_SINGLE_DEF(type) \ + SMOOTHSTEP_SINGLE_DEF(type##2) \ + SMOOTHSTEP_SINGLE_DEF(type##3) \ + SMOOTHSTEP_SINGLE_DEF(type##4) \ + SMOOTHSTEP_SINGLE_DEF(type##8) \ + SMOOTHSTEP_SINGLE_DEF(type##16) \ + SMOOTHSTEP_S_S_V_DEFS(type) + +SMOOTHSTEP_DEF(float) #ifdef cl_khr_fp64 #pragma OPENCL EXTENSION cl_khr_fp64 : enable -#define SMOOTH_STEP_DEF(edge_type, x_type, impl) \ - _CLC_OVERLOAD _CLC_DEF x_type smoothstep(edge_type edge0, edge_type edge1, x_type x) 
{ \ - double t = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0); \ - return t * t * (3.0 - 2.0 * t); \ - } - -SMOOTH_STEP_DEF(double, double, SMOOTH_STEP_IMPL_D); +SMOOTHSTEP_DEF(double); -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, smoothstep, double, double, double); +#endif -#if !defined(CLC_SPIRV) -SMOOTH_STEP_DEF(float, double, SMOOTH_STEP_IMPL_D); -SMOOTH_STEP_DEF(double, float, SMOOTH_STEP_IMPL_D); +#ifdef cl_khr_fp16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable -_CLC_V_S_S_V_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, smoothstep, float, float, double); -_CLC_V_S_S_V_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, smoothstep, double, double, float); -#endif +SMOOTHSTEP_DEF(half); #endif diff --git a/libcxx/docs/ReleaseNotes/20.rst b/libcxx/docs/ReleaseNotes/20.rst index be330a9afc331..2736061544c53 100644 --- a/libcxx/docs/ReleaseNotes/20.rst +++ b/libcxx/docs/ReleaseNotes/20.rst @@ -55,7 +55,8 @@ Improvements and New Features - The ``_LIBCPP_ENABLE_CXX20_REMOVED_TEMPORARY_BUFFER`` macro has been added to make ``std::get_temporary_buffer`` and ``std::return_temporary_buffer`` available. -- The ``_LIBCPP_ENABLE_CXX20_REMOVED_UNCAUGHT_EXCEPTION`` macro has been added to make ``std::uncaught_exception`` +- The ``std::uncaught_exception`` function was marked as deprecated since C++17 and removed since C++20. The + ``_LIBCPP_ENABLE_CXX20_REMOVED_UNCAUGHT_EXCEPTION`` macro has been added to make ``std::uncaught_exception`` available in C++20 and later modes. - The internal structure ``__compressed_pair`` has been replaced with ``[[no_unique_address]]``, resulting in reduced @@ -69,12 +70,12 @@ Improvements and New Features - The ``_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY`` ABI configuration was added, which allows storing valid bounds in ``std::array::iterator`` and detecting OOB accesses when the appropriate hardening mode is enabled. 
-- The ``input_iterator``-pair overload of ``void assign(InputIt, InputIt)`` has been optimized for ``std::vector``, - resulting in a performance improvement of up to 2x for trivial element types (e.g., ``std::vector``), and up +- The ``input_iterator``-pair overload of ``void assign(InputIt, InputIt)`` has been optimized for ``std::vector``, + resulting in a performance improvement of up to 2x for trivial element types (e.g., ``std::vector``), and up to 3.4x for non-trivial element types (e.g., ``std::vector>``). -- The ``input_iterator``-pair overload of ``iterator insert(const_iterator, InputIt, InputIt)`` has been optimized - for ``std::vector``, resulting in a performance improvement of up to 10x for ``std::vector``, and up to 2.3x +- The ``input_iterator``-pair overload of ``iterator insert(const_iterator, InputIt, InputIt)`` has been optimized + for ``std::vector``, resulting in a performance improvement of up to 10x for ``std::vector``, and up to 2.3x for ``std::vector>``. - On Windows, ````'s ``std::system_category`` is now distinct from ``std::generic_category``. 
The behavior diff --git a/libcxx/include/__flat_map/key_value_iterator.h b/libcxx/include/__flat_map/key_value_iterator.h index 06a23f3429974..3ebb653deb197 100644 --- a/libcxx/include/__flat_map/key_value_iterator.h +++ b/libcxx/include/__flat_map/key_value_iterator.h @@ -15,9 +15,7 @@ #include <__config> #include <__iterator/iterator_traits.h> #include <__memory/addressof.h> -#include <__ranges/access.h> #include <__type_traits/conditional.h> -#include <__type_traits/maybe_const.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -41,9 +39,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD template struct __key_value_iterator { private: - using __key_iterator _LIBCPP_NODEBUG = ranges::iterator_t; - using __mapped_iterator _LIBCPP_NODEBUG = ranges::iterator_t<__maybe_const<_Const, _MappedContainer>>; - using __reference _LIBCPP_NODEBUG = _If<_Const, typename _Owner::const_reference, typename _Owner::reference>; + using __key_iterator _LIBCPP_NODEBUG = typename _KeyContainer::const_iterator; + using __mapped_iterator _LIBCPP_NODEBUG = + _If<_Const, typename _MappedContainer::const_iterator, typename _MappedContainer::iterator>; + using __reference _LIBCPP_NODEBUG = _If<_Const, typename _Owner::const_reference, typename _Owner::reference>; struct __arrow_proxy { __reference __ref_; @@ -71,8 +70,8 @@ struct __key_value_iterator { _LIBCPP_HIDE_FROM_ABI __key_value_iterator() = default; _LIBCPP_HIDE_FROM_ABI __key_value_iterator(__key_value_iterator<_Owner, _KeyContainer, _MappedContainer, !_Const> __i) - requires _Const && convertible_to, __key_iterator> && - convertible_to, __mapped_iterator> + requires _Const && convertible_to && + convertible_to : __key_iter_(std::move(__i.__key_iter_)), __mapped_iter_(std::move(__i.__mapped_iter_)) {} _LIBCPP_HIDE_FROM_ABI __key_value_iterator(__key_iterator __key_iter, __mapped_iterator __mapped_iter) diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h index 3a7ae53178596..28e9495a314a2 100644 --- 
a/libcxx/include/__vector/vector.h +++ b/libcxx/include/__vector/vector.h @@ -51,6 +51,7 @@ #include <__type_traits/is_constructible.h> #include <__type_traits/is_nothrow_assignable.h> #include <__type_traits/is_nothrow_constructible.h> +#include <__type_traits/is_pointer.h> #include <__type_traits/is_same.h> #include <__type_traits/is_trivially_relocatable.h> #include <__type_traits/type_identity.h> @@ -341,13 +342,17 @@ class _LIBCPP_TEMPLATE_VIS vector { // // Iterators // - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { return __make_iter(this->__begin_); } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator begin() _NOEXCEPT { + return __make_iter(__add_alignment_assumption(this->__begin_)); + } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const_iterator begin() const _NOEXCEPT { - return __make_iter(this->__begin_); + return __make_iter(__add_alignment_assumption(this->__begin_)); + } + _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { + return __make_iter(__add_alignment_assumption(this->__end_)); } - _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI iterator end() _NOEXCEPT { return __make_iter(this->__end_); } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI const_iterator end() const _NOEXCEPT { - return __make_iter(this->__end_); + return __make_iter(__add_alignment_assumption(this->__end_)); } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI reverse_iterator rbegin() _NOEXCEPT { @@ -775,6 +780,17 @@ class _LIBCPP_TEMPLATE_VIS vector { } _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void __move_assign_alloc(vector&, false_type) _NOEXCEPT {} + + static _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI pointer __add_alignment_assumption(pointer __p) _NOEXCEPT { +#ifndef _LIBCPP_CXX03_LANG + if constexpr (is_pointer::value) { + if (!__libcpp_is_constant_evaluated()) { + return static_cast(__builtin_assume_aligned(__p, alignof(decltype(*__p)))); + } + } +#endif + 
return __p; + } }; #if _LIBCPP_STD_VER >= 17 diff --git a/libcxx/test/benchmarks/containers/string.bench.cpp b/libcxx/test/benchmarks/containers/string.bench.cpp index f7da3e2da312b..0b62c87acf7a2 100644 --- a/libcxx/test/benchmarks/containers/string.bench.cpp +++ b/libcxx/test/benchmarks/containers/string.bench.cpp @@ -237,29 +237,6 @@ struct StringMove { static std::string name() { return "BM_StringMove" + Length::name(); } }; -template -struct StringResizeDefaultInit { - static void run(benchmark::State& state) { - constexpr bool opaque = Opaque{} == Opacity::Opaque; - constexpr int kNumStrings = 4 << 10; - size_t length = makeString(Length()).size(); - std::string strings[kNumStrings]; - while (state.KeepRunningBatch(kNumStrings)) { - state.PauseTiming(); - for (int i = 0; i < kNumStrings; ++i) { - std::string().swap(strings[i]); - } - benchmark::DoNotOptimize(strings); - state.ResumeTiming(); - for (int i = 0; i < kNumStrings; ++i) { - strings[i].__resize_default_init(maybeOpaque(length, opaque)); - } - } - } - - static std::string name() { return "BM_StringResizeDefaultInit" + Length::name() + Opaque::name(); } -}; - template struct StringAssignStr { static void run(benchmark::State& state) { @@ -577,7 +554,6 @@ int main(int argc, char** argv) { makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); - makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); makeCartesianProductBenchmark(); diff --git a/libcxx/test/configs/stdlib-libstdc++.cfg.in b/libcxx/test/configs/stdlib-libstdc++.cfg.in index d89254ab47d6a..b9672f038a763 100644 --- a/libcxx/test/configs/stdlib-libstdc++.cfg.in +++ b/libcxx/test/configs/stdlib-libstdc++.cfg.in @@ -1,15 +1,15 @@ # # This testing configuration runs the test suite using the libstdc++ Standard library. 
# -# The additional '--param libstdcxx-install-prefix=', '--param libstdcxx-triple=' and -# '--param libstdcxx-version=' lit parameters must be provided when invoking lit for the +# The additional '--param libstdcxx_install_prefix=', '--param libstdcxx_triple=' and +# '--param libstdcxx_version=' lit parameters must be provided when invoking lit for the # configuration to find the appropriate headers and library. # # For example: # -# $ ./libcxx/utils/libcxx-lit -sv libcxx/test/std --param libstdcxx-install-prefix=/opt/homebrew/Cellar/gcc/14.1.0_1 \ -# --param libstdcxx-version=14 \ -# --param libstdcxx-triple=aarch64-apple-darwin22 +# $ ./libcxx/utils/libcxx-lit -sv libcxx/test/std --param libstdcxx_install_prefix=/opt/homebrew/Cellar/gcc/14.1.0_1 \ +# --param libstdcxx_version=14 \ +# --param libstdcxx_triple=aarch64-apple-darwin22 # lit_config.load_config(config, '@CMAKE_CURRENT_BINARY_DIR@/cmake-bridge.cfg') @@ -20,19 +20,19 @@ import libcxx.test.params, libcxx.test.config, libcxx.test.dsl # Additional parameters for libstdc++ LIBSTDCXX_PARAMETERS = [ - libcxx.test.dsl.Parameter(name='libstdcxx-install-prefix', type=str, + libcxx.test.dsl.Parameter(name='libstdcxx_install_prefix', type=str, actions=lambda path: [libcxx.test.dsl.AddSubstitution('%{libstdcxx-install-prefix}', path)], help=""" The installation prefix where libstdc++ was installed. This is used to find the libstdc++ headers, link against its built library, etc. """), - libcxx.test.dsl.Parameter(name='libstdcxx-triple', type=str, + libcxx.test.dsl.Parameter(name='libstdcxx_triple', type=str, actions=lambda triple: [libcxx.test.dsl.AddSubstitution('%{libstdcxx-triple}', triple)], help=""" The target triple used for the target-specific include directory of libstdc++. This is used to find the libstdc++ headers. 
"""), - libcxx.test.dsl.Parameter(name='libstdcxx-version', type=str, + libcxx.test.dsl.Parameter(name='libstdcxx_version', type=str, actions=lambda version: [libcxx.test.dsl.AddSubstitution('%{libstdcxx-version}', version)], help=""" The version of libstdc++. This is used to find the libstdc++ headers and library. diff --git a/libcxx/test/std/depr/depr.c.headers/stdlib_h.pass.cpp b/libcxx/test/std/depr/depr.c.headers/stdlib_h.pass.cpp index 587c6b6e10ddb..662331558c121 100644 --- a/libcxx/test/std/depr/depr.c.headers/stdlib_h.pass.cpp +++ b/libcxx/test/std/depr/depr.c.headers/stdlib_h.pass.cpp @@ -8,6 +8,11 @@ // test +// mblen was added in Android API 26. +// TODO: Switch from UNSUPPORTED to XFAIL once the Android CI Docker sysroot is +// updated. +// UNSUPPORTED: LIBCXX-ANDROID-FIXME && target={{.+}}-android{{(eabi)?(21|22|23|24|25)}} + #include #include #include diff --git a/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp b/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp index 35b294532b2bd..4e2fb319336f1 100644 --- a/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp +++ b/libcxx/test/std/depr/depr.c.headers/wctype_h.compile.pass.cpp @@ -8,6 +8,11 @@ // UNSUPPORTED: no-wide-characters +// towctrans and wctrans were added in Android API 26. +// TODO: Switch from UNSUPPORTED to XFAIL once the Android CI Docker sysroot is +// updated. +// UNSUPPORTED: LIBCXX-ANDROID-FIXME && target={{.+}}-android{{(eabi)?(21|22|23|24|25)}} + // #include diff --git a/libcxx/test/std/language.support/support.runtime/cstdlib.pass.cpp b/libcxx/test/std/language.support/support.runtime/cstdlib.pass.cpp index a1f7e1143a1e9..9d3e6d892daf0 100644 --- a/libcxx/test/std/language.support/support.runtime/cstdlib.pass.cpp +++ b/libcxx/test/std/language.support/support.runtime/cstdlib.pass.cpp @@ -8,6 +8,11 @@ // test +// mblen was added in Android API 26. +// TODO: Switch from UNSUPPORTED to XFAIL once the Android CI Docker sysroot is +// updated. 
+// UNSUPPORTED: LIBCXX-ANDROID-FIXME && target={{.+}}-android{{(eabi)?(21|22|23|24|25)}} + #include #include #include diff --git a/libcxx/test/std/strings/c.strings/cwctype.pass.cpp b/libcxx/test/std/strings/c.strings/cwctype.pass.cpp index 5bc2531d6f6ac..0deabf51ed59c 100644 --- a/libcxx/test/std/strings/c.strings/cwctype.pass.cpp +++ b/libcxx/test/std/strings/c.strings/cwctype.pass.cpp @@ -10,6 +10,11 @@ // UNSUPPORTED: no-wide-characters +// towctrans and wctrans were added in Android API 26. +// TODO: Switch from UNSUPPORTED to XFAIL once the Android CI Docker sysroot is +// updated. +// UNSUPPORTED: LIBCXX-ANDROID-FIXME && target={{.+}}-android{{(eabi)?(21|22|23|24|25)}} + #include #include diff --git a/libunwind/src/CMakeLists.txt b/libunwind/src/CMakeLists.txt index e7ea57734cca9..ecbd019bb29ea 100644 --- a/libunwind/src/CMakeLists.txt +++ b/libunwind/src/CMakeLists.txt @@ -20,7 +20,12 @@ set(LIBUNWIND_C_SOURCES ) set_source_files_properties(${LIBUNWIND_C_SOURCES} PROPERTIES - COMPILE_FLAGS "-std=c99") + # We need to set `-fexceptions` here so that key + # unwinding functions, like + # _UNWIND_RaiseException, are not marked as + # `nounwind`, which breaks LTO builds of + # libunwind. See #56825 and #120657 for context. 
+ COMPILE_FLAGS "-std=c99 -fexceptions") set(LIBUNWIND_ASM_SOURCES UnwindRegistersRestore.S diff --git a/lld/COFF/Config.h b/lld/COFF/Config.h index 9e6b17e87c9e7..924560fef0231 100644 --- a/lld/COFF/Config.h +++ b/lld/COFF/Config.h @@ -120,7 +120,6 @@ struct Configuration { size_t wordsize; bool verbose = false; WindowsSubsystem subsystem = llvm::COFF::IMAGE_SUBSYSTEM_UNKNOWN; - Symbol *entry = nullptr; bool noEntry = false; std::string outputFile; std::string importName; diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp index beb135f08fa3b..8b1a8dc3e5af7 100644 --- a/lld/COFF/Driver.cpp +++ b/lld/COFF/Driver.cpp @@ -175,6 +175,15 @@ llvm::Triple::ArchType LinkerDriver::getArch() { return getMachineArchType(ctx.config.machine); } +std::vector LinkerDriver::getChunks() const { + std::vector res; + for (ObjFile *file : ctx.objFileInstances) { + ArrayRef v = file->getChunks(); + res.insert(res.end(), v.begin(), v.end()); + } + return res; +} + static bool compatibleMachineType(COFFLinkerContext &ctx, MachineTypes mt) { if (mt == IMAGE_FILE_MACHINE_UNKNOWN) return true; @@ -491,8 +500,9 @@ void LinkerDriver::parseDirectives(InputFile *file) { case OPT_entry: if (!arg->getValue()[0]) Fatal(ctx) << "missing entry point symbol name"; - ctx.config.entry = - file->symtab.addGCRoot(file->symtab.mangle(arg->getValue()), true); + ctx.forEachSymtab([&](SymbolTable &symtab) { + symtab.entry = symtab.addGCRoot(symtab.mangle(arg->getValue()), true); + }); break; case OPT_failifmismatch: checkFailIfMismatch(arg->getValue(), file); @@ -1092,7 +1102,7 @@ void LinkerDriver::parseOrderFile(StringRef arg) { // Get a list of all comdat sections for error checking. 
DenseSet set; - for (Chunk *c : ctx.symtab.getChunks()) + for (Chunk *c : ctx.driver.getChunks()) if (auto *sec = dyn_cast(c)) if (sec->sym) set.insert(sec->sym->getName()); @@ -1394,8 +1404,9 @@ void LinkerDriver::createECExportThunks() { } } - if (ctx.config.entry) - maybeCreateECExportThunk(ctx.config.entry->getName(), ctx.config.entry); + if (ctx.symtabEC->entry) + maybeCreateECExportThunk(ctx.symtabEC->entry->getName(), + ctx.symtabEC->entry); for (Export &e : ctx.config.exports) { if (!e.data) maybeCreateECExportThunk(e.extName.empty() ? e.name : e.extName, e.sym); @@ -2357,33 +2368,32 @@ void LinkerDriver::linkerMain(ArrayRef argsArr) { } // Handle /entry and /dll - { + ctx.forEachSymtab([&](SymbolTable &symtab) { llvm::TimeTraceScope timeScope("Entry point"); if (auto *arg = args.getLastArg(OPT_entry)) { if (!arg->getValue()[0]) Fatal(ctx) << "missing entry point symbol name"; - config->entry = - ctx.symtab.addGCRoot(ctx.symtab.mangle(arg->getValue()), true); - } else if (!config->entry && !config->noEntry) { + symtab.entry = symtab.addGCRoot(symtab.mangle(arg->getValue()), true); + } else if (!symtab.entry && !config->noEntry) { if (args.hasArg(OPT_dll)) { StringRef s = (config->machine == I386) ? "__DllMainCRTStartup@12" : "_DllMainCRTStartup"; - config->entry = ctx.symtab.addGCRoot(s, true); + symtab.entry = symtab.addGCRoot(s, true); } else if (config->driverWdm) { // /driver:wdm implies /entry:_NtProcessStartup - config->entry = - ctx.symtab.addGCRoot(ctx.symtab.mangle("_NtProcessStartup"), true); + symtab.entry = + symtab.addGCRoot(symtab.mangle("_NtProcessStartup"), true); } else { // Windows specific -- If entry point name is not given, we need to // infer that from user-defined entry name. 
- StringRef s = ctx.symtab.findDefaultEntry(); + StringRef s = symtab.findDefaultEntry(); if (s.empty()) Fatal(ctx) << "entry point must be defined"; - config->entry = ctx.symtab.addGCRoot(s, true); + symtab.entry = symtab.addGCRoot(s, true); Log(ctx) << "Entry name inferred: " << s; } } - } + }); // Handle /delayload { @@ -2522,10 +2532,12 @@ void LinkerDriver::linkerMain(ArrayRef argsArr) { { llvm::TimeTraceScope timeScope("Add unresolved symbols"); do { - // Windows specific -- if entry point is not found, - // search for its mangled names. - if (config->entry) - ctx.symtab.mangleMaybe(config->entry); + ctx.forEachSymtab([&](SymbolTable &symtab) { + // Windows specific -- if entry point is not found, + // search for its mangled names. + if (symtab.entry) + symtab.mangleMaybe(symtab.entry); + }); // Windows specific -- Make sure we resolve all dllexported symbols. for (Export &e : config->exports) { diff --git a/lld/COFF/Driver.h b/lld/COFF/Driver.h index 4558f68c041fa..8ce2e13129ba6 100644 --- a/lld/COFF/Driver.h +++ b/lld/COFF/Driver.h @@ -94,6 +94,9 @@ class LinkerDriver { void enqueuePath(StringRef path, bool wholeArchive, bool lazy); + // Returns a list of chunks of selected symbols. + std::vector getChunks() const; + std::unique_ptr tar; // for /linkrepro void pullArm64ECIcallHelper(); diff --git a/lld/COFF/ICF.cpp b/lld/COFF/ICF.cpp index 796d3a4108ba4..e6c965160e4ef 100644 --- a/lld/COFF/ICF.cpp +++ b/lld/COFF/ICF.cpp @@ -264,7 +264,7 @@ void ICF::run() { // Collect only mergeable sections and group by hash value. 
uint32_t nextId = 1; - for (Chunk *c : ctx.symtab.getChunks()) { + for (Chunk *c : ctx.driver.getChunks()) { if (auto *sc = dyn_cast(c)) { if (isEligible(sc)) chunks.push_back(sc); diff --git a/lld/COFF/MapFile.cpp b/lld/COFF/MapFile.cpp index e3531c04e7747..af87587d143d5 100644 --- a/lld/COFF/MapFile.cpp +++ b/lld/COFF/MapFile.cpp @@ -301,7 +301,7 @@ void lld::coff::writeMapFile(COFFLinkerContext &ctx) { uint64_t entryAddress = 0; if (!ctx.config.noEntry) { - Defined *entry = dyn_cast_or_null(ctx.config.entry); + Defined *entry = dyn_cast_or_null(ctx.symtab.entry); if (entry) { Chunk *chunk = entry->getChunk(); entrySecIndex = chunk->getOutputSectionIdx(); diff --git a/lld/COFF/MarkLive.cpp b/lld/COFF/MarkLive.cpp index 3c09baa73a9f7..ad50536892ebb 100644 --- a/lld/COFF/MarkLive.cpp +++ b/lld/COFF/MarkLive.cpp @@ -31,7 +31,7 @@ void markLive(COFFLinkerContext &ctx) { // COMDAT section chunks are dead by default. Add non-COMDAT chunks. Do not // traverse DWARF sections. They are live, but they should not keep other // sections alive. 
- for (Chunk *c : ctx.symtab.getChunks()) + for (Chunk *c : ctx.driver.getChunks()) if (auto *sc = dyn_cast(c)) if (sc->live && !sc->isDWARF()) worklist.push_back(sc); diff --git a/lld/COFF/SymbolTable.cpp b/lld/COFF/SymbolTable.cpp index 7c43ada3d136e..36dcd0dfe1389 100644 --- a/lld/COFF/SymbolTable.cpp +++ b/lld/COFF/SymbolTable.cpp @@ -945,15 +945,6 @@ void SymbolTable::addLibcall(StringRef name) { } } -std::vector SymbolTable::getChunks() const { - std::vector res; - for (ObjFile *file : ctx.objFileInstances) { - ArrayRef v = file->getChunks(); - res.insert(res.end(), v.begin(), v.end()); - } - return res; -} - Symbol *SymbolTable::find(StringRef name) const { return symMap.lookup(CachedHashStringRef(name)); } diff --git a/lld/COFF/SymbolTable.h b/lld/COFF/SymbolTable.h index 1de0b3e1deac3..9e316fcdbe630 100644 --- a/lld/COFF/SymbolTable.h +++ b/lld/COFF/SymbolTable.h @@ -67,9 +67,6 @@ class SymbolTable { void loadMinGWSymbols(); bool handleMinGWAutomaticImport(Symbol *sym, StringRef name); - // Returns a list of chunks of selected symbols. - std::vector getChunks() const; - // Returns a symbol for a given name. Returns a nullptr if not found. Symbol *find(StringRef name) const; Symbol *findUnderscore(StringRef name) const; @@ -143,6 +140,9 @@ class SymbolTable { bool isEC() const { return machine == ARM64EC; } + // An entry point symbol. + Symbol *entry = nullptr; + // A list of chunks which to be added to .rdata. std::vector localImportChunks; diff --git a/lld/COFF/Writer.cpp b/lld/COFF/Writer.cpp index eb82a9cc01593..8247f131dcf07 100644 --- a/lld/COFF/Writer.cpp +++ b/lld/COFF/Writer.cpp @@ -1077,7 +1077,7 @@ void Writer::createSections() { dtorsSec = createSection(".dtors", data | r | w); // Then bin chunks by name and output characteristics. 
- for (Chunk *c : ctx.symtab.getChunks()) { + for (Chunk *c : ctx.driver.getChunks()) { auto *sc = dyn_cast(c); if (sc && !sc->live) { if (ctx.config.verbose) @@ -1748,7 +1748,7 @@ template void Writer::writeHeader() { pe->SizeOfImage = sizeOfImage; pe->SizeOfHeaders = sizeOfHeaders; if (!config->noEntry) { - Defined *entry = cast(config->entry); + Defined *entry = cast(ctx.symtab.entry); pe->AddressOfEntryPoint = entry->getRVA(); // Pointer to thumb code must have the LSB set, so adjust it. if (config->machine == ARMNT) @@ -2031,8 +2031,10 @@ void Writer::createGuardCFTables() { } // Mark the image entry as address-taken. - if (config->entry) - maybeAddAddressTakenFunction(addressTakenSyms, config->entry); + ctx.forEachSymtab([&](SymbolTable &symtab) { + if (symtab.entry) + maybeAddAddressTakenFunction(addressTakenSyms, symtab.entry); + }); // Mark exported symbols in executable sections as address-taken. for (Export &e : config->exports) @@ -2217,7 +2219,7 @@ void Writer::createECChunks() { void Writer::createRuntimePseudoRelocs() { std::vector rels; - for (Chunk *c : ctx.symtab.getChunks()) { + for (Chunk *c : ctx.driver.getChunks()) { auto *sc = dyn_cast(c); if (!sc || !sc->live) continue; @@ -2584,6 +2586,12 @@ void Writer::createDynamicRelocs() { coffHeaderOffset + offsetof(coff_file_header, Machine), AMD64); + if (ctx.symtab.entry != ctx.hybridSymtab->entry) + ctx.dynamicRelocs->add(IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, sizeof(uint32_t), + peHeaderOffset + + offsetof(pe32plus_header, AddressOfEntryPoint), + cast_or_null(ctx.hybridSymtab->entry)); + // Set the hybrid load config to the EC load config. 
ctx.dynamicRelocs->add(IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, sizeof(uint32_t), dataDirOffset64 + diff --git a/lld/MachO/BPSectionOrderer.h b/lld/MachO/BPSectionOrderer.h index 4facb652d4c87..69c6b260f044c 100644 --- a/lld/MachO/BPSectionOrderer.h +++ b/lld/MachO/BPSectionOrderer.h @@ -19,7 +19,10 @@ #include "Symbols.h" #include "lld/Common/BPSectionOrdererBase.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StableHashing.h" #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/xxhash.h" namespace lld::macho { @@ -90,23 +93,24 @@ class BPSectionMacho : public BPSectionBase { §ionToIdx) const override { constexpr unsigned windowSize = 4; - // Calculate content hashes - size_t dataSize = isec->data.size(); - for (size_t i = 0; i < dataSize; i++) { - auto window = isec->data.drop_front(i).take_front(windowSize); - hashes.push_back(xxHash64(window)); - } + // Calculate content hashes: k-mers and the last k-1 bytes. + ArrayRef data = isec->data; + if (data.size() >= windowSize) + for (size_t i = 0; i <= data.size() - windowSize; ++i) + hashes.push_back(llvm::support::endian::read32le(data.data() + i)); + for (uint8_t byte : data.take_back(windowSize - 1)) + hashes.push_back(byte); // Calculate relocation hashes for (const auto &r : isec->relocs) { - if (r.length == 0 || r.referent.isNull() || r.offset >= isec->data.size()) + if (r.length == 0 || r.referent.isNull() || r.offset >= data.size()) continue; uint64_t relocHash = getRelocHash(r, sectionToIdx); uint32_t start = (r.offset < windowSize) ? 
0 : r.offset - windowSize + 1; for (uint32_t i = start; i < r.offset + r.length; i++) { - auto window = isec->data.drop_front(i).take_front(windowSize); - hashes.push_back(xxHash64(window) + relocHash); + auto window = data.drop_front(i).take_front(windowSize); + hashes.push_back(xxh3_64bits(window) ^ relocHash); } } @@ -124,19 +128,17 @@ class BPSectionMacho : public BPSectionBase { std::optional sectionIdx; if (auto it = sectionToIdx.find(isec); it != sectionToIdx.end()) sectionIdx = it->second; - std::string kind; + uint64_t kind = -1, value = 0; if (isec) - kind = ("Section " + Twine(isec->kind())).str(); + kind = uint64_t(isec->kind()); if (auto *sym = reloc.referent.dyn_cast()) { - kind += (" Symbol " + Twine(sym->kind())).str(); - if (auto *d = llvm::dyn_cast(sym)) { - return BPSectionBase::getRelocHash(kind, sectionIdx.value_or(0), - d->value, reloc.addend); - } + kind = (kind << 8) | uint8_t(sym->kind()); + if (auto *d = llvm::dyn_cast(sym)) + value = d->value; } - return BPSectionBase::getRelocHash(kind, sectionIdx.value_or(0), 0, - reloc.addend); + return llvm::stable_hash_combine(kind, sectionIdx.value_or(0), value, + reloc.addend); } }; diff --git a/lld/MachO/MapFile.cpp b/lld/MachO/MapFile.cpp index 12417df8cecb8..8919c8d2f9b9c 100644 --- a/lld/MachO/MapFile.cpp +++ b/lld/MachO/MapFile.cpp @@ -161,20 +161,6 @@ static uint64_t getSymSizeForMap(Defined *sym) { return sym->size; } -// Merges two vectors of input sections in order of their outSecOff values. -// This approach creates a new (temporary) vector which is not ideal but the -// ideal approach leads to a lot of code duplication. 
-static std::vector -mergeOrderedInputs(ArrayRef inputs1, - ArrayRef inputs2) { - std::vector vec(inputs1.size() + inputs2.size()); - std::merge(inputs1.begin(), inputs1.end(), inputs2.begin(), inputs2.end(), - vec.begin(), [](ConcatInputSection *a, ConcatInputSection *b) { - return a->outSecOff < b->outSecOff; - }); - return vec; -} - void macho::writeMapFile() { if (config->mapFile.empty()) return; @@ -217,15 +203,32 @@ void macho::writeMapFile() { seg->name.str().c_str(), osec->name.str().c_str()); } - // Shared function to print an array of symbols. - auto printIsecArrSyms = [&](const std::vector &arr) { - for (const ConcatInputSection *isec : arr) { - for (Defined *sym : isec->symbols) { - if (!(isPrivateLabel(sym->getName()) && getSymSizeForMap(sym) == 0)) - os << format("0x%08llX\t0x%08llX\t[%3u] %s\n", sym->getVA(), - getSymSizeForMap(sym), - readerToFileOrdinal[sym->getFile()], - sym->getName().str().data()); + // Helper lambda that prints all symbols from one ConcatInputSection. + auto printOne = [&](const ConcatInputSection *isec) { + for (Defined *sym : isec->symbols) { + if (!(isPrivateLabel(sym->getName()) && getSymSizeForMap(sym) == 0)) { + os << format("0x%08llX\t0x%08llX\t[%3u] %s\n", sym->getVA(), + getSymSizeForMap(sym), + readerToFileOrdinal.lookup(sym->getFile()), + sym->getName().str().data()); + } + } + }; + // Shared function to print one or two arrays of ConcatInputSection in + // ascending outSecOff order. The second array is optional; if provided, we + // interleave the printing in sorted order without allocating a merged temp + // array. + auto printIsecArrSyms = [&](ArrayRef arr1, + ArrayRef arr2 = {}) { + // Print both arrays in sorted order, interleaving as necessary. 
+ while (!arr1.empty() || !arr2.empty()) { + if (!arr1.empty() && (arr2.empty() || arr1.front()->outSecOff <= + arr2.front()->outSecOff)) { + printOne(arr1.front()); + arr1 = arr1.drop_front(); + } else if (!arr2.empty()) { + printOne(arr2.front()); + arr2 = arr2.drop_front(); } } }; @@ -235,9 +238,7 @@ void macho::writeMapFile() { for (const OutputSegment *seg : outputSegments) { for (const OutputSection *osec : seg->getSections()) { if (auto *textOsec = dyn_cast(osec)) { - auto inputsAndThunks = - mergeOrderedInputs(textOsec->inputs, textOsec->getThunks()); - printIsecArrSyms(inputsAndThunks); + printIsecArrSyms(textOsec->inputs, textOsec->getThunks()); } else if (auto *concatOsec = dyn_cast(osec)) { printIsecArrSyms(concatOsec->inputs); } else if (osec == in.cStringSection || osec == in.objcMethnameSection) { diff --git a/lld/include/lld/Common/BPSectionOrdererBase.h b/lld/include/lld/Common/BPSectionOrdererBase.h index bd5bd638ccd2a..bbd05edc5e55e 100644 --- a/lld/include/lld/Common/BPSectionOrdererBase.h +++ b/lld/include/lld/Common/BPSectionOrdererBase.h @@ -18,7 +18,6 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" -#include "llvm/Support/xxhash.h" #include #include @@ -56,14 +55,6 @@ class BPSectionBase { return P1; } - static uint64_t getRelocHash(llvm::StringRef kind, uint64_t sectionIdx, - uint64_t offset, uint64_t addend) { - return llvm::xxHash64((kind + ": " + llvm::Twine::utohexstr(sectionIdx) + - " + " + llvm::Twine::utohexstr(offset) + " + " + - llvm::Twine::utohexstr(addend)) - .str()); - } - /// Reorders sections using balanced partitioning algorithm based on profile /// data. 
static llvm::DenseMap diff --git a/lld/test/COFF/arm64x-entry.test b/lld/test/COFF/arm64x-entry.test new file mode 100644 index 0000000000000..d5363c66544a5 --- /dev/null +++ b/lld/test/COFF/arm64x-entry.test @@ -0,0 +1,92 @@ +REQUIRES: aarch64, x86 +RUN: split-file %s %t.dir && cd %t.dir + +RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-dllmain.s -o arm64ec-dllmain.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows arm64-dllmain.s -o arm64-dllmain.obj +RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-func.s -o arm64ec-func.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows arm64-func.s -o arm64-func.obj +RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64-drectve.s -o arm64ec-drectve.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows arm64-drectve.s -o arm64-drectve.obj +RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows %S/Inputs/loadconfig-arm64.s -o loadconfig-arm64.obj + +RUN: lld-link -machine:arm64x -dll -out:out.dll arm64ec-dllmain.obj arm64-dllmain.obj \ +RUN: loadconfig-arm64.obj loadconfig-arm64ec.obj + +RUN: llvm-objdump -d out.dll | FileCheck --check-prefix=DISASM %s +DISASM: Disassembly of section .text: +DISASM-EMPTY: +DISASM-NEXT: 0000000180001000 <.text>: +DISASM-NEXT: 180001000: 52800020 mov w0, #0x1 // =1 +DISASM-NEXT: 180001004: d65f03c0 ret +DISASM-NEXT: ... 
+DISASM-NEXT: 180002000: 52800040 mov w0, #0x2 // =2 +DISASM-NEXT: 180002004: d65f03c0 ret +DISASM-EMPTY: +DISASM-NEXT: Disassembly of section .hexpthk: +DISASM-EMPTY: +DISASM-NEXT: 0000000180003000 <.hexpthk>: +DISASM-NEXT: 180003000: 48 8b c4 movq %rsp, %rax +DISASM-NEXT: 180003003: 48 89 58 20 movq %rbx, 0x20(%rax) +DISASM-NEXT: 180003007: 55 pushq %rbp +DISASM-NEXT: 180003008: 5d popq %rbp +DISASM-NEXT: 180003009: e9 f2 ef ff ff jmp 0x180002000 <.text+0x1000> +DISASM-NEXT: 18000300e: cc int3 +DISASM-NEXT: 18000300f: cc int3 + +RUN: llvm-readobj --headers out.dll | FileCheck --check-prefix=READOBJ %s +READOBJ: AddressOfEntryPoint: 0x1000 +READOBJ: HybridObject { +READOBJ: AddressOfEntryPoint: 0x3000 +READOBJ: } + +RUN: lld-link -machine:arm64x -dll -out:out2.dll arm64ec-func.obj arm64-func.obj \ +RUN: arm64ec-drectve.obj loadconfig-arm64.obj loadconfig-arm64ec.obj +RUN: llvm-objdump -d out2.dll | FileCheck --check-prefix=DISASM %s +RUN: llvm-readobj --headers --coff-load-config out2.dll | FileCheck --check-prefix=READOBJ %s + +RUN: lld-link -machine:arm64x -dll -out:out3.dll arm64ec-func.obj arm64-func.obj \ +RUN: arm64-drectve.obj loadconfig-arm64.obj loadconfig-arm64ec.obj +RUN: llvm-objdump -d out3.dll | FileCheck --check-prefix=DISASM %s +RUN: llvm-readobj --headers --coff-load-config out3.dll | FileCheck --check-prefix=READOBJ %s + +RUN: lld-link -machine:arm64x -dll -out:out4.dll arm64ec-func.obj arm64-func.obj \ +RUN: loadconfig-arm64.obj loadconfig-arm64ec.obj -entry:func +RUN: llvm-objdump -d out4.dll | FileCheck --check-prefix=DISASM %s +RUN: llvm-readobj --headers --coff-load-config out4.dll | FileCheck --check-prefix=READOBJ %s + +#--- arm64-dllmain.s + .section .text,"xr",discard,_DllMainCRTStartup + .globl _DllMainCRTStartup + .p2align 2 +_DllMainCRTStartup: + mov w0, #1 + ret + +#--- arm64ec-dllmain.s + .section .text,"xr",discard,_DllMainCRTStartup + .globl _DllMainCRTStartup + .p2align 2 +_DllMainCRTStartup: + mov w0, #2 + ret + +#--- 
arm64-func.s + .section .text,"xr",discard,func + .globl func + .p2align 2 +func: + mov w0, #1 + ret + +#--- arm64ec-func.s + .section .text,"xr",discard,func + .globl func + .p2align 2 +func: + mov w0, #2 + ret + +#--- arm64-drectve.s +.section .drectve + .ascii "-entry:func" diff --git a/lld/test/MachO/arm64-thunks.s b/lld/test/MachO/arm64-thunks.s index 858a27dfe36af..76c7d108104d1 100644 --- a/lld/test/MachO/arm64-thunks.s +++ b/lld/test/MachO/arm64-thunks.s @@ -17,13 +17,7 @@ # RUN: %lld -arch arm64 -dead_strip -lSystem -U _extern_sym -map %t/thunk.map -o %t/thunk %t/input.o # RUN: llvm-objdump --no-print-imm-hex -d --no-show-raw-insn %t/thunk | FileCheck %s -## Check that the thunks appear in the map file and that everything is sorted by address -# Because of the `.space` instructions, there will end up being a lot of dead symbols in the -# linker map (linker map will be ~2.7GB). So to avoid the test trying to (slowly) match regex -# across all the ~2.7GB of the linker map - generate a version of the linker map without dead symbols. -# RUN: awk '/# Dead Stripped Symbols:/ {exit} {print}' %t/thunk.map > %t/thunk_no_dead_syms.map - -# RUN: FileCheck %s --input-file %t/thunk_no_dead_syms.map --check-prefix=MAP +# RUN: FileCheck %s --input-file %t/thunk.map --check-prefix=MAP # MAP: 0x{{[[:xdigit:]]+}} {{.*}} _b # MAP-NEXT: 0x{{[[:xdigit:]]+}} {{.*}} _c @@ -339,7 +333,12 @@ _main: ret .section __TEXT,__cstring - .space 0x4000000 + # The .space below has to be composed of non-zero characters. 
Otherwise, the + # linker will create a symbol for every '0' in the section, leading to + # dramatic memory usage and a huge linker map file + .space 0x4000000, 'A' + .byte 0 + .section __TEXT,__lcxx_override,regular,pure_instructions diff --git a/lldb/include/lldb/API/SBSaveCoreOptions.h b/lldb/include/lldb/API/SBSaveCoreOptions.h index 74aa2fe5bd5f9..7852858f8ade9 100644 --- a/lldb/include/lldb/API/SBSaveCoreOptions.h +++ b/lldb/include/lldb/API/SBSaveCoreOptions.h @@ -14,6 +14,7 @@ #include "lldb/API/SBFileSpec.h" #include "lldb/API/SBProcess.h" #include "lldb/API/SBThread.h" +#include "lldb/API/SBThreadCollection.h" namespace lldb { @@ -111,11 +112,19 @@ class LLDB_API SBSaveCoreOptions { /// style specific regions. SBError AddMemoryRegionToSave(const SBMemoryRegionInfo ®ion); + /// Get an unsorted copy of all threads to save + /// + /// \returns + /// An unsorted copy of all threads to save. If no process is specified + /// an empty collection will be returned. + SBThreadCollection GetThreadsToSave() const; + /// Reset all options. 
void Clear(); protected: friend class SBProcess; + friend class SBThreadCollection; lldb_private::SaveCoreOptions &ref() const; private: diff --git a/lldb/include/lldb/API/SBThreadCollection.h b/lldb/include/lldb/API/SBThreadCollection.h index fe57a6b95d909..5a052e6246026 100644 --- a/lldb/include/lldb/API/SBThreadCollection.h +++ b/lldb/include/lldb/API/SBThreadCollection.h @@ -48,7 +48,7 @@ class LLDB_API SBThreadCollection { private: friend class SBProcess; friend class SBThread; - + friend class SBSaveCoreOptions; lldb::ThreadCollectionSP m_opaque_sp; }; diff --git a/lldb/include/lldb/Interpreter/Interfaces/OperatingSystemInterface.h b/lldb/include/lldb/Interpreter/Interfaces/OperatingSystemInterface.h index 3c46f99f3b356..58aab7ec914dd 100644 --- a/lldb/include/lldb/Interpreter/Interfaces/OperatingSystemInterface.h +++ b/lldb/include/lldb/Interpreter/Interfaces/OperatingSystemInterface.h @@ -27,6 +27,8 @@ class OperatingSystemInterface : virtual public ScriptedThreadInterface { virtual std::optional GetRegisterContextForTID(lldb::tid_t tid) { return std::nullopt; } + + virtual std::optional DoesPluginReportAllThreads() { return {}; } }; } // namespace lldb_private diff --git a/lldb/include/lldb/Symbol/SaveCoreOptions.h b/lldb/include/lldb/Symbol/SaveCoreOptions.h index d90d08026016d..bcf0087fbea5c 100644 --- a/lldb/include/lldb/Symbol/SaveCoreOptions.h +++ b/lldb/include/lldb/Symbol/SaveCoreOptions.h @@ -9,11 +9,11 @@ #ifndef LLDB_SOURCE_PLUGINS_OBJECTFILE_SaveCoreOPTIONS_H #define LLDB_SOURCE_PLUGINS_OBJECTFILE_SaveCoreOPTIONS_H +#include "lldb/Target/ThreadCollection.h" #include "lldb/Utility/FileSpec.h" #include "lldb/Utility/RangeMap.h" #include -#include #include #include @@ -47,6 +47,8 @@ class SaveCoreOptions { void AddMemoryRegionToSave(const lldb_private::MemoryRegionInfo ®ion); + lldb_private::ThreadCollection::collection GetThreadsToSave() const; + void Clear(); private: diff --git a/lldb/include/lldb/Symbol/SymbolContext.h 
b/lldb/include/lldb/Symbol/SymbolContext.h index f65f57b0d1103..07769cd8dffae 100644 --- a/lldb/include/lldb/Symbol/SymbolContext.h +++ b/lldb/include/lldb/Symbol/SymbolContext.h @@ -91,15 +91,6 @@ class SymbolContext { /// their default state. void Clear(bool clear_target); - /// Dump a description of this object to a Stream. - /// - /// Dump a description of the contents of this object to the supplied stream - /// \a s. - /// - /// \param[in] s - /// The stream to which to dump the object description. - void Dump(Stream *s, Target *target) const; - /// Dump the stop context in this object to a Stream. /// /// Dump the best description of this object to the stream. The information diff --git a/lldb/include/lldb/Target/OperatingSystem.h b/lldb/include/lldb/Target/OperatingSystem.h index ceeddceb0f2c1..128239569790f 100644 --- a/lldb/include/lldb/Target/OperatingSystem.h +++ b/lldb/include/lldb/Target/OperatingSystem.h @@ -61,6 +61,8 @@ class OperatingSystem : public PluginInterface { virtual bool IsOperatingSystemPluginThread(const lldb::ThreadSP &thread_sp); + virtual bool DoesPluginReportAllThreads() = 0; + protected: // Member variables. Process diff --git a/lldb/include/lldb/Utility/LLDBAssert.h b/lldb/include/lldb/Utility/LLDBAssert.h index aeef3e51e20a8..21dbdb3b3202d 100644 --- a/lldb/include/lldb/Utility/LLDBAssert.h +++ b/lldb/include/lldb/Utility/LLDBAssert.h @@ -19,24 +19,30 @@ // __FILE__ but only renders the last path component (the filename) instead of // an invocation dependent full path to that file. 
#define lldbassert(x) \ - lldb_private::lldb_assert(static_cast(x), #x, __FUNCTION__, \ - __FILE_NAME__, __LINE__) + lldb_private::_lldb_assert(static_cast(x), #x, __FUNCTION__, \ + __FILE_NAME__, __LINE__) #else #define lldbassert(x) \ - lldb_private::lldb_assert(static_cast(x), #x, __FUNCTION__, __FILE__, \ - __LINE__) + lldb_private::_lldb_assert(static_cast(x), #x, __FUNCTION__, __FILE__, \ + __LINE__) #endif #endif namespace lldb_private { -void lldb_assert(bool expression, const char *expr_text, const char *func, - const char *file, unsigned int line); +/// Don't use _lldb_assert directly. Use the lldbassert macro instead so that +/// LLDB asserts become regular asserts in NDEBUG builds. +void _lldb_assert(bool expression, const char *expr_text, const char *func, + const char *file, unsigned int line); + +/// The default LLDB assert callback, which prints to stderr. typedef void (*LLDBAssertCallback)(llvm::StringRef message, llvm::StringRef backtrace, llvm::StringRef prompt); +/// Replace the LLDB assert callback. 
void SetLLDBAssertCallback(LLDBAssertCallback callback); + } // namespace lldb_private #endif // LLDB_UTILITY_LLDBASSERT_H diff --git a/lldb/source/API/SBSaveCoreOptions.cpp b/lldb/source/API/SBSaveCoreOptions.cpp index c79b57fa62c2b..35b9da569dfa1 100644 --- a/lldb/source/API/SBSaveCoreOptions.cpp +++ b/lldb/source/API/SBSaveCoreOptions.cpp @@ -10,6 +10,7 @@ #include "lldb/API/SBMemoryRegionInfo.h" #include "lldb/Host/FileSystem.h" #include "lldb/Symbol/SaveCoreOptions.h" +#include "lldb/Target/ThreadCollection.h" #include "lldb/Utility/Instrumentation.h" #include "Utils.h" @@ -100,6 +101,14 @@ SBSaveCoreOptions::AddMemoryRegionToSave(const SBMemoryRegionInfo ®ion) { return SBError(); } +lldb::SBThreadCollection SBSaveCoreOptions::GetThreadsToSave() const { + LLDB_INSTRUMENT_VA(this); + lldb::ThreadCollectionSP threadcollection_sp = + std::make_shared( + m_opaque_up->GetThreadsToSave()); + return SBThreadCollection(threadcollection_sp); +} + void SBSaveCoreOptions::Clear() { LLDB_INSTRUMENT_VA(this); m_opaque_up->Clear(); diff --git a/lldb/source/Expression/LLVMUserExpression.cpp b/lldb/source/Expression/LLVMUserExpression.cpp index 529ac462dfd8f..fac3ce6f5799d 100644 --- a/lldb/source/Expression/LLVMUserExpression.cpp +++ b/lldb/source/Expression/LLVMUserExpression.cpp @@ -187,18 +187,22 @@ LLVMUserExpression::DoExecute(DiagnosticManager &diagnostic_manager, if (execution_result == lldb::eExpressionInterrupted || execution_result == lldb::eExpressionHitBreakpoint) { const char *error_desc = nullptr; + const char *explanation = execution_result == lldb::eExpressionInterrupted + ? 
"was interrupted" + : "hit a breakpoint"; if (user_expression_plan) { if (auto real_stop_info_sp = user_expression_plan->GetRealStopInfo()) error_desc = real_stop_info_sp->GetDescription(); } + if (error_desc) diagnostic_manager.Printf(lldb::eSeverityError, - "Execution was interrupted, reason: %s.", + "Expression execution %s: %s.", explanation, error_desc); else - diagnostic_manager.PutString(lldb::eSeverityError, - "Execution was interrupted."); + diagnostic_manager.Printf(lldb::eSeverityError, + "Expression execution %s.", explanation); if ((execution_result == lldb::eExpressionInterrupted && options.DoesUnwindOnError()) || @@ -212,31 +216,35 @@ LLVMUserExpression::DoExecute(DiagnosticManager &diagnostic_manager, user_expression_plan->TransferExpressionOwnership(); diagnostic_manager.AppendMessageToDiagnostic( "The process has been left at the point where it was " - "interrupted, " - "use \"thread return -x\" to return to the state before " - "expression evaluation."); + "interrupted, use \"thread return -x\" to return to the state " + "before expression evaluation."); } return execution_result; - } else if (execution_result == lldb::eExpressionStoppedForDebug) { + } + + if (execution_result == lldb::eExpressionStoppedForDebug) { diagnostic_manager.PutString( lldb::eSeverityInfo, - "Execution was halted at the first instruction of the expression " - "function because \"debug\" was requested.\n" + "Expression execution was halted at the first instruction of the " + "expression function because \"debug\" was requested.\n" "Use \"thread return -x\" to return to the state before expression " "evaluation."); return execution_result; - } else if (execution_result == lldb::eExpressionThreadVanished) { - diagnostic_manager.Printf( - lldb::eSeverityError, - "Couldn't complete execution; the thread " - "on which the expression was being run: 0x%" PRIx64 - " exited during its execution.", - expr_thread_id); + } + + if (execution_result == lldb::eExpressionThreadVanished) 
{ + diagnostic_manager.Printf(lldb::eSeverityError, + "Couldn't execute expression: the thread on " + "which the expression was being run (0x%" PRIx64 + ") exited during its execution.", + expr_thread_id); return execution_result; - } else if (execution_result != lldb::eExpressionCompleted) { + } + + if (execution_result != lldb::eExpressionCompleted) { diagnostic_manager.Printf(lldb::eSeverityError, - "Couldn't execute function; result was %s", + "Couldn't execute expression: result was %s", toString(execution_result).c_str()); return execution_result; } @@ -245,9 +253,9 @@ LLVMUserExpression::DoExecute(DiagnosticManager &diagnostic_manager, if (FinalizeJITExecution(diagnostic_manager, exe_ctx, result, function_stack_bottom, function_stack_top)) { return lldb::eExpressionCompleted; - } else { - return lldb::eExpressionResultUnavailable; } + + return lldb::eExpressionResultUnavailable; } bool LLVMUserExpression::FinalizeJITExecution( diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ASTUtils.h b/lldb/source/Plugins/ExpressionParser/Clang/ASTUtils.h index d5c68a436e090..a1f02dc3d1b09 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ASTUtils.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ASTUtils.h @@ -70,9 +70,10 @@ class ExternalASTSourceWrapper : public clang::ExternalSemaSource { m_Source->updateOutOfDateIdentifier(II); } - bool FindExternalVisibleDeclsByName(const clang::DeclContext *DC, - clang::DeclarationName Name) override { - return m_Source->FindExternalVisibleDeclsByName(DC, Name); + bool FindExternalVisibleDeclsByName( + const clang::DeclContext *DC, clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) override { + return m_Source->FindExternalVisibleDeclsByName(DC, Name, OriginalDC); } bool LoadExternalSpecializations(const clang::Decl *D, @@ -387,10 +388,11 @@ class SemaSourceWithPriorities : public clang::ExternalSemaSource { return EK_ReplyHazy; } - bool FindExternalVisibleDeclsByName(const clang::DeclContext *DC, - 
clang::DeclarationName Name) override { + bool FindExternalVisibleDeclsByName( + const clang::DeclContext *DC, clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) override { for (size_t i = 0; i < Sources.size(); ++i) - if (Sources[i]->FindExternalVisibleDeclsByName(DC, Name)) + if (Sources[i]->FindExternalVisibleDeclsByName(DC, Name, OriginalDC)) return true; return false; } diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp index e41efdd3f61c7..34129807277d5 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp @@ -99,7 +99,8 @@ void ClangASTSource::StartTranslationUnit(ASTConsumer *Consumer) { // The core lookup interface. bool ClangASTSource::FindExternalVisibleDeclsByName( - const DeclContext *decl_ctx, DeclarationName clang_decl_name) { + const DeclContext *decl_ctx, DeclarationName clang_decl_name, + const clang::DeclContext *original_dc) { if (!m_ast_context) { SetNoExternalVisibleDeclsForName(decl_ctx, clang_decl_name); return false; diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h index 83c910477acc8..dd89bae96f629 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.h @@ -83,8 +83,10 @@ class ClangASTSource : public clang::ExternalASTSource, /// /// \return /// Whatever SetExternalVisibleDeclsForName returns. - bool FindExternalVisibleDeclsByName(const clang::DeclContext *DC, - clang::DeclarationName Name) override; + bool + FindExternalVisibleDeclsByName(const clang::DeclContext *DC, + clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) override; /// Enumerate all Decls in a given lexical context. 
/// @@ -211,9 +213,10 @@ class ClangASTSource : public clang::ExternalASTSource, public: ClangASTSourceProxy(ClangASTSource &original) : m_original(original) {} - bool FindExternalVisibleDeclsByName(const clang::DeclContext *DC, - clang::DeclarationName Name) override { - return m_original.FindExternalVisibleDeclsByName(DC, Name); + bool FindExternalVisibleDeclsByName( + const clang::DeclContext *DC, clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) override { + return m_original.FindExternalVisibleDeclsByName(DC, Name, OriginalDC); } void FindExternalLexicalDecls( diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.cpp index e746e6afe39be..3eddf49a8b7e7 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.cpp @@ -50,7 +50,8 @@ void ClangExternalASTSourceCallbacks::FindExternalLexicalDecls( } bool ClangExternalASTSourceCallbacks::FindExternalVisibleDeclsByName( - const clang::DeclContext *DC, clang::DeclarationName Name) { + const clang::DeclContext *DC, clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) { llvm::SmallVector decls; // Objective-C methods are not added into the LookupPtr when they originate // from an external source. SetExternalVisibleDeclsForName() adds them. 
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.h index 6bd18186a567d..d0eabb509455c 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExternalASTSourceCallbacks.h @@ -37,8 +37,10 @@ class ClangExternalASTSourceCallbacks : public clang::ExternalASTSource { llvm::function_ref IsKindWeWant, llvm::SmallVectorImpl &Result) override; - bool FindExternalVisibleDeclsByName(const clang::DeclContext *DC, - clang::DeclarationName Name) override; + bool + FindExternalVisibleDeclsByName(const clang::DeclContext *DC, + clang::DeclarationName Name, + const clang::DeclContext *OriginalDC) override; void CompleteType(clang::TagDecl *tag_decl) override; diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp index 96a259b811b5e..e4b20b30a069f 100644 --- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp +++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCDeclVendor.cpp @@ -29,8 +29,9 @@ class lldb_private::AppleObjCExternalASTSource AppleObjCExternalASTSource(AppleObjCDeclVendor &decl_vendor) : m_decl_vendor(decl_vendor) {} - bool FindExternalVisibleDeclsByName(const clang::DeclContext *decl_ctx, - clang::DeclarationName name) override { + bool FindExternalVisibleDeclsByName( + const clang::DeclContext *decl_ctx, clang::DeclarationName name, + const clang::DeclContext *original_dc) override { Log *log(GetLog( LLDBLog::Expressions)); // FIXME - a more appropriate log channel? 
diff --git a/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.cpp b/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.cpp index 3848a2b1deb97..aff521890858c 100644 --- a/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.cpp +++ b/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.cpp @@ -386,4 +386,12 @@ lldb::ThreadSP OperatingSystemPython::CreateThread(lldb::tid_t tid, return ThreadSP(); } +bool OperatingSystemPython::DoesPluginReportAllThreads() { + // If the python plugin has a "DoesPluginReportAllThreads" method, use it. + if (std::optional plugin_answer = + m_operating_system_interface_sp->DoesPluginReportAllThreads()) + return *plugin_answer; + return m_process->GetOSPluginReportsAllThreads(); +} + #endif // #if LLDB_ENABLE_PYTHON diff --git a/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.h b/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.h index 90973acde3ebf..980a544241de4 100644 --- a/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.h +++ b/lldb/source/Plugins/OperatingSystem/Python/OperatingSystemPython.h @@ -60,6 +60,8 @@ class OperatingSystemPython : public lldb_private::OperatingSystem { // Method for lazy creation of threads on demand lldb::ThreadSP CreateThread(lldb::tid_t tid, lldb::addr_t context) override; + bool DoesPluginReportAllThreads() override; + protected: bool IsValid() const { return m_script_object_sp && m_script_object_sp->IsValid(); diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.cpp index c3379e774a0b8..d8b2ea984fd88 100644 --- a/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.cpp +++ b/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.cpp @@ -82,6 +82,16 @@ 
OperatingSystemPythonInterface::GetRegisterContextForTID(lldb::tid_t tid) { return obj->GetAsString()->GetValue().str(); } +std::optional OperatingSystemPythonInterface::DoesPluginReportAllThreads() { + Status error; + StructuredData::ObjectSP obj = Dispatch("does_plugin_report_all_threads", error); + if (!ScriptedInterface::CheckStructuredDataObject(LLVM_PRETTY_FUNCTION, obj, + error)) + return {}; + + return obj->GetAsBoolean()->GetValue(); +} + void OperatingSystemPythonInterface::Initialize() { const std::vector ci_usages = { "settings set target.process.python-os-plugin-path ", diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.h b/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.h index 102c3c3953768..8df48f1b64cc9 100644 --- a/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.h +++ b/lldb/source/Plugins/ScriptInterpreter/Python/Interfaces/OperatingSystemPythonInterface.h @@ -45,6 +45,8 @@ class OperatingSystemPythonInterface std::optional GetRegisterContextForTID(lldb::tid_t tid) override; + std::optional DoesPluginReportAllThreads() override; + static void Initialize(); static void Terminate(); diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.cpp index 6857878b354a0..1e2564cb22f25 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.cpp @@ -19,6 +19,8 @@ #include "llvm/ADT/iterator.h" #include "llvm/BinaryFormat/Dwarf.h" #include "llvm/DebugInfo/DWARF/DWARFAddressRange.h" +#include "llvm/DebugInfo/DWARF/DWARFTypePrinter.h" +#include "llvm/Support/raw_ostream.h" using namespace lldb_private; using namespace lldb_private::dwarf; @@ -376,7 +378,8 @@ lldb_private::Type *DWARFDIE::ResolveTypeUID(const DWARFDIE &die) const { return nullptr; } -static CompilerContext GetContextEntry(DWARFDIE die) { +static 
CompilerContext GetContextEntry(DWARFDIE die, + bool derive_template_names) { auto ctx = [die](CompilerContextKind kind) { return CompilerContext(kind, ConstString(die.GetName())); }; @@ -386,11 +389,6 @@ static CompilerContext GetContextEntry(DWARFDIE die) { return ctx(CompilerContextKind::Module); case DW_TAG_namespace: return ctx(CompilerContextKind::Namespace); - case DW_TAG_class_type: - case DW_TAG_structure_type: - return ctx(CompilerContextKind::ClassOrStruct); - case DW_TAG_union_type: - return ctx(CompilerContextKind::Union); case DW_TAG_enumeration_type: return ctx(CompilerContextKind::Enum); case DW_TAG_subprogram: @@ -401,12 +399,28 @@ static CompilerContext GetContextEntry(DWARFDIE die) { return ctx(CompilerContextKind::Typedef); case DW_TAG_base_type: return ctx(CompilerContextKind::Builtin); + case DW_TAG_class_type: + case DW_TAG_structure_type: + case DW_TAG_union_type: { + CompilerContextKind kind = die.Tag() == DW_TAG_union_type + ? CompilerContextKind::Union + : CompilerContextKind::ClassOrStruct; + llvm::StringRef name = die.GetName(); + if (!derive_template_names || name.contains('<')) + return CompilerContext(kind, ConstString(name)); + + std::string name_storage = name.str(); + llvm::raw_string_ostream os(name_storage); + llvm::DWARFTypePrinter(os).appendAndTerminateTemplateParameters( + die); + return CompilerContext(kind, ConstString(os.str())); + } default: llvm_unreachable("Check tag type in the caller!"); } } -static void GetDeclContextImpl(DWARFDIE die, +static void GetDeclContextImpl(DWARFDIE die, bool derive_template_names, llvm::SmallSet &seen, std::vector &context) { // Stop if we hit a cycle. 
@@ -428,7 +442,7 @@ static void GetDeclContextImpl(DWARFDIE die, case DW_TAG_subprogram: case DW_TAG_variable: case DW_TAG_typedef: - context.push_back(GetContextEntry(die)); + context.push_back(GetContextEntry(die, derive_template_names)); break; default: break; @@ -438,15 +452,16 @@ static void GetDeclContextImpl(DWARFDIE die, } } -std::vector DWARFDIE::GetDeclContext() const { +std::vector +DWARFDIE::GetDeclContext(bool derive_template_names) const { llvm::SmallSet seen; std::vector context; - GetDeclContextImpl(*this, seen, context); + GetDeclContextImpl(*this, derive_template_names, seen, context); std::reverse(context.begin(), context.end()); return context; } -static void GetTypeLookupContextImpl(DWARFDIE die, +static void GetTypeLookupContextImpl(DWARFDIE die, bool derive_template_names, llvm::SmallSet &seen, std::vector &context) { // Stop if we hit a cycle. @@ -461,7 +476,7 @@ static void GetTypeLookupContextImpl(DWARFDIE die, case DW_TAG_variable: case DW_TAG_typedef: case DW_TAG_base_type: - context.push_back(GetContextEntry(die)); + context.push_back(GetContextEntry(die, derive_template_names)); break; // If any of the tags below appear in the parent chain, stop the decl @@ -484,10 +499,11 @@ static void GetTypeLookupContextImpl(DWARFDIE die, } } -std::vector DWARFDIE::GetTypeLookupContext() const { +std::vector +DWARFDIE::GetTypeLookupContext(bool derive_template_names) const { llvm::SmallSet seen; std::vector context; - GetTypeLookupContextImpl(*this, seen, context); + GetTypeLookupContextImpl(*this, derive_template_names, seen, context); std::reverse(context.begin(), context.end()); return context; } diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.h index c3239b5b121f9..8785ac09b1f14 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.h +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDIE.h @@ -73,7 +73,15 @@ class DWARFDIE : public DWARFBaseDIE { /// Return this DIE's decl context 
as it is needed to look up types /// in Clang modules. This context will include any modules or functions that /// the type is declared in so an exact module match can be efficiently made. - std::vector GetDeclContext() const; + /// + /// \param[in] derive_template_names + /// If true, augments the returned names with template arguments derived + /// from the child DIEs, if the names don't contain template arguments + /// already. If false, the returned context will contain the names exactly + /// as they are spelled in the debug info, regardless of whether that + /// includes template arguments or not. + std::vector + GetDeclContext(bool derive_template_names = false) const; /// Get a context to a type so it can be looked up. /// @@ -85,7 +93,15 @@ class DWARFDIE : public DWARFBaseDIE { /// appropriate time, like either the translation unit or at a function /// context. This is designed to allow users to efficiently look for types /// using a full or partial CompilerContext array. - std::vector GetTypeLookupContext() const; + /// + /// \param[in] derive_template_names + /// If true, augments the returned names with template arguments derived + /// from the child DIEs, if the names don't contain template arguments + /// already. If false, the returned context will contain the names exactly + /// as they are spelled in the debug info, regardless of whether that + /// includes template arguments or not. 
+ std::vector + GetTypeLookupContext(bool derive_template_names = false) const; DWARFDeclContext GetDWARFDeclContext() const; diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp index 360dbaa1beb5e..2f451d173c4dd 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp @@ -10,10 +10,8 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/DebugInfo/DWARF/DWARFAddressRange.h" #include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h" -#include "llvm/DebugInfo/DWARF/DWARFTypePrinter.h" #include "llvm/Support/Casting.h" #include "llvm/Support/FileUtilities.h" -#include "llvm/Support/Format.h" #include "llvm/Support/FormatAdapters.h" #include "llvm/Support/Threading.h" @@ -2740,18 +2738,11 @@ void SymbolFileDWARF::FindTypes(const TypeQuery &query, TypeResults &results) { // Copy our match's context and update the basename we are looking for // so we can use this only to compare the context correctly. m_index->GetTypesWithQuery(query_simple, [&](DWARFDIE die) { - // Check the language, but only if we have a language filter. - if (query.HasLanguage()) { - if (!query.LanguageMatches(GetLanguageFamily(*die.GetCU()))) - return true; // Keep iterating over index types, language mismatch. - } - - std::string qualified_name; - llvm::raw_string_ostream os(qualified_name); - llvm::DWARFTypePrinter type_printer(os); - type_printer.appendQualifiedName(die); - TypeQuery die_query(qualified_name, e_exact_match); - if (query.ContextMatches(die_query.GetContextRef())) + std::vector qualified_context = + query.GetModuleSearch() + ? 
die.GetDeclContext(/*derive_template_names=*/true) + : die.GetTypeLookupContext(/*derive_template_names=*/true); + if (query.ContextMatches(qualified_context)) if (Type *matching_type = ResolveType(die, true, true)) results.InsertUnique(matching_type->shared_from_this()); return !results.Done(query); // Keep iterating if we aren't done. diff --git a/lldb/source/Symbol/CompilerType.cpp b/lldb/source/Symbol/CompilerType.cpp index e9e6e3bf2600c..09820fb3f0101 100644 --- a/lldb/source/Symbol/CompilerType.cpp +++ b/lldb/source/Symbol/CompilerType.cpp @@ -1105,8 +1105,11 @@ bool CompilerType::GetValueAsScalar(const lldb_private::DataExtractor &data, return false; std::optional byte_size = GetByteSize(exe_scope); - if (!byte_size) + // A bit or byte size of 0 is not a bug, but it doesn't make sense to read a + // scalar of zero size. + if (!byte_size || *byte_size == 0) return false; + lldb::offset_t offset = data_byte_offset; switch (encoding) { case lldb::eEncodingInvalid: diff --git a/lldb/source/Symbol/SaveCoreOptions.cpp b/lldb/source/Symbol/SaveCoreOptions.cpp index 8d9aadece2152..c9f6efeb25d22 100644 --- a/lldb/source/Symbol/SaveCoreOptions.cpp +++ b/lldb/source/Symbol/SaveCoreOptions.cpp @@ -114,9 +114,8 @@ void SaveCoreOptions::AddMemoryRegionToSave( const MemoryRanges &SaveCoreOptions::GetCoreFileMemoryRanges() const { return m_regions_to_save; } - -Status SaveCoreOptions::EnsureValidConfiguration( - lldb::ProcessSP process_sp) const { +Status +SaveCoreOptions::EnsureValidConfiguration(lldb::ProcessSP process_sp) const { Status error; std::string error_str; if (!m_threads_to_save.empty() && GetStyle() == lldb::eSaveCoreFull) @@ -132,10 +131,24 @@ Status SaveCoreOptions::EnsureValidConfiguration( return error; } -void SaveCoreOptions::ClearProcessSpecificData() { +lldb_private::ThreadCollection::collection +SaveCoreOptions::GetThreadsToSave() const { + lldb_private::ThreadCollection::collection thread_collection; + // In cases where no process is set, such as 
when no threads are specified. + if (!m_process_sp) + return thread_collection; + + ThreadList &thread_list = m_process_sp->GetThreadList(); + for (const auto &tid : m_threads_to_save) + thread_collection.push_back(thread_list.FindThreadByID(tid)); + + return thread_collection; +} + +void SaveCoreOptions::ClearProcessSpecificData() { // Deliberately not following the formatter style here to indicate that // this method will be expanded in the future. - m_threads_to_save.clear(); + m_threads_to_save.clear(); } void SaveCoreOptions::Clear() { diff --git a/lldb/source/Symbol/SymbolContext.cpp b/lldb/source/Symbol/SymbolContext.cpp index 19b6ff6a5302b..f4270ee839676 100644 --- a/lldb/source/Symbol/SymbolContext.cpp +++ b/lldb/source/Symbol/SymbolContext.cpp @@ -317,65 +317,6 @@ uint32_t SymbolContext::GetResolvedMask() const { return resolved_mask; } -void SymbolContext::Dump(Stream *s, Target *target) const { - *s << this << ": "; - s->Indent(); - s->PutCString("SymbolContext"); - s->IndentMore(); - s->EOL(); - s->IndentMore(); - s->Indent(); - *s << "Module = " << module_sp.get() << ' '; - if (module_sp) - module_sp->GetFileSpec().Dump(s->AsRawOstream()); - s->EOL(); - s->Indent(); - *s << "CompileUnit = " << comp_unit; - if (comp_unit != nullptr) - s->Format(" {{{0:x-16}} {1}", comp_unit->GetID(), - comp_unit->GetPrimaryFile()); - s->EOL(); - s->Indent(); - *s << "Function = " << function; - if (function != nullptr) { - s->Format(" {{{0:x-16}} {1}, address-range = ", function->GetID(), - function->GetType()->GetName()); - function->GetAddressRange().Dump(s, target, Address::DumpStyleLoadAddress, - Address::DumpStyleModuleWithFileAddress); - s->EOL(); - s->Indent(); - Type *func_type = function->GetType(); - if (func_type) { - *s << " Type = "; - func_type->Dump(s, false); - } - } - s->EOL(); - s->Indent(); - *s << "Block = " << block; - if (block != nullptr) - s->Format(" {{{0:x-16}}", block->GetID()); - s->EOL(); - s->Indent(); - *s << "LineEntry = "; - 
line_entry.Dump(s, target, true, Address::DumpStyleLoadAddress, - Address::DumpStyleModuleWithFileAddress, true); - s->EOL(); - s->Indent(); - *s << "Symbol = " << symbol; - if (symbol != nullptr && symbol->GetMangled()) - *s << ' ' << symbol->GetName().AsCString(); - s->EOL(); - *s << "Variable = " << variable; - if (variable != nullptr) { - s->Format(" {{{0:x-16}} {1}", variable->GetID(), - variable->GetType()->GetName()); - s->EOL(); - } - s->IndentLess(); - s->IndentLess(); -} - bool lldb_private::operator==(const SymbolContext &lhs, const SymbolContext &rhs) { return lhs.function == rhs.function && lhs.symbol == rhs.symbol && diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp index 68485a40a3fcc..c47e728fdf716 100644 --- a/lldb/source/Target/Process.cpp +++ b/lldb/source/Target/Process.cpp @@ -1182,7 +1182,7 @@ void Process::UpdateThreadListIfNeeded() { // See if the OS plugin reports all threads. If it does, then // it is safe to clear unseen thread's plans here. Otherwise we // should preserve them in case they show up again: - clear_unused_threads = GetOSPluginReportsAllThreads(); + clear_unused_threads = os->DoesPluginReportAllThreads(); // Turn off dynamic types to ensure we don't run any expressions. 
// Objective-C can run an expression to determine if a SBValue is a diff --git a/lldb/source/Target/TargetProperties.td b/lldb/source/Target/TargetProperties.td index bb3b500d5fdfb..38a345dfd8849 100644 --- a/lldb/source/Target/TargetProperties.td +++ b/lldb/source/Target/TargetProperties.td @@ -223,7 +223,7 @@ let Definition = "process_experimental" in { def OSPluginReportsAllThreads: Property<"os-plugin-reports-all-threads", "Boolean">, Global, DefaultTrue, - Desc<"Set to False if your OS Plugins doesn't report all threads on each stop.">; + Desc<"Set to False if your Python OS Plugin doesn't report all threads on each stop.">; } let Definition = "process" in { diff --git a/lldb/source/Utility/LLDBAssert.cpp b/lldb/source/Utility/LLDBAssert.cpp index b0c39a284910b..d7adb52f95fa4 100644 --- a/lldb/source/Utility/LLDBAssert.cpp +++ b/lldb/source/Utility/LLDBAssert.cpp @@ -20,6 +20,7 @@ namespace lldb_private { +/// The default callback prints to stderr. static void DefaultAssertCallback(llvm::StringRef message, llvm::StringRef backtrace, llvm::StringRef prompt) { @@ -31,8 +32,8 @@ static void DefaultAssertCallback(llvm::StringRef message, static std::atomic g_lldb_assert_callback = &DefaultAssertCallback; -void lldb_assert(bool expression, const char *expr_text, const char *func, - const char *file, unsigned int line) { +void _lldb_assert(bool expression, const char *expr_text, const char *func, + const char *file, unsigned int line) { if (LLVM_LIKELY(expression)) return; @@ -44,8 +45,6 @@ void lldb_assert(bool expression, const char *expr_text, const char *func, } #endif - // Print a warning and encourage the user to file a bug report, similar to - // LLVM’s crash handler, and then return execution. 
std::string buffer; llvm::raw_string_ostream backtrace(buffer); llvm::sys::PrintStackTrace(backtrace); diff --git a/lldb/test/API/commands/expression/call-function/TestCallStopAndContinue.py b/lldb/test/API/commands/expression/call-function/TestCallStopAndContinue.py index 69f02ec99f64b..d856b5c23a5ea 100644 --- a/lldb/test/API/commands/expression/call-function/TestCallStopAndContinue.py +++ b/lldb/test/API/commands/expression/call-function/TestCallStopAndContinue.py @@ -31,7 +31,7 @@ def test(self): self.expect( "expr -i false -- returnsFive()", error=True, - substrs=["Execution was interrupted, reason: breakpoint"], + substrs=["Expression execution hit a breakpoint: breakpoint"], ) self.runCmd("continue", "Continue completed") diff --git a/lldb/test/API/commands/expression/unwind_expression/TestUnwindExpression.py b/lldb/test/API/commands/expression/unwind_expression/TestUnwindExpression.py index 82f062876a773..c61fe5d01fd5b 100644 --- a/lldb/test/API/commands/expression/unwind_expression/TestUnwindExpression.py +++ b/lldb/test/API/commands/expression/unwind_expression/TestUnwindExpression.py @@ -2,7 +2,6 @@ Test stopping at a breakpoint in an expression, and unwinding from there. 
""" - import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * @@ -71,7 +70,7 @@ def do_unwind_test(self, thread, bkpt, timeout): self.assertTrue(val.GetError().Fail(), "We did not complete the execution.") error_str = val.GetError().GetCString() self.assertIn( - "Execution was interrupted, reason: breakpoint", + "Expression execution hit a breakpoint: breakpoint", error_str, "And the reason was right.", ) diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py index 47d6f5d68bbe6..0d06a9da6535c 100644 --- a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py +++ b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py @@ -40,11 +40,6 @@ def is_os_thread(self, thread): def run_python_os_step_missing_thread(self, do_prune): """Test that the Python operating system plugin works correctly""" - # Our OS plugin does NOT report all threads: - result = self.dbg.HandleCommand( - "settings set process.experimental.os-plugin-reports-all-threads false" - ) - python_os_plugin_path = os.path.join(self.getSourceDir(), "operating_system.py") (target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint( self, "first stop in thread - do a step out", self.main_file diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py index eb02ff534f210..855cdbaf7cdc8 100644 --- a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py +++ b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py @@ -34,6 +34,9 @@ def __init__(self, process): if 
not self.g_value.IsValid(): print("Could not find g_value") + def does_plugin_report_all_threads(self): + return False + def create_thread(self, tid, context): print("Called create thread with tid: ", tid) return None diff --git a/lldb/test/API/lang/cpp/nested-template/TestNestedTemplate.py b/lldb/test/API/lang/cpp/nested-template/TestNestedTemplate.py index 42db060529a81..055a8e6e21042 100644 --- a/lldb/test/API/lang/cpp/nested-template/TestNestedTemplate.py +++ b/lldb/test/API/lang/cpp/nested-template/TestNestedTemplate.py @@ -17,6 +17,16 @@ def do_test(self, debug_flags): DATA_TYPES_DISPLAYED_CORRECTLY, substrs=["1 match found"], ) + self.expect( + "image lookup -A -t 'NS::Struct'", + DATA_TYPES_DISPLAYED_CORRECTLY, + substrs=["1 match found"], + ) + self.expect( + "image lookup -A -t 'NS::Union'", + DATA_TYPES_DISPLAYED_CORRECTLY, + substrs=["1 match found"], + ) @skipIf(compiler=no_match("clang")) @skipIf(compiler_version=["<", "15.0"]) diff --git a/lldb/test/API/lang/cpp/nested-template/main.cpp b/lldb/test/API/lang/cpp/nested-template/main.cpp index 06d1094880964..9bef73052825f 100644 --- a/lldb/test/API/lang/cpp/nested-template/main.cpp +++ b/lldb/test/API/lang/cpp/nested-template/main.cpp @@ -5,6 +5,15 @@ struct Outer { struct Inner {}; }; +namespace NS { +namespace { +template struct Struct {}; +template struct Union {}; +} // namespace +} // namespace NS + int main() { Outer::Inner oi; + NS::Struct ns_struct; + NS::Union ns_union; } diff --git a/lldb/test/API/python_api/sbsavecoreoptions/TestSBSaveCoreOptions.py b/lldb/test/API/python_api/sbsavecoreoptions/TestSBSaveCoreOptions.py index 40d0cc7e96ff4..ace84e8497a59 100644 --- a/lldb/test/API/python_api/sbsavecoreoptions/TestSBSaveCoreOptions.py +++ b/lldb/test/API/python_api/sbsavecoreoptions/TestSBSaveCoreOptions.py @@ -4,15 +4,18 @@ from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * + class SBSaveCoreOptionsAPICase(TestBase): basic_minidump = "basic_minidump.yaml" 
basic_minidump_different_pid = "basic_minidump_different_pid.yaml" def get_process_from_yaml(self, yaml_file): minidump_path = self.getBuildArtifact(os.path.basename(yaml_file) + ".dmp") - print ("minidump_path: " + minidump_path) + print("minidump_path: " + minidump_path) self.yaml2obj(yaml_file, minidump_path) - self.assertTrue(os.path.exists(minidump_path), "yaml2obj did not emit a minidump file") + self.assertTrue( + os.path.exists(minidump_path), "yaml2obj did not emit a minidump file" + ) target = self.dbg.CreateTarget(None) process = target.LoadCore(minidump_path) self.assertTrue(process.IsValid(), "Process is not valid") @@ -59,7 +62,6 @@ def test_adding_and_removing_thread(self): removed_success = options.RemoveThread(thread) self.assertFalse(removed_success) - def test_adding_thread_different_process(self): """Test adding and removing a thread from save core options.""" options = lldb.SBSaveCoreOptions() @@ -79,3 +81,26 @@ def test_adding_thread_different_process(self): self.assertTrue(error.Fail()) error = options.AddThread(thread) self.assertTrue(error.Success()) + + def test_removing_and_adding_insertion_order(self): + """Test insertion order is maintained when removing and adding threads.""" + options = lldb.SBSaveCoreOptions() + process = self.get_basic_process() + threads = [] + for x in range(0, 3): + thread = process.GetThreadAtIndex(x) + threads.append(thread) + error = options.AddThread(thread) + self.assertTrue(error.Success()) + + # Get the middle thread, remove it, and insert it back. 
+ middle_thread = threads[1] + self.assertTrue(options.RemoveThread(middle_thread)) + thread_collection = options.GetThreadsToSave() + self.assertTrue(thread_collection is not None) + self.assertEqual(thread_collection.GetSize(), 2) + error = options.AddThread(middle_thread) + self.assertTrue(error.Success()) + thread_collection = options.GetThreadsToSave() + self.assertEqual(thread_collection.GetSize(), 3) + self.assertIn(middle_thread, thread_collection) diff --git a/lldb/test/API/python_api/sbsavecoreoptions/basic_minidump.yaml b/lldb/test/API/python_api/sbsavecoreoptions/basic_minidump.yaml index 993c7da21225a..96302fbfb6b5c 100644 --- a/lldb/test/API/python_api/sbsavecoreoptions/basic_minidump.yaml +++ b/lldb/test/API/python_api/sbsavecoreoptions/basic_minidump.yaml @@ -24,3 +24,13 @@ Streams: Stack: Start of Memory Range: 0x00007FFFC8D0E000 Content: 'DEADBEEF' + - Thread Id: 0x000074DE + Context: 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B0010000000000033000000000000000000000002020100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040109600000000000100000000000000000000000000000068E7D0C8FF7F000068E7D0C8FF7F000097E6D0C8FF7F000010109600000000000000000000000000020000000000000088E4D0C8FF7F0000603FFF85C77F0000F00340000000000080E7D0C8FF7F000000000000000000000000000000000000E0034000000000007F0300000000000000000000000000000000000000000000801F0000FFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF252525252525252525252525252525250000000000000000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000FF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + Stack: + Start of Memory Range: 0x00007FFFC8D0A000 + Content: 'BEEFDEAD' + - Thread Id: 0x000074DF + Context: 
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B0010000000000033000000000000000000000002020100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040109600000000000100000000000000000000000000000068E7D0C8FF7F000068E7D0C8FF7F000097E6D0C8FF7F000010109600000000000000000000000000020000000000000088E4D0C8FF7F0000603FFF85C77F0000F00340000000000080E7D0C8FF7F000000000000000000000000000000000000E0034000000000007F0300000000000000000000000000000000000000000000801F0000FFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF252525252525252525252525252525250000000000000000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000FF000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + Stack: + Start of Memory Range: 0x00007FFFC8DFF000 + Content: 'BAADBEEF' diff --git a/llvm/cmake/config-ix.cmake b/llvm/cmake/config-ix.cmake index 8726b3d617a8c..0cc3a4aa5cccd 100644 --- a/llvm/cmake/config-ix.cmake +++ b/llvm/cmake/config-ix.cmake @@ -17,6 +17,51 @@ include(CheckCompilerVersion) include(CheckProblematicConfigurations) include(HandleLLVMStdlib) +if (ANDROID OR CYGWIN OR CMAKE_SYSTEM_NAME MATCHES "AIX|DragonFly|FreeBSD|Haiku|Linux|NetBSD|OpenBSD|SunOS") + set(HAVE_DLFCN_H 1) + set(HAVE_MACH_MACH_H 0) + set(HAVE_MALLOC_MALLOC_H 0) + set(HAVE_PTHREAD_H 1) + set(HAVE_SYS_MMAN_H 1) + set(HAVE_SYSEXITS_H 1) + set(HAVE_UNISTD_H 1) +elseif (APPLE) + set(HAVE_DLFCN_H 1) + set(HAVE_MACH_MACH_H 1) + set(HAVE_MALLOC_MALLOC_H 1) + set(HAVE_PTHREAD_H 1) + set(HAVE_SYS_MMAN_H 1) + set(HAVE_SYSEXITS_H 1) + set(HAVE_UNISTD_H 1) +elseif (PURE_WINDOWS) + set(HAVE_DLFCN_H 0) + set(HAVE_MACH_MACH_H 0) + set(HAVE_MALLOC_MALLOC_H 0) + set(HAVE_PTHREAD_H 0) + set(HAVE_SYS_MMAN_H 0) + set(HAVE_SYSEXITS_H 0) + set(HAVE_UNISTD_H 0) +elseif (ZOS) + # Confirmed in + # https://github.com/llvm/llvm-project/pull/104706#issuecomment-2297109613 + set(HAVE_DLFCN_H 1) + set(HAVE_MACH_MACH_H 0) + set(HAVE_MALLOC_MALLOC_H 0) + set(HAVE_PTHREAD_H 1) + set(HAVE_SYS_MMAN_H 1) + set(HAVE_SYSEXITS_H 0) + set(HAVE_UNISTD_H 1) +else() + # Other platforms that we don't promise support for. 
+ check_include_file(dlfcn.h HAVE_DLFCN_H) + check_include_file(mach/mach.h HAVE_MACH_MACH_H) + check_include_file(malloc/malloc.h HAVE_MALLOC_MALLOC_H) + check_include_file(pthread.h HAVE_PTHREAD_H) + check_include_file(sys/mman.h HAVE_SYS_MMAN_H) + check_include_file(sysexits.h HAVE_SYSEXITS_H) + check_include_file(unistd.h HAVE_UNISTD_H) +endif() + if( UNIX AND NOT (APPLE OR BEOS OR HAIKU) ) # Used by check_symbol_exists: list(APPEND CMAKE_REQUIRED_LIBRARIES "m") @@ -58,19 +103,6 @@ if(LLVM_USING_GLIBC) endif() # include checks -check_include_file(dlfcn.h HAVE_DLFCN_H) -check_include_file(malloc/malloc.h HAVE_MALLOC_MALLOC_H) -if( NOT PURE_WINDOWS ) - check_include_file(pthread.h HAVE_PTHREAD_H) -endif() -check_include_file(signal.h HAVE_SIGNAL_H) -check_include_file(sys/ioctl.h HAVE_SYS_IOCTL_H) -check_include_file(sys/mman.h HAVE_SYS_MMAN_H) -check_include_file(sys/resource.h HAVE_SYS_RESOURCE_H) -check_include_file(sys/time.h HAVE_SYS_TIME_H) -check_include_file(sysexits.h HAVE_SYSEXITS_H) -check_include_file(termios.h HAVE_TERMIOS_H) -check_include_file(unistd.h HAVE_UNISTD_H) check_include_file(valgrind/valgrind.h HAVE_VALGRIND_VALGRIND_H) check_symbol_exists(FE_ALL_EXCEPT "fenv.h" HAVE_DECL_FE_ALL_EXCEPT) check_symbol_exists(FE_INEXACT "fenv.h" HAVE_DECL_FE_INEXACT) @@ -86,7 +118,6 @@ check_c_source_compiles(" int main(void) { return 0; }" HAVE_BUILTIN_THREAD_POINTER) -check_include_file(mach/mach.h HAVE_MACH_MACH_H) check_include_file(CrashReporterClient.h HAVE_CRASHREPORTERCLIENT_H) if(APPLE) check_c_source_compiles(" @@ -290,7 +321,6 @@ check_symbol_exists(_Unwind_Backtrace "unwind.h" HAVE__UNWIND_BACKTRACE) check_symbol_exists(getpagesize unistd.h HAVE_GETPAGESIZE) check_symbol_exists(sysconf unistd.h HAVE_SYSCONF) check_symbol_exists(getrusage sys/resource.h HAVE_GETRUSAGE) -check_symbol_exists(setrlimit sys/resource.h HAVE_SETRLIMIT) check_symbol_exists(isatty unistd.h HAVE_ISATTY) check_symbol_exists(futimens sys/stat.h HAVE_FUTIMENS) 
check_symbol_exists(futimes sys/time.h HAVE_FUTIMES) @@ -298,7 +328,7 @@ check_symbol_exists(futimes sys/time.h HAVE_FUTIMES) # Avoid sigaltstack on Apple platforms, where backtrace() cannot handle it # (rdar://7089625) and _Unwind_Backtrace is unusable because it cannot unwind # past the signal handler after an assertion failure (rdar://29866587). -if( HAVE_SIGNAL_H AND NOT LLVM_USE_SANITIZER MATCHES ".*Address.*" AND NOT APPLE ) +if( NOT LLVM_USE_SANITIZER MATCHES ".*Address.*" AND NOT APPLE ) check_symbol_exists(sigaltstack signal.h HAVE_SIGALTSTACK) endif() check_symbol_exists(mallctl malloc_np.h HAVE_MALLCTL) @@ -306,7 +336,6 @@ check_symbol_exists(mallinfo malloc.h HAVE_MALLINFO) check_symbol_exists(mallinfo2 malloc.h HAVE_MALLINFO2) check_symbol_exists(malloc_zone_statistics malloc/malloc.h HAVE_MALLOC_ZONE_STATISTICS) -check_symbol_exists(getrlimit "sys/types.h;sys/time.h;sys/resource.h" HAVE_GETRLIMIT) check_symbol_exists(posix_spawn spawn.h HAVE_POSIX_SPAWN) check_symbol_exists(pread unistd.h HAVE_PREAD) check_symbol_exists(sbrk unistd.h HAVE_SBRK) diff --git a/llvm/cmake/modules/GetHostTriple.cmake b/llvm/cmake/modules/GetHostTriple.cmake index e58d5b1ef14d4..2a2f84ada098f 100644 --- a/llvm/cmake/modules/GetHostTriple.cmake +++ b/llvm/cmake/modules/GetHostTriple.cmake @@ -2,7 +2,7 @@ # Invokes config.guess function( get_host_triple var ) - if( MSVC OR (CMAKE_SYSTEM_NAME STREQUAL "Windows" AND CMAKE_C_COMPILER_ID MATCHES "Clang" AND NOT MINGW AND NOT MSYS)) + if( MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC") if( CMAKE_C_COMPILER_ARCHITECTURE_ID MATCHES "ARM64.*" ) set( value "aarch64-pc-windows-msvc" ) elseif( CMAKE_C_COMPILER_ARCHITECTURE_ID MATCHES "ARM.*" ) diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md index ebfa453d92d37..8f88b824f965a 100644 --- a/llvm/docs/ReleaseNotes.md +++ b/llvm/docs/ReleaseNotes.md @@ -459,6 +459,25 @@ Changes to LLDB * [New Core File API](https://lldb.llvm.org/python_api/lldb.SBSaveCoreOptions.html). 
This gives greater control on the data captured into the core file, relative to the existing `process save-core` styles. +* When opening ELF core files, LLDB will print additional information about the + signal that killed the process and the disassembly view will display actual + (relocated) targets of the jump instructions instead of raw offsets encoded in + the instruction. This matches existing behavior for live processes. + + Old: + ``` + * thread #1: tid = 329384, 0x0000000000401262, name = 'a.out', stop reason = signal SIGSEGV + + 0x7f1e3193e0a7 <+23>: ja 0xfe100 ; <+112> + ``` + + New: + ``` + * thread #1: tid = 329384, 0x0000000000401262, name = 'a.out', stop reason = SIGSEGV: address not mapped to object (fault address: 0x0) + + 0x7f1e3193e0a7 <+23>: ja 0x7f1e3193e100 ; <+112> + ``` + * `lldb-server` now listens to a single port for gdbserver connections and provides that port to the connection handler processes. This means that only 2 ports need to be opened in the firewall (one for the `lldb-server` platform, one for gdbserver connections). diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h index bf491096e3c47..5291369b3b9f1 100644 --- a/llvm/include/llvm/CodeGen/AsmPrinter.h +++ b/llvm/include/llvm/CodeGen/AsmPrinter.h @@ -44,6 +44,7 @@ class DebugHandlerBase; class DIE; class DIEAbbrev; class DwarfDebug; +class EHStreamer; class GCMetadataPrinter; class GCStrategy; class GlobalAlias; @@ -187,15 +188,17 @@ class AsmPrinter : public MachineFunctionPass { /// For dso_local functions, the current $local alias for the function. MCSymbol *CurrentFnBeginLocal = nullptr; - /// A vector of all debug/EH info emitters we should use. This vector - /// maintains ownership of the emitters. + /// A handle to the EH info emitter (if present). + // Only for EHStreamer subtypes, but some C++ compilers will incorrectly warn + // us if we declare that directly. 
+ SmallVector, 1> EHHandlers; + + // A vector of all Debuginfo emitters we should use. Protected so that + // targets can add their own. This vector maintains ownership of the + // emitters. SmallVector, 2> Handlers; size_t NumUserHandlers = 0; - /// Debuginfo handler. Protected so that targets can add their own. - SmallVector, 1> DebugHandlers; - size_t NumUserDebugHandlers = 0; - StackMaps SM; private: @@ -527,8 +530,6 @@ class AsmPrinter : public MachineFunctionPass { void addAsmPrinterHandler(std::unique_ptr Handler); - void addDebugHandler(std::unique_ptr Handler); - // Targets can, or in the case of EmitInstruction, must implement these to // customize output. diff --git a/llvm/include/llvm/CodeGen/AsmPrinterHandler.h b/llvm/include/llvm/CodeGen/AsmPrinterHandler.h index ed73e618431de..bf3f6c53027a7 100644 --- a/llvm/include/llvm/CodeGen/AsmPrinterHandler.h +++ b/llvm/include/llvm/CodeGen/AsmPrinterHandler.h @@ -64,6 +64,18 @@ class AsmPrinterHandler { /// immediately prior to markFunctionEnd. virtual void endBasicBlockSection(const MachineBasicBlock &MBB) {} + /// For symbols that have a size designated (e.g. common symbols), + /// this tracks that size. + virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) {} + + /// Process beginning of an instruction. + virtual void beginInstruction(const MachineInstr *MI) {} + + /// Process end of an instruction. + virtual void endInstruction() {} + + virtual void beginCodeAlignment(const MachineBasicBlock &MBB) {} + /// Emit target-specific EH funclet machinery. virtual void beginFunclet(const MachineBasicBlock &MBB, MCSymbol *Sym = nullptr) {} diff --git a/llvm/include/llvm/CodeGen/DebugHandlerBase.h b/llvm/include/llvm/CodeGen/DebugHandlerBase.h index d39e7e68cb255..f669bd311ff56 100644 --- a/llvm/include/llvm/CodeGen/DebugHandlerBase.h +++ b/llvm/include/llvm/CodeGen/DebugHandlerBase.h @@ -50,14 +50,10 @@ struct DbgVariableLocation { /// Base class for debug information backends. 
Common functionality related to /// tracking which variables and scopes are alive at a given PC live here. -class DebugHandlerBase { +class DebugHandlerBase : public AsmPrinterHandler { protected: DebugHandlerBase(AsmPrinter *A); -public: - virtual ~DebugHandlerBase(); - -protected: /// Target of debug info emission. AsmPrinter *Asm = nullptr; @@ -120,24 +116,20 @@ class DebugHandlerBase { private: InstructionOrdering InstOrdering; + // AsmPrinterHandler overrides. public: - /// For symbols that have a size designated (e.g. common symbols), - /// this tracks that size. Only used by DWARF. - virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) {} - - virtual void beginModule(Module *M); - virtual void endModule() = 0; + virtual ~DebugHandlerBase() override; - virtual void beginInstruction(const MachineInstr *MI); - virtual void endInstruction(); + void beginModule(Module *M) override; - void beginFunction(const MachineFunction *MF); - void endFunction(const MachineFunction *MF); + void beginInstruction(const MachineInstr *MI) override; + void endInstruction() override; - void beginBasicBlockSection(const MachineBasicBlock &MBB); - void endBasicBlockSection(const MachineBasicBlock &MBB); + void beginFunction(const MachineFunction *MF) override; + void endFunction(const MachineFunction *MF) override; - virtual void beginCodeAlignment(const MachineBasicBlock &MBB) {} + void beginBasicBlockSection(const MachineBasicBlock &MBB) override; + void endBasicBlockSection(const MachineBasicBlock &MBB) override; /// Return Label preceding the instruction. MCSymbol *getLabelBeforeInsn(const MachineInstr *MI); diff --git a/llvm/include/llvm/Config/config.h.cmake b/llvm/include/llvm/Config/config.h.cmake index d51d01017d986..1d2d00a3b758b 100644 --- a/llvm/include/llvm/Config/config.h.cmake +++ b/llvm/include/llvm/Config/config.h.cmake @@ -90,9 +90,6 @@ /* Define to 1 if you have the `getpagesize' function. 
*/ #cmakedefine HAVE_GETPAGESIZE ${HAVE_GETPAGESIZE} -/* Define to 1 if you have the `getrlimit' function. */ -#cmakedefine HAVE_GETRLIMIT ${HAVE_GETRLIMIT} - /* Define to 1 if you have the `getrusage' function. */ #cmakedefine HAVE_GETRUSAGE ${HAVE_GETRUSAGE} @@ -165,42 +162,24 @@ /* Define to 1 if you have the `setenv' function. */ #cmakedefine HAVE_SETENV ${HAVE_SETENV} -/* Define to 1 if you have the `setrlimit' function. */ -#cmakedefine HAVE_SETRLIMIT ${HAVE_SETRLIMIT} - /* Define to 1 if you have the `sigaltstack' function. */ #cmakedefine HAVE_SIGALTSTACK ${HAVE_SIGALTSTACK} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SIGNAL_H ${HAVE_SIGNAL_H} - /* Define to 1 if you have the `strerror_r' function. */ #cmakedefine HAVE_STRERROR_R ${HAVE_STRERROR_R} /* Define to 1 if you have the `sysconf' function. */ #cmakedefine HAVE_SYSCONF ${HAVE_SYSCONF} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_IOCTL_H ${HAVE_SYS_IOCTL_H} - /* Define to 1 if you have the header file. */ #cmakedefine HAVE_SYS_MMAN_H ${HAVE_SYS_MMAN_H} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_RESOURCE_H ${HAVE_SYS_RESOURCE_H} - -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_TIME_H ${HAVE_SYS_TIME_H} - /* Define to 1 if stat struct has st_mtimespec member .*/ #cmakedefine HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC} /* Define to 1 if stat struct has st_mtim member. */ #cmakedefine HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H} - /* Define to 1 if you have the header file. 
*/ #cmakedefine HAVE_UNISTD_H ${HAVE_UNISTD_H} diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h b/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h index 356b8cd70aec5..e10242bb2d42c 100644 --- a/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h +++ b/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h @@ -671,6 +671,11 @@ class GOTTableManager : public TableManager { public: static StringRef getSectionName() { return "$__GOT"; } + GOTTableManager(LinkGraph &G) { + if ((GOTSection = G.findSectionByName(getSectionName()))) + registerExistingEntries(); + } + bool visitEdge(LinkGraph &G, Block *B, Edge &E) { Edge::Kind KindToSet = Edge::Invalid; switch (E.getKind()) { @@ -721,16 +726,21 @@ class GOTTableManager : public TableManager { return *GOTSection; } + void registerExistingEntries(); + Section *GOTSection = nullptr; }; /// Procedure Linkage Table Builder. class PLTTableManager : public TableManager { public: - PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {} - static StringRef getSectionName() { return "$__STUBS"; } + PLTTableManager(LinkGraph &G, GOTTableManager &GOT) : GOT(GOT) { + if ((StubsSection = G.findSectionByName(getSectionName()))) + registerExistingEntries(); + } + bool visitEdge(LinkGraph &G, Block *B, Edge &E) { if (E.getKind() == x86_64::BranchPCRel32 && !E.getTarget().isDefined()) { DEBUG_WITH_TYPE("jitlink", { @@ -754,14 +764,16 @@ class PLTTableManager : public TableManager { public: Section &getStubsSection(LinkGraph &G) { - if (!PLTSection) - PLTSection = &G.createSection(getSectionName(), - orc::MemProt::Read | orc::MemProt::Exec); - return *PLTSection; + if (!StubsSection) + StubsSection = &G.createSection(getSectionName(), + orc::MemProt::Read | orc::MemProt::Exec); + return *StubsSection; } + void registerExistingEntries(); + GOTTableManager &GOT; - Section *PLTSection = nullptr; + Section *StubsSection = nullptr; }; /// Optimize the GOT and Stub relocations if the edge target address is in range diff --git 
a/llvm/include/llvm/FileCheck/FileCheck.h b/llvm/include/llvm/FileCheck/FileCheck.h index 321ce1d26e163..72d0b91b27ad0 100644 --- a/llvm/include/llvm/FileCheck/FileCheck.h +++ b/llvm/include/llvm/FileCheck/FileCheck.h @@ -180,8 +180,7 @@ struct FileCheckString; class FileCheck { FileCheckRequest Req; std::unique_ptr PatternContext; - // C++17 TODO: make this a plain std::vector. - std::unique_ptr> CheckStrings; + std::vector CheckStrings; public: explicit FileCheck(FileCheckRequest Req); diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td index 544f9f428f41a..f21948697c8a6 100644 --- a/llvm/include/llvm/IR/IntrinsicsDirectX.td +++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td @@ -105,6 +105,8 @@ def int_dx_wave_active_countbits : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i1 def int_dx_wave_all : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; def int_dx_wave_any : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; def int_dx_wave_getlaneindex : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrConvergent, IntrNoMem]>; +def int_dx_wave_reduce_sum : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; +def int_dx_wave_reduce_usum : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_dx_wave_is_first_lane : DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>; def int_dx_wave_readlane : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrConvergent, IntrNoMem]>; def int_dx_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>; diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td index e29415d2d9bc3..be337dbccaf8a 100644 --- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td +++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td @@ -91,6 +91,7 @@ let TargetPrefix = "spv" in { def 
int_spv_wave_active_countbits : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; def int_spv_wave_all : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; def int_spv_wave_any : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_i1_ty], [IntrConvergent, IntrNoMem]>; + def int_spv_wave_reduce_sum : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>; def int_spv_wave_is_first_lane : DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>; def int_spv_wave_readlane : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrConvergent, IntrNoMem]>; def int_spv_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>; diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h index df2384c5f6e69..ec7d030a20de8 100644 --- a/llvm/include/llvm/IR/Metadata.h +++ b/llvm/include/llvm/IR/Metadata.h @@ -138,6 +138,10 @@ class Metadata { void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr) const; /// @} + + /// Metadata IDs that may generate poison. + constexpr static const unsigned PoisonGeneratingIDs[] = { + LLVMContext::MD_range, LLVMContext::MD_nonnull, LLVMContext::MD_align}; }; // Create wrappers for C Binding types (see CBindingWrapping.h). 
diff --git a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h index 8ca073ba82253..ce794e2573637 100644 --- a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h +++ b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h @@ -38,9 +38,8 @@ enum class TMAReductionOp : uint8_t { XOR = 7, }; -inline bool IntrinsicShouldFTZ(Intrinsic::ID IntrinsicID) { +inline bool FPToIntegerIntrinsicShouldFTZ(Intrinsic::ID IntrinsicID) { switch (IntrinsicID) { - // Float to i32 / i64 conversion intrinsics: case Intrinsic::nvvm_f2i_rm_ftz: case Intrinsic::nvvm_f2i_rn_ftz: case Intrinsic::nvvm_f2i_rp_ftz: @@ -61,11 +60,53 @@ inline bool IntrinsicShouldFTZ(Intrinsic::ID IntrinsicID) { case Intrinsic::nvvm_f2ull_rp_ftz: case Intrinsic::nvvm_f2ull_rz_ftz: return true; + + case Intrinsic::nvvm_f2i_rm: + case Intrinsic::nvvm_f2i_rn: + case Intrinsic::nvvm_f2i_rp: + case Intrinsic::nvvm_f2i_rz: + + case Intrinsic::nvvm_f2ui_rm: + case Intrinsic::nvvm_f2ui_rn: + case Intrinsic::nvvm_f2ui_rp: + case Intrinsic::nvvm_f2ui_rz: + + case Intrinsic::nvvm_d2i_rm: + case Intrinsic::nvvm_d2i_rn: + case Intrinsic::nvvm_d2i_rp: + case Intrinsic::nvvm_d2i_rz: + + case Intrinsic::nvvm_d2ui_rm: + case Intrinsic::nvvm_d2ui_rn: + case Intrinsic::nvvm_d2ui_rp: + case Intrinsic::nvvm_d2ui_rz: + + case Intrinsic::nvvm_f2ll_rm: + case Intrinsic::nvvm_f2ll_rn: + case Intrinsic::nvvm_f2ll_rp: + case Intrinsic::nvvm_f2ll_rz: + + case Intrinsic::nvvm_f2ull_rm: + case Intrinsic::nvvm_f2ull_rn: + case Intrinsic::nvvm_f2ull_rp: + case Intrinsic::nvvm_f2ull_rz: + + case Intrinsic::nvvm_d2ll_rm: + case Intrinsic::nvvm_d2ll_rn: + case Intrinsic::nvvm_d2ll_rp: + case Intrinsic::nvvm_d2ll_rz: + + case Intrinsic::nvvm_d2ull_rm: + case Intrinsic::nvvm_d2ull_rn: + case Intrinsic::nvvm_d2ull_rp: + case Intrinsic::nvvm_d2ull_rz: + return false; } + llvm_unreachable("Checking FTZ flag for invalid f2i/d2i intrinsic"); return false; } -inline bool IntrinsicConvertsToSignedInteger(Intrinsic::ID IntrinsicID) { +inline 
bool FPToIntegerIntrinsicResultIsSigned(Intrinsic::ID IntrinsicID) { switch (IntrinsicID) { // f2i case Intrinsic::nvvm_f2i_rm: @@ -96,12 +137,44 @@ inline bool IntrinsicConvertsToSignedInteger(Intrinsic::ID IntrinsicID) { case Intrinsic::nvvm_d2ll_rp: case Intrinsic::nvvm_d2ll_rz: return true; + + // f2ui + case Intrinsic::nvvm_f2ui_rm: + case Intrinsic::nvvm_f2ui_rm_ftz: + case Intrinsic::nvvm_f2ui_rn: + case Intrinsic::nvvm_f2ui_rn_ftz: + case Intrinsic::nvvm_f2ui_rp: + case Intrinsic::nvvm_f2ui_rp_ftz: + case Intrinsic::nvvm_f2ui_rz: + case Intrinsic::nvvm_f2ui_rz_ftz: + // d2ui + case Intrinsic::nvvm_d2ui_rm: + case Intrinsic::nvvm_d2ui_rn: + case Intrinsic::nvvm_d2ui_rp: + case Intrinsic::nvvm_d2ui_rz: + // f2ull + case Intrinsic::nvvm_f2ull_rm: + case Intrinsic::nvvm_f2ull_rm_ftz: + case Intrinsic::nvvm_f2ull_rn: + case Intrinsic::nvvm_f2ull_rn_ftz: + case Intrinsic::nvvm_f2ull_rp: + case Intrinsic::nvvm_f2ull_rp_ftz: + case Intrinsic::nvvm_f2ull_rz: + case Intrinsic::nvvm_f2ull_rz_ftz: + // d2ull + case Intrinsic::nvvm_d2ull_rm: + case Intrinsic::nvvm_d2ull_rn: + case Intrinsic::nvvm_d2ull_rp: + case Intrinsic::nvvm_d2ull_rz: + return false; } + llvm_unreachable( + "Checking invalid f2i/d2i intrinsic for signed int conversion"); return false; } inline APFloat::roundingMode -IntrinsicGetRoundingMode(Intrinsic::ID IntrinsicID) { +GetFPToIntegerRoundingMode(Intrinsic::ID IntrinsicID) { switch (IntrinsicID) { // RM: case Intrinsic::nvvm_f2i_rm: @@ -167,10 +240,100 @@ IntrinsicGetRoundingMode(Intrinsic::ID IntrinsicID) { case Intrinsic::nvvm_d2ull_rz: return APFloat::rmTowardZero; } - llvm_unreachable("Invalid f2i/d2i rounding mode intrinsic"); + llvm_unreachable("Checking rounding mode for invalid f2i/d2i intrinsic"); return APFloat::roundingMode::Invalid; } +inline bool FMinFMaxShouldFTZ(Intrinsic::ID IntrinsicID) { + switch (IntrinsicID) { + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case 
Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + return true; + + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_d: + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_nan_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: + return false; + } + llvm_unreachable("Checking FTZ flag for invalid fmin/fmax intrinsic"); + return false; +} + +inline bool FMinFMaxPropagatesNaNs(Intrinsic::ID IntrinsicID) { + switch (IntrinsicID) { + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_nan_f: + case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + return true; + + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_d: + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: + return false; + } + llvm_unreachable("Checking NaN flag for invalid fmin/fmax intrinsic"); + return false; +} + +inline bool FMinFMaxIsXorSignAbs(Intrinsic::ID IntrinsicID) { + switch (IntrinsicID) { + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + case 
Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: + return true; + + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_nan_f: + + case Intrinsic::nvvm_fmin_d: + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_nan_f: + return false; + } + llvm_unreachable("Checking XorSignAbs flag for invalid fmin/fmax intrinsic"); + return false; +} + } // namespace nvvm } // namespace llvm #endif // LLVM_IR_NVVMINTRINSICUTILS_H diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h index 81307d7b025d9..5a20a9ef63287 100644 --- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h +++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h @@ -748,10 +748,15 @@ struct FunctionRecord { }; /// Iterator over Functions, optionally filtered to a single file. +/// When filtering to a single file, the iterator requires a list of potential +/// indices where to find the desired records to avoid quadratic behavior when +/// repeatedly iterating over functions from different files. 
class FunctionRecordIterator : public iterator_facade_base { ArrayRef Records; + ArrayRef RecordIndices; + ArrayRef::iterator CurrentIndex; ArrayRef::iterator Current; StringRef Filename; @@ -760,8 +765,17 @@ class FunctionRecordIterator public: FunctionRecordIterator(ArrayRef Records_, - StringRef Filename = "") - : Records(Records_), Current(Records.begin()), Filename(Filename) { + StringRef Filename = "", + ArrayRef RecordIndices_ = {}) + : Records(Records_), RecordIndices(RecordIndices_), + CurrentIndex(RecordIndices.begin()), + // If `RecordIndices` is provided, we can skip directly to the first + // index it provides. + Current(CurrentIndex == RecordIndices.end() ? Records.begin() + : &Records[*CurrentIndex]), + Filename(Filename) { + assert(Filename.empty() == RecordIndices_.empty() && + "If `Filename` is specified, `RecordIndices` must also be provided"); skipOtherFiles(); } @@ -774,11 +788,29 @@ class FunctionRecordIterator const FunctionRecord &operator*() const { return *Current; } FunctionRecordIterator &operator++() { - assert(Current != Records.end() && "incremented past end"); - ++Current; + advanceOne(); skipOtherFiles(); return *this; } + +private: + void advanceOne() { + if (RecordIndices.empty()) { + // Iteration over all entries, advance in the list of records. + assert(Current != Records.end() && "incremented past end"); + ++Current; + } else { + // Iterator over entries filtered by file name. Advance in the list of + // indices, and adjust the cursor in the list of records accordingly. + assert(CurrentIndex != RecordIndices.end() && "incremented past end"); + ++CurrentIndex; + if (CurrentIndex == RecordIndices.end()) { + Current = Records.end(); + } else { + Current = &Records[*CurrentIndex]; + } + } + } }; /// Coverage information for a macro expansion or #included file. @@ -1037,8 +1069,10 @@ class CoverageMapping { /// Gets all of the functions in a particular file. 
iterator_range getCoveredFunctions(StringRef Filename) const { - return make_range(FunctionRecordIterator(Functions, Filename), - FunctionRecordIterator()); + return make_range( + FunctionRecordIterator(Functions, Filename, + getImpreciseRecordIndicesForFilename(Filename)), + FunctionRecordIterator()); } /// Get the list of function instantiation groups in a particular file. diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h index c16ea2dcbb770..245e4a24c70df 100644 --- a/llvm/include/llvm/Support/FileSystem.h +++ b/llvm/include/llvm/Support/FileSystem.h @@ -44,8 +44,6 @@ #include #include -#include - namespace llvm { namespace sys { namespace fs { diff --git a/llvm/include/llvm/Support/MathExtras.h b/llvm/include/llvm/Support/MathExtras.h index a52a9f07bacd4..574e9a6116603 100644 --- a/llvm/include/llvm/Support/MathExtras.h +++ b/llvm/include/llvm/Support/MathExtras.h @@ -44,14 +44,15 @@ using common_sint = namespace numbers { // TODO: Track C++20 std::numbers. // TODO: Favor using the hexadecimal FP constants (requires C++17). 
-constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113 +// clang-format off +constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145769P+1) https://oeis.org/A001113 egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620 ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162 - ln10 = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392 + ln10 = 2.3025850929940456840, // (0x1.26bb1bbb55516P+1) https://oeis.org/A002392 log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0) log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2) pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796 - inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541 + inv_pi = .31830988618379067154, // (0x1.45f306dc9c883P-2) https://oeis.org/A049541 sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197 sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A00219 @@ -74,6 +75,7 @@ constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194 inv_sqrt3f = .577350269F, // (0x1.279a74P-1) phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622 +// clang-format on } // namespace numbers /// Create a bitmask with the N right-most bits set to 1, and all other diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h index b15e9fc7328da..d9930a48e8084 100644 --- a/llvm/include/llvm/TableGen/Record.h +++ b/llvm/include/llvm/TableGen/Record.h @@ -1816,7 +1816,7 @@ class Record { assert(!CorrespondingDefInit && "changing type of record after it has been referenced"); assert(!isSubClassOf(R) && "Already subclassing record!"); - SuperClasses.push_back(std::make_pair(R, Range)); + 
SuperClasses.emplace_back(R, Range); } /// If there are any field references that refer to fields that have been @@ -1971,21 +1971,20 @@ class RecordKeeper { } void addClass(std::unique_ptr R) { - bool Ins = Classes.insert(std::make_pair(std::string(R->getName()), - std::move(R))).second; + bool Ins = + Classes.try_emplace(std::string(R->getName()), std::move(R)).second; (void)Ins; assert(Ins && "Class already exists"); } void addDef(std::unique_ptr R) { - bool Ins = Defs.insert(std::make_pair(std::string(R->getName()), - std::move(R))).second; + bool Ins = Defs.try_emplace(std::string(R->getName()), std::move(R)).second; (void)Ins; assert(Ins && "Record already exists"); } void addExtraGlobal(StringRef Name, const Init *I) { - bool Ins = ExtraGlobals.insert(std::make_pair(std::string(Name), I)).second; + bool Ins = ExtraGlobals.try_emplace(std::string(Name), I).second; (void)Ins; assert(!getDef(Name)); assert(Ins && "Global already exists"); @@ -2071,14 +2070,14 @@ struct LessRecordRegister { for (size_t I = 0, E = Rec.size(); I != E; ++I, ++Len) { bool IsDigit = isDigit(Curr[I]); if (IsDigit != IsDigitPart) { - Parts.push_back(std::make_pair(IsDigitPart, StringRef(Start, Len))); + Parts.emplace_back(IsDigitPart, StringRef(Start, Len)); Len = 0; Start = &Curr[I]; IsDigitPart = isDigit(Curr[I]); } } // Push the last part. - Parts.push_back(std::make_pair(IsDigitPart, StringRef(Start, Len))); + Parts.emplace_back(IsDigitPart, StringRef(Start, Len)); } size_t size() { return Parts.size(); } diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h new file mode 100644 index 0000000000000..586de53f3a724 --- /dev/null +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h @@ -0,0 +1,78 @@ +//===- InstrMaps.h ----------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVEC_PASSES_INSTRMAPS_H +#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVEC_PASSES_INSTRMAPS_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/SandboxIR/Value.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/raw_ostream.h" + +namespace llvm::sandboxir { + +/// Maps the original instructions to the vectorized instrs and the reverse. +/// For now an original instr can only map to a single vector. +class InstrMaps { + /// A map from the original values that got combined into vectors, to the + /// vector value(s). + DenseMap OrigToVectorMap; + /// A map from the vector value to a map of the original value to its lane. + /// Please note that for constant vectors, there may multiple original values + /// with the same lane, as they may be coming from vectorizing different + /// original values. + DenseMap> VectorToOrigLaneMap; + +public: + /// \Returns the vector value that we got from vectorizing \p Orig, or + /// nullptr if not found. + Value *getVectorForOrig(Value *Orig) const { + auto It = OrigToVectorMap.find(Orig); + return It != OrigToVectorMap.end() ? It->second : nullptr; + } + /// \Returns the lane of \p Orig before it got vectorized into \p Vec, or + /// nullopt if not found. + std::optional getOrigLane(Value *Vec, Value *Orig) const { + auto It1 = VectorToOrigLaneMap.find(Vec); + if (It1 == VectorToOrigLaneMap.end()) + return std::nullopt; + const auto &OrigToLaneMap = It1->second; + auto It2 = OrigToLaneMap.find(Orig); + if (It2 == OrigToLaneMap.end()) + return std::nullopt; + return It2->second; + } + /// Update the map to reflect that \p Origs got vectorized into \p Vec. 
+ void registerVector(ArrayRef Origs, Value *Vec) { + auto &OrigToLaneMap = VectorToOrigLaneMap[Vec]; + for (auto [Lane, Orig] : enumerate(Origs)) { + auto Pair = OrigToVectorMap.try_emplace(Orig, Vec); + assert(Pair.second && "Orig already exists in the map!"); + (void)Pair; + OrigToLaneMap[Orig] = Lane; + } + } + void clear() { + OrigToVectorMap.clear(); + VectorToOrigLaneMap.clear(); + } +#ifndef NDEBUG + void print(raw_ostream &OS) const { + OS << "OrigToVectorMap:\n"; + for (auto [Orig, Vec] : OrigToVectorMap) + OS << *Orig << " : " << *Vec << "\n"; + } + LLVM_DUMP_METHOD void dump() const; +#endif +}; +} // namespace llvm::sandboxir + +#endif // LLVM_TRANSFORMS_VECTORIZE_SANDBOXVEC_PASSES_INSTRMAPS_H diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h index 233cf82a1b3df..c03e7a10397ad 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h @@ -23,10 +23,12 @@ namespace llvm::sandboxir { class LegalityAnalysis; class Value; +class InstrMaps; enum class LegalityResultID { - Pack, ///> Collect scalar values. - Widen, ///> Vectorize by combining scalars to a vector. + Pack, ///> Collect scalar values. + Widen, ///> Vectorize by combining scalars to a vector. + DiamondReuse, ///> Don't generate new code, reuse existing vector. }; /// The reason for vectorizing or not vectorizing. 
@@ -50,6 +52,8 @@ struct ToStr { return "Pack"; case LegalityResultID::Widen: return "Widen"; + case LegalityResultID::DiamondReuse: + return "DiamondReuse"; } llvm_unreachable("Unknown LegalityResultID enum"); } @@ -137,6 +141,19 @@ class Widen final : public LegalityResult { } }; +class DiamondReuse final : public LegalityResult { + friend class LegalityAnalysis; + Value *Vec; + DiamondReuse(Value *Vec) + : LegalityResult(LegalityResultID::DiamondReuse), Vec(Vec) {} + +public: + static bool classof(const LegalityResult *From) { + return From->getSubclassID() == LegalityResultID::DiamondReuse; + } + Value *getVector() const { return Vec; } +}; + class Pack final : public LegalityResultWithReason { Pack(ResultReason Reason) : LegalityResultWithReason(LegalityResultID::Pack, Reason) {} @@ -148,6 +165,59 @@ class Pack final : public LegalityResultWithReason { } }; +/// Describes how to collect the values needed by each lane. +class CollectDescr { +public: + /// Describes how to get a value element. If the value is a vector then it + /// also provides the index to extract it from. + class ExtractElementDescr { + Value *V; + /// The index in `V` that the value can be extracted from. + /// This is nullopt if we need to use `V` as a whole. + std::optional ExtractIdx; + + public: + ExtractElementDescr(Value *V, int ExtractIdx) + : V(V), ExtractIdx(ExtractIdx) {} + ExtractElementDescr(Value *V) : V(V), ExtractIdx(std::nullopt) {} + Value *getValue() const { return V; } + bool needsExtract() const { return ExtractIdx.has_value(); } + int getExtractIdx() const { return *ExtractIdx; } + }; + + using DescrVecT = SmallVector; + DescrVecT Descrs; + +public: + CollectDescr(SmallVectorImpl &&Descrs) + : Descrs(std::move(Descrs)) {} + /// If all elements come from a single vector input, then return that vector + /// and whether we need a shuffle to get them in order. 
+ std::optional> getSingleInput() const { + const auto &Descr0 = *Descrs.begin(); + Value *V0 = Descr0.getValue(); + if (!Descr0.needsExtract()) + return std::nullopt; + bool NeedsShuffle = Descr0.getExtractIdx() != 0; + int Lane = 1; + for (const auto &Descr : drop_begin(Descrs)) { + if (!Descr.needsExtract()) + return std::nullopt; + if (Descr.getValue() != V0) + return std::nullopt; + if (Descr.getExtractIdx() != Lane++) + NeedsShuffle = true; + } + return std::make_pair(V0, NeedsShuffle); + } + bool hasVectorInputs() const { + return any_of(Descrs, [](const auto &D) { return D.needsExtract(); }); + } + const SmallVector &getDescrs() const { + return Descrs; + } +}; + /// Performs the legality analysis and returns a LegalityResult object. class LegalityAnalysis { Scheduler Sched; @@ -160,11 +230,17 @@ class LegalityAnalysis { ScalarEvolution &SE; const DataLayout &DL; + InstrMaps &IMaps; + + /// Finds how we can collect the values in \p Bndl from the vectorized or + /// non-vectorized code. It returns a map of the value we should extract from + /// and the corresponding shuffle mask we need to use. + CollectDescr getHowToCollectValues(ArrayRef Bndl) const; public: LegalityAnalysis(AAResults &AA, ScalarEvolution &SE, const DataLayout &DL, - Context &Ctx) - : Sched(AA, Ctx), SE(SE), DL(DL) {} + Context &Ctx, InstrMaps &IMaps) + : Sched(AA, Ctx), SE(SE), DL(DL), IMaps(IMaps) {} /// A LegalityResult factory. template ResultT &createLegalityResult(ArgsT... Args) { @@ -177,7 +253,7 @@ class LegalityAnalysis { // TODO: Try to remove the SkipScheduling argument by refactoring the tests. 
const LegalityResult &canVectorize(ArrayRef Bndl, bool SkipScheduling = false); - void clear() { Sched.clear(); } + void clear(); }; } // namespace llvm::sandboxir diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h index 1a53ca6e06f5f..69cea3c4c7b53 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.h @@ -18,6 +18,7 @@ #include "llvm/SandboxIR/Pass.h" #include "llvm/SandboxIR/PassManager.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/Legality.h" namespace llvm::sandboxir { @@ -26,6 +27,8 @@ class BottomUpVec final : public FunctionPass { bool Change = false; std::unique_ptr Legality; DenseSet DeadInstrCandidates; + /// Maps scalars to vectors. + InstrMaps IMaps; /// Creates and returns a vector instruction that replaces the instructions in /// \p Bndl. \p Operands are the already vectorized operands. diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h index 3ec0ac0f78a74..52891c3f7535c 100644 --- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h +++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Scheduler.h @@ -109,8 +109,16 @@ class SchedBundle { /// The list scheduler. class Scheduler { + /// This is a list-scheduler and this is the list containing the instructions + /// that are ready, meaning that all their dependency successors have already + /// been scheduled. ReadyListContainer ReadyList; + /// The dependency graph is used by the scheduler to determine the legal + /// ordering of instructions. DependencyGraph DAG; + /// This is the top of the schedule, i.e. 
the location where the scheduler + /// is about to place the scheduled instructions. It gets updated as we + /// schedule. std::optional ScheduleTopItOpt; // TODO: This is wasting memory in exchange for fast removal using a raw ptr. DenseMap> Bndls; @@ -145,7 +153,11 @@ class Scheduler { public: Scheduler(AAResults &AA, Context &Ctx) : DAG(AA, Ctx) {} ~Scheduler() {} - + /// Tries to build a schedule that includes all of \p Instrs scheduled at the + /// same scheduling cycle. This essentially checks that there are no + /// dependencies among \p Instrs. This function may involve scheduling + /// intermediate instructions or canceling and re-scheduling if needed. + /// \Returns true on success, false otherwise. bool trySchedule(ArrayRef Instrs); /// Clear the scheduler's state, including the DAG. void clear() { diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index ecdc841a38d11..3e87ea0e90fd5 100644 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -1689,6 +1689,28 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) { case Intrinsic::x86_avx512_cvttsd2usi64: return !Call->isStrictFP(); + // NVVM FMax intrinsics + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + // NVVM FMin intrinsics + case Intrinsic::nvvm_fmin_d: + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmin_nan_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: + // NVVM 
float/double to int32/uint32 conversion intrinsics case Intrinsic::nvvm_f2i_rm: case Intrinsic::nvvm_f2i_rn: @@ -2431,9 +2453,10 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, if (U.isNaN()) return ConstantInt::get(Ty, 0); - APFloat::roundingMode RMode = nvvm::IntrinsicGetRoundingMode(IntrinsicID); - bool IsFTZ = nvvm::IntrinsicShouldFTZ(IntrinsicID); - bool IsSigned = nvvm::IntrinsicConvertsToSignedInteger(IntrinsicID); + APFloat::roundingMode RMode = + nvvm::GetFPToIntegerRoundingMode(IntrinsicID); + bool IsFTZ = nvvm::FPToIntegerIntrinsicShouldFTZ(IntrinsicID); + bool IsSigned = nvvm::FPToIntegerIntrinsicResultIsSigned(IntrinsicID); APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned); auto FloatToRound = IsFTZ ? FTZPreserveSign(U) : U; @@ -2892,12 +2915,49 @@ static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty, case Intrinsic::minnum: case Intrinsic::maximum: case Intrinsic::minimum: + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmin_d: // If one argument is undef, return the other argument. if (IsOp0Undef) return Operands[1]; if (IsOp1Undef) return Operands[0]; break; + + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmin_nan_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: + // If one arg is undef, the other arg can be returned only if it is + // constant, as we may need to flush it to sign-preserving zero or + // canonicalize the NaN. 
+ if (!IsOp0Undef && !IsOp1Undef) + break; + if (auto *Op = dyn_cast(Operands[IsOp0Undef ? 1 : 0])) { + if (Op->isNaN()) { + APInt NVCanonicalNaN(32, 0x7fffffff); + return ConstantFP::get( + Ty, APFloat(Ty->getFltSemantics(), NVCanonicalNaN)); + } + if (nvvm::FMinFMaxShouldFTZ(IntrinsicID)) + return ConstantFP::get(Ty, FTZPreserveSign(Op->getValueAPF())); + else + return Op; + } + break; } } @@ -2955,6 +3015,79 @@ static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty, return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V)); case Intrinsic::maximum: return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V)); + + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + + case Intrinsic::nvvm_fmin_d: + case Intrinsic::nvvm_fmin_f: + case Intrinsic::nvvm_fmin_ftz_f: + case Intrinsic::nvvm_fmin_ftz_nan_f: + case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmin_nan_f: + case Intrinsic::nvvm_fmin_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmin_xorsign_abs_f: { + + bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d || + IntrinsicID == Intrinsic::nvvm_fmin_d); + bool IsFTZ = nvvm::FMinFMaxShouldFTZ(IntrinsicID); + bool IsNaNPropagating = nvvm::FMinFMaxPropagatesNaNs(IntrinsicID); + bool IsXorSignAbs = nvvm::FMinFMaxIsXorSignAbs(IntrinsicID); + + APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V; + APFloat B = IsFTZ ? 
FTZPreserveSign(Op2V) : Op2V; + + bool XorSign = false; + if (IsXorSignAbs) { + XorSign = A.isNegative() ^ B.isNegative(); + A = abs(A); + B = abs(B); + } + + bool IsFMax = false; + switch (IntrinsicID) { + case Intrinsic::nvvm_fmax_d: + case Intrinsic::nvvm_fmax_f: + case Intrinsic::nvvm_fmax_ftz_f: + case Intrinsic::nvvm_fmax_ftz_nan_f: + case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f: + case Intrinsic::nvvm_fmax_nan_f: + case Intrinsic::nvvm_fmax_nan_xorsign_abs_f: + case Intrinsic::nvvm_fmax_xorsign_abs_f: + IsFMax = true; + break; + } + APFloat Res = IsFMax ? maximum(A, B) : minimum(A, B); + + if (ShouldCanonicalizeNaNs) { + APFloat NVCanonicalNaN(Res.getSemantics(), APInt(32, 0x7fffffff)); + if (A.isNaN() && B.isNaN()) + return ConstantFP::get(Ty, NVCanonicalNaN); + else if (IsNaNPropagating && (A.isNaN() || B.isNaN())) + return ConstantFP::get(Ty, NVCanonicalNaN); + } + + if (A.isNaN() && B.isNaN()) + return Operands[1]; + else if (A.isNaN()) + Res = B; + else if (B.isNaN()) + Res = A; + + if (IsXorSignAbs && XorSign != Res.isNegative()) + Res.changeSign(); + + return ConstantFP::get(Ty->getContext(), Res); + } } if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) diff --git a/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp b/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp index fba5859b74cef..9555e2c8dd5dd 100644 --- a/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp +++ b/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp @@ -115,8 +115,9 @@ void InstructionPrecedenceTracking::insertInstructionTo(const Instruction *Inst, void InstructionPrecedenceTracking::removeInstruction(const Instruction *Inst) { auto *BB = Inst->getParent(); assert(BB && "must be called before instruction is actually removed"); - if (FirstSpecialInsts.count(BB) && FirstSpecialInsts[BB] == Inst) - FirstSpecialInsts.erase(BB); + auto It = FirstSpecialInsts.find(BB); + if (It != FirstSpecialInsts.end() && It->second == 
Inst) + FirstSpecialInsts.erase(It); } void InstructionPrecedenceTracking::removeUsersOf(const Instruction *Inst) { diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 1853b1c67159e..6e2f0ebde9bb6 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -6192,9 +6192,9 @@ Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { if (isa(V)) return UndefInt8; - // Return Undef for zero-sized type. + // Return poison for zero-sized type. if (DL.getTypeStoreSize(V->getType()).isZero()) - return UndefInt8; + return PoisonValue::get(Type::getInt8Ty(Ctx)); Constant *C = dyn_cast(V); if (!C) { diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 55c1d12a6fa8f..b2a4721f37b26 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -561,11 +561,11 @@ bool AsmPrinter::doInitialization(Module &M) { if (MAI->doesSupportDebugInformation()) { bool EmitCodeView = M.getCodeViewFlag(); if (EmitCodeView && TM.getTargetTriple().isOSWindows()) - DebugHandlers.push_back(std::make_unique(this)); + Handlers.push_back(std::make_unique(this)); if (!EmitCodeView || M.getDwarfVersion()) { if (hasDebugInfo()) { DD = new DwarfDebug(this); - DebugHandlers.push_back(std::unique_ptr(DD)); + Handlers.push_back(std::unique_ptr(DD)); } } } @@ -632,12 +632,12 @@ bool AsmPrinter::doInitialization(Module &M) { // Emit tables for any value of cfguard flag (i.e. cfguard=1 or cfguard=2). 
if (mdconst::extract_or_null(M.getModuleFlag("cfguard"))) - Handlers.push_back(std::make_unique(this)); + EHHandlers.push_back(std::make_unique(this)); - for (auto &Handler : DebugHandlers) - Handler->beginModule(&M); for (auto &Handler : Handlers) Handler->beginModule(&M); + for (auto &Handler : EHHandlers) + Handler->beginModule(&M); return false; } @@ -784,7 +784,7 @@ void AsmPrinter::emitGlobalVariable(const GlobalVariable *GV) { // sections and expected to be contiguous (e.g. ObjC metadata). const Align Alignment = getGVAlignment(GV, DL); - for (auto &Handler : DebugHandlers) + for (auto &Handler : Handlers) Handler->setSymbolSize(GVSym, Size); // Handle common symbols @@ -1054,14 +1054,14 @@ void AsmPrinter::emitFunctionHeader() { } // Emit pre-function debug and/or EH information. - for (auto &Handler : DebugHandlers) { + for (auto &Handler : Handlers) { Handler->beginFunction(MF); Handler->beginBasicBlockSection(MF->front()); } - for (auto &Handler : Handlers) + for (auto &Handler : EHHandlers) { Handler->beginFunction(MF); - for (auto &Handler : Handlers) Handler->beginBasicBlockSection(MF->front()); + } // Emit the prologue data. if (F.hasPrologueData()) @@ -1836,7 +1836,7 @@ void AsmPrinter::emitFunctionBody() { if (MDNode *MD = MI.getPCSections()) emitPCSectionsLabel(*MF, *MD); - for (auto &Handler : DebugHandlers) + for (auto &Handler : Handlers) Handler->beginInstruction(&MI); if (isVerbose()) @@ -1952,7 +1952,7 @@ void AsmPrinter::emitFunctionBody() { if (MCSymbol *S = MI.getPostInstrSymbol()) OutStreamer->emitLabel(S); - for (auto &Handler : DebugHandlers) + for (auto &Handler : Handlers) Handler->endInstruction(); } @@ -2089,13 +2089,15 @@ void AsmPrinter::emitFunctionBody() { // Call endBasicBlockSection on the last block now, if it wasn't already // called. 
if (!MF->back().isEndSection()) { - for (auto &Handler : DebugHandlers) - Handler->endBasicBlockSection(MF->back()); for (auto &Handler : Handlers) Handler->endBasicBlockSection(MF->back()); + for (auto &Handler : EHHandlers) + Handler->endBasicBlockSection(MF->back()); } for (auto &Handler : Handlers) Handler->markFunctionEnd(); + for (auto &Handler : EHHandlers) + Handler->markFunctionEnd(); // Update the end label of the entry block's section. MBBSectionRanges[MF->front().getSectionID()].EndLabel = CurrentFnEnd; @@ -2103,10 +2105,10 @@ void AsmPrinter::emitFunctionBody() { emitJumpTableInfo(); // Emit post-function debug and/or EH information. - for (auto &Handler : DebugHandlers) - Handler->endFunction(MF); for (auto &Handler : Handlers) Handler->endFunction(MF); + for (auto &Handler : EHHandlers) + Handler->endFunction(MF); // Emit section containing BB address offsets and their metadata, when // BB labels are requested for this function. Skip empty functions. @@ -2583,17 +2585,16 @@ bool AsmPrinter::doFinalization(Module &M) { emitGlobalIFunc(M, IFunc); // Finalize debug and EH information. - for (auto &Handler : DebugHandlers) - Handler->endModule(); for (auto &Handler : Handlers) Handler->endModule(); + for (auto &Handler : EHHandlers) + Handler->endModule(); // This deletes all the ephemeral handlers that AsmPrinter added, while // keeping all the user-added handlers alive until the AsmPrinter is // destroyed. + EHHandlers.clear(); Handlers.erase(Handlers.begin() + NumUserHandlers, Handlers.end()); - DebugHandlers.erase(DebugHandlers.begin() + NumUserDebugHandlers, - DebugHandlers.end()); DD = nullptr; // If the target wants to know about weak references, print them all. 
@@ -4196,6 +4197,10 @@ void AsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) { Handler->endFunclet(); Handler->beginFunclet(MBB); } + for (auto &Handler : EHHandlers) { + Handler->endFunclet(); + Handler->beginFunclet(MBB); + } } // Switch to a new section if this basic block must begin a section. The @@ -4208,7 +4213,7 @@ void AsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) { CurrentSectionBeginSym = MBB.getSymbol(); } - for (auto &Handler : DebugHandlers) + for (auto &Handler : Handlers) Handler->beginCodeAlignment(MBB); // Emit an alignment directive for this block, if needed. @@ -4268,10 +4273,10 @@ void AsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) { // if it begins a section (Entry block call is handled separately, next to // beginFunction). if (MBB.isBeginSection() && !MBB.isEntryBlock()) { - for (auto &Handler : DebugHandlers) - Handler->beginBasicBlockSection(MBB); for (auto &Handler : Handlers) Handler->beginBasicBlockSection(MBB); + for (auto &Handler : EHHandlers) + Handler->beginBasicBlockSection(MBB); } } @@ -4279,10 +4284,10 @@ void AsmPrinter::emitBasicBlockEnd(const MachineBasicBlock &MBB) { // Check if CFI information needs to be updated for this MBB with basic block // sections. if (MBB.isEndSection()) { - for (auto &Handler : DebugHandlers) - Handler->endBasicBlockSection(MBB); for (auto &Handler : Handlers) Handler->endBasicBlockSection(MBB); + for (auto &Handler : EHHandlers) + Handler->endBasicBlockSection(MBB); } } @@ -4415,12 +4420,7 @@ void AsmPrinter::addAsmPrinterHandler( NumUserHandlers++; } -void AsmPrinter::addDebugHandler(std::unique_ptr Handler) { - DebugHandlers.insert(DebugHandlers.begin(), std::move(Handler)); - NumUserDebugHandlers++; -} - -/// Pin vtable to this file. +/// Pin vtables to this file. 
AsmPrinterHandler::~AsmPrinterHandler() = default; void AsmPrinterHandler::markFunctionEnd() {} diff --git a/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h b/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h index 35461e53fbf19..f11b552387501 100644 --- a/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h +++ b/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h @@ -14,7 +14,6 @@ #define LLVM_LIB_CODEGEN_ASMPRINTER_PSEUDOPROBEPRINTER_H #include "llvm/ADT/DenseMap.h" -#include "llvm/CodeGen/AsmPrinterHandler.h" namespace llvm { diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index a3392b7110989..7106e53bd5516 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -7983,8 +7983,8 @@ class VectorPromoteHelper { /// \p UseSplat defines whether or not \p Val should be replicated /// across the whole vector. /// In other words, if UseSplat == true, we generate , - /// otherwise we generate a vector with as many undef as possible: - /// where \p Val is only + /// otherwise we generate a vector with as many poison as possible: + /// where \p Val is only /// used at the index of the extract. 
Value *getConstantVector(Constant *Val, bool UseSplat) const { unsigned ExtractIdx = std::numeric_limits::max(); @@ -8004,12 +8004,12 @@ class VectorPromoteHelper { if (!EC.isScalable()) { SmallVector ConstVec; - UndefValue *UndefVal = UndefValue::get(Val->getType()); + PoisonValue *PoisonVal = PoisonValue::get(Val->getType()); for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) { if (Idx == ExtractIdx) ConstVec.push_back(Val); else - ConstVec.push_back(UndefVal); + ConstVec.push_back(PoisonVal); } return ConstantVector::get(ConstVec); } else diff --git a/llvm/lib/CodeGen/MachineCSE.cpp b/llvm/lib/CodeGen/MachineCSE.cpp index 0a547050e91a8..728fd2f5f7cd4 100644 --- a/llvm/lib/CodeGen/MachineCSE.cpp +++ b/llvm/lib/CodeGen/MachineCSE.cpp @@ -832,12 +832,11 @@ bool MachineCSEImpl::ProcessBlockPRE(MachineDominatorTree *DT, if (!isPRECandidate(&MI, PhysRefs)) continue; - if (!PREMap.count(&MI)) { - PREMap[&MI] = MBB; + auto [It, Inserted] = PREMap.try_emplace(&MI, MBB); + if (Inserted) continue; - } - auto MBB1 = PREMap[&MI]; + auto *MBB1 = It->second; assert( !DT->properlyDominates(MBB, MBB1) && "MBB cannot properly dominate MBB1 while DFS through dominators tree!"); diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp index 49ce4b660c3ae..0afd73d8ecdcc 100644 --- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp +++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp @@ -117,7 +117,32 @@ class CopyTracker { DenseMap Copies; + // Memoised sets of register units which are preserved by each register mask, + // needed to efficiently remove copies which are invalidated by call + // instructions. + DenseMap RegMaskToPreservedRegUnits; + public: + /// Get the set of register units which are preserved by RegMaskOp. 
+ BitVector &getPreservedRegUnits(const MachineOperand &RegMaskOp, + const TargetRegisterInfo &TRI) { + const uint32_t *RegMask = RegMaskOp.getRegMask(); + auto Existing = RegMaskToPreservedRegUnits.find(RegMask); + if (Existing != RegMaskToPreservedRegUnits.end()) { + return Existing->second; + } else { + BitVector &PreservedRegUnits = RegMaskToPreservedRegUnits[RegMask]; + + PreservedRegUnits.resize(TRI.getNumRegUnits()); + for (unsigned SafeReg = 0, E = TRI.getNumRegs(); SafeReg < E; ++SafeReg) + if (!RegMaskOp.clobbersPhysReg(SafeReg)) + for (auto SafeUnit : TRI.regunits(SafeReg)) + PreservedRegUnits.set(SafeUnit); + + return PreservedRegUnits; + } + } + /// Mark all of the given registers and their subregisters as unavailable for /// copying. void markRegsUnavailable(ArrayRef Regs, @@ -164,64 +189,70 @@ class CopyTracker { Copies.erase(Unit); } - /// Clobber a single register, removing it from the tracker's copy maps. - void clobberRegister(MCRegister Reg, const TargetRegisterInfo &TRI, - const TargetInstrInfo &TII, bool UseCopyInstr) { - for (MCRegUnit Unit : TRI.regunits(Reg)) { - auto I = Copies.find(Unit); - if (I != Copies.end()) { - // When we clobber the source of a copy, we need to clobber everything - // it defined. - markRegsUnavailable(I->second.DefRegs, TRI); - // When we clobber the destination of a copy, we need to clobber the - // whole register it defined. - if (MachineInstr *MI = I->second.MI) { - std::optional CopyOperands = - isCopyInstr(*MI, TII, UseCopyInstr); - - MCRegister Def = CopyOperands->Destination->getReg().asMCReg(); - MCRegister Src = CopyOperands->Source->getReg().asMCReg(); - - markRegsUnavailable(Def, TRI); - - // Since we clobber the destination of a copy, the semantic of Src's - // "DefRegs" to contain Def is no longer effectual. We will also need - // to remove the record from the copy maps that indicates Src defined - // Def. 
Failing to do so might cause the target to miss some - // opportunities to further eliminate redundant copy instructions. - // Consider the following sequence during the - // ForwardCopyPropagateBlock procedure: - // L1: r0 = COPY r9 <- TrackMI - // L2: r0 = COPY r8 <- TrackMI (Remove r9 defined r0 from tracker) - // L3: use r0 <- Remove L2 from MaybeDeadCopies - // L4: early-clobber r9 <- Clobber r9 (L2 is still valid in tracker) - // L5: r0 = COPY r8 <- Remove NopCopy - for (MCRegUnit SrcUnit : TRI.regunits(Src)) { - auto SrcCopy = Copies.find(SrcUnit); - if (SrcCopy != Copies.end() && SrcCopy->second.LastSeenUseInCopy) { - // If SrcCopy defines multiple values, we only need - // to erase the record for Def in DefRegs. - for (auto itr = SrcCopy->second.DefRegs.begin(); - itr != SrcCopy->second.DefRegs.end(); itr++) { - if (*itr == Def) { - SrcCopy->second.DefRegs.erase(itr); - // If DefReg becomes empty after removal, we can remove the - // SrcCopy from the tracker's copy maps. We only remove those - // entries solely record the Def is defined by Src. If an - // entry also contains the definition record of other Def' - // registers, it cannot be cleared. - if (SrcCopy->second.DefRegs.empty() && !SrcCopy->second.MI) { - Copies.erase(SrcCopy); - } - break; + /// Clobber a single register unit, removing it from the tracker's copy maps. + void clobberRegUnit(MCRegUnit Unit, const TargetRegisterInfo &TRI, + const TargetInstrInfo &TII, bool UseCopyInstr) { + auto I = Copies.find(Unit); + if (I != Copies.end()) { + // When we clobber the source of a copy, we need to clobber everything + // it defined. + markRegsUnavailable(I->second.DefRegs, TRI); + // When we clobber the destination of a copy, we need to clobber the + // whole register it defined. 
+ if (MachineInstr *MI = I->second.MI) { + std::optional CopyOperands = + isCopyInstr(*MI, TII, UseCopyInstr); + + MCRegister Def = CopyOperands->Destination->getReg().asMCReg(); + MCRegister Src = CopyOperands->Source->getReg().asMCReg(); + + markRegsUnavailable(Def, TRI); + + // Since we clobber the destination of a copy, the semantic of Src's + // "DefRegs" to contain Def is no longer effectual. We will also need + // to remove the record from the copy maps that indicates Src defined + // Def. Failing to do so might cause the target to miss some + // opportunities to further eliminate redundant copy instructions. + // Consider the following sequence during the + // ForwardCopyPropagateBlock procedure: + // L1: r0 = COPY r9 <- TrackMI + // L2: r0 = COPY r8 <- TrackMI (Remove r9 defined r0 from tracker) + // L3: use r0 <- Remove L2 from MaybeDeadCopies + // L4: early-clobber r9 <- Clobber r9 (L2 is still valid in tracker) + // L5: r0 = COPY r8 <- Remove NopCopy + for (MCRegUnit SrcUnit : TRI.regunits(Src)) { + auto SrcCopy = Copies.find(SrcUnit); + if (SrcCopy != Copies.end() && SrcCopy->second.LastSeenUseInCopy) { + // If SrcCopy defines multiple values, we only need + // to erase the record for Def in DefRegs. + for (auto itr = SrcCopy->second.DefRegs.begin(); + itr != SrcCopy->second.DefRegs.end(); itr++) { + if (*itr == Def) { + SrcCopy->second.DefRegs.erase(itr); + // If DefReg becomes empty after removal, we can remove the + // SrcCopy from the tracker's copy maps. We only remove those + // entries solely record the Def is defined by Src. If an + // entry also contains the definition record of other Def' + // registers, it cannot be cleared. + if (SrcCopy->second.DefRegs.empty() && !SrcCopy->second.MI) { + Copies.erase(SrcCopy); } + break; } } } } - // Now we can erase the copy. - Copies.erase(I); } + // Now we can erase the copy. + Copies.erase(I); + } + } + + /// Clobber a single register, removing it from the tracker's copy maps. 
+ void clobberRegister(MCRegister Reg, const TargetRegisterInfo &TRI, + const TargetInstrInfo &TII, bool UseCopyInstr) { + for (MCRegUnit Unit : TRI.regunits(Reg)) { + clobberRegUnit(Unit, TRI, TII, UseCopyInstr); } } @@ -960,6 +991,9 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) { // a large set of registers. Treat clobbered registers the same way as // defined registers. if (RegMask) { + BitVector &PreservedRegUnits = + Tracker.getPreservedRegUnits(*RegMask, *TRI); + // Erase any MaybeDeadCopies whose destination register is clobbered. for (SmallSetVector::iterator DI = MaybeDeadCopies.begin(); @@ -978,9 +1012,11 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) { LLVM_DEBUG(dbgs() << "MCP: Removing copy due to regmask clobbering: "; MaybeDead->dump()); - // Make sure we invalidate any entries in the copy maps before erasing - // the instruction. - Tracker.clobberRegister(Reg, *TRI, *TII, UseCopyInstr); + // Invalidate all entries in the copy map which are not preserved by + // this register mask. + for (unsigned RegUnit : TRI->regunits(Reg)) + if (!PreservedRegUnits.test(RegUnit)) + Tracker.clobberRegUnit(RegUnit, *TRI, *TII, UseCopyInstr); // erase() will return the next valid iterator pointing to the next // element after the erased one. diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 6805e0cb23ace..de7fb21f5903e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -23807,6 +23807,13 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) { SmallVector VecIn; VecIn.push_back(SDValue()); + // If we have a single extract_element with a constant index, track the index + // value. + unsigned OneConstExtractIndex = ~0u; + + // Count the number of extract_vector_elt sources (i.e. 
non-constant or undef) + unsigned NumExtracts = 0; + for (unsigned i = 0; i != NumElems; ++i) { SDValue Op = N->getOperand(i); @@ -23824,16 +23831,18 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) { // Not an undef or zero. If the input is something other than an // EXTRACT_VECTOR_ELT with an in-range constant index, bail out. - if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || - !isa(Op.getOperand(1))) + if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT) return SDValue(); - SDValue ExtractedFromVec = Op.getOperand(0); + SDValue ExtractedFromVec = Op.getOperand(0); if (ExtractedFromVec.getValueType().isScalableVector()) return SDValue(); + auto *ExtractIdx = dyn_cast(Op.getOperand(1)); + if (!ExtractIdx) + return SDValue(); - const APInt &ExtractIdx = Op.getConstantOperandAPInt(1); - if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements())) + if (ExtractIdx->getAsAPIntVal().uge( + ExtractedFromVec.getValueType().getVectorNumElements())) return SDValue(); // All inputs must have the same element type as the output. @@ -23841,6 +23850,9 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) { ExtractedFromVec.getValueType().getVectorElementType()) return SDValue(); + OneConstExtractIndex = ExtractIdx->getZExtValue(); + ++NumExtracts; + // Have we seen this input vector before? // The vectors are expected to be tiny (usually 1 or 2 elements), so using // a map back from SDValues to numbers isn't worth it. @@ -23863,6 +23875,20 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) { // VecIn accordingly. bool DidSplitVec = false; if (VecIn.size() == 2) { + // If we only found a single constant indexed extract_vector_elt feeding the + // build_vector, do not produce a more complicated shuffle if the extract is + // cheap with other constant/undef elements. Skip broadcast patterns with + // multiple uses in the build_vector. 
+ + // TODO: This should be more aggressive about skipping the shuffle + // formation, particularly if VecIn[1].hasOneUse(), and regardless of the + // index. + if (NumExtracts == 1 && + TLI.isOperationLegalOrCustom(ISD::EXTRACT_VECTOR_ELT, VT) && + TLI.isTypeLegal(VT.getVectorElementType()) && + TLI.isExtractVecEltCheap(VT, OneConstExtractIndex)) + return SDValue(); + unsigned MaxIndex = 0; unsigned NearestPow2 = 0; SDValue Vec = VecIn.back(); @@ -27526,23 +27552,27 @@ static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG, if ((Opcode == ISD::MULHS || Opcode == ISD::MULHU) && !TLI.isTypeLegal(EltVT)) return SDValue(); + if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode()) { + // All but one element should have an undef input, which will fold to a + // constant or undef. Avoid splatting which would over-define potentially + // undefined elements. + + // bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) --> + // build_vec ..undef, (bo X, Y), undef... + SmallVector EltsX, EltsY, EltsResult; + DAG.ExtractVectorElements(Src0, EltsX); + DAG.ExtractVectorElements(Src1, EltsY); + + for (auto [X, Y] : zip(EltsX, EltsY)) + EltsResult.push_back(DAG.getNode(Opcode, DL, EltVT, X, Y, N->getFlags())); + return DAG.getBuildVector(VT, DL, EltsResult); + } + SDValue IndexC = DAG.getVectorIdxConstant(Index0, DL); SDValue X = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src0, IndexC); SDValue Y = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src1, IndexC); SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, X, Y, N->getFlags()); - // If all lanes but 1 are undefined, no need to splat the scalar result. - // TODO: Keep track of undefs and use that info in the general case. 
- if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode() && - count_if(N0->ops(), [](SDValue V) { return !V.isUndef(); }) == 1 && - count_if(N1->ops(), [](SDValue V) { return !V.isUndef(); }) == 1) { - // bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) --> - // build_vec ..undef, (bo X, Y), undef... - SmallVector Ops(VT.getVectorNumElements(), DAG.getUNDEF(EltVT)); - Ops[Index0] = ScalarBO; - return DAG.getBuildVector(VT, DL, Ops); - } - // bo (splat X, Index), (splat Y, Index) --> splat (bo X, Y), Index return DAG.getSplat(VT, DL, ScalarBO); } diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp index bccda8e90a1fb..2c8790273f8b2 100644 --- a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp @@ -88,8 +88,8 @@ const uint8_t TLSInfoTableManager_ELF_x86_64::TLSInfoEntryContent[16] = { Error buildTables_ELF_x86_64(LinkGraph &G) { LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n"); - x86_64::GOTTableManager GOT; - x86_64::PLTTableManager PLT(GOT); + x86_64::GOTTableManager GOT(G); + x86_64::PLTTableManager PLT(G, GOT); TLSInfoTableManager_ELF_x86_64 TLSInfo; visitExistingEdges(G, GOT, PLT, TLSInfo); return Error::success(); diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp index 113b1953e36a6..9547266dc9789 100644 --- a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp @@ -459,8 +459,8 @@ class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder { }; Error buildGOTAndStubs_MachO_x86_64(LinkGraph &G) { - x86_64::GOTTableManager GOT; - x86_64::PLTTableManager PLT(GOT); + x86_64::GOTTableManager GOT(G); + x86_64::PLTTableManager PLT(G, GOT); visitExistingEdges(G, GOT, PLT); return Error::success(); } diff --git a/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp 
b/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp index a84e0001f115a..6ac991651f082 100644 --- a/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp @@ -89,6 +89,26 @@ const char ReentryTrampolineContent[5] = { static_cast(0xe8), 0x00, 0x00, 0x00, 0x00 }; +void GOTTableManager::registerExistingEntries() { + for (auto *EntrySym : GOTSection->symbols()) { + assert(EntrySym->getBlock().edges_size() == 1 && + "GOT block edge count != 1"); + registerPreExistingEntry(EntrySym->getBlock().edges().begin()->getTarget(), + *EntrySym); + } +} + +void PLTTableManager::registerExistingEntries() { + for (auto *EntrySym : StubsSection->symbols()) { + assert(EntrySym->getBlock().edges_size() == 1 && + "PLT block edge count != 1"); + auto &GOTSym = EntrySym->getBlock().edges().begin()->getTarget(); + assert(GOTSym.getBlock().edges_size() == 1 && "GOT block edge count != 1"); + registerPreExistingEntry(GOTSym.getBlock().edges().begin()->getTarget(), + *EntrySym); + } +} + Error optimizeGOTAndStubAccesses(LinkGraph &G) { LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n"); diff --git a/llvm/lib/FileCheck/FileCheck.cpp b/llvm/lib/FileCheck/FileCheck.cpp index b6c28385ebb09..a6df9672f8100 100644 --- a/llvm/lib/FileCheck/FileCheck.cpp +++ b/llvm/lib/FileCheck/FileCheck.cpp @@ -1766,8 +1766,7 @@ void FileCheckPatternContext::createLineVariable() { } FileCheck::FileCheck(FileCheckRequest Req) - : Req(Req), PatternContext(std::make_unique()), - CheckStrings(std::make_unique>()) {} + : Req(Req), PatternContext(std::make_unique()) {} FileCheck::~FileCheck() = default; @@ -1916,7 +1915,7 @@ bool FileCheck::readCheckFile( // Verify that CHECK-NEXT/SAME/EMPTY lines have at least one CHECK line before them. if ((CheckTy == Check::CheckNext || CheckTy == Check::CheckSame || CheckTy == Check::CheckEmpty) && - CheckStrings->empty()) { + CheckStrings.empty()) { StringRef Type = CheckTy == Check::CheckNext ? "NEXT" : CheckTy == Check::CheckEmpty ? 
"EMPTY" : "SAME"; @@ -1934,8 +1933,8 @@ bool FileCheck::readCheckFile( } // Okay, add the string we captured to the output vector and move on. - CheckStrings->emplace_back(P, UsedPrefix, PatternLoc); - std::swap(DagNotMatches, CheckStrings->back().DagNotStrings); + CheckStrings.emplace_back(P, UsedPrefix, PatternLoc); + std::swap(DagNotMatches, CheckStrings.back().DagNotStrings); DagNotMatches = ImplicitNegativeChecks; } @@ -1962,10 +1961,10 @@ bool FileCheck::readCheckFile( // Add an EOF pattern for any trailing --implicit-check-not/CHECK-DAG/-NOTs, // and use the first prefix as a filler for the error message. if (!DagNotMatches.empty()) { - CheckStrings->emplace_back( + CheckStrings.emplace_back( Pattern(Check::CheckEOF, PatternContext.get(), LineNumber + 1), *Req.CheckPrefixes.begin(), SMLoc::getFromPointer(Buffer.data())); - std::swap(DagNotMatches, CheckStrings->back().DagNotStrings); + std::swap(DagNotMatches, CheckStrings.back().DagNotStrings); } return false; @@ -2676,13 +2675,13 @@ bool FileCheck::checkInput(SourceMgr &SM, StringRef Buffer, std::vector *Diags) { bool ChecksFailed = false; - unsigned i = 0, j = 0, e = CheckStrings->size(); + unsigned i = 0, j = 0, e = CheckStrings.size(); while (true) { StringRef CheckRegion; if (j == e) { CheckRegion = Buffer; } else { - const FileCheckString &CheckLabelStr = (*CheckStrings)[j]; + const FileCheckString &CheckLabelStr = CheckStrings[j]; if (CheckLabelStr.Pat.getCheckTy() != Check::CheckLabel) { ++j; continue; @@ -2708,7 +2707,7 @@ bool FileCheck::checkInput(SourceMgr &SM, StringRef Buffer, PatternContext->clearLocalVars(); for (; i != j; ++i) { - const FileCheckString &CheckStr = (*CheckStrings)[i]; + const FileCheckString &CheckStr = CheckStrings[i]; // Check each string within the scanned region, including a second check // of any final CHECK-LABEL (to verify CHECK-NOT and CHECK-DAG) diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index 147cd84125c8d..9eaae62a6390b 100644 --- 
a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -458,9 +458,8 @@ void Instruction::dropPoisonGeneratingFlags() { } bool Instruction::hasPoisonGeneratingMetadata() const { - return hasMetadata(LLVMContext::MD_range) || - hasMetadata(LLVMContext::MD_nonnull) || - hasMetadata(LLVMContext::MD_align); + return any_of(Metadata::PoisonGeneratingIDs, + [this](unsigned ID) { return hasMetadata(ID); }); } bool Instruction::hasNonDebugLocLoopMetadata() const { @@ -487,9 +486,8 @@ bool Instruction::hasNonDebugLocLoopMetadata() const { } void Instruction::dropPoisonGeneratingMetadata() { - eraseMetadata(LLVMContext::MD_range); - eraseMetadata(LLVMContext::MD_nonnull); - eraseMetadata(LLVMContext::MD_align); + for (unsigned ID : Metadata::PoisonGeneratingIDs) + eraseMetadata(ID); } bool Instruction::hasPoisonGeneratingReturnAttributes() const { diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index b2ee75811fbb7..65b63955b6f6d 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -36,7 +36,7 @@ using namespace llvm; -static cl::opt UseDerefAtPointSemantics( +static cl::opt UseDerefAtPointSemantics( "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false), cl::desc("Deref attributes and metadata infer facts at definition only")); diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp index 83e6cf76dd746..7e94d01a97153 100644 --- a/llvm/lib/ObjectYAML/ELFYAML.cpp +++ b/llvm/lib/ObjectYAML/ELFYAML.cpp @@ -1588,7 +1588,7 @@ static bool isInteger(StringRef Val) { void MappingTraits>::mapping( IO &IO, std::unique_ptr &Section) { - ELFYAML::ELF_SHT Type = ELF::ET_NONE; + ELFYAML::ELF_SHT Type; StringRef TypeStr; if (IO.outputting()) { if (auto *S = dyn_cast(Section.get())) diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp index 6d6678e9e4afe..c39585681911a 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp +++ 
b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp @@ -618,7 +618,7 @@ unsigned CounterMappingContext::getMaxCounterID(const Counter &C) const { void FunctionRecordIterator::skipOtherFiles() { while (Current != Records.end() && !Filename.empty() && Filename != Current->Filenames[0]) - ++Current; + advanceOne(); if (Current == Records.end()) *this = FunctionRecordIterator(); } diff --git a/llvm/lib/Support/ErrorHandling.cpp b/llvm/lib/Support/ErrorHandling.cpp index 8659f9492d5a3..afe3b37cc3431 100644 --- a/llvm/lib/Support/ErrorHandling.cpp +++ b/llvm/lib/Support/ErrorHandling.cpp @@ -33,7 +33,7 @@ #if defined(HAVE_UNISTD_H) # include #endif -#if defined(_MSC_VER) +#if defined(_WIN32) # include # include #endif diff --git a/llvm/lib/Support/Unix/Process.inc b/llvm/lib/Support/Unix/Process.inc index 2c55059e055bc..550b0de2e0455 100644 --- a/llvm/lib/Support/Unix/Process.inc +++ b/llvm/lib/Support/Unix/Process.inc @@ -18,16 +18,10 @@ #include #include #include -#ifdef HAVE_SYS_TIME_H #include -#endif -#ifdef HAVE_SYS_RESOURCE_H #include -#endif #include -#if HAVE_SIGNAL_H #include -#endif #if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2) #include #endif @@ -37,12 +31,6 @@ #ifdef HAVE_MALLOC_MALLOC_H #include #endif -#ifdef HAVE_SYS_IOCTL_H -#include -#endif -#ifdef HAVE_TERMIOS_H -#include -#endif //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only generic UNIX code that @@ -138,7 +126,6 @@ void Process::GetTimeUsage(TimePoint<> &elapsed, // their operation. To prevent the disk from filling up, this function // does what's necessary to prevent their generation. 
void Process::PreventCoreFiles() { -#if HAVE_SETRLIMIT struct rlimit rlim; getrlimit(RLIMIT_CORE, &rlim); #ifdef __linux__ @@ -161,7 +148,6 @@ void Process::PreventCoreFiles() { rlim.rlim_cur = 0; #endif setrlimit(RLIMIT_CORE, &rlim); -#endif #if defined(HAVE_MACH_MACH_H) && !defined(__GNU__) // Disable crash reporting on Mac OS X 10.0-10.4 diff --git a/llvm/lib/Support/Unix/Program.inc b/llvm/lib/Support/Unix/Program.inc index 74312ed6238ae..0708df1eed0a3 100644 --- a/llvm/lib/Support/Unix/Program.inc +++ b/llvm/lib/Support/Unix/Program.inc @@ -29,12 +29,8 @@ #include "llvm/Support/SystemZ/zOSSupport.h" #include "llvm/Support/raw_ostream.h" #include -#if HAVE_SYS_RESOURCE_H #include -#endif -#if HAVE_SIGNAL_H #include -#endif #include #if HAVE_UNISTD_H #include @@ -142,7 +138,6 @@ static bool RedirectIO_PS(const std::string *Path, int FD, std::string *ErrMsg, static void TimeOutHandler(int Sig) {} static void SetMemoryLimits(unsigned size) { -#if HAVE_SYS_RESOURCE_H && HAVE_GETRLIMIT && HAVE_SETRLIMIT struct rlimit r; __typeof__(r.rlim_cur) limit = (__typeof__(r.rlim_cur))(size)*1048576; @@ -156,7 +151,6 @@ static void SetMemoryLimits(unsigned size) { r.rlim_cur = limit; setrlimit(RLIMIT_RSS, &r); #endif -#endif } static std::vector diff --git a/llvm/lib/Support/Unix/Signals.inc b/llvm/lib/Support/Unix/Signals.inc index b66e858c965ff..b2f68d25221a2 100644 --- a/llvm/lib/Support/Unix/Signals.inc +++ b/llvm/lib/Support/Unix/Signals.inc @@ -50,9 +50,7 @@ #ifdef HAVE_BACKTRACE #include BACKTRACE_HEADER // For backtrace(). 
#endif -#if HAVE_SIGNAL_H #include -#endif #include #if HAVE_DLFCN_H #include diff --git a/llvm/lib/Support/Unix/Unix.h b/llvm/lib/Support/Unix/Unix.h index 4840b51f75908..f16c7fcda22c3 100644 --- a/llvm/lib/Support/Unix/Unix.h +++ b/llvm/lib/Support/Unix/Unix.h @@ -36,9 +36,7 @@ #include #endif -#ifdef HAVE_SYS_TIME_H -# include -#endif +#include #include #ifdef HAVE_DLFCN_H diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp index 597ccb7ca144b..b76d7bcc95a56 100644 --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -671,7 +671,7 @@ const StringInit *StringInit::get(RecordKeeper &RK, StringRef V, detail::RecordKeeperImpl &RKImpl = RK.getImpl(); auto &InitMap = Fmt == SF_String ? RKImpl.StringInitStringPool : RKImpl.StringInitCodePool; - auto &Entry = *InitMap.insert(std::make_pair(V, nullptr)).first; + auto &Entry = *InitMap.try_emplace(V, nullptr).first; if (!Entry.second) Entry.second = new (RKImpl.Allocator) StringInit(RK, Entry.getKey(), Fmt); return Entry.second; @@ -1674,7 +1674,7 @@ static const Init *ForeachDagApply(const Init *LHS, const DagInit *MHSd, else NewArg = ItemApply(LHS, Arg, RHS, CurRec); - NewArgs.push_back(std::make_pair(NewArg, ArgName)); + NewArgs.emplace_back(NewArg, ArgName); if (Arg != NewArg) Change = true; } @@ -2260,7 +2260,7 @@ const VarInit *VarInit::get(StringRef VN, const RecTy *T) { const VarInit *VarInit::get(const Init *VN, const RecTy *T) { detail::RecordKeeperImpl &RK = T->getRecordKeeper().getImpl(); - VarInit *&I = RK.TheVarInitPool[std::make_pair(T, VN)]; + VarInit *&I = RK.TheVarInitPool[{T, VN}]; if (!I) I = new (RK.Allocator) VarInit(VN, T); return I; @@ -2285,7 +2285,7 @@ const Init *VarInit::resolveReferences(Resolver &R) const { const VarBitInit *VarBitInit::get(const TypedInit *T, unsigned B) { detail::RecordKeeperImpl &RK = T->getRecordKeeper().getImpl(); - VarBitInit *&I = RK.TheVarBitInitPool[std::make_pair(T, B)]; + VarBitInit *&I = RK.TheVarBitInitPool[{T, B}]; if 
(!I) I = new (RK.Allocator) VarBitInit(T, B); return I; @@ -2461,7 +2461,7 @@ std::string VarDefInit::getAsString() const { const FieldInit *FieldInit::get(const Init *R, const StringInit *FN) { detail::RecordKeeperImpl &RK = R->getRecordKeeper().getImpl(); - FieldInit *&I = RK.TheFieldInitPool[std::make_pair(R, FN)]; + FieldInit *&I = RK.TheFieldInitPool[{R, FN}]; if (!I) I = new (RK.Allocator) FieldInit(R, FN); return I; diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h index bac583c4e33a1..6680915211205 100644 --- a/llvm/lib/TableGen/TGLexer.h +++ b/llvm/lib/TableGen/TGLexer.h @@ -234,7 +234,7 @@ class TGLexer { std::pair getCurBinaryIntVal() const { assert(CurCode == tgtok::BinaryIntVal && "This token isn't a binary integer"); - return std::make_pair(CurIntVal, (CurPtr - TokStart)-2); + return {CurIntVal, (CurPtr - TokStart) - 2}; } SMLoc getLoc() const; diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp index 60ae11b7f4261..d2115ab7627da 100644 --- a/llvm/lib/TableGen/TGParser.cpp +++ b/llvm/lib/TableGen/TGParser.cpp @@ -3162,7 +3162,7 @@ void TGParser::ParseDagArgList( Lex.Lex(); // eat the VarName. 
} - Result.push_back(std::make_pair(Val, VarName)); + Result.emplace_back(Val, VarName); } if (!consume(tgtok::comma)) break; @@ -4152,9 +4152,8 @@ bool TGParser::ParseMultiClass() { return TokError("expected identifier after multiclass for name"); std::string Name = Lex.getCurStrVal(); - auto Result = - MultiClasses.insert(std::make_pair(Name, - std::make_unique(Name, Lex.getLoc(),Records))); + auto Result = MultiClasses.try_emplace( + Name, std::make_unique(Name, Lex.getLoc(), Records)); if (!Result.second) return TokError("multiclass '" + Name + "' already defined"); diff --git a/llvm/lib/TableGen/TGParser.h b/llvm/lib/TableGen/TGParser.h index 4509893eefc2c..6094bba84fa55 100644 --- a/llvm/lib/TableGen/TGParser.h +++ b/llvm/lib/TableGen/TGParser.h @@ -131,7 +131,7 @@ class TGVarScope { } void addVar(StringRef Name, const Init *I) { - bool Ins = Vars.insert(std::make_pair(std::string(Name), I)).second; + bool Ins = Vars.try_emplace(std::string(Name), I).second; (void)Ins; assert(Ins && "Local variable already exists"); } diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td index 8644264f5fb1c..3677f669c3481 100644 --- a/llvm/lib/Target/AArch64/AArch64.td +++ b/llvm/lib/Target/AArch64/AArch64.td @@ -58,34 +58,34 @@ include "AArch64SystemOperands.td" class AArch64Unsupported { list F; } -let F = [HasSVE2p1, HasSVE2p1_or_HasSME2, HasSVE2p1_or_HasSME2p1] in +let F = [HasSVE2p1, HasSVE2p1_or_SME2, HasSVE2p1_or_SME2p1] in def SVE2p1Unsupported : AArch64Unsupported; def SVE2Unsupported : AArch64Unsupported { - let F = !listconcat([HasSVE2, HasSVE2orSME, HasSVE2orSME2, HasSSVE_FP8FMA, HasSMEF8F16, + let F = !listconcat([HasSVE2, HasSVE2_or_SME, HasSVE2_or_SME2, HasSSVE_FP8FMA, HasSMEF8F16, HasSMEF8F32, HasSVEAES, HasSVE2SHA3, HasSVE2SM4, HasSVEBitPerm, HasSVEB16B16], SVE2p1Unsupported.F); } def SVEUnsupported : AArch64Unsupported { - let F = !listconcat([HasSVE, HasSVEorSME], + let F = !listconcat([HasSVE, HasSVE_or_SME], 
SVE2Unsupported.F); } -let F = [HasSME2p2, HasSVE2p2orSME2p2, HasNonStreamingSVEorSME2p2, - HasNonStreamingSVE2p2orSME2p2, HasNonStreamingSVE2orSSVE_BitPerm, +let F = [HasSME2p2, HasSVE2p2_or_SME2p2, HasNonStreamingSVE_or_SME2p2, + HasNonStreamingSVE2p2_or_SME2p2, HasNonStreamingSVE2_or_SSVE_BitPerm, HasSME_MOP4, HasSME_TMOP] in def SME2p2Unsupported : AArch64Unsupported; def SME2p1Unsupported : AArch64Unsupported { - let F = !listconcat([HasSME2p1, HasSVE2p1_or_HasSME2p1, HasNonStreamingSVE2p1orSSVE_AES], + let F = !listconcat([HasSME2p1, HasSVE2p1_or_SME2p1, HasNonStreamingSVE2p1_or_SSVE_AES], SME2p2Unsupported.F); } def SME2Unsupported : AArch64Unsupported { - let F = !listconcat([HasSME2, HasSVE2orSME2, HasSVE2p1_or_HasSME2, HasSSVE_FP8FMA, - HasSMEF8F16, HasSMEF8F32, HasSMEF16F16orSMEF8F16, HasSMEB16B16], + let F = !listconcat([HasSME2, HasSVE2_or_SME2, HasSVE2p1_or_SME2, HasSSVE_FP8FMA, + HasSMEF8F16, HasSMEF8F32, HasSMEF16F16_or_SMEF8F16, HasSMEB16B16], SME2p1Unsupported.F); } diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 9c7dc7784e939..8215f3a4fdae1 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -244,53 +244,53 @@ def HasOCCMO : Predicate<"Subtarget->hasOCCMO()">, // A subset of SVE(2) instructions are legal in Streaming SVE execution mode, // they should be enabled if either has been specified. 
-def HasSVEorSME +def HasSVE_or_SME : Predicate<"Subtarget->hasSVE() || (Subtarget->isStreaming() && Subtarget->hasSME())">, AssemblerPredicateWithAll<(any_of FeatureSVE, FeatureSME), "sve or sme">; -def HasNonStreamingSVEorSME2p2 +def HasNonStreamingSVE_or_SME2p2 : Predicate<"(Subtarget->isSVEAvailable() && Subtarget->hasSVE()) ||" "(Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSME2p2())">, AssemblerPredicateWithAll<(any_of FeatureSVE, FeatureSME2p2), "sve or sme2p2">; -def HasSVE2orSME +def HasSVE2_or_SME : Predicate<"Subtarget->hasSVE2() || (Subtarget->isStreaming() && Subtarget->hasSME())">, AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSME), "sve2 or sme">; -def HasSVE2orSME2 +def HasSVE2_or_SME2 : Predicate<"Subtarget->hasSVE2() || (Subtarget->isStreaming() && Subtarget->hasSME2())">, AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSME2), "sve2 or sme2">; -def HasNonStreamingSVE2orSSVE_AES +def HasNonStreamingSVE2_or_SSVE_AES : Predicate<"(Subtarget->isSVEAvailable() && Subtarget->hasSVE2()) ||" "(Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSSVE_AES())">, AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSSVE_AES), "sve2 or ssve-aes">; -def HasSVE2p1_or_HasSME +def HasSVE2p1_or_SME : Predicate<"Subtarget->hasSVE2p1() || (Subtarget->isStreaming() && Subtarget->hasSME())">, AssemblerPredicateWithAll<(any_of FeatureSME, FeatureSVE2p1), "sme or sve2p1">; -def HasSVE2p1_or_HasSME2 +def HasSVE2p1_or_SME2 : Predicate<"Subtarget->hasSVE2p1() || (Subtarget->isStreaming() && Subtarget->hasSME2())">, AssemblerPredicateWithAll<(any_of FeatureSME2, FeatureSVE2p1), "sme2 or sve2p1">; -def HasSVE2p1_or_HasSME2p1 +def HasSVE2p1_or_SME2p1 : Predicate<"Subtarget->hasSVE2p1() || (Subtarget->isStreaming() && Subtarget->hasSME2p1())">, AssemblerPredicateWithAll<(any_of FeatureSME2p1, FeatureSVE2p1), "sme2p1 or sve2p1">; -def HasSVE2p2orSME2p2 +def HasSVE2p2_or_SME2p2 : Predicate<"Subtarget->isSVEorStreamingSVEAvailable() && 
(Subtarget->hasSVE2p2() || Subtarget->hasSME2p2())">, AssemblerPredicateWithAll<(any_of FeatureSME2p2, FeatureSVE2p2), "sme2p2 or sve2p2">; -def HasNonStreamingSVE2p1orSSVE_AES +def HasNonStreamingSVE2p1_or_SSVE_AES : Predicate<"(Subtarget->isSVEAvailable() && Subtarget->hasSVE2p1()) ||" "(Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSSVE_AES())">, AssemblerPredicateWithAll<(any_of FeatureSVE2p1, FeatureSSVE_AES), "sve2p1 or ssve-aes">; -def HasSMEF16F16orSMEF8F16 +def HasSMEF16F16_or_SMEF8F16 : Predicate<"Subtarget->isStreaming() && (Subtarget->hasSMEF16F16() || Subtarget->hasSMEF8F16())">, AssemblerPredicateWithAll<(any_of FeatureSMEF16F16, FeatureSMEF8F16), "sme-f16f16 or sme-f8f16">; -def HasNonStreamingSVE2p2orSME2p2 +def HasNonStreamingSVE2p2_or_SME2p2 : Predicate<"(Subtarget->isSVEAvailable() && Subtarget->hasSVE2p2()) ||" "(Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSME2p2())">, AssemblerPredicateWithAll<(any_of FeatureSVE2p2, FeatureSME2p2), "sme2p2 or sve2p2">; -def HasNonStreamingSVE2orSSVE_BitPerm +def HasNonStreamingSVE2_or_SSVE_BitPerm : Predicate<"(Subtarget->isSVEAvailable() && Subtarget->hasSVE2()) ||" "(Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSSVE_BitPerm())">, AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSSVE_BitPerm), "sve2 or ssve-bitperm">; diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td index 98b027862383d..d2aa86f388db2 100644 --- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td @@ -882,7 +882,7 @@ defm LUTI4_S_2ZTZI : sme2p1_luti4_vector_vg2_index<"luti4">; defm LUTI4_S_4ZTZI : sme2p1_luti4_vector_vg4_index<"luti4">; } -let Predicates = [HasSMEF16F16orSMEF8F16] in { +let Predicates = [HasSMEF16F16_or_SMEF8F16] in { defm FADD_VG2_M2Z_H : sme2_multivec_accum_add_sub_vg2<"fadd", 0b0100, MatrixOp16, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_add_za16_vg1x2>; defm FADD_VG4_M4Z_H : 
sme2_multivec_accum_add_sub_vg4<"fadd", 0b0100, MatrixOp16, ZZZZ_h_mul_r, nxv8f16, int_aarch64_sme_add_za16_vg1x4>; defm FSUB_VG2_M2Z_H : sme2_multivec_accum_add_sub_vg2<"fsub", 0b0101, MatrixOp16, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_sub_za16_vg1x2>; diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 22715c61126d1..27c88a55919e6 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -563,7 +563,7 @@ let Predicates = [HasSVE] in { def WRFFR : sve_int_wrffr<"wrffr", int_aarch64_sve_wrffr>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm ADD_ZZZ : sve_int_bin_cons_arit_0<0b000, "add", add>; defm SUB_ZZZ : sve_int_bin_cons_arit_0<0b001, "sub", sub>; defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat>; @@ -584,9 +584,9 @@ let Predicates = [HasSVEorSME] in { defm EOR_ZPmZ : sve_int_bin_pred_log<0b001, "eor", "EOR_ZPZZ", AArch64eor_m1, DestructiveBinaryComm>; defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", "AND_ZPZZ", AArch64and_m1, DestructiveBinaryComm>; defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", "BIC_ZPZZ", int_aarch64_sve_bic, DestructiveBinary>; -} // End HasSVEorSME +} // End HasSVE_or_SME -let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { +let Predicates = [HasSVE_or_SME, UseExperimentalZeroingPseudos] in { defm ADD_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm SUB_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm SUBR_ZPZZ : sve_int_bin_pred_zeroing_bhsd; @@ -595,9 +595,9 @@ let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { defm EOR_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm AND_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm BIC_ZPZZ : sve_int_bin_pred_zeroing_bhsd; -} // End HasSVEorSME, UseExperimentalZeroingPseudos +} // End HasSVE_or_SME, UseExperimentalZeroingPseudos -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { 
defm ADD_ZI : sve_int_arith_imm0<0b000, "add", add>; defm SUB_ZI : sve_int_arith_imm0<0b001, "sub", sub>; defm SUBR_ZI : sve_int_arith_imm0<0b011, "subr", AArch64subr>; @@ -764,9 +764,9 @@ let Predicates = [HasSVEorSME] in { defm FABD_ZPZZ : sve_fp_bin_pred_hfd; defm FMULX_ZPZZ : sve_fp_bin_pred_hfd; defm FDIV_ZPZZ : sve_fp_bin_pred_hfd; -} // End HasSVEorSME +} // End HasSVE_or_SME -let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { +let Predicates = [HasSVE_or_SME, UseExperimentalZeroingPseudos] in { defm FADD_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; defm FSUB_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; defm FMUL_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; @@ -779,28 +779,28 @@ let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { defm FMULX_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; defm FDIVR_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; defm FDIV_ZPZZ : sve_fp_2op_p_zds_zeroing_hsd; -} // End HasSVEorSME, UseExperimentalZeroingPseudos +} // End HasSVE_or_SME, UseExperimentalZeroingPseudos -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm FADD_ZZZ : sve_fp_3op_u_zd<0b000, "fadd", AArch64fadd>; defm FSUB_ZZZ : sve_fp_3op_u_zd<0b001, "fsub", AArch64fsub>; defm FMUL_ZZZ : sve_fp_3op_u_zd<0b010, "fmul", AArch64fmul>; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { defm FTSMUL_ZZZ : sve_fp_3op_u_zd_ftsmul<0b011, "ftsmul", int_aarch64_sve_ftsmul_x>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm FRECPS_ZZZ : sve_fp_3op_u_zd<0b110, "frecps", AArch64frecps>; defm FRSQRTS_ZZZ : sve_fp_3op_u_zd<0b111, "frsqrts", AArch64frsqrts>; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { defm FTSSEL_ZZZ : sve_int_bin_cons_misc_0_b<"ftssel", int_aarch64_sve_ftssel_x>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm FCADD_ZPmZ : sve_fp_fcadd<"fcadd", int_aarch64_sve_fcadd>; defm FCMLA_ZPmZZ : 
sve_fp_fcmla<"fcmla", int_aarch64_sve_fcmla>; @@ -818,26 +818,26 @@ let Predicates = [HasSVEorSME] in { defm FMLS_ZPZZZ : sve_fp_3op_pred_hfd; defm FNMLA_ZPZZZ : sve_fp_3op_pred_hfd; defm FNMLS_ZPZZZ : sve_fp_3op_pred_hfd; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { defm FTMAD_ZZI : sve_fp_ftmad<"ftmad", int_aarch64_sve_ftmad_x>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm FMLA_ZZZI : sve_fp_fma_by_indexed_elem<0b00, "fmla", int_aarch64_sve_fmla_lane>; defm FMLS_ZZZI : sve_fp_fma_by_indexed_elem<0b01, "fmls", int_aarch64_sve_fmls_lane>; defm FCMLA_ZZZI : sve_fp_fcmla_by_indexed_elem<"fcmla", int_aarch64_sve_fcmla_lane>; defm FMUL_ZZZI : sve_fp_fmul_by_indexed_elem<"fmul", int_aarch64_sve_fmul_lane>; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { // SVE floating point reductions. defm FADDA_VPZ : sve_fp_2op_p_vd<0b000, "fadda", AArch64fadda_p>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm FADDV_VPZ : sve_fp_fast_red<0b000, "faddv", AArch64faddv_p>; defm FMAXNMV_VPZ : sve_fp_fast_red<0b100, "fmaxnmv", AArch64fmaxnmv_p>; defm FMINNMV_VPZ : sve_fp_fast_red<0b101, "fminnmv", AArch64fminnmv_p>; @@ -937,14 +937,14 @@ let Predicates = [HasSVEorSME] in { defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>; defm SPLICE_ZPZ : sve_int_perm_splice<"splice", AArch64splice>; -} // End HasSVEorSME +} // End HasSVE_or_SME // COMPACT - word and doubleword -let Predicates = [HasNonStreamingSVEorSME2p2] in { +let Predicates = [HasNonStreamingSVE_or_SME2p2] in { defm COMPACT_ZPZ : sve_int_perm_compact_sd<"compact", int_aarch64_sve_compact>; } -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm INSR_ZR : sve_int_perm_insrs<"insr", AArch64insr>; defm INSR_ZV : sve_int_perm_insrv<"insr", AArch64insr>; defm EXT_ZZI : sve_int_perm_extract_i<"ext", AArch64ext>; @@ -973,13 +973,13 @@ let Predicates = 
[HasSVEorSME] in { defm MOVPRFX_ZPzZ : sve_int_movprfx_pred_zero<0b000, "movprfx">; defm MOVPRFX_ZPmZ : sve_int_movprfx_pred_merge<0b001, "movprfx">; def MOVPRFX_ZZ : sve_int_bin_cons_misc_0_c<0b00000001, "movprfx", ZPRAny>; -} // End HasSVEorSME +} // End HasSVE_or_SME -let Predicates = [HasNonStreamingSVEorSME2p2] in { +let Predicates = [HasNonStreamingSVE_or_SME2p2] in { defm FEXPA_ZZ : sve_int_bin_cons_misc_0_c_fexpa<"fexpa", int_aarch64_sve_fexpa_x>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm BRKPA_PPzPP : sve_int_brkp<0b00, "brkpa", int_aarch64_sve_brkpa_z>; defm BRKPAS_PPzPP : sve_int_brkp<0b10, "brkpas", null_frag>; defm BRKPB_PPzPP : sve_int_brkp<0b01, "brkpb", int_aarch64_sve_brkpb_z>; @@ -1118,7 +1118,7 @@ let Predicates = [HasSVEorSME] in { let Predicates = [HasSVE2p1] in { defm LD1D_Q : sve_mem_128b_cld_ss<0b11, "ld1d", GPR64NoXZRshifted64>; } -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { // non-faulting continuous load with reg+immediate @@ -1158,7 +1158,7 @@ let Predicates = [HasSVE] in { defm LDFF1D : sve_mem_cldff_ss<0b1111, "ldff1d", Z_d, ZPR64, GPR64shifted64>; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { // LD(2|3|4) structured loads with reg+immediate defm LD2B_IMM : sve_mem_eld_si<0b00, 0b001, ZZ_b, "ld2b", simm4s2>; defm LD3B_IMM : sve_mem_eld_si<0b00, 0b010, ZZZ_b, "ld3b", simm4s3>; @@ -1172,7 +1172,7 @@ let Predicates = [HasSVEorSME] in { defm LD2D_IMM : sve_mem_eld_si<0b11, 0b001, ZZ_d, "ld2d", simm4s2>; defm LD3D_IMM : sve_mem_eld_si<0b11, 0b010, ZZZ_d, "ld3d", simm4s3>; defm LD4D_IMM : sve_mem_eld_si<0b11, 0b011, ZZZZ_d, "ld4d", simm4s4>; - let Predicates = [HasSVE2p1_or_HasSME2p1] in { + let Predicates = [HasSVE2p1_or_SME2p1] in { defm LD2Q_IMM : sve_mem_eld_si<0b01, 0b100, ZZ_q, "ld2q", simm4s2>; defm LD3Q_IMM : sve_mem_eld_si<0b10, 0b100, ZZZ_q, "ld3q", simm4s3>; defm LD4Q_IMM : sve_mem_eld_si<0b11, 
0b100, ZZZZ_q, "ld4q", simm4s4>; @@ -1191,12 +1191,12 @@ let Predicates = [HasSVEorSME] in { def LD2D : sve_mem_eld_ss<0b11, 0b101, ZZ_d, "ld2d", GPR64NoXZRshifted64>; def LD3D : sve_mem_eld_ss<0b11, 0b110, ZZZ_d, "ld3d", GPR64NoXZRshifted64>; def LD4D : sve_mem_eld_ss<0b11, 0b111, ZZZZ_d, "ld4d", GPR64NoXZRshifted64>; - let Predicates = [HasSVE2p1_or_HasSME2p1] in { + let Predicates = [HasSVE2p1_or_SME2p1] in { def LD2Q : sve_mem_eld_ss<0b01, 0b001, ZZ_q, "ld2q", GPR64NoXZRshifted128>; def LD3Q : sve_mem_eld_ss<0b10, 0b001, ZZZ_q, "ld3q", GPR64NoXZRshifted128>; def LD4Q : sve_mem_eld_ss<0b11, 0b001, ZZZZ_q, "ld4q", GPR64NoXZRshifted128>; } -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { // Gathers using unscaled 32-bit offsets, e.g. @@ -1401,7 +1401,7 @@ let Predicates = [HasSVE] in { defm : sve_masked_gather_x4; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { // Non-temporal contiguous loads (register + immediate) defm LDNT1B_ZRI : sve_mem_cldnt_si<0b00, "ldnt1b", Z_b, ZPR8>; defm LDNT1H_ZRI : sve_mem_cldnt_si<0b01, "ldnt1h", Z_h, ZPR16>; @@ -1492,7 +1492,7 @@ let Predicates = [HasSVEorSME] in { defm : sve_st1q_pat; defm : sve_st1q_pat; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { // Scatters using unpacked, unscaled 32-bit offsets, e.g. 
@@ -1624,7 +1624,7 @@ let Predicates = [HasSVE] in { defm : sve_masked_scatter_x4; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { // ST(2|3|4) structured stores (register + immediate) defm ST2B_IMM : sve_mem_est_si<0b00, 0b01, ZZ_b, "st2b", simm4s2>; defm ST3B_IMM : sve_mem_est_si<0b00, 0b10, ZZZ_b, "st3b", simm4s3>; @@ -1638,7 +1638,7 @@ let Predicates = [HasSVEorSME] in { defm ST2D_IMM : sve_mem_est_si<0b11, 0b01, ZZ_d, "st2d", simm4s2>; defm ST3D_IMM : sve_mem_est_si<0b11, 0b10, ZZZ_d, "st3d", simm4s3>; defm ST4D_IMM : sve_mem_est_si<0b11, 0b11, ZZZZ_d, "st4d", simm4s4>; - let Predicates = [HasSVE2p1_or_HasSME2p1] in { + let Predicates = [HasSVE2p1_or_SME2p1] in { defm ST2Q_IMM : sve_mem_128b_est_si<0b01, ZZ_q, "st2q", simm4s2>; defm ST3Q_IMM : sve_mem_128b_est_si<0b10, ZZZ_q, "st3q", simm4s3>; defm ST4Q_IMM : sve_mem_128b_est_si<0b11, ZZZZ_q, "st4q", simm4s4>; @@ -1657,7 +1657,7 @@ let Predicates = [HasSVEorSME] in { def ST2D : sve_mem_est_ss<0b11, 0b01, ZZ_d, "st2d", GPR64NoXZRshifted64>; def ST3D : sve_mem_est_ss<0b11, 0b10, ZZZ_d, "st3d", GPR64NoXZRshifted64>; def ST4D : sve_mem_est_ss<0b11, 0b11, ZZZZ_d, "st4d", GPR64NoXZRshifted64>; - let Predicates = [HasSVE2p1_or_HasSME2p1] in { + let Predicates = [HasSVE2p1_or_SME2p1] in { def ST2Q : sve_mem_128b_est_ss<0b01, ZZ_q, "st2q", GPR64NoXZRshifted128>; def ST3Q : sve_mem_128b_est_ss<0b10, ZZZ_q, "st3q", GPR64NoXZRshifted128>; def ST4Q : sve_mem_128b_est_ss<0b11, ZZZZ_q, "st4q", GPR64NoXZRshifted128>; @@ -1714,7 +1714,7 @@ let Predicates = [HasSVEorSME] in { defm : sve_prefetch; defm : sve_prefetch; defm : sve_prefetch; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { // Gather prefetch using scaled 32-bit offsets, e.g. 
@@ -1820,7 +1820,7 @@ let Predicates = [HasSVE] in { defm : adrXtwShiftPat; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm TBL_ZZZ : sve_int_perm_tbl<"tbl", AArch64tbl>; defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1", AArch64zip1>; @@ -2168,7 +2168,7 @@ let Predicates = [HasSVEorSME] in { defm INCD_XPiI : sve_int_pred_pattern_a<0b110, "incd", add, int_aarch64_sve_cntd>; defm DECD_XPiI : sve_int_pred_pattern_a<0b111, "decd", sub, int_aarch64_sve_cntd>; -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm SQINCB_XPiWdI : sve_int_pred_pattern_b_s32<0b00000, "sqincb", int_aarch64_sve_sqincb_n32>; defm UQINCB_WPiI : sve_int_pred_pattern_b_u32<0b00001, "uqincb", int_aarch64_sve_uqincb_n32>; defm SQDECB_XPiWdI : sve_int_pred_pattern_b_s32<0b00010, "sqdecb", int_aarch64_sve_sqdecb_n32>; @@ -2297,9 +2297,9 @@ let Predicates = [HasSVEorSME] in { defm ASR_ZPZI : sve_int_shift_pred_bhsd; defm LSR_ZPZI : sve_int_shift_pred_bhsd; defm LSL_ZPZI : sve_int_shift_pred_bhsd; -} // End HasSVEorSME +} // End HasSVE_or_SME -let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { +let Predicates = [HasSVE_or_SME, UseExperimentalZeroingPseudos] in { defm ASR_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm LSR_ZPZZ : sve_int_bin_pred_zeroing_bhsd; defm LSL_ZPZZ : sve_int_bin_pred_zeroing_bhsd; @@ -2308,9 +2308,9 @@ let Predicates = [HasSVEorSME, UseExperimentalZeroingPseudos] in { defm ASR_ZPZI : sve_int_bin_pred_imm_zeroing_bhsd; defm LSR_ZPZI : sve_int_bin_pred_imm_zeroing_bhsd; defm LSL_ZPZI : sve_int_bin_pred_imm_zeroing_bhsd; -} // End HasSVEorSME, UseExperimentalZeroingPseudos +} // End HasSVE_or_SME, UseExperimentalZeroingPseudos -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm ASR_ZPmZ : sve_int_bin_pred_shift<0b000, "asr", "ASR_ZPZZ", int_aarch64_sve_asr, "ASRR_ZPmZ">; defm LSR_ZPmZ : sve_int_bin_pred_shift<0b001, "lsr", "LSR_ZPZZ", int_aarch64_sve_lsr, 
"LSRR_ZPmZ">; defm LSL_ZPmZ : sve_int_bin_pred_shift<0b011, "lsl", "LSL_ZPZZ", int_aarch64_sve_lsl, "LSLR_ZPmZ">; @@ -2431,18 +2431,18 @@ let Predicates = [HasSVEorSME] in { defm FRINTI_ZPmZ : sve_fp_2op_p_zd_HSD<0b00111, "frinti", AArch64frinti_mt>; defm FRECPX_ZPmZ : sve_fp_2op_p_zd_HSD<0b01100, "frecpx", AArch64frecpx_mt>; defm FSQRT_ZPmZ : sve_fp_2op_p_zd_HSD<0b01101, "fsqrt", AArch64fsqrt_mt>; -} // End HasSVEorSME +} // End HasSVE_or_SME -let Predicates = [HasBF16, HasSVEorSME] in { +let Predicates = [HasBF16, HasSVE_or_SME] in { defm BFDOT_ZZZ : sve_float_dot<0b1, 0b0, ZPR32, ZPR16, "bfdot", nxv8bf16, int_aarch64_sve_bfdot>; defm BFDOT_ZZI : sve_float_dot_indexed<0b1, 0b00, ZPR16, ZPR3b16, "bfdot", nxv8bf16, int_aarch64_sve_bfdot_lane_v2>; -} // End HasBF16, HasSVEorSME +} // End HasBF16, HasSVE_or_SME let Predicates = [HasBF16, HasSVE] in { defm BFMMLA_ZZZ : sve_fp_matrix_mla<0b01, "bfmmla", ZPR32, ZPR16, int_aarch64_sve_bfmmla, nxv4f32, nxv8bf16>; } // End HasBF16, HasSVE -let Predicates = [HasBF16, HasSVEorSME] in { +let Predicates = [HasBF16, HasSVE_or_SME] in { defm BFMLALB_ZZZ : sve2_fp_mla_long<0b100, "bfmlalb", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalb>; defm BFMLALT_ZZZ : sve2_fp_mla_long<0b101, "bfmlalt", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalt>; defm BFMLALB_ZZZI : sve2_fp_mla_long_by_indexed_elem<0b100, "bfmlalb", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalb_lane_v2>; @@ -2450,9 +2450,9 @@ let Predicates = [HasBF16, HasSVEorSME] in { defm BFCVT_ZPmZ : sve_bfloat_convert<"bfcvt", int_aarch64_sve_fcvt_bf16f32_v2, AArch64fcvtr_mt>; defm BFCVTNT_ZPmZ : sve_bfloat_convert_top<"bfcvtnt", int_aarch64_sve_fcvtnt_bf16f32_v2>; -} // End HasBF16, HasSVEorSME +} // End HasBF16, HasSVE_or_SME -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { // InstAliases def : InstAlias<"mov $Zd, $Zn", (ORR_ZZZ ZPR64:$Zd, ZPR64:$Zn, ZPR64:$Zn), 1>; @@ -2588,7 +2588,7 @@ let Predicates = [HasSVEorSME] in { // LDR1 of 64-bit data defm : LD1RPat; - 
let Predicates = [HasSVEorSME, UseSVEFPLD1R] in { + let Predicates = [HasSVE_or_SME, UseSVEFPLD1R] in { // LD1R of FP data defm : LD1RPat; defm : LD1RPat; @@ -2640,7 +2640,7 @@ let Predicates = [HasSVEorSME] in { } // Add NoUseScalarIncVL to avoid affecting for patterns with UseScalarIncVL - let Predicates = [HasSVEorSME, NoUseScalarIncVL] in { + let Predicates = [HasSVE_or_SME, NoUseScalarIncVL] in { def : Pat<(add GPR64:$op, (vscale (sve_cnth_imm_neg i32:$imm))), (SUBXrs GPR64:$op, (CNTH_XPiI 31, $imm), 0)>; def : Pat<(add GPR64:$op, (vscale (sve_cntw_imm_neg i32:$imm))), @@ -2672,7 +2672,7 @@ let Predicates = [HasSVEorSME] in { (DECD_ZPiI ZPR:$op, 31, $imm)>; } - let Predicates = [HasSVEorSME, UseScalarIncVL], AddedComplexity = 5 in { + let Predicates = [HasSVE_or_SME, UseScalarIncVL], AddedComplexity = 5 in { def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))), (ADDVL_XXI GPR64:$op, $imm)>; @@ -3059,7 +3059,7 @@ let Predicates = [HasSVEorSME] in { // 16-element contiguous loads defm : ld1; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE] in { multiclass ldnf1 { @@ -3144,7 +3144,7 @@ let Predicates = [HasSVE] in { defm : ldff1; } // End HasSVE -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { multiclass st1 { // reg + reg @@ -3462,7 +3462,7 @@ let Predicates = [HasSVEorSME] in { (SUB_ZPmZ_S PPR:$pred, ZPR:$op, (DUP_ZI_S 255, 0))>; def : Pat<(nxv2i64 (sub ZPR:$op, (sext nxv2i1:$pred))), (SUB_ZPmZ_D PPR:$pred, ZPR:$op, (DUP_ZI_D 255, 0))>; -} // End HasSVEorSME +} // End HasSVE_or_SME let Predicates = [HasSVE, HasMatMulInt8] in { defm SMMLA_ZZZ : sve_int_matmul<0b00, "smmla", int_aarch64_sve_smmla>; @@ -3470,11 +3470,11 @@ let Predicates = [HasSVE, HasMatMulInt8] in { defm USMMLA_ZZZ : sve_int_matmul<0b10, "usmmla", int_aarch64_sve_usmmla>; } // End HasSVE, HasMatMulInt8 -let Predicates = [HasSVEorSME, HasMatMulInt8] in { +let Predicates = [HasSVE_or_SME, HasMatMulInt8] in { defm USDOT_ZZZ : 
sve_int_dot_mixed<"usdot", AArch64usdot>; defm USDOT_ZZZI : sve_int_dot_mixed_indexed<0, "usdot", int_aarch64_sve_usdot_lane>; defm SUDOT_ZZZI : sve_int_dot_mixed_indexed<1, "sudot", int_aarch64_sve_sudot_lane>; -} // End HasSVEorSME, HasMatMulInt8 +} // End HasSVE_or_SME, HasMatMulInt8 let Predicates = [HasSVE, HasMatMulFP32] in { defm FMMLA_ZZZ_S : sve_fp_matrix_mla<0b10, "fmmla", ZPR32, ZPR32, int_aarch64_sve_fmmla, nxv4f32, nxv4f32>; @@ -3496,16 +3496,16 @@ let Predicates = [HasSVE, HasMatMulFP64] in { defm LD1RO_D : sve_mem_ldor_ss<0b11, "ld1rod", Z_d, ZPR64, GPR64NoXZRshifted64, nxv2i64, nxv2i1, AArch64ld1ro_z, am_sve_regreg_lsl3>; } // End HasSVE, HasMatMulFP64 -let Predicates = [HasSVEorSME, HasMatMulFP64] in { +let Predicates = [HasSVE_or_SME, HasMatMulFP64] in { defm ZIP1_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b00, 0, "zip1", int_aarch64_sve_zip1q>; defm ZIP2_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b00, 1, "zip2", int_aarch64_sve_zip2q>; defm UZP1_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b01, 0, "uzp1", int_aarch64_sve_uzp1q>; defm UZP2_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b01, 1, "uzp2", int_aarch64_sve_uzp2q>; defm TRN1_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b11, 0, "trn1", int_aarch64_sve_trn1q>; defm TRN2_ZZZ_Q : sve_int_perm_bin_perm_128_zz<0b11, 1, "trn2", int_aarch64_sve_trn2q>; -} // End HasSVEorSME, HasMatMulFP64 +} // End HasSVE_or_SME, HasMatMulFP64 -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 integer multiply-add (indexed) defm MLA_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b0, "mla", int_aarch64_sve_mla_lane>; defm MLS_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b1, "mls", int_aarch64_sve_mls_lane>; @@ -3653,17 +3653,17 @@ let Predicates = [HasSVE2orSME] in { defm UQSHL_ZPZZ : sve_int_bin_pred_all_active_bhsd; defm SQRSHL_ZPZZ : sve_int_bin_pred_all_active_bhsd; defm UQRSHL_ZPZZ : sve_int_bin_pred_all_active_bhsd; -} // End HasSVE2orSME +} // End HasSVE2_or_SME -let Predicates = [HasSVE2orSME, 
UseExperimentalZeroingPseudos] in { +let Predicates = [HasSVE2_or_SME, UseExperimentalZeroingPseudos] in { defm SQSHL_ZPZI : sve_int_bin_pred_shift_imm_left_zeroing_bhsd; defm UQSHL_ZPZI : sve_int_bin_pred_shift_imm_left_zeroing_bhsd; defm SRSHR_ZPZI : sve_int_bin_pred_shift_imm_right_zeroing_bhsd; defm URSHR_ZPZI : sve_int_bin_pred_shift_imm_right_zeroing_bhsd; defm SQSHLU_ZPZI : sve_int_bin_pred_shift_imm_left_zeroing_bhsd; -} // End HasSVE2orSME, UseExperimentalZeroingPseudos +} // End HasSVE2_or_SME, UseExperimentalZeroingPseudos -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 predicated shifts defm SQSHL_ZPmI : sve_int_bin_pred_shift_imm_left_dup<0b0110, "sqshl", "SQSHL_ZPZI", int_aarch64_sve_sqshl>; defm UQSHL_ZPmI : sve_int_bin_pred_shift_imm_left_dup<0b0111, "uqshl", "UQSHL_ZPZI", int_aarch64_sve_uqshl>; @@ -3776,7 +3776,7 @@ let Predicates = [HasSVE2orSME] in { defm SQXTNT_ZZ : sve2_int_sat_extract_narrow_top<0b00, "sqxtnt", int_aarch64_sve_sqxtnt>; defm UQXTNT_ZZ : sve2_int_sat_extract_narrow_top<0b01, "uqxtnt", int_aarch64_sve_uqxtnt>; defm SQXTUNT_ZZ : sve2_int_sat_extract_narrow_top<0b10, "sqxtunt", int_aarch64_sve_sqxtunt>; -} // End HasSVE2orSME +} // End HasSVE2_or_SME let Predicates = [HasSVE2] in { // SVE2 character match @@ -3784,7 +3784,7 @@ let Predicates = [HasSVE2] in { defm NMATCH_PPzZZ : sve2_char_match<0b1, "nmatch", int_aarch64_sve_nmatch>; } // End HasSVE2 -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 bitwise exclusive-or interleaved defm EORBT_ZZZ : sve2_bitwise_xor_interleaved<0b0, "eorbt", int_aarch64_sve_eorbt>; defm EORTB_ZZZ : sve2_bitwise_xor_interleaved<0b1, "eortb", int_aarch64_sve_eortb>; @@ -3799,7 +3799,7 @@ let Predicates = [HasSVE2orSME] in { defm SADDLBT_ZZZ : sve2_misc_int_addsub_long_interleaved<0b00, "saddlbt", int_aarch64_sve_saddlbt>; defm SSUBLBT_ZZZ : sve2_misc_int_addsub_long_interleaved<0b10, "ssublbt", int_aarch64_sve_ssublbt>; defm 
SSUBLTB_ZZZ : sve2_misc_int_addsub_long_interleaved<0b11, "ssubltb", int_aarch64_sve_ssubltb>; -} // End HasSVE2orSME +} // End HasSVE2_or_SME let Predicates = [HasSVE2] in { // SVE2 histogram generation (segment) @@ -3809,16 +3809,16 @@ let Predicates = [HasSVE2] in { defm HISTCNT_ZPzZZ : sve2_hist_gen_vector<"histcnt", int_aarch64_sve_histcnt>; } // End HasSVE2 -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 floating-point base 2 logarithm as integer defm FLOGB_ZPmZ : sve2_fp_flogb<"flogb", "FLOGB_ZPZZ", int_aarch64_sve_flogb>; } -let Predicates = [HasSVE2orSME, UseExperimentalZeroingPseudos] in { +let Predicates = [HasSVE2_or_SME, UseExperimentalZeroingPseudos] in { defm FLOGB_ZPZZ : sve2_fp_un_pred_zeroing_hsd; -} // End HasSVE2orSME, UseExperimentalZeroingPseudos +} // End HasSVE2_or_SME, UseExperimentalZeroingPseudos -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 floating-point convert precision defm FCVTXNT_ZPmZ : sve2_fp_convert_down_odd_rounding_top<"fcvtxnt", "int_aarch64_sve_fcvtxnt">; defm FCVTX_ZPmZ : sve2_fp_convert_down_odd_rounding<"fcvtx", "int_aarch64_sve_fcvtx", AArch64fcvtx_mt>; @@ -3861,7 +3861,7 @@ let Predicates = [HasSVE2orSME] in { def : Pat<(nxv16i8 (AArch64ext nxv16i8:$zn1, nxv16i8:$zn2, (i32 imm0_255:$imm))), (EXT_ZZI_B (REG_SEQUENCE ZPR2, $zn1, zsub0, $zn2, zsub1), imm0_255:$imm)>; } -} // End HasSVE2orSME +} // End HasSVE2_or_SME let Predicates = [HasSVE2] in { // SVE2 non-temporal gather loads @@ -3880,10 +3880,10 @@ let Predicates = [HasSVE2] in { defm LDNT1D_ZZR_D : sve2_mem_gldnt_vs_64_ptrs<0b11110, "ldnt1d", AArch64ldnt1_gather_z, nxv2i64>; } // End HasSVE2 -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 vector splice (constructive) defm SPLICE_ZPZZ : sve2_int_perm_splice_cons<"splice", AArch64splice>; -} // End HasSVE2orSME +} // End HasSVE2_or_SME let Predicates = [HasSVE2] in { // SVE2 non-temporal scatter stores 
@@ -3897,7 +3897,7 @@ let Predicates = [HasSVE2] in { defm STNT1D_ZZR_D : sve2_mem_sstnt_vs_64_ptrs<0b110, "stnt1d", AArch64stnt1_scatter, nxv2i64>; } // End HasSVE2 -let Predicates = [HasSVE2orSME] in { +let Predicates = [HasSVE2_or_SME] in { // SVE2 table lookup (three sources) defm TBL_ZZZZ : sve2_int_perm_tbl<"tbl", int_aarch64_sve_tbl2>; defm TBX_ZZZ : sve2_int_perm_tbx<"tbx", 0b01, int_aarch64_sve_tbx>; @@ -3916,9 +3916,9 @@ let Predicates = [HasSVE2orSME] in { // SVE2 pointer conflict compare defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", "int_aarch64_sve_whilewr">; defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", "int_aarch64_sve_whilerw">; -} // End HasSVE2orSME +} // End HasSVE2_or_SME -let Predicates = [HasSVEAES, HasNonStreamingSVE2orSSVE_AES] in { +let Predicates = [HasSVEAES, HasNonStreamingSVE2_or_SSVE_AES] in { // SVE2 crypto destructive binary operations defm AESE_ZZZ_B : sve2_crypto_des_bin_op<0b00, "aese", ZPR8, int_aarch64_sve_aese, nxv16i8>; defm AESD_ZZZ_B : sve2_crypto_des_bin_op<0b01, "aesd", ZPR8, int_aarch64_sve_aesd, nxv16i8>; @@ -3946,14 +3946,14 @@ let Predicates = [HasSVE2SHA3] in { defm RAX1_ZZZ_D : sve2_crypto_cons_bin_op<0b1, "rax1", ZPR64, int_aarch64_sve_rax1, nxv2i64>; } // End HasSVE2SHA3 -let Predicates = [HasSVEBitPerm, HasNonStreamingSVE2orSSVE_BitPerm] in { +let Predicates = [HasSVEBitPerm, HasNonStreamingSVE2_or_SSVE_BitPerm] in { // SVE2 bitwise permute defm BEXT_ZZZ : sve2_misc_bitwise<0b1100, "bext", int_aarch64_sve_bext_x>; defm BDEP_ZZZ : sve2_misc_bitwise<0b1101, "bdep", int_aarch64_sve_bdep_x>; defm BGRP_ZZZ : sve2_misc_bitwise<0b1110, "bgrp", int_aarch64_sve_bgrp_x>; } -let Predicates = [HasSVEAES2, HasNonStreamingSVE2p1orSSVE_AES] in { +let Predicates = [HasSVEAES2, HasNonStreamingSVE2p1_or_SSVE_AES] in { // SVE_AES2 multi-vector instructions (x2) def AESE_2ZZI_B : sve_crypto_binary_multi2<0b000, "aese">; def AESD_2ZZI_B : sve_crypto_binary_multi2<0b010, "aesd">; @@ -3974,20 +3974,20 @@ let Predicates = 
[HasSVEAES2, HasNonStreamingSVE2p1orSSVE_AES] in { // SME or SVE2.1 instructions //===----------------------------------------------------------------------===// -let Predicates = [HasSVE2p1_or_HasSME] in { +let Predicates = [HasSVE2p1_or_SME] in { defm REVD_ZPmZ : sve2_int_perm_revd<"revd", AArch64revd_mt>; defm SCLAMP_ZZZ : sve2_clamp<"sclamp", 0b0, AArch64sclamp>; defm UCLAMP_ZZZ : sve2_clamp<"uclamp", 0b1, AArch64uclamp>; defm PSEL_PPPRI : sve2_int_perm_sel_p<"psel", int_aarch64_sve_psel>; -} // End HasSVE2p1_or_HasSME +} // End HasSVE2p1_or_SME //===----------------------------------------------------------------------===// // SME2 or SVE2.1 instructions //===----------------------------------------------------------------------===// -let Predicates = [HasSVE2p1_or_HasSME2] in { +let Predicates = [HasSVE2p1_or_SME2] in { defm FCLAMP_ZZZ : sve_fp_clamp<"fclamp", AArch64fclamp>; defm FDOT_ZZZ_S : sve_float_dot<0b0, 0b0, ZPR32, ZPR16, "fdot", nxv8f16, int_aarch64_sve_fdot_x2>; @@ -4154,9 +4154,9 @@ defm WHILEHS_CXX : sve2p1_int_while_rr_pn<"whilehs", 0b100>; defm WHILEHI_CXX : sve2p1_int_while_rr_pn<"whilehi", 0b101>; defm WHILELO_CXX : sve2p1_int_while_rr_pn<"whilelo", 0b110>; defm WHILELS_CXX : sve2p1_int_while_rr_pn<"whilels", 0b111>; -} // End HasSVE2p1_or_HasSME2 +} // End HasSVE2p1_or_SME2 -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { // Aliases for existing SVE instructions for which predicate-as-counter are // accepted as an operand to the instruction @@ -4222,7 +4222,7 @@ let Predicates = [HasSVEBFSCALE] in { //===----------------------------------------------------------------------===// // SME2.1 or SVE2.1 instructions //===----------------------------------------------------------------------===// -let Predicates = [HasSVE2p1_or_HasSME2p1] in { +let Predicates = [HasSVE2p1_or_SME2p1] in { defm FADDQV : sve2p1_fp_reduction_q<0b000, "faddqv", int_aarch64_sve_faddqv>; defm FMAXNMQV : sve2p1_fp_reduction_q<0b100, "fmaxnmqv", 
int_aarch64_sve_fmaxnmqv>; defm FMINNMQV : sve2p1_fp_reduction_q<0b101, "fminnmqv", int_aarch64_sve_fminnmqv>; @@ -4250,13 +4250,13 @@ defm UZPQ1_ZZZ : sve2p1_permute_vec_elems_q<0b010, "uzpq1", int_aarch64_sve_uzpq defm UZPQ2_ZZZ : sve2p1_permute_vec_elems_q<0b011, "uzpq2", int_aarch64_sve_uzpq2>; defm TBXQ_ZZZ : sve2_int_perm_tbx<"tbxq", 0b10, int_aarch64_sve_tbxq>; defm TBLQ_ZZZ : sve2p1_tblq<"tblq", int_aarch64_sve_tblq>; -} // End HasSVE2p1_or_HasSME2p1 +} // End HasSVE2p1_or_SME2p1 //===----------------------------------------------------------------------===// // SME2.2 or SVE2.2 instructions //===----------------------------------------------------------------------===// -let Predicates = [HasSVE2p2orSME2p2] in { +let Predicates = [HasSVE2p2_or_SME2p2] in { // SVE Floating-point convert precision, zeroing predicate defm FCVT_ZPzZ : sve_fp_z2op_p_zd_b_0<"fcvt", "int_aarch64_sve_fcvt">; @@ -4349,7 +4349,7 @@ let Predicates = [HasSVE2p2orSME2p2] in { //===----------------------------------------------------------------------===// // SME2.2 or SVE2.2 instructions - Legal in streaming mode iff target has SME2p2 //===----------------------------------------------------------------------===// -let Predicates = [HasNonStreamingSVE2p2orSME2p2] in { +let Predicates = [HasNonStreamingSVE2p2_or_SME2p2] in { // SVE2 EXPAND defm EXPAND_ZPZ : sve2_int_perm_expand<"expand">; // SVE COMPACT - byte and halfword @@ -4359,7 +4359,7 @@ let Predicates = [HasNonStreamingSVE2p2orSME2p2] in { //===----------------------------------------------------------------------===// // SVE2 FP8 instructions //===----------------------------------------------------------------------===// -let Predicates = [HasSVE2orSME2, HasFP8] in { +let Predicates = [HasSVE2_or_SME2, HasFP8] in { // FP8 upconvert defm F1CVT_ZZ : sve2_fp8_cvt_single<0b0, 0b00, "f1cvt", nxv8f16, int_aarch64_sve_fp8_cvt1>; defm F2CVT_ZZ : sve2_fp8_cvt_single<0b0, 0b01, "f2cvt", nxv8f16, int_aarch64_sve_fp8_cvt2>; @@ -4376,15 
+4376,15 @@ defm FCVTNB_Z2Z_StoB : sve2_fp8_down_cvt_single<0b01, "fcvtnb", ZZ_s_mul_r, nxv4 defm BFCVTN_Z2Z_HtoB : sve2_fp8_down_cvt_single<0b10, "bfcvtn", ZZ_h_mul_r, nxv8bf16, int_aarch64_sve_fp8_cvtn>; defm FCVTNT_Z2Z_StoB : sve2_fp8_down_cvt_single_top<0b11, "fcvtnt", ZZ_s_mul_r, nxv4f32, int_aarch64_sve_fp8_cvtnt>; -} // End HasSVE2orSME2, HasFP8 +} // End HasSVE2_or_SME2, HasFP8 -let Predicates = [HasSVE2orSME2, HasFAMINMAX] in { +let Predicates = [HasSVE2_or_SME2, HasFAMINMAX] in { defm FAMIN_ZPmZ : sve_fp_2op_p_zds<0b1111, "famin", "FAMIN_ZPZZ", int_aarch64_sve_famin, DestructiveBinaryComm>; defm FAMAX_ZPmZ : sve_fp_2op_p_zds<0b1110, "famax", "FAMAX_ZPZZ", int_aarch64_sve_famax, DestructiveBinaryComm>; defm FAMAX_ZPZZ : sve_fp_bin_pred_hfd; defm FAMIN_ZPZZ : sve_fp_bin_pred_hfd; -} // End HasSVE2orSME2, HasFAMINMAX +} // End HasSVE2_or_SME2, HasFAMINMAX let Predicates = [HasSSVE_FP8FMA] in { // FP8 Widening Multiply-Add Long - Indexed Group @@ -4428,14 +4428,14 @@ defm FDOT_ZZZI_BtoS : sve2_fp8_dot_indexed_s<"fdot", int_aarch64_sve_fp8_fdot_la defm FDOT_ZZZ_BtoS : sve_fp8_dot<0b1, ZPR32, "fdot", nxv4f32, int_aarch64_sve_fp8_fdot>; } -let Predicates = [HasSVE2orSME2, HasLUT] in { +let Predicates = [HasSVE2_or_SME2, HasLUT] in { // LUTI2 defm LUTI2_ZZZI : sve2_luti2_vector_index<"luti2">; // LUTI4 defm LUTI4_ZZZI : sve2_luti4_vector_index<"luti4">; // LUTI4 (two contiguous registers) defm LUTI4_Z2ZZI : sve2_luti4_vector_vg2_index<"luti4">; -} // End HasSVE2orSME2, HasLUT +} // End HasSVE2_or_SME2, HasLUT //===----------------------------------------------------------------------===// // Checked Pointer Arithmetic (FEAT_CPA) diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp index 15f1c99e87246..e4719b26cab52 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -165,7 +165,7 @@ struct IncomingArgHandler : public 
CallLowering::IncomingValueHandler { void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA) override { - markPhysRegUsed(PhysReg); + markRegUsed(PhysReg); IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA); } @@ -207,16 +207,16 @@ struct IncomingArgHandler : public CallLowering::IncomingValueHandler { /// How the physical register gets marked varies between formal /// parameters (it's a basic-block live-in), and a call instruction /// (it's an implicit-def of the BL). - virtual void markPhysRegUsed(MCRegister PhysReg) = 0; + virtual void markRegUsed(Register Reg) = 0; }; struct FormalArgHandler : public IncomingArgHandler { FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI) : IncomingArgHandler(MIRBuilder, MRI) {} - void markPhysRegUsed(MCRegister PhysReg) override { - MIRBuilder.getMRI()->addLiveIn(PhysReg); - MIRBuilder.getMBB().addLiveIn(PhysReg); + void markRegUsed(Register Reg) override { + MIRBuilder.getMRI()->addLiveIn(Reg.asMCReg()); + MIRBuilder.getMBB().addLiveIn(Reg.asMCReg()); } }; @@ -225,8 +225,8 @@ struct CallReturnHandler : public IncomingArgHandler { MachineInstrBuilder MIB) : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {} - void markPhysRegUsed(MCRegister PhysReg) override { - MIB.addDef(PhysReg, RegState::Implicit); + void markRegUsed(Register Reg) override { + MIB.addDef(Reg, RegState::Implicit); } MachineInstrBuilder MIB; @@ -239,7 +239,7 @@ struct ReturnedArgCallReturnHandler : public CallReturnHandler { MachineInstrBuilder MIB) : CallReturnHandler(MIRBuilder, MRI, MIB) {} - void markPhysRegUsed(MCRegister PhysReg) override {} + void markRegUsed(Register Reg) override {} }; struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler { diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td index 0ef862fc1a27c..873fbf7dd346b 100644 --- a/llvm/lib/Target/AArch64/SVEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td @@ 
-469,7 +469,7 @@ multiclass sve_int_ptrue opc, string asm, SDPatternOperator op> { def SDT_AArch64PTrue : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>; def AArch64ptrue : SDNode<"AArch64ISD::PTRUE", SDT_AArch64PTrue>; -let Predicates = [HasSVEorSME] in { +let Predicates = [HasSVE_or_SME] in { defm PTRUE : sve_int_ptrue<0b000, "ptrue", AArch64ptrue>; defm PTRUES : sve_int_ptrue<0b001, "ptrues", null_frag>; @@ -1263,7 +1263,7 @@ class sve_int_pred_pattern_a opc, string asm> multiclass sve_int_pred_pattern_a opc, string asm, SDPatternOperator op, SDPatternOperator opcnt> { - let Predicates = [HasSVEorSME] in { + let Predicates = [HasSVE_or_SME] in { def NAME : sve_int_pred_pattern_a; def : InstAlias opc, string asm, (!cast(NAME) GPR64:$Rdn, 0b11111, 1), 2>; } - let Predicates = [HasSVEorSME, UseScalarIncVL] in { + let Predicates = [HasSVE_or_SME, UseScalarIncVL] in { def : Pat<(i64 (op GPR64:$Rdn, (opcnt sve_pred_enum:$pattern))), (!cast(NAME) GPR64:$Rdn, sve_pred_enum:$pattern, 1)>; diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index 5207201e14c09..6baef137df5e1 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -3007,8 +3007,8 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) { switch (I.getOpcode()) { case AMDGPU::V_ADDC_U32_e32: case AMDGPU::V_ADDC_U32_dpp: - case AMDGPU::V_CNDMASK_B16_e32: - case AMDGPU::V_CNDMASK_B16_dpp: + case AMDGPU::V_CNDMASK_B16_fake16_e32: + case AMDGPU::V_CNDMASK_B16_fake16_dpp: case AMDGPU::V_CNDMASK_B32_e32: case AMDGPU::V_CNDMASK_B32_dpp: case AMDGPU::V_DIV_FMAS_F32_e64: @@ -3023,8 +3023,8 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) { HazardReg == AMDGPU::VCC_HI; case AMDGPU::V_ADDC_U32_e64: case AMDGPU::V_ADDC_U32_e64_dpp: - case AMDGPU::V_CNDMASK_B16_e64: - case AMDGPU::V_CNDMASK_B16_e64_dpp: + case AMDGPU::V_CNDMASK_B16_fake16_e64: + case 
AMDGPU::V_CNDMASK_B16_fake16_e64_dpp: case AMDGPU::V_CNDMASK_B32_e64: case AMDGPU::V_CNDMASK_B32_e64_dpp: case AMDGPU::V_SUBB_U32_e64: diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 21f1f20e5e69a..e068b5f0b8769 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -1952,6 +1952,13 @@ bool SITargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, return Index == 0; } +bool SITargetLowering::isExtractVecEltCheap(EVT VT, unsigned Index) const { + // TODO: This should be more aggressive, particular for 16-bit element + // vectors. However there are some mixed improvements and regressions. + EVT EltTy = VT.getVectorElementType(); + return EltTy.getSizeInBits() % 32 == 0; +} + bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { if (Subtarget->has16BitInsts() && VT == MVT::i16) { switch (Op) { @@ -13877,6 +13884,37 @@ static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, EVT VT, return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); } +// Fold +// y = lshr i64 x, 32 +// res = add (mul i64 y, Const), x where "Const" is a 64-bit constant +// with Const.hi == -1 +// To +// res = mad_u64_u32 y.lo ,Const.lo, x.lo +static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL, + SDValue MulLHS, SDValue MulRHS, + SDValue AddRHS) { + if (MulRHS.getOpcode() == ISD::SRL) + std::swap(MulLHS, MulRHS); + + if (MulLHS.getValueType() != MVT::i64 || MulLHS.getOpcode() != ISD::SRL) + return SDValue(); + + ConstantSDNode *ShiftVal = dyn_cast(MulLHS.getOperand(1)); + if (!ShiftVal || ShiftVal->getAsZExtVal() != 32 || + MulLHS.getOperand(0) != AddRHS) + return SDValue(); + + ConstantSDNode *Const = dyn_cast(MulRHS.getNode()); + if (!Const || Hi_32(Const->getZExtValue()) != uint32_t(-1)) + return SDValue(); + + SDValue ConstMul = + DAG.getConstant(Lo_32(Const->getZExtValue()), SL, MVT::i32); + return getMad64_32(DAG, SL, MVT::i64, + 
DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS), ConstMul, + DAG.getZeroExtendInReg(AddRHS, SL, MVT::i32), false); +} + // Fold (add (mul x, y), z) --> (mad_[iu]64_[iu]32 x, y, z) plus high // multiplies, if any. // @@ -13935,6 +13973,9 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N, SDValue MulRHS = LHS.getOperand(1); SDValue AddRHS = RHS; + if (SDValue FoldedMAD = tryFoldMADwithSRL(DAG, SL, MulLHS, MulRHS, AddRHS)) + return FoldedMAD; + // Always check whether operands are small unsigned values, since that // knowledge is useful in more cases. Check for small signed values only if // doing so can unlock a shorter code sequence. diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h index 5c215f76552d9..bbb96d9115a0a 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -365,6 +365,7 @@ class SITargetLowering final : public AMDGPUTargetLowering { bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override; + bool isExtractVecEltCheap(EVT VT, unsigned Index) const override; bool isTypeDesirableForOp(unsigned Op, EVT VT) const override; diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index cdc1132579d8d..1abbf4c217a69 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -1245,11 +1245,22 @@ class VOPSelectPat : GCNPat < (vt (select i1:$src0, vt:$src1, vt:$src2)), (V_CNDMASK_B32_e64 0, VSrc_b32:$src2, 0, VSrc_b32:$src1, SSrc_i1:$src0) >; +class VOPSelectPat_t16 : GCNPat < + (vt (select i1:$src0, vt:$src1, vt:$src2)), + (V_CNDMASK_B16_t16_e64 0, VSrcT_b16:$src2, 0, VSrcT_b16:$src1, SSrc_i1:$src0) +>; def : VOPSelectModsPat ; def : VOPSelectModsPat ; -def : VOPSelectPat ; -def : VOPSelectPat ; +foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in +let True16Predicate = p in { + def : VOPSelectPat ; + def : VOPSelectPat ; +} // End True16Predicate 
= p +let True16Predicate = UseRealTrue16Insts in { + def : VOPSelectPat_t16 ; + def : VOPSelectPat_t16 ; +} // End True16Predicate = UseRealTrue16Insts let AddedComplexity = 1 in { def : GCNPat < diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td index 6bbf19179b7f6..900c91731aa1b 100644 --- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td @@ -714,6 +714,26 @@ class VOP2e_SGPR ArgVT> : VOPProfile { def VOP2e_I32_I32_I32_I1 : VOP2e_SGPR<[i32, i32, i32, i1]>; def VOP2e_I16_I16_I16_I1 : VOP2e_SGPR<[i16, i16, i16, i1]>; // V_CNDMASK_B16 is VOP3 only +def VOP2e_I16_I16_I16_I1_true16 : VOP2e_SGPR<[i16, i16, i16, i1]> { + let IsTrue16 = 1; + let IsRealTrue16 = 1; + let HasOpSel = 1; + let DstRC64 = getVALUDstForVT.ret; + let Src0RC64 = getVOP3SrcForVT.ret; + let Src1RC64 = getVOP3SrcForVT.ret; + let Src2RC64 = getVOP3SrcForVT.ret; + let Src0Mod = getSrc0Mod.ret; + let Src1Mod = getSrcMod.ret; + let HasSrc2Mods = 0; + let InsVOP3OpSel = getInsVOP3Base.ret; + let Src0VOP3DPP = VGPRSrc_16; + let Src1VOP3DPP = getVOP3DPPSrcForVT.ret; + let Src0ModVOP3DPP = getSrc0ModVOP3DPP.ret; + let Src1ModVOP3DPP = getSrcModVOP3DPP.ret; +} def VOP2e_I16_I16_I16_I1_fake16 : VOP2e_SGPR<[i16, i16, i16, i1]> { let IsTrue16 = 1; let DstRC64 = getVALUDstForVT.ret; @@ -765,8 +785,10 @@ def VOP_WRITELANE : VOPProfile<[i32, i32, i32, i32]> { // VOP2 Instructions //===----------------------------------------------------------------------===// -let SubtargetPredicate = isGFX11Plus in -defm V_CNDMASK_B16 : VOP2eInst <"v_cndmask_b16", VOP2e_I16_I16_I16_I1_fake16>; +let SubtargetPredicate = isGFX11Plus, True16Predicate = UseRealTrue16Insts in +defm V_CNDMASK_B16_t16 : VOP2eInst <"v_cndmask_b16_t16", VOP2e_I16_I16_I16_I1_true16>; +let SubtargetPredicate = isGFX11Plus, True16Predicate = UseFakeTrue16Insts in +defm V_CNDMASK_B16_fake16 : VOP2eInst <"v_cndmask_b16_fake16", VOP2e_I16_I16_I16_I1_fake16>; defm 
V_CNDMASK_B32 : VOP2eInst_VOPD <"v_cndmask_b32", VOP2e_I32_I32_I32_I1, 0x9, "v_cndmask_b32">; let SubtargetPredicate = HasMadMacF32Insts, isReMaterializable = 1 in def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32, []>; @@ -1846,7 +1868,7 @@ defm V_FMAMK_F16 : VOP2Only_Real_MADK_t16_and_fake16_gfx11_gfx12<0x037 defm V_FMAAK_F16 : VOP2Only_Real_MADK_t16_and_fake16_gfx11_gfx12<0x038, "v_fmaak_f16">; // VOP3 only. -defm V_CNDMASK_B16 : VOP3Only_Realtriple_gfx11_gfx12<0x25d>; +defm V_CNDMASK_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x25d, "v_cndmask_b16">; defm V_LDEXP_F32 : VOP3Only_Realtriple_gfx11_gfx12<0x31c>; defm V_BFM_B32 : VOP3Only_Realtriple_gfx11_gfx12<0x31d>; defm V_BCNT_U32_B32 : VOP3Only_Realtriple_gfx11_gfx12<0x31e>; diff --git a/llvm/lib/Target/BPF/BPFAsmPrinter.cpp b/llvm/lib/Target/BPF/BPFAsmPrinter.cpp index ab03a4e56ea07..b3c27a3d1d6fa 100644 --- a/llvm/lib/Target/BPF/BPFAsmPrinter.cpp +++ b/llvm/lib/Target/BPF/BPFAsmPrinter.cpp @@ -60,7 +60,7 @@ bool BPFAsmPrinter::doInitialization(Module &M) { // Only emit BTF when debuginfo available. 
if (MAI->doesSupportDebugInformation() && !M.debug_compile_units().empty()) { BTF = new BTFDebug(this); - DebugHandlers.push_back(std::unique_ptr(BTF)); + Handlers.push_back(std::unique_ptr(BTF)); } return false; diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td index 7d1651971f340..4b20a64cb0722 100644 --- a/llvm/lib/Target/DirectX/DXIL.td +++ b/llvm/lib/Target/DirectX/DXIL.td @@ -303,6 +303,14 @@ defvar BarrierMode_GroupMemoryBarrierWithGroupSync = 9; defvar BarrierMode_AllMemoryBarrier = 10; defvar BarrierMode_AllMemoryBarrierWithGroupSync = 11; +defvar WaveOpKind_Sum = 0; +defvar WaveOpKind_Product = 1; +defvar WaveOpKind_Min = 2; +defvar WaveOpKind_Max = 3; + +defvar SignedOpKind_Signed = 0; +defvar SignedOpKind_Unsigned = 1; + // Intrinsic arg selection class IntrinArgSelectType; def IntrinArgSelect_Index : IntrinArgSelectType; @@ -340,7 +348,7 @@ class IntrinArgI32 : IntrinArgSelect; // IntrinSelect, IntrinArgI8<0>, IntrinArgI8<1> ] // >, -// IntrinSelect, IntrinArgI8<0>, IntrinArgI8<0> ] // >, // ] @@ -991,6 +999,24 @@ def WaveActiveAnyTrue : DXILOp<113, waveAnyTrue> { let stages = [Stages]; } +def WaveActiveOp : DXILOp<119, waveActiveOp> { + let Doc = "returns the result of the operation across waves"; + let intrinsics = [ + IntrinSelect< + int_dx_wave_reduce_sum, + [ IntrinArgIndex<0>, IntrinArgI8, IntrinArgI8 ]>, + IntrinSelect< + int_dx_wave_reduce_usum, + [ IntrinArgIndex<0>, IntrinArgI8, IntrinArgI8 ]>, + ]; + + let arguments = [OverloadTy, Int8Ty, Int8Ty]; + let result = OverloadTy; + let overloads = [Overloads]; + let stages = [Stages]; + let attributes = [Attributes]; +} + def WaveIsFirstLane : DXILOp<110, waveIsFirstLane> { let Doc = "returns 1 for the first lane in the wave"; let intrinsics = [ IntrinSelect ]; diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp index 60a9d5c257f1c..4e6e01bc5edbc 100644 --- 
a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp +++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp @@ -40,6 +40,8 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable( switch (ID) { case Intrinsic::dx_frac: case Intrinsic::dx_rsqrt: + case Intrinsic::dx_wave_reduce_sum: + case Intrinsic::dx_wave_reduce_usum: case Intrinsic::dx_wave_readlane: case Intrinsic::dx_asdouble: case Intrinsic::dx_splitdouble: diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 2e66b67dfdcc7..8f6adf2c22f92 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "NVPTXISelDAGToDAG.h" +#include "NVPTX.h" #include "NVPTXUtilities.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/ISDOpcodes.h" @@ -191,6 +192,12 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) { } break; } + case ISD::FADD: + case ISD::FMUL: + case ISD::FSUB: + if (tryBF16ArithToFMA(N)) + return; + break; default: break; } @@ -2450,6 +2457,62 @@ bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) { return true; } +// Select bf16/bf16v2 FADD, FSUB, FMUL as fma on targets with only fma +bool NVPTXDAGToDAGISel::tryBF16ArithToFMA(SDNode *N) { + EVT VT = SDValue(N, 0).getValueType(); + if (VT.getScalarType() != MVT::bf16) + return false; + + const NVPTXSubtarget *STI = TM.getSubtargetImpl(); + if (STI->hasNativeBF16Support(N->getOpcode())) + return false; + + const bool IsVec = VT.isVector(); + assert(!IsVec || VT.getVectorNumElements() == 2); + SDLoc DL(N); + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SmallVector Operands; + auto GetConstant = [&](float Value) -> SDValue { + // BF16 immediates must be legalized to integer register values + APFloat APF(Value); + bool LosesInfo; + APF.convert(APFloat::BFloat(), APFloat::rmNearestTiesToEven, &LosesInfo); + 
assert(!LosesInfo); + if (IsVec) { + auto API = APF.bitcastToAPInt(); + API = API.concat(API); + auto Const = CurDAG->getTargetConstant(API, DL, MVT::i32); + return SDValue(CurDAG->getMachineNode(NVPTX::IMOV32ri, DL, VT, Const), 0); + } + auto Const = CurDAG->getTargetConstantFP(APF, DL, VT); + return SDValue(CurDAG->getMachineNode(NVPTX::BFMOV16ri, DL, VT, Const), 0); + }; + + switch (N->getOpcode()) { + case ISD::FADD: + // add(a, b) -> fma(a, 1.0, b) + Operands = {N0, GetConstant(1.0), N1}; + break; + case ISD::FSUB: + // sub(a, b) -> fma(b, -1.0, a) + Operands = {N1, GetConstant(-1.0), N0}; + break; + case ISD::FMUL: + // mul(a, b) -> fma(a, b, -0.0) + // NOTE: The identity is -0, not 0, because -0 + 0 == 0 for floats + Operands = {N0, N1, GetConstant(-0.0)}; + break; + default: + llvm_unreachable("Unexpected opcode"); + }; + + int Opcode = IsVec ? NVPTX::BFMA16x2rrr : NVPTX::BFMA16rrr; + MachineSDNode *FMA = CurDAG->getMachineNode(Opcode, DL, VT, Operands); + ReplaceNode(N, FMA); + return true; +} + static inline bool isAddLike(const SDValue V) { return V.getOpcode() == ISD::ADD || (V->getOpcode() == ISD::OR && V->getFlags().hasDisjoint()); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index 8cadde8a82264..7661f153238fc 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -84,6 +84,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel { bool tryFence(SDNode *N); void SelectAddrSpaceCast(SDNode *N); bool tryBFE(SDNode *N); + bool tryBF16ArithToFMA(SDNode *N); bool tryConstantFP(SDNode *N); bool SelectSETP_F16X2(SDNode *N); bool SelectSETP_BF16X2(SDNode *N); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 184f96b872aa6..c40c09c204fd7 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -94,6 +94,13 @@ static cl::opt 
UsePrecSqrtF32( cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true)); +/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it +/// does NOT use lg2.approx for log2, so this is disabled by default. +static cl::opt UseApproxLog2F32( + "nvptx-approx-log2f32", + cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), + cl::init(false)); + static cl::opt ForceMinByValParamAlign( "nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" @@ -529,40 +536,16 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, case ISD::FMINIMUM: IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70; break; + case ISD::FEXP2: + IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70; + break; } setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action); }; auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action, LegalizeAction NoBF16Action) { - bool IsOpSupported = STI.hasBF16Math(); - switch (Op) { - // Several BF16 instructions are available on sm_90 only. - case ISD::FADD: - case ISD::FMUL: - case ISD::FSUB: - case ISD::SELECT: - case ISD::SELECT_CC: - case ISD::SETCC: - case ISD::FEXP2: - case ISD::FCEIL: - case ISD::FFLOOR: - case ISD::FNEARBYINT: - case ISD::FRINT: - case ISD::FROUNDEVEN: - case ISD::FTRUNC: - IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 78; - break; - // Several BF16 instructions are available on sm_80 only. - case ISD::FMINNUM: - case ISD::FMAXNUM: - case ISD::FMAXNUM_IEEE: - case ISD::FMINNUM_IEEE: - case ISD::FMAXIMUM: - case ISD::FMINIMUM: - IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70; - break; - } + bool IsOpSupported = STI.hasNativeBF16Support(Op); setOperationAction( Op, VT, IsOpSupported ? 
Action : NoBF16Action); }; @@ -862,6 +845,15 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, AddPromotedToType(Op, MVT::bf16, MVT::f32); } + // On SM80, we select add/mul/sub as fma to avoid promotion to float + for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) { + for (const auto &VT : {MVT::bf16, MVT::v2bf16}) { + if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) { + setOperationAction(Op, VT, Custom); + } + } + } + // f16/f16x2 neg was introduced in PTX 60, SM_53. const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 && STI.getPTXVersion() >= 60 && @@ -977,7 +969,26 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::CopyToReg, MVT::i128, Custom); setOperationAction(ISD::CopyFromReg, MVT::i128, Custom); - // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate. + // FEXP2 support: + // - f32 + // - f16/f16x2 (sm_70+, PTX 7.0+) + // - bf16/bf16x2 (sm_90+, PTX 7.8+) + // When f16/bf16 types aren't supported, they are promoted/expanded to f32. + setOperationAction(ISD::FEXP2, MVT::f32, Legal); + setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote); + setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand); + setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote); + setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand); + + // FLOG2 supports f32 only + // f16/bf16 types aren't supported, but they are promoted/expanded to f32. + if (UseApproxLog2F32) { + setOperationAction(ISD::FLOG2, MVT::f32, Legal); + setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32); + setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32); + setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16}, Expand); + } + // No FPOW or FREM in PTX. 
// Now deduce the information based on the above mentioned @@ -2498,6 +2509,27 @@ SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op, return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA); } +static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + EVT NVT = MVT::f32; + if (VT.isVector()) { + NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount()); + } + SDLoc DL(N); + SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT); + SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT); + SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags()); + return DAG.getFPExtendOrRound(Res, DL, VT); +} + +SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op, + SelectionDAG &DAG) const { + if (useF32FTZ(DAG.getMachineFunction())) { + return PromoteBinOpToF32(Op.getNode(), DAG); + } + return Op; +} + SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78); @@ -2689,6 +2721,12 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { return LowerSTACKSAVE(Op, DAG); case ISD::CopyToReg: return LowerCopyToReg_128(Op, DAG); + case ISD::FADD: + case ISD::FSUB: + case ISD::FMUL: + // Used only for bf16 on SM80, where we select fma for non-ftz operation + return PromoteBinOpIfF32FTZ(Op, DAG); + default: llvm_unreachable("Custom lowering not defined for operation"); } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index 51265ed2179d8..5adf69d621552 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -278,6 +278,8 @@ class NVPTXTargetLowering : public TargetLowering { SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const; + SDValue PromoteBinOpIfF32FTZ(SDValue Op, SelectionDAG &DAG) const; + SDValue 
LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 4cf36c8b5b633..a076fde8ee767 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -569,6 +569,18 @@ multiclass F2_Support_Half { } +// Variant where only .ftz.bf16 is supported. +multiclass F2_Support_Half_BF { + def bf16_ftz : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a), + OpcStr # ".ftz.bf16 \t$dst, $a;", + [(set bf16:$dst, (OpNode bf16:$a))]>, + Requires<[hasSM<90>, hasPTX<78>]>; + def bf16x2_ftz: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a), + OpcStr # ".ftz.bf16x2 \t$dst, $a;", + [(set v2bf16:$dst, (OpNode v2bf16:$a))]>, + Requires<[hasSM<90>, hasPTX<78>]>; +} + //===----------------------------------------------------------------------===// // NVPTX Instructions. //===----------------------------------------------------------------------===// @@ -1183,6 +1195,8 @@ defm FNEG_H: F2_Support_Half<"neg", fneg>; defm FSQRT : F2<"sqrt.rn", fsqrt>; +defm FEXP2_H: F2_Support_Half_BF<"ex2.approx", fexp2>; + // // F16 NEG // diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index 2d6ee2e28b4df..48d75728aef8e 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -1304,11 +1304,21 @@ def INT_NVVM_EX2_APPROX_F : F_MATH_1<"ex2.approx.f32 \t$dst, $src0;", Float32Regs, Float32Regs, int_nvvm_ex2_approx_f>; def INT_NVVM_EX2_APPROX_D : F_MATH_1<"ex2.approx.f64 \t$dst, $src0;", Float64Regs, Float64Regs, int_nvvm_ex2_approx_d>; + def INT_NVVM_EX2_APPROX_F16 : F_MATH_1<"ex2.approx.f16 \t$dst, $src0;", Int16Regs, Int16Regs, int_nvvm_ex2_approx_f16, [hasPTX<70>, hasSM<75>]>; def INT_NVVM_EX2_APPROX_F16X2 : F_MATH_1<"ex2.approx.f16x2 \t$dst, $src0;", Int32Regs, Int32Regs, int_nvvm_ex2_approx_f16x2, [hasPTX<70>, 
hasSM<75>]>; +def : Pat<(fexp2 f32:$a), + (INT_NVVM_EX2_APPROX_FTZ_F $a)>, Requires<[doF32FTZ]>; +def : Pat<(fexp2 f32:$a), + (INT_NVVM_EX2_APPROX_F $a)>, Requires<[doNoF32FTZ]>; +def : Pat<(fexp2 f16:$a), + (INT_NVVM_EX2_APPROX_F16 $a)>, Requires<[useFP16Math]>; +def : Pat<(fexp2 v2f16:$a), + (INT_NVVM_EX2_APPROX_F16X2 $a)>, Requires<[useFP16Math]>; + def INT_NVVM_LG2_APPROX_FTZ_F : F_MATH_1<"lg2.approx.ftz.f32 \t$dst, $src0;", Float32Regs, Float32Regs, int_nvvm_lg2_approx_ftz_f>; def INT_NVVM_LG2_APPROX_F : F_MATH_1<"lg2.approx.f32 \t$dst, $src0;", @@ -1316,6 +1326,11 @@ def INT_NVVM_LG2_APPROX_F : F_MATH_1<"lg2.approx.f32 \t$dst, $src0;", def INT_NVVM_LG2_APPROX_D : F_MATH_1<"lg2.approx.f64 \t$dst, $src0;", Float64Regs, Float64Regs, int_nvvm_lg2_approx_d>; +def : Pat<(flog2 f32:$a), (INT_NVVM_LG2_APPROX_FTZ_F $a)>, + Requires<[doF32FTZ]>; +def : Pat<(flog2 f32:$a), (INT_NVVM_LG2_APPROX_F $a)>, + Requires<[doNoF32FTZ]>; + // // Sin Cos // diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp index 74ce6a9fc4ac0..e5d680c19d921 100644 --- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp @@ -70,6 +70,38 @@ bool NVPTXSubtarget::allowFP16Math() const { return hasFP16Math() && NoF16Math == false; } +bool NVPTXSubtarget::hasNativeBF16Support(int Opcode) const { + if (!hasBF16Math()) + return false; + + switch (Opcode) { + // Several BF16 instructions are available on sm_90 only. + case ISD::FADD: + case ISD::FMUL: + case ISD::FSUB: + case ISD::SELECT: + case ISD::SELECT_CC: + case ISD::SETCC: + case ISD::FEXP2: + case ISD::FCEIL: + case ISD::FFLOOR: + case ISD::FNEARBYINT: + case ISD::FRINT: + case ISD::FROUNDEVEN: + case ISD::FTRUNC: + return getSmVersion() >= 90 && getPTXVersion() >= 78; + // Several BF16 instructions are available on sm_80 only. 
+ case ISD::FMINNUM: + case ISD::FMAXNUM: + case ISD::FMAXNUM_IEEE: + case ISD::FMINNUM_IEEE: + case ISD::FMAXIMUM: + case ISD::FMINIMUM: + return getSmVersion() >= 80 && getPTXVersion() >= 70; + } + return true; +} + void NVPTXSubtarget::failIfClustersUnsupported( std::string const &FailureMessage) const { if (hasClusters()) diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h index bbc1cca7c12d8..3b5c28e357e0c 100644 --- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h @@ -118,6 +118,8 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo { } bool hasTargetName() const { return !TargetName.empty(); } + bool hasNativeBF16Support(int Opcode) const; + // Get maximum value of required alignments among the supported data types. // From the PTX ISA doc, section 8.2.3: // The memory consistency model relates operations executed on memory diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index 911cea27a48ac..333c8060f37f4 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -502,7 +502,7 @@ getPushOrLibCallsSavedInfo(const MachineFunction &MF, void RISCVFrameLowering::allocateAndProbeStackForRVV( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount, - MachineInstr::MIFlag Flag, bool EmitCFI) const { + MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const { assert(Amount != 0 && "Did not need to adjust stack pointer for RVV."); // Emit a variable-length allocation probing loop. @@ -545,6 +545,15 @@ void RISCVFrameLowering::allocateAndProbeStackForRVV( .addReg(SPReg) .addReg(TargetReg) .setMIFlag(Flag); + + // If we have a dynamic allocation later we need to probe any residuals. + if (DynAllocation) { + BuildMI(MBB, MBBI, DL, TII->get(STI.is64Bit() ? 
RISCV::SD : RISCV::SW)) + .addReg(RISCV::X0) + .addReg(SPReg) + .addImm(0) + .setMIFlags(MachineInstr::FrameSetup); + } } static void appendScalableVectorExpression(const TargetRegisterInfo &TRI, @@ -634,11 +643,12 @@ void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineFunction &MF, uint64_t Offset, uint64_t RealStackSize, bool EmitCFI, - bool NeedProbe, - uint64_t ProbeSize) const { + bool NeedProbe, uint64_t ProbeSize, + bool DynAllocation) const { DebugLoc DL; const RISCVRegisterInfo *RI = STI.getRegisterInfo(); const RISCVInstrInfo *TII = STI.getInstrInfo(); + bool IsRV64 = STI.is64Bit(); // Simply allocate the stack if it's not big enough to require a probe. if (!NeedProbe || Offset <= ProbeSize) { @@ -654,13 +664,21 @@ void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB, .setMIFlag(MachineInstr::FrameSetup); } + if (NeedProbe && DynAllocation) { + // s[d|w] zero, 0(sp) + BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW)) + .addReg(RISCV::X0) + .addReg(SPReg) + .addImm(0) + .setMIFlags(MachineInstr::FrameSetup); + } + return; } // Unroll the probe loop depending on the number of iterations. if (Offset < ProbeSize * 5) { uint64_t CurrentOffset = 0; - bool IsRV64 = STI.is64Bit(); while (CurrentOffset + ProbeSize <= Offset) { RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-ProbeSize), MachineInstr::FrameSetup, @@ -696,6 +714,15 @@ void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB, .addCFIIndex(CFIIndex) .setMIFlag(MachineInstr::FrameSetup); } + + if (DynAllocation) { + // s[d|w] zero, 0(sp) + BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? 
RISCV::SD : RISCV::SW)) + .addReg(RISCV::X0) + .addReg(SPReg) + .addImm(0) + .setMIFlags(MachineInstr::FrameSetup); + } } return; @@ -736,9 +763,18 @@ void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB, .setMIFlags(MachineInstr::FrameSetup); } - if (Residual) + if (Residual) { RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Residual), MachineInstr::FrameSetup, getStackAlign()); + if (DynAllocation) { + // s[d|w] zero, 0(sp) + BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW)) + .addReg(RISCV::X0) + .addReg(SPReg) + .addImm(0) + .setMIFlags(MachineInstr::FrameSetup); + } + } if (EmitCFI) { // Emit ".cfi_def_cfa_offset Offset" @@ -869,9 +905,11 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF, const RISCVTargetLowering *TLI = Subtarget.getTargetLowering(); bool NeedProbe = TLI->hasInlineStackProbe(MF); uint64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign()); + bool DynAllocation = + MF.getInfo()->hasDynamicAllocation(); if (StackSize != 0) allocateStack(MBB, MBBI, MF, StackSize, RealStackSize, /*EmitCFI=*/true, - NeedProbe, ProbeSize); + NeedProbe, ProbeSize, DynAllocation); // The frame pointer is callee-saved, and code has been generated for us to // save it to the stack. We need to skip over the storing of callee-saved @@ -914,13 +952,14 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF, allocateStack(MBB, MBBI, MF, SecondSPAdjustAmount, getStackSizeWithRVVPadding(MF), !hasFP(MF), NeedProbe, - ProbeSize); + ProbeSize, DynAllocation); } if (RVVStackSize) { if (NeedProbe) { allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, RVVStackSize, - MachineInstr::FrameSetup, !hasFP(MF)); + MachineInstr::FrameSetup, !hasFP(MF), + DynAllocation); } else { // We must keep the stack pointer aligned through any intermediate // updates. 
@@ -2148,6 +2187,7 @@ static void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB, } ExitMBB->splice(ExitMBB->end(), &MBB, std::next(MBBI), MBB.end()); + ExitMBB->transferSuccessorsAndUpdatePHIs(&MBB); LoopTestMBB->addSuccessor(ExitMBB); LoopTestMBB->addSuccessor(LoopTestMBB); diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h index 26d2a26d681c3..d013755ce58a0 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h @@ -81,7 +81,7 @@ class RISCVFrameLowering : public TargetFrameLowering { void allocateStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineFunction &MF, uint64_t Offset, uint64_t RealStackSize, bool EmitCFI, bool NeedProbe, - uint64_t ProbeSize) const; + uint64_t ProbeSize, bool DynAllocation) const; protected: const RISCVSubtarget &STI; @@ -110,8 +110,8 @@ class RISCVFrameLowering : public TargetFrameLowering { void allocateAndProbeStackForRVV(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount, - MachineInstr::MIFlag Flag, - bool EmitCFI) const; + MachineInstr::MIFlag Flag, bool EmitCFI, + bool DynAllocation) const; }; } // namespace llvm #endif diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp index a71e6bbb93638..39c0af7985971 100644 --- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp @@ -211,10 +211,6 @@ bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L, assert(Phi->getIncomingValue(IncrementingBlock) == Inc && "Expected one operand of phi to be Inc"); - // Only proceed if the step is loop invariant. - if (!L->isLoopInvariant(Step)) - return false; - // Step should be a splat. 
Step = getSplatValue(Step); if (!Step) @@ -298,6 +294,7 @@ bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L, BasePtr->getIncomingBlock(StartBlock)->getTerminator()); Builder.SetCurrentDebugLocation(DebugLoc()); + // TODO: Share this switch with matchStridedStart? switch (BO->getOpcode()) { default: llvm_unreachable("Unexpected opcode!"); @@ -310,18 +307,32 @@ bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L, } case Instruction::Mul: { Start = Builder.CreateMul(Start, SplatOp, "start"); - Step = Builder.CreateMul(Step, SplatOp, "step"); Stride = Builder.CreateMul(Stride, SplatOp, "stride"); break; } case Instruction::Shl: { Start = Builder.CreateShl(Start, SplatOp, "start"); - Step = Builder.CreateShl(Step, SplatOp, "step"); Stride = Builder.CreateShl(Stride, SplatOp, "stride"); break; } } + // If the Step was defined inside the loop, adjust it before its definition + // instead of in the preheader. + if (auto *StepI = dyn_cast(Step); StepI && L->contains(StepI)) + Builder.SetInsertPoint(*StepI->getInsertionPointAfterDef()); + + switch (BO->getOpcode()) { + default: + break; + case Instruction::Mul: + Step = Builder.CreateMul(Step, SplatOp, "step"); + break; + case Instruction::Shl: + Step = Builder.CreateShl(Step, SplatOp, "step"); + break; + } + Inc->setOperand(StepIndex, Step); BasePtr->setIncomingValue(StartBlock, Start); return true; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index b25cb128bce9f..de100c683a94f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -280,7 +280,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, MVT::i1, Promote); // TODO: add all necessary setOperationAction calls. 
- setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Custom); setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::BR_CC, XLenVT, Expand); @@ -1530,7 +1530,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL, ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, - ISD::INSERT_VECTOR_ELT, ISD::ABS, ISD::CTPOP}); + ISD::INSERT_VECTOR_ELT, ISD::ABS, ISD::CTPOP, + ISD::VECTOR_SHUFFLE}); if (Subtarget.hasVendorXTHeadMemPair()) setTargetDAGCombine({ISD::LOAD, ISD::STORE}); if (Subtarget.useRVVForFixedLengthVectors()) @@ -7727,6 +7728,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, return emitFlushICache(DAG, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2), Flags, DL); } + case ISD::DYNAMIC_STACKALLOC: + return lowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::INIT_TRAMPOLINE: return lowerINIT_TRAMPOLINE(Op, DAG); case ISD::ADJUST_TRAMPOLINE: @@ -16229,6 +16232,68 @@ static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG, return DAG.getNode(RISCVISD::BREV8, DL, VT, Src.getOperand(0)); } +static SDValue performVP_REVERSECombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + // Fold: + // vp.reverse(vp.load(ADDR, MASK)) -> vp.strided.load(ADDR, -1, MASK) + + // Check if its first operand is a vp.load. + auto *VPLoad = dyn_cast(N->getOperand(0)); + if (!VPLoad) + return SDValue(); + + EVT LoadVT = VPLoad->getValueType(0); + // We do not have a strided_load version for masks, and the evl of vp.reverse + // and vp.load should always be the same. + if (!LoadVT.getVectorElementType().isByteSized() || + N->getOperand(2) != VPLoad->getVectorLength() || + !N->getOperand(0).hasOneUse()) + return SDValue(); + + // Check if the mask of outer vp.reverse are all 1's. 
+ if (!isOneOrOneSplat(N->getOperand(1))) + return SDValue(); + + SDValue LoadMask = VPLoad->getMask(); + // If Mask is all ones, then load is unmasked and can be reversed. + if (!isOneOrOneSplat(LoadMask)) { + // If the mask is not all ones, we can reverse the load if the mask was also + // reversed by an unmasked vp.reverse with the same EVL. + if (LoadMask.getOpcode() != ISD::EXPERIMENTAL_VP_REVERSE || + !isOneOrOneSplat(LoadMask.getOperand(1)) || + LoadMask.getOperand(2) != VPLoad->getVectorLength()) + return SDValue(); + LoadMask = LoadMask.getOperand(0); + } + + // Base = LoadAddr + (NumElem - 1) * ElemWidthByte + SDLoc DL(N); + MVT XLenVT = Subtarget.getXLenVT(); + SDValue NumElem = VPLoad->getVectorLength(); + uint64_t ElemWidthByte = VPLoad->getValueType(0).getScalarSizeInBits() / 8; + + SDValue Temp1 = DAG.getNode(ISD::SUB, DL, XLenVT, NumElem, + DAG.getConstant(1, DL, XLenVT)); + SDValue Temp2 = DAG.getNode(ISD::MUL, DL, XLenVT, Temp1, + DAG.getConstant(ElemWidthByte, DL, XLenVT)); + SDValue Base = DAG.getNode(ISD::ADD, DL, XLenVT, VPLoad->getBasePtr(), Temp2); + SDValue Stride = DAG.getConstant(-ElemWidthByte, DL, XLenVT); + + MachineFunction &MF = DAG.getMachineFunction(); + MachinePointerInfo PtrInfo(VPLoad->getAddressSpace()); + MachineMemOperand *MMO = MF.getMachineMemOperand( + PtrInfo, VPLoad->getMemOperand()->getFlags(), + LocationSize::beforeOrAfterPointer(), VPLoad->getAlign()); + + SDValue Ret = DAG.getStridedLoadVP( + LoadVT, DL, VPLoad->getChain(), Base, Stride, LoadMask, + VPLoad->getVectorLength(), MMO, VPLoad->isExpandingLoad()); + + DAG.ReplaceAllUsesOfValueWith(SDValue(VPLoad, 1), Ret.getValue(1)); + + return Ret; +} + // Convert from one FMA opcode to another based on whether we are negating the // multiply result and/or the accumulator. // NOTE: Only supports RVV operations with VL. 
@@ -17012,6 +17077,37 @@ static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG, return DAG.getBitcast(VT.getSimpleVT(), StridedLoad); } +/// Custom legalize or to . This runs +/// during the combine phase before type legalization, and relies on +/// DAGCombine not undoing the transform if isShuffleMaskLegal returns false +/// for the source mask. +static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget, + const RISCVTargetLowering &TLI) { + SDLoc DL(N); + EVT VT = N->getValueType(0); + const unsigned ElementSize = VT.getScalarSizeInBits(); + SDValue V1 = N->getOperand(0); + SDValue V2 = N->getOperand(1); + ArrayRef Mask = cast(N)->getMask(); + + if (TLI.isTypeLegal(VT) || ElementSize <= Subtarget.getELen() || + !isPowerOf2_64(ElementSize) || VT.getVectorNumElements() % 2 != 0 || + VT.isFloatingPoint() || TLI.isShuffleMaskLegal(Mask, VT)) + return SDValue(); + + SmallVector NewMask; + narrowShuffleMaskElts(2, Mask, NewMask); + + LLVMContext &C = *DAG.getContext(); + EVT NewEltVT = EVT::getIntegerVT(C, ElementSize / 2); + EVT NewVT = EVT::getVectorVT(C, NewEltVT, VT.getVectorNumElements() * 2); + SDValue Res = DAG.getVectorShuffle(NewVT, DL, DAG.getBitcast(NewVT, V1), + DAG.getBitcast(NewVT, V2), NewMask); + return DAG.getBitcast(VT, Res); +} + + static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { @@ -18241,6 +18337,10 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, *this)) return V; break; + case ISD::VECTOR_SHUFFLE: + if (SDValue V = performVECTOR_SHUFFLECombine(N, DAG, Subtarget, *this)) + return V; + break; case ISD::INSERT_VECTOR_ELT: if (SDValue V = performINSERT_VECTOR_ELTCombine(N, DAG, Subtarget, *this)) return V; @@ -18372,6 +18472,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, } } } + case ISD::EXPERIMENTAL_VP_REVERSE: + return 
performVP_REVERSECombine(N, DAG, Subtarget); case ISD::BITCAST: { assert(Subtarget.useRVVForFixedLengthVectors()); SDValue N0 = N->getOperand(0); @@ -19641,6 +19743,8 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case RISCV::PseudoFROUND_D_INX: case RISCV::PseudoFROUND_D_IN32X: return emitFROUND(MI, BB, Subtarget); + case RISCV::PROBED_STACKALLOC_DYN: + return emitDynamicProbedAlloc(MI, BB); case TargetOpcode::STATEPOINT: // STATEPOINT is a pseudo instruction which has no implicit defs/uses // while jal call instruction (where statepoint will be lowered at the end) @@ -20873,6 +20977,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(SF_VC_V_IVW_SE) NODE_NAME_CASE(SF_VC_V_VVW_SE) NODE_NAME_CASE(SF_VC_V_FVW_SE) + NODE_NAME_CASE(PROBED_ALLOCA) } // clang-format on return nullptr; @@ -22602,3 +22707,95 @@ unsigned RISCVTargetLowering::getStackProbeSize(const MachineFunction &MF, StackProbeSize = alignDown(StackProbeSize, StackAlign.value()); return StackProbeSize ? StackProbeSize : StackAlign.value(); } + +SDValue RISCVTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + if (!hasInlineStackProbe(MF)) + return SDValue(); + + MVT XLenVT = Subtarget.getXLenVT(); + // Get the inputs. + SDValue Chain = Op.getOperand(0); + SDValue Size = Op.getOperand(1); + + MaybeAlign Align = + cast(Op.getOperand(2))->getMaybeAlignValue(); + SDLoc dl(Op); + EVT VT = Op.getValueType(); + + // Construct the new SP value in a GPR. + SDValue SP = DAG.getCopyFromReg(Chain, dl, RISCV::X2, XLenVT); + Chain = SP.getValue(1); + SP = DAG.getNode(ISD::SUB, dl, XLenVT, SP, Size); + if (Align) + SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), + DAG.getSignedConstant(-(uint64_t)Align->value(), dl, VT)); + + // Set the real SP to the new value with a probing loop. 
+ Chain = DAG.getNode(RISCVISD::PROBED_ALLOCA, dl, MVT::Other, Chain, SP); + return DAG.getMergeValues({SP, Chain}, dl); +} + +MachineBasicBlock * +RISCVTargetLowering::emitDynamicProbedAlloc(MachineInstr &MI, + MachineBasicBlock *MBB) const { + MachineFunction &MF = *MBB->getParent(); + MachineBasicBlock::iterator MBBI = MI.getIterator(); + DebugLoc DL = MBB->findDebugLoc(MBBI); + Register TargetReg = MI.getOperand(1).getReg(); + + const RISCVInstrInfo *TII = Subtarget.getInstrInfo(); + bool IsRV64 = Subtarget.is64Bit(); + Align StackAlign = Subtarget.getFrameLowering()->getStackAlign(); + const RISCVTargetLowering *TLI = Subtarget.getTargetLowering(); + uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign); + + MachineFunction::iterator MBBInsertPoint = std::next(MBB->getIterator()); + MachineBasicBlock *LoopTestMBB = + MF.CreateMachineBasicBlock(MBB->getBasicBlock()); + MF.insert(MBBInsertPoint, LoopTestMBB); + MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); + MF.insert(MBBInsertPoint, ExitMBB); + Register SPReg = RISCV::X2; + Register ScratchReg = + MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass); + + // ScratchReg = ProbeSize + TII->movImm(*MBB, MBBI, DL, ScratchReg, ProbeSize, MachineInstr::NoFlags); + + // LoopTest: + // SUB SP, SP, ProbeSize + BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB), SPReg) + .addReg(SPReg) + .addReg(ScratchReg); + + // s[d|w] zero, 0(sp) + BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, + TII->get(IsRV64 ? RISCV::SD : RISCV::SW)) + .addReg(RISCV::X0) + .addReg(SPReg) + .addImm(0); + + // BLT TargetReg, SP, LoopTest + BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BLT)) + .addReg(TargetReg) + .addReg(SPReg) + .addMBB(LoopTestMBB); + + // Adjust with: MV SP, TargetReg. 
+ BuildMI(*ExitMBB, ExitMBB->end(), DL, TII->get(RISCV::ADDI), SPReg) + .addReg(TargetReg) + .addImm(0); + + ExitMBB->splice(ExitMBB->end(), MBB, std::next(MBBI), MBB->end()); + ExitMBB->transferSuccessorsAndUpdatePHIs(MBB); + + LoopTestMBB->addSuccessor(ExitMBB); + LoopTestMBB->addSuccessor(LoopTestMBB); + MBB->addSuccessor(LoopTestMBB); + + MI.eraseFromParent(); + MF.getInfo()->setDynamicAllocation(); + return ExitMBB->begin()->getParent(); +} diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index ea077c7d2d23a..892c1cd96ca61 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -461,6 +461,10 @@ enum NodeType : unsigned { SF_VC_V_VVW_SE, SF_VC_V_FVW_SE, + // To avoid stack clash, allocation is performed by block and each block is + // probed. + PROBED_ALLOCA, + // RISC-V vector tuple type version of INSERT_SUBVECTOR/EXTRACT_SUBVECTOR. TUPLE_INSERT, TUPLE_EXTRACT, @@ -922,6 +926,9 @@ class RISCVTargetLowering : public TargetLowering { unsigned getStackProbeSize(const MachineFunction &MF, Align StackAlign) const; + MachineBasicBlock *emitDynamicProbedAlloc(MachineInstr &MI, + MachineBasicBlock *MBB) const; + private: void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo, const SmallVectorImpl &Ins, bool IsRet, @@ -1015,6 +1022,8 @@ class RISCVTargetLowering : public TargetLowering { SDValue lowerVectorStrictFSetcc(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const; SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index ee86f53a5c8a8..bb5bb6352c32a 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -100,6 +100,11 @@ def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL", 
SDTCisSameAs<0, 3>, SDTCisInt<0>]>>; +def riscv_probed_alloca : SDNode<"RISCVISD::PROBED_ALLOCA", + SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, + SDTCisVT<0, i32>]>, + [SDNPHasChain, SDNPMayStore]>; + //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. //===----------------------------------------------------------------------===// @@ -1428,6 +1433,11 @@ def PROBED_STACKALLOC_RVV : Pseudo<(outs GPR:$sp), (ins GPR:$scratch), []>, Sched<[]>; +let usesCustomInserter = 1 in +def PROBED_STACKALLOC_DYN : Pseudo<(outs GPR:$rd), + (ins GPR:$scratch), + [(set GPR:$rd, (riscv_probed_alloca GPR:$scratch))]>, + Sched<[]>; } /// HI and ADD_LO address nodes. diff --git a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h index 8909f2f3bd317..27a13bb7cace1 100644 --- a/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h +++ b/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h @@ -78,6 +78,9 @@ class RISCVMachineFunctionInfo : public MachineFunctionInfo { int64_t StackProbeSize = 0; + /// Does it probe the stack for a dynamic allocation? 
+ bool HasDynamicAllocation = false; + public: RISCVMachineFunctionInfo(const Function &F, const RISCVSubtarget *STI); @@ -159,6 +162,9 @@ class RISCVMachineFunctionInfo : public MachineFunctionInfo { bool isVectorCall() const { return IsVectorCall; } void setIsVectorCall() { IsVectorCall = true; } + + bool hasDynamicAllocation() const { return HasDynamicAllocation; } + void setDynamicAllocation() { HasDynamicAllocation = true; } }; } // end namespace llvm diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp index 8156eaff8a04c..54ca8ccd8d9e9 100644 --- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp +++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp @@ -206,6 +206,7 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) { MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm(); const bool HasPassthru = RISCVII::isFirstDefTiedToFirstUse(MI.getDesc()); + const bool IsTied = RISCVII::isTiedPseudo(MI.getDesc().TSFlags); // We bail out early for instructions that have passthru with non NoRegister, // which means they are using TU policy. We are not interested in these @@ -568,7 +569,8 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) { case RISCV::VFWADD_WV: case RISCV::VFWSUB_WF: case RISCV::VFWSUB_WV: { - bool IsOp1 = HasPassthru ? MO.getOperandNo() == 2 : MO.getOperandNo() == 1; + bool IsOp1 = (HasPassthru && !IsTied) ? MO.getOperandNo() == 2 + : MO.getOperandNo() == 1; bool TwoTimes = IsMODef || IsOp1; return TwoTimes ? MILog2SEW + 1 : MILog2SEW; } @@ -610,6 +612,7 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) { case RISCV::VFNCVT_F_F_W: case RISCV::VFNCVT_ROD_F_F_W: case RISCV::VFNCVTBF16_F_F_W: { + assert(!IsTied); bool IsOp1 = HasPassthru ? MO.getOperandNo() == 2 : MO.getOperandNo() == 1; bool TwoTimes = IsOp1; return TwoTimes ? 
MILog2SEW + 1 : MILog2SEW; diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index ad9f136708639..4c861f71b0889 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -215,6 +215,9 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectDot4AddPackedExpansion(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; + bool selectWaveReduceSum(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; + void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I, int OpIdx) const; void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I, @@ -2081,6 +2084,31 @@ bool SPIRVInstructionSelector::selectWaveActiveCountBits( return Result; } +bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + assert(I.getNumOperands() == 3); + assert(I.getOperand(2).isReg()); + MachineBasicBlock &BB = *I.getParent(); + Register InputRegister = I.getOperand(2).getReg(); + SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister); + + if (!InputType) + report_fatal_error("Input Type could not be determined."); + + SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII); + // Retreive the operation to use based on input type + bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat); + auto Opcode = + IsFloatTy ? 
SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd; + return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII)) + .addImm(SPIRV::GroupOperation::Reduce) + .addUse(I.getOperand(2).getReg()); +} + bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const { @@ -3010,6 +3038,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny); case Intrinsic::spv_wave_is_first_lane: return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect); + case Intrinsic::spv_wave_reduce_sum: + return selectWaveReduceSum(ResVReg, ResType, I); case Intrinsic::spv_wave_readlane: return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformShuffle); diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index 020c11a3af4e1..4fa2dca5a78c0 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -1352,9 +1352,7 @@ void addInstrRequirements(const MachineInstr &MI, case SPIRV::GroupOperation::Reduce: case SPIRV::GroupOperation::InclusiveScan: case SPIRV::GroupOperation::ExclusiveScan: - Reqs.addCapability(SPIRV::Capability::Kernel); Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic); - Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot); break; case SPIRV::GroupOperation::ClusteredReduce: Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered); diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp index 9d1b7b8b0e7cd..979b44b22338e 100644 --- a/llvm/lib/TargetParser/Host.cpp +++ b/llvm/lib/TargetParser/Host.cpp @@ -1509,6 +1509,18 @@ StringRef sys::getHostCPUName() { return getCPUNameFromS390Model(Id, HaveVectorSupport); } #elif 
defined(__APPLE__) && (defined(__arm__) || defined(__aarch64__)) +// Copied from in the macOS SDK. +// +// Also available here, though usually not as up-to-date: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.41.3/osfmk/mach/machine.h#L403-L452. +#define CPUFAMILY_UNKNOWN 0 +#define CPUFAMILY_ARM_9 0xe73283ae +#define CPUFAMILY_ARM_11 0x8ff620d8 +#define CPUFAMILY_ARM_XSCALE 0x53b005f5 +#define CPUFAMILY_ARM_12 0xbd1b0ae9 +#define CPUFAMILY_ARM_13 0x0cc90e64 +#define CPUFAMILY_ARM_14 0x96077ef1 +#define CPUFAMILY_ARM_15 0xa8511bca #define CPUFAMILY_ARM_SWIFT 0x1e2d6381 #define CPUFAMILY_ARM_CYCLONE 0x37a09642 #define CPUFAMILY_ARM_TYPHOON 0x2c91a47e @@ -1520,13 +1532,46 @@ StringRef sys::getHostCPUName() { #define CPUFAMILY_ARM_FIRESTORM_ICESTORM 0x1b588bb3 #define CPUFAMILY_ARM_BLIZZARD_AVALANCHE 0xda33d83d #define CPUFAMILY_ARM_EVEREST_SAWTOOTH 0x8765edea +#define CPUFAMILY_ARM_IBIZA 0xfa33415e +#define CPUFAMILY_ARM_PALMA 0x72015832 +#define CPUFAMILY_ARM_COLL 0x2876f5b5 +#define CPUFAMILY_ARM_LOBOS 0x5f4dea93 +#define CPUFAMILY_ARM_DONAN 0x6f5129ac +#define CPUFAMILY_ARM_BRAVA 0x17d5b93a +#define CPUFAMILY_ARM_TAHITI 0x75d4acb9 +#define CPUFAMILY_ARM_TUPAI 0x204526d0 StringRef sys::getHostCPUName() { uint32_t Family; size_t Length = sizeof(Family); sysctlbyname("hw.cpufamily", &Family, &Length, NULL, 0); + // This is found by testing on actual hardware, and by looking at: + // https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.41.3/osfmk/arm/cpuid.c#L109-L231. + // + // Another great resource is + // https://github.com/AsahiLinux/docs/wiki/Codenames. + // + // NOTE: We choose to return `apple-mX` instead of `apple-aX`, since the M1, + // M2, M3 etc. aliases are more widely known to users than A14, A15, A16 etc. + // (and this code is basically only used on host macOS anyways). 
switch (Family) { + case CPUFAMILY_UNKNOWN: + return "generic"; + case CPUFAMILY_ARM_9: + return "arm920t"; // or arm926ej-s + case CPUFAMILY_ARM_11: + return "arm1136jf-s"; + case CPUFAMILY_ARM_XSCALE: + return "xscale"; + case CPUFAMILY_ARM_12: // Seems unused by the kernel + return "generic"; + case CPUFAMILY_ARM_13: + return "cortex-a8"; + case CPUFAMILY_ARM_14: + return "cortex-a9"; + case CPUFAMILY_ARM_15: + return "cortex-a7"; case CPUFAMILY_ARM_SWIFT: return "swift"; case CPUFAMILY_ARM_CYCLONE: @@ -1543,15 +1588,25 @@ StringRef sys::getHostCPUName() { return "apple-a12"; case CPUFAMILY_ARM_LIGHTNING_THUNDER: return "apple-a13"; - case CPUFAMILY_ARM_FIRESTORM_ICESTORM: + case CPUFAMILY_ARM_FIRESTORM_ICESTORM: // A14 / M1 return "apple-m1"; - case CPUFAMILY_ARM_BLIZZARD_AVALANCHE: + case CPUFAMILY_ARM_BLIZZARD_AVALANCHE: // A15 / M2 return "apple-m2"; - case CPUFAMILY_ARM_EVEREST_SAWTOOTH: + case CPUFAMILY_ARM_EVEREST_SAWTOOTH: // A16 + case CPUFAMILY_ARM_IBIZA: // M3 + case CPUFAMILY_ARM_PALMA: // M3 Max + case CPUFAMILY_ARM_LOBOS: // M3 Pro return "apple-m3"; + case CPUFAMILY_ARM_COLL: // A17 Pro + return "apple-a17"; + case CPUFAMILY_ARM_DONAN: // M4 + case CPUFAMILY_ARM_BRAVA: // M4 Max + case CPUFAMILY_ARM_TAHITI: // A18 Pro + case CPUFAMILY_ARM_TUPAI: // A18 + return "apple-m4"; default: // Default to the newest CPU we know about. - return "apple-m3"; + return "apple-m4"; } } #elif defined(_AIX) diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp index d540e6ca86154..c440638884322 100644 --- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -259,8 +259,7 @@ doPromotion(Function *F, FunctionAnalysisManager &FAM, // all promoted loads. 
if (LI->hasMetadata(LLVMContext::MD_noundef)) LI->copyMetadata(*Pair.second.MustExecInstr, - {LLVMContext::MD_range, LLVMContext::MD_nonnull, - LLVMContext::MD_align}); + Metadata::PoisonGeneratingIDs); } Args.push_back(LI); ArgAttrVec.push_back(AttributeSet()); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index f7d17b1aa3865..ca8a20b4b7312 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -1683,12 +1683,9 @@ static Instruction *reassociateFCmps(BinaryOperator &BO, // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z - Value *NewFCmp = Builder.CreateFCmp(NanPred, X, Y); - if (auto *NewFCmpInst = dyn_cast(NewFCmp)) { - // Intersect FMF from the 2 source fcmps. - NewFCmpInst->copyIRFlags(Op0); - NewFCmpInst->andIRFlags(BO10); - } + // Intersect FMF from the 2 source fcmps. + Value *NewFCmp = + Builder.CreateFCmpFMF(NanPred, X, Y, FMFSource::intersect(Op0, BO10)); return BinaryOperator::Create(Opcode, NewFCmp, BO11); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 67b7ec3ae3c9e..842881156dc67 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -2522,13 +2522,12 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { default: llvm_unreachable("unexpected intrinsic ID"); } - Value *V = Builder.CreateBinaryIntrinsic( - IID, X, ConstantFP::get(Arg0->getType(), Res), II); // TODO: Conservatively intersecting FMF. If Res == C2, the transform // was a simplification (so Arg0 and its original flags could // propagate?) 
- if (auto *CI = dyn_cast(V)) - CI->andIRFlags(M); + Value *V = Builder.CreateBinaryIntrinsic( + IID, X, ConstantFP::get(Arg0->getType(), Res), + FMFSource::intersect(II, M)); return replaceInstUsesWith(*II, V); } } @@ -2623,13 +2622,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { } case Intrinsic::fmuladd: { // Try to simplify the underlying FMul. - if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), - II->getFastMathFlags(), - SQ.getWithInstruction(II))) { - auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); - FAdd->copyFastMathFlags(II); - return FAdd; - } + if (Value *V = + simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), + II->getFastMathFlags(), SQ.getWithInstruction(II))) + return BinaryOperator::CreateFAddFMF(V, II->getArgOperand(2), + II->getFastMathFlags()); [[fallthrough]]; } @@ -2656,11 +2653,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { // Try to simplify the underlying FMul. We can only apply simplifications // that do not require rounding. 
if (Value *V = simplifyFMAFMul(Src0, Src1, II->getFastMathFlags(), - SQ.getWithInstruction(II))) { - auto *FAdd = BinaryOperator::CreateFAdd(V, Src2); - FAdd->copyFastMathFlags(II); - return FAdd; - } + SQ.getWithInstruction(II))) + return BinaryOperator::CreateFAddFMF(V, Src2, II->getFastMathFlags()); // fma x, y, 0 -> fmul x, y // This is always valid for -0.0, but requires nsz for +0.0 as @@ -2754,8 +2748,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { m_CopySign(m_Value(Magnitude), m_Value(Sign)))) { // fabs (copysign x, y) -> (fabs x) CallInst *AbsSign = - Builder.CreateCall(II->getCalledFunction(), {Magnitude}); - AbsSign->copyFastMathFlags(II); + Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Magnitude, II); return replaceInstUsesWith(*II, AbsSign); } @@ -2862,16 +2855,15 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { Value *NewLdexp = nullptr; Value *Select = nullptr; if (match(SelectRHS, m_ZeroInt())) { - NewLdexp = Builder.CreateLdexp(Src, SelectLHS); + NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II); Select = Builder.CreateSelect(SelectCond, NewLdexp, Src); } else if (match(SelectLHS, m_ZeroInt())) { - NewLdexp = Builder.CreateLdexp(Src, SelectRHS); + NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II); Select = Builder.CreateSelect(SelectCond, Src, NewLdexp); } if (NewLdexp) { Select->takeName(II); - cast(NewLdexp)->copyFastMathFlags(II); return replaceInstUsesWith(*II, Select); } } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index f87a4a5847040..61f1c17592e96 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -1060,6 +1060,10 @@ Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) { V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); V2->setAlignment(Alignment); V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); + 
// It is safe to copy any metadata that does not trigger UB. Copy any + // poison-generating metadata. + V1->copyMetadata(LI, Metadata::PoisonGeneratingIDs); + V2->copyMetadata(LI, Metadata::PoisonGeneratingIDs); return SelectInst::Create(SI->getCondition(), V1, V2); } diff --git a/llvm/lib/Transforms/Vectorize/CMakeLists.txt b/llvm/lib/Transforms/Vectorize/CMakeLists.txt index d769d5100afd2..6a025652f92f8 100644 --- a/llvm/lib/Transforms/Vectorize/CMakeLists.txt +++ b/llvm/lib/Transforms/Vectorize/CMakeLists.txt @@ -4,6 +4,7 @@ add_llvm_component_library(LLVMVectorize LoopVectorizationLegality.cpp LoopVectorize.cpp SandboxVectorizer/DependencyGraph.cpp + SandboxVectorizer/InstrMaps.cpp SandboxVectorizer/Interval.cpp SandboxVectorizer/Legality.cpp SandboxVectorizer/Passes/BottomUpVec.cpp diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/InstrMaps.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/InstrMaps.cpp new file mode 100644 index 0000000000000..4df4829a04c41 --- /dev/null +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/InstrMaps.cpp @@ -0,0 +1,21 @@ +//===- InstructionMaps.cpp - Maps scalars to vectors and reverse ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" +#include "llvm/Support/Debug.h" + +namespace llvm::sandboxir { + +#ifndef NDEBUG +void InstrMaps::dump() const { + print(dbgs()); + dbgs() << "\n"; +} +#endif // NDEBUG + +} // namespace llvm::sandboxir diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp index 8c6deeb7df249..f8149c5bc6636 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Legality.cpp @@ -12,6 +12,7 @@ #include "llvm/SandboxIR/Utils.h" #include "llvm/SandboxIR/Value.h" #include "llvm/Support/Debug.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" #include "llvm/Transforms/Vectorize/SandboxVectorizer/VecUtils.h" namespace llvm::sandboxir { @@ -184,6 +185,22 @@ static void dumpBndl(ArrayRef Bndl) { } #endif // NDEBUG +CollectDescr +LegalityAnalysis::getHowToCollectValues(ArrayRef Bndl) const { + SmallVector Vec; + Vec.reserve(Bndl.size()); + for (auto [Lane, V] : enumerate(Bndl)) { + if (auto *VecOp = IMaps.getVectorForOrig(V)) { + // If there is a vector containing `V`, then get the lane it came from. + std::optional ExtractIdxOpt = IMaps.getOrigLane(VecOp, V); + Vec.emplace_back(VecOp, ExtractIdxOpt ? *ExtractIdxOpt : -1); + } else { + Vec.emplace_back(V); + } + } + return CollectDescr(std::move(Vec)); +} + const LegalityResult &LegalityAnalysis::canVectorize(ArrayRef Bndl, bool SkipScheduling) { // If Bndl contains values other than instructions, we need to Pack. 
@@ -193,11 +210,21 @@ const LegalityResult &LegalityAnalysis::canVectorize(ArrayRef Bndl, return createLegalityResult(ResultReason::NotInstructions); } + auto CollectDescrs = getHowToCollectValues(Bndl); + if (CollectDescrs.hasVectorInputs()) { + if (auto ValueShuffleOpt = CollectDescrs.getSingleInput()) { + auto [Vec, NeedsShuffle] = *ValueShuffleOpt; + if (!NeedsShuffle) + return createLegalityResult(Vec); + llvm_unreachable("TODO: Unimplemented"); + } else { + llvm_unreachable("TODO: Unimplemented"); + } + } + if (auto ReasonOpt = notVectorizableBasedOnOpcodesAndTypes(Bndl)) return createLegalityResult(*ReasonOpt); - // TODO: Check for existing vectors containing values in Bndl. - if (!SkipScheduling) { // TODO: Try to remove the IBndl vector. SmallVector IBndl; @@ -210,4 +237,9 @@ const LegalityResult &LegalityAnalysis::canVectorize(ArrayRef Bndl, return createLegalityResult(); } + +void LegalityAnalysis::clear() { + Sched.clear(); + IMaps.clear(); +} } // namespace llvm::sandboxir diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp index d44199609838d..6b2032be53560 100644 --- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp +++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Passes/BottomUpVec.cpp @@ -56,103 +56,114 @@ getInsertPointAfterInstrs(ArrayRef Instrs) { Value *BottomUpVec::createVectorInstr(ArrayRef Bndl, ArrayRef Operands) { - Change = true; - assert(all_of(Bndl, [](auto *V) { return isa(V); }) && - "Expect Instructions!"); - auto &Ctx = Bndl[0]->getContext(); + auto CreateVectorInstr = [](ArrayRef Bndl, + ArrayRef Operands) -> Value * { + assert(all_of(Bndl, [](auto *V) { return isa(V); }) && + "Expect Instructions!"); + auto &Ctx = Bndl[0]->getContext(); - Type *ScalarTy = VecUtils::getElementType(Utils::getExpectedType(Bndl[0])); - auto *VecTy = VecUtils::getWideType(ScalarTy, VecUtils::getNumLanes(Bndl)); + Type 
*ScalarTy = VecUtils::getElementType(Utils::getExpectedType(Bndl[0])); + auto *VecTy = VecUtils::getWideType(ScalarTy, VecUtils::getNumLanes(Bndl)); - BasicBlock::iterator WhereIt = getInsertPointAfterInstrs(Bndl); + BasicBlock::iterator WhereIt = getInsertPointAfterInstrs(Bndl); - auto Opcode = cast(Bndl[0])->getOpcode(); - switch (Opcode) { - case Instruction::Opcode::ZExt: - case Instruction::Opcode::SExt: - case Instruction::Opcode::FPToUI: - case Instruction::Opcode::FPToSI: - case Instruction::Opcode::FPExt: - case Instruction::Opcode::PtrToInt: - case Instruction::Opcode::IntToPtr: - case Instruction::Opcode::SIToFP: - case Instruction::Opcode::UIToFP: - case Instruction::Opcode::Trunc: - case Instruction::Opcode::FPTrunc: - case Instruction::Opcode::BitCast: { - assert(Operands.size() == 1u && "Casts are unary!"); - return CastInst::create(VecTy, Opcode, Operands[0], WhereIt, Ctx, "VCast"); - } - case Instruction::Opcode::FCmp: - case Instruction::Opcode::ICmp: { - auto Pred = cast(Bndl[0])->getPredicate(); - assert(all_of(drop_begin(Bndl), - [Pred](auto *SBV) { - return cast(SBV)->getPredicate() == Pred; - }) && - "Expected same predicate across bundle."); - return CmpInst::create(Pred, Operands[0], Operands[1], WhereIt, Ctx, - "VCmp"); - } - case Instruction::Opcode::Select: { - return SelectInst::create(Operands[0], Operands[1], Operands[2], WhereIt, - Ctx, "Vec"); - } - case Instruction::Opcode::FNeg: { - auto *UOp0 = cast(Bndl[0]); - auto OpC = UOp0->getOpcode(); - return UnaryOperator::createWithCopiedFlags(OpC, Operands[0], UOp0, WhereIt, - Ctx, "Vec"); - } - case Instruction::Opcode::Add: - case Instruction::Opcode::FAdd: - case Instruction::Opcode::Sub: - case Instruction::Opcode::FSub: - case Instruction::Opcode::Mul: - case Instruction::Opcode::FMul: - case Instruction::Opcode::UDiv: - case Instruction::Opcode::SDiv: - case Instruction::Opcode::FDiv: - case Instruction::Opcode::URem: - case Instruction::Opcode::SRem: - case 
Instruction::Opcode::FRem: - case Instruction::Opcode::Shl: - case Instruction::Opcode::LShr: - case Instruction::Opcode::AShr: - case Instruction::Opcode::And: - case Instruction::Opcode::Or: - case Instruction::Opcode::Xor: { - auto *BinOp0 = cast(Bndl[0]); - auto *LHS = Operands[0]; - auto *RHS = Operands[1]; - return BinaryOperator::createWithCopiedFlags(BinOp0->getOpcode(), LHS, RHS, - BinOp0, WhereIt, Ctx, "Vec"); - } - case Instruction::Opcode::Load: { - auto *Ld0 = cast(Bndl[0]); - Value *Ptr = Ld0->getPointerOperand(); - return LoadInst::create(VecTy, Ptr, Ld0->getAlign(), WhereIt, Ctx, "VecL"); - } - case Instruction::Opcode::Store: { - auto Align = cast(Bndl[0])->getAlign(); - Value *Val = Operands[0]; - Value *Ptr = Operands[1]; - return StoreInst::create(Val, Ptr, Align, WhereIt, Ctx); - } - case Instruction::Opcode::Br: - case Instruction::Opcode::Ret: - case Instruction::Opcode::PHI: - case Instruction::Opcode::AddrSpaceCast: - case Instruction::Opcode::Call: - case Instruction::Opcode::GetElementPtr: - llvm_unreachable("Unimplemented"); - break; - default: - llvm_unreachable("Unimplemented"); - break; + auto Opcode = cast(Bndl[0])->getOpcode(); + switch (Opcode) { + case Instruction::Opcode::ZExt: + case Instruction::Opcode::SExt: + case Instruction::Opcode::FPToUI: + case Instruction::Opcode::FPToSI: + case Instruction::Opcode::FPExt: + case Instruction::Opcode::PtrToInt: + case Instruction::Opcode::IntToPtr: + case Instruction::Opcode::SIToFP: + case Instruction::Opcode::UIToFP: + case Instruction::Opcode::Trunc: + case Instruction::Opcode::FPTrunc: + case Instruction::Opcode::BitCast: { + assert(Operands.size() == 1u && "Casts are unary!"); + return CastInst::create(VecTy, Opcode, Operands[0], WhereIt, Ctx, + "VCast"); + } + case Instruction::Opcode::FCmp: + case Instruction::Opcode::ICmp: { + auto Pred = cast(Bndl[0])->getPredicate(); + assert(all_of(drop_begin(Bndl), + [Pred](auto *SBV) { + return cast(SBV)->getPredicate() == Pred; + }) && + 
"Expected same predicate across bundle."); + return CmpInst::create(Pred, Operands[0], Operands[1], WhereIt, Ctx, + "VCmp"); + } + case Instruction::Opcode::Select: { + return SelectInst::create(Operands[0], Operands[1], Operands[2], WhereIt, + Ctx, "Vec"); + } + case Instruction::Opcode::FNeg: { + auto *UOp0 = cast(Bndl[0]); + auto OpC = UOp0->getOpcode(); + return UnaryOperator::createWithCopiedFlags(OpC, Operands[0], UOp0, + WhereIt, Ctx, "Vec"); + } + case Instruction::Opcode::Add: + case Instruction::Opcode::FAdd: + case Instruction::Opcode::Sub: + case Instruction::Opcode::FSub: + case Instruction::Opcode::Mul: + case Instruction::Opcode::FMul: + case Instruction::Opcode::UDiv: + case Instruction::Opcode::SDiv: + case Instruction::Opcode::FDiv: + case Instruction::Opcode::URem: + case Instruction::Opcode::SRem: + case Instruction::Opcode::FRem: + case Instruction::Opcode::Shl: + case Instruction::Opcode::LShr: + case Instruction::Opcode::AShr: + case Instruction::Opcode::And: + case Instruction::Opcode::Or: + case Instruction::Opcode::Xor: { + auto *BinOp0 = cast(Bndl[0]); + auto *LHS = Operands[0]; + auto *RHS = Operands[1]; + return BinaryOperator::createWithCopiedFlags( + BinOp0->getOpcode(), LHS, RHS, BinOp0, WhereIt, Ctx, "Vec"); + } + case Instruction::Opcode::Load: { + auto *Ld0 = cast(Bndl[0]); + Value *Ptr = Ld0->getPointerOperand(); + return LoadInst::create(VecTy, Ptr, Ld0->getAlign(), WhereIt, Ctx, + "VecL"); + } + case Instruction::Opcode::Store: { + auto Align = cast(Bndl[0])->getAlign(); + Value *Val = Operands[0]; + Value *Ptr = Operands[1]; + return StoreInst::create(Val, Ptr, Align, WhereIt, Ctx); + } + case Instruction::Opcode::Br: + case Instruction::Opcode::Ret: + case Instruction::Opcode::PHI: + case Instruction::Opcode::AddrSpaceCast: + case Instruction::Opcode::Call: + case Instruction::Opcode::GetElementPtr: + llvm_unreachable("Unimplemented"); + break; + default: + llvm_unreachable("Unimplemented"); + break; + } + 
llvm_unreachable("Missing switch case!"); + // TODO: Propagate debug info. + }; + + auto *VecI = CreateVectorInstr(Bndl, Operands); + if (VecI != nullptr) { + Change = true; + IMaps.registerVector(Bndl, VecI); } - llvm_unreachable("Missing switch case!"); - // TODO: Propagate debug info. + return VecI; } void BottomUpVec::tryEraseDeadInstrs() { @@ -280,6 +291,10 @@ Value *BottomUpVec::vectorizeRec(ArrayRef Bndl, unsigned Depth) { collectPotentiallyDeadInstrs(Bndl); break; } + case LegalityResultID::DiamondReuse: { + NewVec = cast(LegalityRes).getVector(); + break; + } case LegalityResultID::Pack: { // If we can't vectorize the seeds then just return. if (Depth == 0) @@ -300,9 +315,10 @@ bool BottomUpVec::tryVectorize(ArrayRef Bndl) { } bool BottomUpVec::runOnFunction(Function &F, const Analyses &A) { + IMaps.clear(); Legality = std::make_unique( A.getAA(), A.getScalarEvolution(), F.getParent()->getDataLayout(), - F.getContext()); + F.getContext(), IMaps); Change = false; const auto &DL = F.getParent()->getDataLayout(); unsigned VecRegBits = diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 92166ebca06da..eceddff6be6ff 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1223,8 +1223,8 @@ class VPInstruction : public VPRecipeWithIRFlags, // operand). Only generates scalar values (either for the first lane only or // for all lanes, depending on its uses). PtrAdd, - // Returns a scalar boolean value, which is true if any lane of its single - // operand is true. + // Returns a scalar boolean value, which is true if any lane of its (only + // boolean) vector operand is true. 
AnyOf, }; diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 8fea2c6fd33b6..27357ff04b5f2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -60,7 +60,10 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { } case Instruction::ICmp: case VPInstruction::ActiveLaneMask: - return inferScalarType(R->getOperand(1)); + assert(inferScalarType(R->getOperand(0)) == + inferScalarType(R->getOperand(1)) && + "different types inferred for different operands"); + return IntegerType::get(Ctx, 1); case VPInstruction::ComputeReductionResult: { auto *PhiR = cast(R->getOperand(0)); auto *OrigPhi = cast(PhiR->getUnderlyingValue()); @@ -71,6 +74,9 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case VPInstruction::FirstOrderRecurrenceSplice: case VPInstruction::Not: case VPInstruction::ResumePhi: + case VPInstruction::CalculateTripCountMinusVF: + case VPInstruction::CanonicalIVIncrementForPart: + case VPInstruction::AnyOf: return SetResultTyFromOp(); case VPInstruction::ExtractFromEnd: { Type *BaseTy = inferScalarType(R->getOperand(0)); @@ -79,6 +85,9 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { return BaseTy; } case VPInstruction::LogicalAnd: + assert(inferScalarType(R->getOperand(0))->isIntegerTy(1) && + inferScalarType(R->getOperand(1))->isIntegerTy(1) && + "LogicalAnd operands should be bool"); return IntegerType::get(Ctx, 1); case VPInstruction::PtrAdd: // Return the type based on the pointer argument (i.e. first operand). 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index be420a873bef5..a30bc82cbde85 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -26,6 +26,7 @@ using namespace llvm; namespace { class VPlanVerifier { const VPDominatorTree &VPDT; + VPTypeAnalysis &TypeInfo; SmallPtrSet WrappedIRBBs; @@ -58,7 +59,8 @@ class VPlanVerifier { bool verifyRegionRec(const VPRegionBlock *Region); public: - VPlanVerifier(VPDominatorTree &VPDT) : VPDT(VPDT) {} + VPlanVerifier(VPDominatorTree &VPDT, VPTypeAnalysis &TypeInfo) + : VPDT(VPDT), TypeInfo(TypeInfo) {} bool verify(const VPlan &Plan); }; @@ -195,6 +197,14 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { return false; } for (const VPValue *V : R.definedValues()) { + // Verify that we can infer a scalar type for each defined value. With + // assertions enabled, inferScalarType will perform some consistency + // checks during type inference. + if (!TypeInfo.inferScalarType(V)) { + errs() << "Failed to infer scalar type!\n"; + return false; + } + for (const VPUser *U : V->users()) { auto *UI = dyn_cast(U); // TODO: check dominance of incoming values for phis properly. 
@@ -406,6 +416,8 @@ bool VPlanVerifier::verify(const VPlan &Plan) { bool llvm::verifyVPlanIsValid(const VPlan &Plan) { VPDominatorTree VPDT; VPDT.recalculate(const_cast(Plan)); - VPlanVerifier Verifier(VPDT); + VPTypeAnalysis TypeInfo( + const_cast(Plan).getCanonicalIV()->getScalarType()); + VPlanVerifier Verifier(VPDT, TypeInfo); return Verifier.verify(Plan); } diff --git a/llvm/test/Analysis/BasicAA/dereferenceable.ll b/llvm/test/Analysis/BasicAA/dereferenceable.ll index 98bd5e3d5aa6a..8df2e4c6bda3a 100644 --- a/llvm/test/Analysis/BasicAA/dereferenceable.ll +++ b/llvm/test/Analysis/BasicAA/dereferenceable.ll @@ -1,5 +1,5 @@ ; RUN: opt -aa-pipeline=basic-aa -print-all-alias-modref-info -passes=aa-eval < %s 2>&1 | FileCheck %s -; RUN: opt -aa-pipeline=basic-aa -print-all-alias-modref-info -passes=aa-eval -use-dereferenceable-at-point-semantics=1 < %s 2>&1 | FileCheck %s +; RUN: opt -aa-pipeline=basic-aa -print-all-alias-modref-info -passes=aa-eval -use-dereferenceable-at-point-semantics < %s 2>&1 | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll b/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll index 1efe4a90ea780..4b63c036f5491 100644 --- a/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll +++ b/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll @@ -1,4 +1,4 @@ -; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=1 2>&1 | FileCheck %s --check-prefixes=CHECK +; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics 2>&1 | FileCheck %s --check-prefixes=CHECK target datalayout = "e-i32:32:64" diff --git a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll index 3da1aaa8a68a8..8c5216e0c45d9 100644 --- a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll +++ 
b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll @@ -1,5 +1,5 @@ -; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=0 2>&1 | FileCheck %s --check-prefixes=CHECK,GLOBAL -; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=1 2>&1 | FileCheck %s --check-prefixes=CHECK,POINT +; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=false 2>&1 | FileCheck %s --check-prefixes=CHECK,GLOBAL +; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics 2>&1 | FileCheck %s --check-prefixes=CHECK,POINT ; Uses the print-deref (+ analyze to print) pass to run diff --git a/llvm/test/CodeGen/AArch64/double_reduct.ll b/llvm/test/CodeGen/AArch64/double_reduct.ll index 0c356b1d98287..f30895db2c098 100644 --- a/llvm/test/CodeGen/AArch64/double_reduct.ll +++ b/llvm/test/CodeGen/AArch64/double_reduct.ll @@ -1,111 +1,291 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc --mtriple=aarch64-eabi < %s | FileCheck %s +; RUN: llc --mtriple=aarch64-eabi < %s -global-isel=false | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc --mtriple=aarch64-eabi < %s -global-isel=true | FileCheck %s --check-prefixes=CHECK,CHECK-GI define float @add_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: add_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fadd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fadd v0.4s, v0.4s, v2.4s -; CHECK-NEXT: faddp v0.4s, v0.4s, v0.4s -; CHECK-NEXT: faddp s0, v0.2s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fadd v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-SD-NEXT: faddp s0, v0.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: faddp v1.4s, v2.4s, v2.4s +; 
CHECK-GI-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-GI-NEXT: faddp s1, v1.2s +; CHECK-GI-NEXT: faddp s0, v0.2s +; CHECK-GI-NEXT: fadd s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %a) %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b) %r = fadd fast float %r1, %r2 ret float %r } +define float @add_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: add_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-SD-NEXT: faddp s0, v0.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_f32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-GI-NEXT: faddp v1.4s, v1.4s, v1.4s +; CHECK-GI-NEXT: faddp s0, v0.2s +; CHECK-GI-NEXT: faddp s1, v1.2s +; CHECK-GI-NEXT: fadd s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a) + %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b) + %r = fadd fast float %r1, %r2 + ret float %r +} + define float @fmul_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: fmul_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fmul v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fmul v0.4s, v0.4s, v2.4s -; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: fmul v0.2s, v0.2s, v1.2s -; CHECK-NEXT: fmul s0, s0, v0.s[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fmul_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmul v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: fmul v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: fmul s0, s0, v0.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmul_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: mov d3, v2.d[1] +; CHECK-GI-NEXT: mov d1, v0.d[1] +; CHECK-GI-NEXT: fmul v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: fmul v1.2s, v2.2s, 
v3.2s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v1.s[1] +; CHECK-GI-NEXT: fmul s0, s0, s2 +; CHECK-GI-NEXT: fmul s1, s1, s3 +; CHECK-GI-NEXT: fmul s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %a) %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b) %r = fmul fast float %r1, %r2 ret float %r } +define float @fmul_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: fmul_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: fmul v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: fmul s0, s0, v0.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmul_f32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: fmul v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: fmul v1.2s, v1.2s, v3.2s +; CHECK-GI-NEXT: mov s2, v0.s[1] +; CHECK-GI-NEXT: mov s3, v1.s[1] +; CHECK-GI-NEXT: fmul s0, s0, s2 +; CHECK-GI-NEXT: fmul s1, s1, s3 +; CHECK-GI-NEXT: fmul s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a) + %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b) + %r = fmul fast float %r1, %r2 + ret float %r +} + define float @fmin_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: fmin_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fminnm v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fminnm v0.4s, v0.4s, v2.4s -; CHECK-NEXT: fminnmv s0, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fmin_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fminnm v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fminnm v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: fminnmv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmin_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fminnm v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: fminnmv s1, v2.4s +; CHECK-GI-NEXT: fminnmv s0, v0.4s +; CHECK-GI-NEXT: fminnm s0, 
s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a) %r2 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b) %r = call float @llvm.minnum.f32(float %r1, float %r2) ret float %r } +define float @fmin_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: fmin_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fminnm v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fminnmv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmin_f32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fminnmv s0, v0.4s +; CHECK-GI-NEXT: fminnmv s1, v1.4s +; CHECK-GI-NEXT: fminnm s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a) + %r2 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b) + %r = call float @llvm.minnum.f32(float %r1, float %r2) + ret float %r +} + define float @fmax_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: fmax_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v2.4s -; CHECK-NEXT: fmaxnmv s0, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fmax_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmaxnm v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: fmaxnmv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmax_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: fmaxnmv s1, v2.4s +; CHECK-GI-NEXT: fmaxnmv s0, v0.4s +; CHECK-GI-NEXT: fmaxnm s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a) %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b) %r = call float @llvm.maxnum.f32(float %r1, float %r2) ret float %r } +define float @fmax_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: fmax_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmaxnmv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmax_f32_same: +; CHECK-GI: // %bb.0: 
+; CHECK-GI-NEXT: fmaxnmv s0, v0.4s +; CHECK-GI-NEXT: fmaxnmv s1, v1.4s +; CHECK-GI-NEXT: fmaxnm s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a) + %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b) + %r = call float @llvm.maxnum.f32(float %r1, float %r2) + ret float %r +} + define float @fminimum_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: fminimum_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fmin v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fmin v0.4s, v0.4s, v2.4s -; CHECK-NEXT: fminv s0, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fminimum_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: fminv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fminimum_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: fminv s1, v2.4s +; CHECK-GI-NEXT: fminv s0, v0.4s +; CHECK-GI-NEXT: fmin s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> %a) %r2 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %b) %r = call float @llvm.minimum.f32(float %r1, float %r2) ret float %r } +define float @fminimum_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: fminimum_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fminv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fminimum_f32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fminv s0, v0.4s +; CHECK-GI-NEXT: fminv s1, v1.4s +; CHECK-GI-NEXT: fmin s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %a) + %r2 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %b) + %r = call float @llvm.minimum.f32(float %r1, float %r2) + ret float %r +} + define float @fmaximum_f32(<8 x float> %a, <4 x float> %b) { -; CHECK-LABEL: fmaximum_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: fmax v0.4s, v0.4s, v1.4s -; 
CHECK-NEXT: fmax v0.4s, v0.4s, v2.4s -; CHECK-NEXT: fmaxv s0, v0.4s -; CHECK-NEXT: ret +; CHECK-SD-LABEL: fmaximum_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmax v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: fmaxv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmaximum_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmax v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: fmaxv s1, v2.4s +; CHECK-GI-NEXT: fmaxv s0, v0.4s +; CHECK-GI-NEXT: fmax s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> %a) %r2 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %b) %r = call float @llvm.maximum.f32(float %r1, float %r2) ret float %r } +define float @fmaximum_f32_same(<4 x float> %a, <4 x float> %b) { +; CHECK-SD-LABEL: fmaximum_f32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: fmaxv s0, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: fmaximum_f32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmaxv s0, v0.4s +; CHECK-GI-NEXT: fmaxv s1, v1.4s +; CHECK-GI-NEXT: fmax s0, s0, s1 +; CHECK-GI-NEXT: ret + %r1 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %a) + %r2 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %b) + %r = call float @llvm.maximum.f32(float %r1, float %r2) + ret float %r +} + ; These next two tests have incorrect minnum/minimum combinations -define float @fminimumnum_f32(<8 x float> %a, <4 x float> %b) { +define float @fminimumnum_f32(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: fminimumnum_f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmin v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fminv s1, v2.4s ; CHECK-NEXT: fminv s0, v0.4s +; CHECK-NEXT: fminv s1, v1.4s ; CHECK-NEXT: fminnm s0, s0, s1 ; CHECK-NEXT: ret - %r1 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> %a) + %r1 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %a) %r2 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %b) %r = call 
float @llvm.minnum.f32(float %r1, float %r2) ret float %r } -define float @fmaxnumimum_f32(<8 x float> %a, <4 x float> %b) { +define float @fmaxnumimum_f32(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: fmaxnumimum_f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s -; CHECK-NEXT: fmaxnmv s1, v2.4s ; CHECK-NEXT: fmaxnmv s0, v0.4s +; CHECK-NEXT: fmaxnmv s1, v1.4s ; CHECK-NEXT: fmax s0, s0, s1 ; CHECK-NEXT: ret - %r1 = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a) + %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a) %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b) %r = call float @llvm.maximum.f32(float %r1, float %r2) ret float %r @@ -113,13 +293,23 @@ define float @fmaxnumimum_f32(<8 x float> %a, <4 x float> %b) { define i32 @add_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: add_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add v0.4s, v0.4s, v2.4s -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: addv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: addv s1, v2.4s +; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %b) %r = add i32 %r1, %r2 @@ -127,13 +317,22 @@ define i32 @add_i32(<8 x i32> %a, <4 x i32> %b) { } define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) { -; CHECK-LABEL: add_ext_i16: -; CHECK: // %bb.0: -; CHECK-NEXT: uaddlp v1.8h, v1.16b -; CHECK-NEXT: uadalp v1.8h, v0.16b -; CHECK-NEXT: addv h0, v1.8h -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret 
+; CHECK-SD-LABEL: add_ext_i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: uaddlp v1.8h, v1.16b +; CHECK-SD-NEXT: uadalp v1.8h, v0.16b +; CHECK-SD-NEXT: addv h0, v1.8h +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_ext_i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: uaddlv h0, v0.16b +; CHECK-GI-NEXT: uaddlv h1, v1.16b +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %ae = zext <16 x i8> %a to <16 x i16> %be = zext <16 x i8> %b to <16 x i16> %r1 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %ae) @@ -143,15 +342,27 @@ define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) { } define i16 @add_ext_v32i16(<32 x i8> %a, <16 x i8> %b) { -; CHECK-LABEL: add_ext_v32i16: -; CHECK: // %bb.0: -; CHECK-NEXT: uaddl2 v3.8h, v0.16b, v1.16b -; CHECK-NEXT: uaddl v0.8h, v0.8b, v1.8b -; CHECK-NEXT: add v0.8h, v0.8h, v3.8h -; CHECK-NEXT: uadalp v0.8h, v2.16b -; CHECK-NEXT: addv h0, v0.8h -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_ext_v32i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: uaddl2 v3.8h, v0.16b, v1.16b +; CHECK-SD-NEXT: uaddl v0.8h, v0.8b, v1.8b +; CHECK-SD-NEXT: add v0.8h, v0.8h, v3.8h +; CHECK-SD-NEXT: uadalp v0.8h, v2.16b +; CHECK-SD-NEXT: addv h0, v0.8h +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_ext_v32i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: uaddlv h0, v0.16b +; CHECK-GI-NEXT: uaddlv h1, v1.16b +; CHECK-GI-NEXT: uaddlv h2, v2.16b +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: add w8, w8, w9 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %ae = zext <32 x i8> %a to <32 x i16> %be = zext <16 x i8> %b to <16 x i16> %r1 = call i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16> %ae) @@ -161,141 +372,486 @@ define i16 @add_ext_v32i16(<32 x i8> %a, <16 x i8> %b) { } define i32 @mul_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: mul_i32: -; CHECK: 
// %bb.0: -; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s -; CHECK-NEXT: mul v0.4s, v0.4s, v2.4s -; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: mul v0.2s, v0.2s, v1.2s -; CHECK-NEXT: mov w8, v0.s[1] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mul w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mul_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mul v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: mul v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: mul v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mov w8, v0.s[1] +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: mul w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mul_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov d4, v1.d[1] +; CHECK-GI-NEXT: mul v0.2s, v0.2s, v3.2s +; CHECK-GI-NEXT: mul v1.2s, v1.2s, v4.2s +; CHECK-GI-NEXT: mov d3, v2.d[1] +; CHECK-GI-NEXT: mul v0.2s, v0.2s, v1.2s +; CHECK-GI-NEXT: mul v1.2s, v2.2s, v3.2s +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: mul w8, w10, w8 +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: mul w9, w10, w9 +; CHECK-GI-NEXT: mul w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.mul.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %b) %r = mul i32 %r1, %r2 ret i32 %r } +define i32 @mul_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: mul_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mul v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: mul v0.2s, v0.2s, v1.2s +; CHECK-SD-NEXT: mov w8, v0.s[1] +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: mul w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mul_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: mul v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: mul v1.2s, v1.2s, v3.2s +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, 
v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: mul w8, w10, w8 +; CHECK-GI-NEXT: mul w9, w11, w9 +; CHECK-GI-NEXT: mul w0, w8, w9 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %b) + %r = mul i32 %r1, %r2 + ret i32 %r +} + define i32 @and_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: and_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: and v0.16b, v0.16b, v1.16b -; CHECK-NEXT: and v0.16b, v0.16b, v2.16b -; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v0.8b, v0.8b, v1.8b -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: lsr x9, x8, #32 -; CHECK-NEXT: and w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: and_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: and w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: mov d1, v2.d[1] +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: and v1.8b, v2.8b, v1.8b +; CHECK-GI-NEXT: and v0.8b, v0.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: mov w9, v0.s[1] +; CHECK-GI-NEXT: fmov w11, s0 +; CHECK-GI-NEXT: and w8, w10, w8 +; CHECK-GI-NEXT: and w8, w11, w8 +; CHECK-GI-NEXT: and w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.and.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32> %b) %r = and i32 %r1, %r2 ret i32 %r } +define i32 @and_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: and_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b 
+; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: and w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: and v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: and w8, w10, w8 +; CHECK-GI-NEXT: and w9, w11, w9 +; CHECK-GI-NEXT: and w0, w8, w9 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32> %b) + %r = and i32 %r1, %r2 + ret i32 %r +} + define i32 @or_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: or_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b -; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: lsr x9, x8, #32 -; CHECK-NEXT: orr w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: or_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: orr v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: orr w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: or_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: orr v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: mov d1, v2.d[1] +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: orr v1.8b, v2.8b, v1.8b +; CHECK-GI-NEXT: orr v0.8b, v0.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: mov w9, v0.s[1] +; CHECK-GI-NEXT: fmov w11, s0 +; CHECK-GI-NEXT: orr w8, w10, w8 +; CHECK-GI-NEXT: orr w8, w11, w8 +; CHECK-GI-NEXT: orr w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 
@llvm.vector.reduce.or.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %b) %r = or i32 %r1, %r2 ret i32 %r } +define i32 @or_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: or_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: orr v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: orr w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: or_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: orr v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: orr v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: orr w8, w10, w8 +; CHECK-GI-NEXT: orr w9, w11, w9 +; CHECK-GI-NEXT: orr w0, w8, w9 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %b) + %r = or i32 %r1, %r2 + ret i32 %r +} + define i32 @xor_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: xor_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b -; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b -; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: lsr x9, x8, #32 -; CHECK-NEXT: eor w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: xor_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: eor v0.16b, v0.16b, v2.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: eor w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: xor_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-GI-NEXT: mov d1, v2.d[1] +; 
CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: eor v1.8b, v2.8b, v1.8b +; CHECK-GI-NEXT: eor v0.8b, v0.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: mov w9, v0.s[1] +; CHECK-GI-NEXT: fmov w11, s0 +; CHECK-GI-NEXT: eor w8, w10, w8 +; CHECK-GI-NEXT: eor w8, w11, w8 +; CHECK-GI-NEXT: eor w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.xor.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %b) %r = xor i32 %r1, %r2 ret i32 %r } +define i32 @xor_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: xor_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-SD-NEXT: fmov x8, d0 +; CHECK-SD-NEXT: lsr x9, x8, #32 +; CHECK-SD-NEXT: eor w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: xor_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: eor v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: eor v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: eor w8, w10, w8 +; CHECK-GI-NEXT: eor w9, w11, w9 +; CHECK-GI-NEXT: eor w0, w8, w9 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %b) + %r = xor i32 %r1, %r2 + ret i32 %r +} + define i32 @umin_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: umin_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: umin v0.4s, v0.4s, v1.4s -; CHECK-NEXT: umin v0.4s, v0.4s, v2.4s -; CHECK-NEXT: uminv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umin_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: umin v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: uminv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; 
CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umin_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: uminv s1, v2.4s +; CHECK-GI-NEXT: uminv s0, v0.4s +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, lo +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.umin.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32> %b) %r = call i32 @llvm.umin.i32(i32 %r1, i32 %r2) ret i32 %r } +define i32 @umin_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: umin_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: uminv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umin_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: uminv s0, v0.4s +; CHECK-GI-NEXT: uminv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, lo +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32> %b) + %r = call i32 @llvm.umin.i32(i32 %r1, i32 %r2) + ret i32 %r +} + define i32 @umax_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: umax_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: umax v0.4s, v0.4s, v1.4s -; CHECK-NEXT: umax v0.4s, v0.4s, v2.4s -; CHECK-NEXT: umaxv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umax_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: umax v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: umaxv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umax_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umax v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: umaxv s1, v2.4s +; CHECK-GI-NEXT: umaxv s0, v0.4s +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: fmov 
w8, s0 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, hi +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.umax.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %b) %r = call i32 @llvm.umax.i32(i32 %r1, i32 %r2) ret i32 %r } +define i32 @umax_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: umax_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: umaxv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umax_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umaxv s0, v0.4s +; CHECK-GI-NEXT: umaxv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, hi +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %b) + %r = call i32 @llvm.umax.i32(i32 %r1, i32 %r2) + ret i32 %r +} + define i32 @smin_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: smin_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s -; CHECK-NEXT: smin v0.4s, v0.4s, v2.4s -; CHECK-NEXT: sminv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smin_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: sminv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smin_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: sminv s1, v2.4s +; CHECK-GI-NEXT: sminv s0, v0.4s +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, lt +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.smin.i32.v8i32(<8 x i32> %a) %r2 = call i32 
@llvm.vector.reduce.smin.i32.v4i32(<4 x i32> %b) %r = call i32 @llvm.smin.i32(i32 %r1, i32 %r2) ret i32 %r } +define i32 @smin_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: smin_i32_same: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smin v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: sminv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smin_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sminv s0, v0.4s +; CHECK-GI-NEXT: sminv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, lt +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32> %b) + %r = call i32 @llvm.smin.i32(i32 %r1, i32 %r2) + ret i32 %r +} + define i32 @smax_i32(<8 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: smax_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: smax v0.4s, v0.4s, v1.4s -; CHECK-NEXT: smax v0.4s, v0.4s, v2.4s -; CHECK-NEXT: smaxv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smax_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: smax v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: smaxv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smax_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: smaxv s1, v2.4s +; CHECK-GI-NEXT: smaxv s0, v0.4s +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, gt +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.smax.i32.v8i32(<8 x i32> %a) %r2 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %b) %r = call i32 @llvm.smax.i32(i32 %r1, i32 %r2) ret i32 %r } +define i32 @smax_i32_same(<4 x i32> %a, <4 x i32> %b) { +; CHECK-SD-LABEL: smax_i32_same: +; CHECK-SD: // %bb.0: +; 
CHECK-SD-NEXT: smax v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: smaxv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smax_i32_same: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smaxv s0, v0.4s +; CHECK-GI-NEXT: smaxv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: fcsel s0, s0, s1, gt +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret + %r1 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %a) + %r2 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %b) + %r = call i32 @llvm.smax.i32(i32 %r1, i32 %r2) + ret i32 %r +} + define float @nested_fadd_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) { -; CHECK-LABEL: nested_fadd_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: faddp v1.4s, v1.4s, v1.4s -; CHECK-NEXT: faddp v0.4s, v0.4s, v0.4s -; CHECK-NEXT: faddp s1, v1.2s -; CHECK-NEXT: faddp s0, v0.2s -; CHECK-NEXT: fadd s1, s1, s3 -; CHECK-NEXT: fadd s0, s0, s2 -; CHECK-NEXT: fadd s0, s0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_fadd_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: faddp v1.4s, v1.4s, v1.4s +; CHECK-SD-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-SD-NEXT: faddp s1, v1.2s +; CHECK-SD-NEXT: faddp s0, v0.2s +; CHECK-SD-NEXT: fadd s1, s1, s3 +; CHECK-SD-NEXT: fadd s0, s0, s2 +; CHECK-SD-NEXT: fadd s0, s0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_fadd_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: faddp v0.4s, v0.4s, v0.4s +; CHECK-GI-NEXT: faddp v1.4s, v1.4s, v1.4s +; CHECK-GI-NEXT: faddp s0, v0.2s +; CHECK-GI-NEXT: faddp s1, v1.2s +; CHECK-GI-NEXT: fadd s0, s0, s2 +; CHECK-GI-NEXT: fadd s1, s1, s3 +; CHECK-GI-NEXT: fadd s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a) %a1 = fadd fast float %r1, %c %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b) @@ -305,22 +861,39 @@ define float @nested_fadd_f32(<4 x float> %a, <4 x float> %b, float 
%c, float %d } define float @nested_fadd_f32_slow(<4 x float> %a, <4 x float> %b, float %c, float %d) { -; CHECK-LABEL: nested_fadd_f32_slow: -; CHECK: // %bb.0: -; CHECK-NEXT: mov s4, v1.s[2] -; CHECK-NEXT: mov s5, v0.s[2] -; CHECK-NEXT: faddp s6, v0.2s -; CHECK-NEXT: faddp s7, v1.2s -; CHECK-NEXT: mov s1, v1.s[3] -; CHECK-NEXT: mov s0, v0.s[3] -; CHECK-NEXT: fadd s5, s6, s5 -; CHECK-NEXT: fadd s4, s7, s4 -; CHECK-NEXT: fadd s0, s5, s0 -; CHECK-NEXT: fadd s1, s4, s1 -; CHECK-NEXT: fadd s0, s0, s2 -; CHECK-NEXT: fadd s1, s1, s3 -; CHECK-NEXT: fadd s0, s0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_fadd_f32_slow: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov s4, v1.s[2] +; CHECK-SD-NEXT: mov s5, v0.s[2] +; CHECK-SD-NEXT: faddp s6, v0.2s +; CHECK-SD-NEXT: faddp s7, v1.2s +; CHECK-SD-NEXT: mov s1, v1.s[3] +; CHECK-SD-NEXT: mov s0, v0.s[3] +; CHECK-SD-NEXT: fadd s5, s6, s5 +; CHECK-SD-NEXT: fadd s4, s7, s4 +; CHECK-SD-NEXT: fadd s0, s5, s0 +; CHECK-SD-NEXT: fadd s1, s4, s1 +; CHECK-SD-NEXT: fadd s0, s0, s2 +; CHECK-SD-NEXT: fadd s1, s1, s3 +; CHECK-SD-NEXT: fadd s0, s0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_fadd_f32_slow: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov s4, v0.s[2] +; CHECK-GI-NEXT: faddp s5, v0.2s +; CHECK-GI-NEXT: mov s6, v1.s[2] +; CHECK-GI-NEXT: faddp s7, v1.2s +; CHECK-GI-NEXT: mov s0, v0.s[3] +; CHECK-GI-NEXT: mov s1, v1.s[3] +; CHECK-GI-NEXT: fadd s4, s5, s4 +; CHECK-GI-NEXT: fadd s5, s7, s6 +; CHECK-GI-NEXT: fadd s0, s4, s0 +; CHECK-GI-NEXT: fadd s1, s5, s1 +; CHECK-GI-NEXT: fadd s0, s0, s2 +; CHECK-GI-NEXT: fadd s1, s1, s3 +; CHECK-GI-NEXT: fadd s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a) %a1 = fadd float %r1, %c %r2 = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b) @@ -330,18 +903,33 @@ define float @nested_fadd_f32_slow(<4 x float> %a, <4 x float> %b, float %c, flo } define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float 
%c, float %d) { -; CHECK-LABEL: nested_mul_f32: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v4.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: ext v5.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: fmul v1.2s, v1.2s, v4.2s -; CHECK-NEXT: fmul v0.2s, v0.2s, v5.2s -; CHECK-NEXT: fmul s1, s1, v1.s[1] -; CHECK-NEXT: fmul s0, s0, v0.s[1] -; CHECK-NEXT: fmul s1, s1, s3 -; CHECK-NEXT: fmul s0, s0, s2 -; CHECK-NEXT: fmul s0, s0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_mul_f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v4.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: ext v5.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: fmul v1.2s, v1.2s, v4.2s +; CHECK-SD-NEXT: fmul v0.2s, v0.2s, v5.2s +; CHECK-SD-NEXT: fmul s1, s1, v1.s[1] +; CHECK-SD-NEXT: fmul s0, s0, v0.s[1] +; CHECK-SD-NEXT: fmul s1, s1, s3 +; CHECK-SD-NEXT: fmul s0, s0, s2 +; CHECK-SD-NEXT: fmul s0, s0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_mul_f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d4, v0.d[1] +; CHECK-GI-NEXT: mov d5, v1.d[1] +; CHECK-GI-NEXT: fmul v0.2s, v0.2s, v4.2s +; CHECK-GI-NEXT: fmul v1.2s, v1.2s, v5.2s +; CHECK-GI-NEXT: mov s4, v0.s[1] +; CHECK-GI-NEXT: mov s5, v1.s[1] +; CHECK-GI-NEXT: fmul s0, s0, s4 +; CHECK-GI-NEXT: fmul s1, s1, s5 +; CHECK-GI-NEXT: fmul s0, s0, s2 +; CHECK-GI-NEXT: fmul s1, s1, s3 +; CHECK-GI-NEXT: fmul s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a) %a1 = fmul fast float %r1, %c %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b) @@ -351,16 +939,27 @@ define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) } define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_add_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: addv s1, v1.4s -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: add w9, w9, w0 -; CHECK-NEXT: add w8, w8, w1 -; CHECK-NEXT: add w0, w9, w8 -; CHECK-NEXT: ret +; 
CHECK-SD-LABEL: nested_add_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: addv s1, v1.4s +; CHECK-SD-NEXT: addv s0, v0.4s +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: add w9, w9, w0 +; CHECK-SD-NEXT: add w8, w8, w1 +; CHECK-SD-NEXT: add w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_add_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: addv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: add w8, w8, w0 +; CHECK-GI-NEXT: add w9, w9, w1 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) %a1 = add i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b) @@ -370,16 +969,27 @@ define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_add_c1_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_add_c1_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: addv s1, v1.4s -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: add w9, w0, w9 -; CHECK-NEXT: add w8, w8, w1 -; CHECK-NEXT: add w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_add_c1_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: addv s1, v1.4s +; CHECK-SD-NEXT: addv s0, v0.4s +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: add w9, w0, w9 +; CHECK-SD-NEXT: add w8, w8, w1 +; CHECK-SD-NEXT: add w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_add_c1_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: addv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: add w8, w0, w8 +; CHECK-GI-NEXT: add w9, w9, w1 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) %a1 = add i32 %c, %r1 %r2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b) @@ -389,16 +999,27 @@ define i32 
@nested_add_c1_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_add_c2_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_add_c2_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: addv s1, v1.4s -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: add w9, w9, w0 -; CHECK-NEXT: add w8, w1, w8 -; CHECK-NEXT: add w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_add_c2_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: addv s1, v1.4s +; CHECK-SD-NEXT: addv s0, v0.4s +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: add w9, w9, w0 +; CHECK-SD-NEXT: add w8, w1, w8 +; CHECK-SD-NEXT: add w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_add_c2_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: addv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: add w8, w8, w0 +; CHECK-GI-NEXT: add w9, w1, w9 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) %a1 = add i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b) @@ -408,14 +1029,29 @@ define i32 @nested_add_c2_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_add_manyreduct_i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) { -; CHECK-LABEL: nested_add_manyreduct_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: add v1.4s, v1.4s, v3.4s -; CHECK-NEXT: add v0.4s, v0.4s, v2.4s -; CHECK-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_add_manyreduct_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add v1.4s, v1.4s, v3.4s +; CHECK-SD-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-SD-NEXT: addv s0, v0.4s +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_add_manyreduct_i32: +; CHECK-GI: // %bb.0: +; 
CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: addv s2, v2.4s +; CHECK-GI-NEXT: addv s1, v1.4s +; CHECK-GI-NEXT: addv s3, v3.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: fmov w11, s3 +; CHECK-GI-NEXT: add w8, w8, w9 +; CHECK-GI-NEXT: add w9, w10, w11 +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) %r3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %c) %a1 = add i32 %r1, %r3 @@ -427,22 +1063,39 @@ define i32 @nested_add_manyreduct_i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, } define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_mul_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: mul v0.2s, v0.2s, v3.2s -; CHECK-NEXT: mul v1.2s, v1.2s, v2.2s -; CHECK-NEXT: mov w8, v0.s[1] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: mov w9, v1.s[1] -; CHECK-NEXT: mul w8, w10, w8 -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: mul w9, w10, w9 -; CHECK-NEXT: mul w8, w8, w0 -; CHECK-NEXT: mul w9, w9, w1 -; CHECK-NEXT: mul w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_mul_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v3.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: mul v0.2s, v0.2s, v3.2s +; CHECK-SD-NEXT: mul v1.2s, v1.2s, v2.2s +; CHECK-SD-NEXT: mov w8, v0.s[1] +; CHECK-SD-NEXT: fmov w10, s0 +; CHECK-SD-NEXT: mov w9, v1.s[1] +; CHECK-SD-NEXT: mul w8, w10, w8 +; CHECK-SD-NEXT: fmov w10, s1 +; CHECK-SD-NEXT: mul w9, w10, w9 +; CHECK-SD-NEXT: mul w8, w8, w0 +; CHECK-SD-NEXT: mul w9, w9, w1 +; CHECK-SD-NEXT: mul w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_mul_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: mul v0.2s, v0.2s, v2.2s +; CHECK-GI-NEXT: mul v1.2s, v1.2s, v3.2s +; CHECK-GI-NEXT: mov 
w8, v0.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: mul w8, w10, w8 +; CHECK-GI-NEXT: fmov w10, s1 +; CHECK-GI-NEXT: mul w9, w10, w9 +; CHECK-GI-NEXT: mul w8, w8, w0 +; CHECK-GI-NEXT: mul w9, w9, w1 +; CHECK-GI-NEXT: mul w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %a) %a1 = mul i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %b) @@ -452,22 +1105,39 @@ define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_and_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v1.8b, v1.8b, v2.8b -; CHECK-NEXT: and v0.8b, v0.8b, v3.8b -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: lsr x10, x9, #32 -; CHECK-NEXT: lsr x11, x8, #32 -; CHECK-NEXT: and w9, w9, w0 -; CHECK-NEXT: and w8, w8, w1 -; CHECK-NEXT: and w9, w9, w10 -; CHECK-NEXT: and w8, w8, w11 -; CHECK-NEXT: and w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_and_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: ext v3.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-SD-NEXT: and v0.8b, v0.8b, v3.8b +; CHECK-SD-NEXT: fmov x8, d1 +; CHECK-SD-NEXT: fmov x9, d0 +; CHECK-SD-NEXT: lsr x10, x9, #32 +; CHECK-SD-NEXT: lsr x11, x8, #32 +; CHECK-SD-NEXT: and w9, w9, w0 +; CHECK-SD-NEXT: and w8, w8, w1 +; CHECK-SD-NEXT: and w9, w9, w10 +; CHECK-SD-NEXT: and w8, w8, w11 +; CHECK-SD-NEXT: and w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_and_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: and v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; 
CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: and w10, w10, w0 +; CHECK-GI-NEXT: and w11, w11, w1 +; CHECK-GI-NEXT: and w8, w10, w8 +; CHECK-GI-NEXT: and w9, w11, w9 +; CHECK-GI-NEXT: and w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a) %a1 = and i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %b) @@ -477,22 +1147,39 @@ define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_or_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: orr v1.8b, v1.8b, v2.8b -; CHECK-NEXT: orr v0.8b, v0.8b, v3.8b -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: lsr x10, x9, #32 -; CHECK-NEXT: lsr x11, x8, #32 -; CHECK-NEXT: orr w9, w9, w0 -; CHECK-NEXT: orr w8, w8, w1 -; CHECK-NEXT: orr w9, w9, w10 -; CHECK-NEXT: orr w8, w8, w11 -; CHECK-NEXT: orr w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_or_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: ext v3.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: orr v1.8b, v1.8b, v2.8b +; CHECK-SD-NEXT: orr v0.8b, v0.8b, v3.8b +; CHECK-SD-NEXT: fmov x8, d1 +; CHECK-SD-NEXT: fmov x9, d0 +; CHECK-SD-NEXT: lsr x10, x9, #32 +; CHECK-SD-NEXT: lsr x11, x8, #32 +; CHECK-SD-NEXT: orr w9, w9, w0 +; CHECK-SD-NEXT: orr w8, w8, w1 +; CHECK-SD-NEXT: orr w9, w9, w10 +; CHECK-SD-NEXT: orr w8, w8, w11 +; CHECK-SD-NEXT: orr w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_or_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: orr v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: orr v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: orr w10, w10, w0 +; CHECK-GI-NEXT: orr 
w11, w11, w1 +; CHECK-GI-NEXT: orr w8, w10, w8 +; CHECK-GI-NEXT: orr w9, w11, w9 +; CHECK-GI-NEXT: orr w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a) %a1 = or i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %b) @@ -502,22 +1189,39 @@ define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_xor_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: eor v1.8b, v1.8b, v2.8b -; CHECK-NEXT: eor v0.8b, v0.8b, v3.8b -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: lsr x10, x9, #32 -; CHECK-NEXT: lsr x11, x8, #32 -; CHECK-NEXT: eor w9, w9, w0 -; CHECK-NEXT: eor w8, w8, w1 -; CHECK-NEXT: eor w9, w9, w10 -; CHECK-NEXT: eor w8, w8, w11 -; CHECK-NEXT: eor w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_xor_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: ext v3.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: eor v1.8b, v1.8b, v2.8b +; CHECK-SD-NEXT: eor v0.8b, v0.8b, v3.8b +; CHECK-SD-NEXT: fmov x8, d1 +; CHECK-SD-NEXT: fmov x9, d0 +; CHECK-SD-NEXT: lsr x10, x9, #32 +; CHECK-SD-NEXT: lsr x11, x8, #32 +; CHECK-SD-NEXT: eor w9, w9, w0 +; CHECK-SD-NEXT: eor w8, w8, w1 +; CHECK-SD-NEXT: eor w9, w9, w10 +; CHECK-SD-NEXT: eor w8, w8, w11 +; CHECK-SD-NEXT: eor w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_xor_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: mov d3, v1.d[1] +; CHECK-GI-NEXT: eor v0.8b, v0.8b, v2.8b +; CHECK-GI-NEXT: eor v1.8b, v1.8b, v3.8b +; CHECK-GI-NEXT: mov w8, v0.s[1] +; CHECK-GI-NEXT: mov w9, v1.s[1] +; CHECK-GI-NEXT: fmov w10, s0 +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: eor w10, w10, w0 +; CHECK-GI-NEXT: eor w11, w11, w1 +; CHECK-GI-NEXT: eor w8, w10, w8 +; CHECK-GI-NEXT: eor w9, w11, w9 +; 
CHECK-GI-NEXT: eor w0, w8, w9 +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a) %a1 = xor i32 %r1, %c %r2 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %b) @@ -527,19 +1231,33 @@ define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_smin_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: sminv s0, v0.4s -; CHECK-NEXT: sminv s1, v1.4s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: cmp w9, w0 -; CHECK-NEXT: csel w9, w9, w0, lt -; CHECK-NEXT: cmp w8, w1 -; CHECK-NEXT: csel w8, w8, w1, lt -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w0, w9, w8, lt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_smin_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sminv s0, v0.4s +; CHECK-SD-NEXT: sminv s1, v1.4s +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: cmp w9, w0 +; CHECK-SD-NEXT: csel w9, w9, w0, lt +; CHECK-SD-NEXT: cmp w8, w1 +; CHECK-SD-NEXT: csel w8, w8, w1, lt +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w0, w9, w8, lt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_smin_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sminv s0, v0.4s +; CHECK-GI-NEXT: sminv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w0 +; CHECK-GI-NEXT: csel w8, w8, w0, lt +; CHECK-GI-NEXT: cmp w9, w1 +; CHECK-GI-NEXT: csel w9, w9, w1, lt +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: csel w0, w8, w9, lt +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a) %a1 = call i32 @llvm.smin.i32(i32 %r1, i32 %c) %r2 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %b) @@ -549,19 +1267,33 @@ define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_smax_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: smaxv s0, v0.4s -; CHECK-NEXT: 
smaxv s1, v1.4s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: cmp w9, w0 -; CHECK-NEXT: csel w9, w9, w0, gt -; CHECK-NEXT: cmp w8, w1 -; CHECK-NEXT: csel w8, w8, w1, gt -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w0, w9, w8, gt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_smax_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smaxv s0, v0.4s +; CHECK-SD-NEXT: smaxv s1, v1.4s +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: cmp w9, w0 +; CHECK-SD-NEXT: csel w9, w9, w0, gt +; CHECK-SD-NEXT: cmp w8, w1 +; CHECK-SD-NEXT: csel w8, w8, w1, gt +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w0, w9, w8, gt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_smax_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smaxv s0, v0.4s +; CHECK-GI-NEXT: smaxv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w0 +; CHECK-GI-NEXT: csel w8, w8, w0, gt +; CHECK-GI-NEXT: cmp w9, w1 +; CHECK-GI-NEXT: csel w9, w9, w1, gt +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: csel w0, w8, w9, gt +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a) %a1 = call i32 @llvm.smax.i32(i32 %r1, i32 %c) %r2 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %b) @@ -571,19 +1303,33 @@ define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_umin_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: uminv s0, v0.4s -; CHECK-NEXT: uminv s1, v1.4s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: cmp w9, w0 -; CHECK-NEXT: csel w9, w9, w0, lo -; CHECK-NEXT: cmp w8, w1 -; CHECK-NEXT: csel w8, w8, w1, lo -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w0, w9, w8, lo -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_umin_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: uminv s0, v0.4s +; CHECK-SD-NEXT: uminv s1, v1.4s +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: fmov w8, s1 +; 
CHECK-SD-NEXT: cmp w9, w0 +; CHECK-SD-NEXT: csel w9, w9, w0, lo +; CHECK-SD-NEXT: cmp w8, w1 +; CHECK-SD-NEXT: csel w8, w8, w1, lo +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w0, w9, w8, lo +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_umin_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: uminv s0, v0.4s +; CHECK-GI-NEXT: uminv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w0 +; CHECK-GI-NEXT: csel w8, w8, w0, lo +; CHECK-GI-NEXT: cmp w9, w1 +; CHECK-GI-NEXT: csel w9, w9, w1, lo +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: csel w0, w8, w9, lo +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a) %a1 = call i32 @llvm.umin.i32(i32 %r1, i32 %c) %r2 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %b) @@ -593,19 +1339,33 @@ define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { -; CHECK-LABEL: nested_umax_i32: -; CHECK: // %bb.0: -; CHECK-NEXT: umaxv s0, v0.4s -; CHECK-NEXT: umaxv s1, v1.4s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: cmp w9, w0 -; CHECK-NEXT: csel w9, w9, w0, hi -; CHECK-NEXT: cmp w8, w1 -; CHECK-NEXT: csel w8, w8, w1, hi -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w0, w9, w8, hi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_umax_i32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umaxv s0, v0.4s +; CHECK-SD-NEXT: umaxv s1, v1.4s +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: fmov w8, s1 +; CHECK-SD-NEXT: cmp w9, w0 +; CHECK-SD-NEXT: csel w9, w9, w0, hi +; CHECK-SD-NEXT: cmp w8, w1 +; CHECK-SD-NEXT: csel w8, w8, w1, hi +; CHECK-SD-NEXT: cmp w9, w8 +; CHECK-SD-NEXT: csel w0, w9, w8, hi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_umax_i32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umaxv s0, v0.4s +; CHECK-GI-NEXT: umaxv s1, v1.4s +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: fmov w9, s1 +; CHECK-GI-NEXT: cmp w8, w0 +; CHECK-GI-NEXT: csel w8, 
w8, w0, hi +; CHECK-GI-NEXT: cmp w9, w1 +; CHECK-GI-NEXT: csel w9, w9, w1, hi +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: csel w0, w8, w9, hi +; CHECK-GI-NEXT: ret %r1 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a) %a1 = call i32 @llvm.umax.i32(i32 %r1, i32 %c) %r2 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %b) @@ -615,14 +1375,23 @@ define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) { } define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float %d) { -; CHECK-LABEL: nested_fmin_float: -; CHECK: // %bb.0: -; CHECK-NEXT: fminnmv s1, v1.4s -; CHECK-NEXT: fminnmv s0, v0.4s -; CHECK-NEXT: fminnm s1, s1, s3 -; CHECK-NEXT: fminnm s0, s0, s2 -; CHECK-NEXT: fminnm s0, s0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_fmin_float: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fminnmv s1, v1.4s +; CHECK-SD-NEXT: fminnmv s0, v0.4s +; CHECK-SD-NEXT: fminnm s1, s1, s3 +; CHECK-SD-NEXT: fminnm s0, s0, s2 +; CHECK-SD-NEXT: fminnm s0, s0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_fmin_float: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fminnmv s0, v0.4s +; CHECK-GI-NEXT: fminnmv s1, v1.4s +; CHECK-GI-NEXT: fminnm s0, s0, s2 +; CHECK-GI-NEXT: fminnm s1, s1, s3 +; CHECK-GI-NEXT: fminnm s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a) %a1 = call float @llvm.minnum.f32(float %r1, float %c) %r2 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b) @@ -632,14 +1401,23 @@ define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float } define float @nested_fmax_float(<4 x float> %a, <4 x float> %b, float %c, float %d) { -; CHECK-LABEL: nested_fmax_float: -; CHECK: // %bb.0: -; CHECK-NEXT: fmaxnmv s1, v1.4s -; CHECK-NEXT: fmaxnmv s0, v0.4s -; CHECK-NEXT: fmaxnm s1, s1, s3 -; CHECK-NEXT: fmaxnm s0, s0, s2 -; CHECK-NEXT: fmaxnm s0, s0, s1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: nested_fmax_float: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmaxnmv s1, v1.4s +; 
CHECK-SD-NEXT: fmaxnmv s0, v0.4s +; CHECK-SD-NEXT: fmaxnm s1, s1, s3 +; CHECK-SD-NEXT: fmaxnm s0, s0, s2 +; CHECK-SD-NEXT: fmaxnm s0, s0, s1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: nested_fmax_float: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmaxnmv s0, v0.4s +; CHECK-GI-NEXT: fmaxnmv s1, v1.4s +; CHECK-GI-NEXT: fmaxnm s0, s0, s2 +; CHECK-GI-NEXT: fmaxnm s1, s1, s3 +; CHECK-GI-NEXT: fmaxnm s0, s0, s1 +; CHECK-GI-NEXT: ret %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a) %a1 = call float @llvm.maxnum.f32(float %r1, float %c) %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b) diff --git a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir index 5b379c2bd5629..c166b6b48f981 100644 --- a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir +++ b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir @@ -1,6 +1,17 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 # RUN: llc -o - %s --run-pass=machine-cp -mcp-use-is-copy-instr -mtriple=arm64-apple-macos --verify-machineinstrs | FileCheck %s +--- | + declare void @foo() + + define void @test() { + unreachable + } + define void @test2() { + unreachable + } +... + --- name: test tracksRegLiveness: true @@ -30,3 +41,22 @@ body: | RET undef $lr, implicit $x0 ... 
+--- +name: test2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $q14, $d29, $x0, $x1 + ; CHECK-LABEL: name: test2 + ; CHECK: liveins: $q14, $d29, $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $d8 = COPY killed renamable $d29 + ; CHECK-NEXT: BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: renamable $b0 = SMAXVv8i8v killed renamable $d8, implicit-def $q0 + ; CHECK-NEXT: RET_ReallyLR implicit $b0 + renamable $q8 = COPY renamable $q14 + renamable $d8 = COPY killed renamable $d29 + BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + renamable $b0 = SMAXVv8i8v killed renamable $d8, implicit-def $q0 + RET_ReallyLR implicit $b0 +... diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index bc359d6ff3aaa..8e3c905b0eae5 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -34508,14 +34508,25 @@ define bfloat @v_select_bf16(i1 %cond, bfloat %a, bfloat %b) { ; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: v_select_bf16: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX11TRUE16-LABEL: v_select_bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, v0.h, vcc_lo +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_select_bf16: +; GFX11FAKE16: 
; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 1, v0 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = select i1 %cond, bfloat %a, bfloat %b ret bfloat %op } @@ -34573,11 +34584,14 @@ define bfloat @v_select_fneg_lhs_bf16(i1 %cond, bfloat %a, bfloat %b) { ; GFX11TRUE16-LABEL: v_select_fneg_lhs_bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_xor_b16 v1.l, 0x8000, v1.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 +; GFX11TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.l +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.h, v0.l, vcc_lo ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_select_fneg_lhs_bf16: @@ -34647,11 +34661,14 @@ define bfloat @v_select_fneg_rhs_bf16(i1 %cond, bfloat %a, bfloat %b) { ; GFX11TRUE16-LABEL: v_select_fneg_rhs_bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_xor_b16 v2.l, 0x8000, v2.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 
1, v0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 +; GFX11TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.l +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, v0.h, vcc_lo ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_select_fneg_rhs_bf16: @@ -34749,11 +34766,15 @@ define <2 x bfloat> @v_select_v2bf16(i1 %cond, <2 x bfloat> %a, <2 x bfloat> %b) ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v1 :: v_dual_cndmask_b32 v1, v3, v4 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v3.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v2.l, v1.l, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] @@ -34856,14 +34877,19 @@ define <2 x bfloat> @v_vselect_v2bf16(<2 x i1> %cond, <2 x bfloat> %a, <2 x bflo ; GFX11TRUE16-LABEL: v_vselect_v2bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 ; GFX11TRUE16-NEXT: 
v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v2 :: v_dual_and_b32 v1, 1, v1 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v5.l, v4.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v3.l, v2.l, s0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] @@ -34936,16 +34962,27 @@ define amdgpu_ps i32 @s_select_bf16(bfloat inreg %a, bfloat inreg %b, i32 %c) { ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: ; return to shader part epilog ; -; GFX11-LABEL: s_select_bf16: -; GFX11: ; %bb.0: -; GFX11-NEXT: v_mov_b32_e32 v1, s0 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_cndmask_b32_e32 v0, s1, v1, vcc_lo -; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11-NEXT: ; return to shader part epilog +; GFX11TRUE16-LABEL: s_select_bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, s1, v0.l, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 +; GFX11TRUE16-NEXT: ; return to shader part epilog +; +; GFX11FAKE16-LABEL: s_select_bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: v_mov_b32_e32 v1, s0 +; GFX11FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, s1, v1, vcc_lo +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_readfirstlane_b32 s0, v0 +; GFX11FAKE16-NEXT: ; return to shader part epilog %cond = icmp eq i32 %c, 0 %op = select i1 %cond, bfloat %a, bfloat %b %cast = bitcast bfloat %op to i16 @@ -35038,17 +35075,21 @@ define amdgpu_ps i32 @s_select_v2bf16(<2 x bfloat> inreg %a, <2 x bfloat> inreg ; ; GFX11TRUE16-LABEL: s_select_v2bf16: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_lshr_b32 s2, s1, 16 -; GFX11TRUE16-NEXT: s_lshr_b32 s3, s0, 16 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, s2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, s3 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, s1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, s0 +; GFX11TRUE16-NEXT: s_lshr_b32 s2, s0, 16 +; GFX11TRUE16-NEXT: s_lshr_b32 s3, s1, 16 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_cndmask_b32 v1, v3, v4 -; GFX11TRUE16-NEXT: v_perm_b32 v0, v0, v1, 0x5040100 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s3 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, s1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, s0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, v0.h, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v1.l, v1.h, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; @@ -35156,17 +35197,20 @@ define amdgpu_ps i32 @s_vselect_v2bf16(<2 x bfloat> inreg %a, <2 x bfloat> inreg ; ; GFX11TRUE16-LABEL: s_vselect_v2bf16: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_lshr_b32 s2, s1, 16 -; GFX11TRUE16-NEXT: s_lshr_b32 s3, s0, 16 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, s2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, s3 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, s1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, s0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo +; GFX11TRUE16-NEXT: s_lshr_b32 s3, s1, 16 +; GFX11TRUE16-NEXT: s_lshr_b32 s4, s0, 16 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s2, 0, v1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s3 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, s1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, s0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, v0.h, s2 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v1.l, v1.h, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l +; 
GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 @@ -36876,33 +36920,38 @@ define amdgpu_ps <2 x i32> @s_vselect_v4bf16(<4 x bfloat> inreg %a, <4 x bfloat> ; ; GFX11TRUE16-LABEL: s_vselect_v4bf16: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_lshr_b32 s4, s3, 16 -; GFX11TRUE16-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, s4 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, s5 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3 -; GFX11TRUE16-NEXT: s_lshr_b32 s4, s0, 16 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, s2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, s0 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, s4 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo -; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, s3 -; GFX11TRUE16-NEXT: s_lshr_b32 s3, s2, 16 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, s3 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, s1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc_lo +; GFX11TRUE16-NEXT: s_lshr_b32 s7, s3, 16 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v9, vcc_lo -; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s4, 0, v1 +; GFX11TRUE16-NEXT: s_lshr_b32 s8, s1, 16 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s7 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, s3 +; GFX11TRUE16-NEXT: s_lshr_b32 s3, s2, 16 +; GFX11TRUE16-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s5, 0, v2 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s6, 0, v3 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s8 +; GFX11TRUE16-NEXT: v_mov_b16_e32 
v1.h, s3 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, s7 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, s2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, s1 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, v0.h, s6 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v2.l, s4 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.h, v2.h, v3.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.l, v1.l, v3.h, s5 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h +; GFX11TRUE16-NEXT: v_perm_b32 v1, v2, v1, 0x5040100 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 -; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_perm_b32 v0, v0, v3, 0x5040100 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s1, v1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; ; GFX11FAKE16-LABEL: s_vselect_v4bf16: @@ -37078,29 +37127,33 @@ define <4 x bfloat> @v_vselect_v4bf16(<4 x i1> %cond, <4 x bfloat> %a, <4 x bflo ; GFX11TRUE16-LABEL: v_vselect_v4bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v7.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 -; 
GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 ; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v6 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v4 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v7 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v2 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s1, 1, v3 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v6 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s2, 1, v0 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v6.l, v4.l, s0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v1.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.l, v8.l, v3.l, s1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.h, v7.l, v5.l, s2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | 
instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v1, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_vselect_v4bf16: @@ -37368,51 +37421,51 @@ define <8 x bfloat> @v_vselect_v8bf16(<8 x i1> %cond, <8 x bfloat> %a, <8 x bflo ; GFX11TRUE16-LABEL: v_vselect_v8bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v16.l, v15.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v17.l, v11.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 1, v6 -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 1, v4 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v6 -; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4 ; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v14 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v10 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v14, v10, vcc_lo -; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v13.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v9.l -; GFX11TRUE16-NEXT: v_and_b32_e32 v5, 1, v5 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 
vcc_lo, 1, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v14, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 1, v7 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v12 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v8 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v12, v8, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 1, v4 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v10, v14, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v9, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v15, v11, vcc_lo -; GFX11TRUE16-NEXT: v_perm_b32 v2, v5, v4, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v6 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s1, 1, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v5 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v15 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s2, 1, v0 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s3, 1, v1 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v11 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s4, 1, v4 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s5, 1, v2 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s6, 1, v3 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v8 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v5.l, v1.l, s2 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v12 +; 
GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v9 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v13 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v10 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v14 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v15.l, v11.l, s3 +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.l, v14.l, v10.l, s4 +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.h, v3.l, v2.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.l, v12.l, v8.l, s0 +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.h, v5.l, v4.l, s1 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.l, v13.l, v9.l, s5 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.h, v7.l, v6.l, s6 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v1.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v3.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v0.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l +; GFX11TRUE16-NEXT: v_perm_b32 v0, v4, v5, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v1, v2, v6, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v2, v3, v7, 0x5040100 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_perm_b32 v3, v8, v9, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_vselect_v8bf16: @@ -38024,101 +38077,96 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v49.l, v26.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v50.l, v18.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26 -; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 1, v12 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v33.l, v30.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v34.l, v22.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v53.l, v24.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v54.l, 
v16.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24 -; GFX11TRUE16-NEXT: v_and_b32_e32 v10, 1, v10 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12 -; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v35.l, v29.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v36.l, v21.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v51.l, v25.l -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v33, v34, vcc_lo -; GFX11TRUE16-NEXT: v_mov_b16_e32 v52.l, v17.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v25 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 1, v9 ; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 1, v8 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v10 -; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v37.l, v28.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v38.l, v20.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v35, v36, vcc_lo -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 1, v7 ; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 1, v6 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v20 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s7, 1, v9 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s8, 1, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 ; GFX11TRUE16-NEXT: v_and_b32_e32 v5, 1, v5 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v39.l, v27.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v48.l, v19.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v37, v38, vcc_lo -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27 ; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 1, v4 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v6 -; 
GFX11TRUE16-NEXT: v_and_b32_e32 v7, 1, v7 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v29 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v39, v48, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4 -; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 1, v9 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v22 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30 -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v49, v50, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 ; GFX11TRUE16-NEXT: v_and_b32_e32 v11, 1, v11 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v32.l, v23.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v51, v52, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v10, 1, v10 ; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 1, v13 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v53, v54, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v13 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v30, v22, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v11 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v29, v21, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v28, v20, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v27, v19, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 1, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 1, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 1, v14 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v27 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v16 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v24 ; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v24, v16, vcc_lo -; 
GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v25, v17, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5 -; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v26, v18, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_perm_b32 v2, v5, v4, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v4, v9, v8, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v5, v11, v10, 0x5040100 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s2, 1, v2 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s5, 1, v7 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s6, 1, v6 +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.l, v28.l, v20.l, s8 +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.h, v38.l, v37.l, s7 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v30 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v29 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v26 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v25 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s1, 1, v3 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s3, 1, v5 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s4, 1, v4 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s9, 1, v11 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s10, 1, v12 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s11, 1, v13 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s12, 1, v10 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s13, 1, v15 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s14, 1, v14 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.l, v27.l, v19.l, s6 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.h, v48.l, v39.l, s5 +; GFX11TRUE16-NEXT: 
v_cndmask_b16 v4.h, v54.l, v53.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v5.l, v24.l, v16.l, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v2.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v2.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v30.l, v22.l, s10 +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v34.l, v33.l, s11 +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.l, v29.l, v21.l, s12 +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.h, v36.l, v35.l, s9 +; GFX11TRUE16-NEXT: v_cndmask_b16 v5.h, v52.l, v51.l, s1 +; GFX11TRUE16-NEXT: v_cndmask_b16 v6.l, v25.l, v17.l, s2 +; GFX11TRUE16-NEXT: v_cndmask_b16 v6.h, v50.l, v49.l, s3 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v3.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v3.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v4.l, v26.l, v18.l, s4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v5.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v6.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v6.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v1.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v15.l, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v16.l, v0.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v17.l, v0.l +; GFX11TRUE16-NEXT: v_perm_b32 v0, v7, v8, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v1, v5, v9, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v5, v14, v15, 0x5040100 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v31 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v17.l, v31.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v16.l -; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 1, v14 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v14 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v14, v17, v32 :: v_dual_and_b32 v15, 1, v15 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v15 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v3, v23, 
vcc_lo -; GFX11TRUE16-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v6, v13, v12, 0x5040100 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_perm_b32 v7, v15, v14, 0x5040100 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v31 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.l, v31.l, v23.l, s14 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.h, v2.l, v32.l, s13 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.l +; GFX11TRUE16-NEXT: v_perm_b32 v2, v6, v4, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v4, v12, v13, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v6, v16, v17, 0x5040100 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v3.h +; GFX11TRUE16-NEXT: v_perm_b32 v3, v10, v11, 0x5040100 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_perm_b32 v7, v8, v7, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_vselect_v16bf16: @@ -39660,217 +39708,197 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x ; GFX11TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:8 ; GFX11TRUE16-NEXT: scratch_load_b32 v86, off, s32 offset:68 ; GFX11TRUE16-NEXT: scratch_load_b32 v87, off, s32 offset:4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 1, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v22, 1, v22 +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 1, v24 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 1, v26 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 1, v28 +; GFX11TRUE16-NEXT: v_and_b32_e32 v30, 1, v30 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v5, 1, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 1, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 1, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 1, v9 +; GFX11TRUE16-NEXT: v_and_b32_e32 v11, 1, v11 +; GFX11TRUE16-NEXT: 
v_and_b32_e32 v10, 1, v10 +; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 1, v13 +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 1, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 1, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 1, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 1, v17 +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 1, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v19, 1, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v18, 1, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v21, 1, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v20, 1, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 1, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 1, v25 +; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 1, v27 +; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 1, v29 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s8, 1, v8 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s22, 1, v22 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s24, 1, v24 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s26, 1, v30 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s27, 1, v26 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s29, 1, v28 +; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 1, v6 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s1, 1, v3 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s2, 1, v2 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s3, 1, v5 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s4, 1, v4 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s5, 1, v7 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s7, 1, v9 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s9, 1, v11 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s10, 1, v10 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s11, 1, v13 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s12, 1, v12 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s13, 1, v15 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s14, 1, v14 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s15, 1, v17 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s16, 1, v16 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s17, 1, v19 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s18, 1, v18 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s19, 1, v21 +; GFX11TRUE16-NEXT: 
v_cmp_eq_u32_e64 s20, 1, v20 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s21, 1, v23 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s23, 1, v25 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s25, 1, v27 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s28, 1, v29 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s6, 1, v6 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(32) +; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 1, v31 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(31) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v96.l, v32.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v32 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(30) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v97.l, v33.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(29) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v98.l, v34.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.l, v32.l, v33.l, s26 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v33 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(28) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v99.l, v35.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(27) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v100.l, v36.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v0.h, v34.l, v35.l, s29 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v35 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v34 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(26) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v101.l, v37.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(25) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v102.l, v38.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.l, v36.l, v37.l, s27 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v37 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v36 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(24) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v103.l, v39.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(23) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v112.l, v48.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v1.h, v38.l, v39.l, s24 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v39 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v38 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(22) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v113.l, v49.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(21) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v114.l, v50.l -; 
GFX11TRUE16-NEXT: s_waitcnt vmcnt(20) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v115.l, v51.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(19) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v116.l, v52.l +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.l, v48.l, v49.l, s22 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v48 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(18) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v117.l, v53.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(17) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v118.l, v54.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v119.l, v55.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v128.l, v64.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v53 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v52 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v129.l, v65.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(13) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v130.l, v66.l -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v131.l, v67.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v65 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v64 +; GFX11TRUE16-NEXT: v_cndmask_b16 v2.h, v50.l, v51.l, s20 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(11) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v132.l, v68.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v68 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v133.l, v69.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v69 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v134.l, v70.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v70 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(8) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v135.l, v71.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v71 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v70 -; GFX11TRUE16-NEXT: v_and_b32_e32 v30, 1, v30 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v71 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7) +; 
GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v80 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(6) +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v81 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(5) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v146.l, v82.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v82 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(4) -; GFX11TRUE16-NEXT: v_mov_b16_e64 v147.l, v83.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v83 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82 -; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 1, v28 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v30 -; GFX11TRUE16-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v83 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(3) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v30.l, v84.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v84, 16, v84 -; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 1, v26 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v96, v96, v97, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v28 -; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 1, v1 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v84 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(2) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v97.l, v85.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v85, 16, v85 -; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 1, v24 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v98, v98, v99, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v26 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 1, v7 -; GFX11TRUE16-NEXT: v_mov_b16_e64 v144.l, v80.l -; GFX11TRUE16-NEXT: v_mov_b16_e64 v145.l, v81.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v81 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v100, v101, vcc_lo -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v80 -; GFX11TRUE16-NEXT: v_and_b32_e32 v22, 1, v22 -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v24 -; GFX11TRUE16-NEXT: v_and_b32_e32 v5, 1, v5 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v69 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v68 -; GFX11TRUE16-NEXT: v_and_b32_e32 v20, 1, v20 -; GFX11TRUE16-NEXT: 
v_cndmask_b32_e32 v24, v102, v103, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v22 -; GFX11TRUE16-NEXT: v_and_b32_e32 v11, 1, v11 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v67 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v66 -; GFX11TRUE16-NEXT: v_and_b32_e32 v18, 1, v18 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v112, v113, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v20 -; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 1, v9 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v65 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v64 -; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 1, v16 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v114, v115, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v18 -; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 1, v15 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v55 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v54 -; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 1, v14 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v116, v117, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16 -; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 1, v13 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v53 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v52 -; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 1, v12 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, v118, v119, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v14 -; GFX11TRUE16-NEXT: v_and_b32_e32 v19, 1, v19 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v51 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v50 -; GFX11TRUE16-NEXT: v_and_b32_e32 v10, 1, v10 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v128, v129, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12 -; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 1, v17 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v49 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48 -; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 1, v8 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v130, v131, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v10 -; GFX11TRUE16-NEXT: 
v_and_b32_e32 v23, 1, v23 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38 -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 1, v6 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v132, v133, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 -; GFX11TRUE16-NEXT: v_and_b32_e32 v21, 1, v21 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36 -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 1, v4 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v134, v135, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v6 -; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 1, v27 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 1, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v144, v145, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4 -; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 1, v25 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32 -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v146, v147, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 -; GFX11TRUE16-NEXT: v_and_b32_e32 v31, 1, v31 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v85 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v28.l, v86.l +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v86 ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v99.l, v87.l -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v87, 16, v87 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v30, v97, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 1, v29 -; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v86, 16, v86 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v30.l, v84.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v84.l, v85.l -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v28, v99, vcc_lo -; GFX11TRUE16-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, 1, v31 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v28.l, v86.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v85.l, v87.l -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v29 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v29, v34, v35, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v27 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v36, v37, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v25 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v38, v39, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v23 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v48, v49, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v21 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v50, v51, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v19 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v52, v53, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v17 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v54, v55, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v15 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v64, v65, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v13 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v66, v67, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v11 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v68, v69, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v80, v81, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v30, v84, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v28, v85, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v82, v83, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 -; GFX11TRUE16-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 -; 
GFX11TRUE16-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v6, v13, v12, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v2, v5, v4, 0x5040100 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v70, v71, vcc_lo -; GFX11TRUE16-NEXT: v_perm_b32 v5, v11, v10, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v7, v15, v14, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v10, v21, v20, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v11, v23, v22, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v4, v9, v8, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v8, v17, v16, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v9, v19, v18, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v12, v25, v24, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v13, v27, v26, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v14, v29, v98, 0x5040100 -; GFX11TRUE16-NEXT: v_perm_b32 v15, v31, v96, 0x5040100 +; GFX11TRUE16-NEXT: v_cndmask_b16 v7.h, v86.l, v87.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v87 +; GFX11TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 1, v8 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v50 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.l, v52.l, v53.l, s18 +; GFX11TRUE16-NEXT: v_cndmask_b16 v3.h, v54.l, v55.l, s16 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v55 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v54 +; GFX11TRUE16-NEXT: v_cndmask_b16 v4.l, v64.l, v65.l, s14 +; GFX11TRUE16-NEXT: v_cndmask_b16 v4.h, v66.l, v67.l, s12 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v67 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v66 +; GFX11TRUE16-NEXT: v_cndmask_b16 v5.h, v70.l, v71.l, s8 +; GFX11TRUE16-NEXT: v_cndmask_b16 v6.h, v82.l, v83.l, s4 +; GFX11TRUE16-NEXT: v_cndmask_b16 v8.l, v10.l, v9.l, s28 +; GFX11TRUE16-NEXT: v_cndmask_b16 v8.h, v12.l, v11.l, s25 +; GFX11TRUE16-NEXT: v_cndmask_b16 v9.l, v14.l, v13.l, s23 +; GFX11TRUE16-NEXT: v_cndmask_b16 v9.h, v18.l, v15.l, s21 +; GFX11TRUE16-NEXT: v_cndmask_b16 v10.h, v22.l, v21.l, s17 +; GFX11TRUE16-NEXT: v_cndmask_b16 v11.h, 
v26.l, v25.l, s13 +; GFX11TRUE16-NEXT: v_cndmask_b16 v12.h, v30.l, v29.l, s9 +; GFX11TRUE16-NEXT: v_cndmask_b16 v13.l, v32.l, v31.l, s7 +; GFX11TRUE16-NEXT: v_cndmask_b16 v13.h, v34.l, v33.l, s5 +; GFX11TRUE16-NEXT: v_cndmask_b16 v14.l, v36.l, v35.l, s3 +; GFX11TRUE16-NEXT: v_cndmask_b16 v14.h, v38.l, v37.l, s1 +; GFX11TRUE16-NEXT: v_cndmask_b16 v15.l, v48.l, v39.l, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b16 v15.h, v17.l, v16.l, s0 +; GFX11TRUE16-NEXT: v_cndmask_b16 v5.l, v68.l, v69.l, s10 +; GFX11TRUE16-NEXT: v_cndmask_b16 v6.l, v80.l, v81.l, s6 +; GFX11TRUE16-NEXT: v_cndmask_b16 v7.l, v84.l, v85.l, s2 +; GFX11TRUE16-NEXT: v_cndmask_b16 v10.l, v20.l, v19.l, s19 +; GFX11TRUE16-NEXT: v_cndmask_b16 v11.l, v24.l, v23.l, s15 +; GFX11TRUE16-NEXT: v_cndmask_b16 v12.l, v28.l, v27.l, s11 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v18.l, v7.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v19.l, v6.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v21.l, v4.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v23.l, v3.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v24.l, v3.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v25.l, v2.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v26.l, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v27.l, v1.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v28.l, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v29.l, v0.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v30.l, v0.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v15.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v14.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v13.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v13.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v12.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v11.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v16.l, v10.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v17.l, v9.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v31.l, v9.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v32.l, v8.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.l +; GFX11TRUE16-NEXT: 
v_mov_b16_e32 v15.l, v15.h +; GFX11TRUE16-NEXT: v_perm_b32 v0, v0, v18, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v1, v1, v7, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v2, v2, v19, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v3, v3, v6, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v4, v4, v20, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v5, v13, v5, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v6, v12, v21, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v7, v14, v22, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v8, v11, v23, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v9, v16, v24, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v10, v10, v25, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v11, v17, v26, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v12, v31, v27, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v13, v32, v28, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v14, v33, v29, 0x5040100 +; GFX11TRUE16-NEXT: v_perm_b32 v15, v15, v30, 0x5040100 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_vselect_v32bf16: diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll index 7912d1cf8dc0d..add8c0f75bf33 100644 --- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll +++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll @@ -452,11 +452,11 @@ define amdgpu_kernel void @byte8_inselt(ptr addrspace(1) %out, <8 x i8> %vec, i3 ; GCN-NEXT: s_and_b32 s6, s4, 0x1010101 ; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5] ; GCN-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3] -; GCN-NEXT: v_mov_b32_e32 v3, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: v_mov_b32_e32 v2, s0 -; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_mov_b32_e32 v1, s1 +; GCN-NEXT: v_mov_b32_e32 v3, s3 +; GCN-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GCN-NEXT: s_endpgm entry: %v = insertelement <8 x i8> %vec, i8 1, i32 %sel diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix.ll 
b/llvm/test/CodeGen/AMDGPU/mad-mix.ll index b520dd1060ec8..30e3bc3ba5da8 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix.ll @@ -385,17 +385,15 @@ define <2 x float> @v_mad_mix_v2f32_shuffle(<2 x half> %src0, <2 x half> %src1, ; SDAG-CI: ; %bb.0: ; SDAG-CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v4, v5 ; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v5, v1 +; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v1, v4 ; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v4, v5 -; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v5, v0 -; SDAG-CI-NEXT: v_mad_f32 v0, v4, v2, v1 -; SDAG-CI-NEXT: v_mac_f32_e32 v1, v5, v3 +; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v4, v0 +; SDAG-CI-NEXT: v_mad_f32 v0, v1, v2, v5 +; SDAG-CI-NEXT: v_mad_f32 v1, v4, v3, v5 ; SDAG-CI-NEXT: s_setpc_b64 s[30:31] ; ; GISEL-CI-LABEL: v_mad_mix_v2f32_shuffle: diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll index 3be17f9538d0f..7d18739fd0c32 100644 --- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll @@ -1337,10 +1337,10 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 { ; CI-LABEL: lshr_mad_i64_1: ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, v1 +; CI-NEXT: v_mov_b32_e32 v1, 0 ; CI-NEXT: s_movk_i32 s4, 0xfc19 -; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1 -; CI-NEXT: v_mov_b32_e32 v0, v2 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; CI-NEXT: s_setpc_b64 s[30:31] ; ; SI-LABEL: lshr_mad_i64_1: @@ -1357,20 +1357,28 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 { ; GFX9-LABEL: lshr_mad_i64_1: ; GFX9: ; %bb.0: 
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: s_movk_i32 s4, 0xfc19 -; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1 -; GFX9-NEXT: v_mov_b32_e32 v0, v2 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: lshr_mad_i64_1: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX11-NEXT: v_mov_b32_e32 v0, v2 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX1100-LABEL: lshr_mad_i64_1: +; GFX1100: ; %bb.0: +; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc19, v4, v[0:1] +; GFX1100-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3 +; GFX1100-NEXT: s_setpc_b64 s[30:31] +; +; GFX1150-LABEL: lshr_mad_i64_1: +; GFX1150: ; %bb.0: +; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1150-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc19, v2, v[0:1] +; GFX1150-NEXT: s_setpc_b64 s[30:31] ; ; GFX12-LABEL: lshr_mad_i64_1: ; GFX12: ; %bb.0: @@ -1379,10 +1387,9 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 { ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: 
v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xfffffc19, v2, v[0:1] ; GFX12-NEXT: s_setpc_b64 s[30:31] %lsh = lshr i64 %arg0, 32 %mul = mul i64 %lsh, s0xfffffffffffffc19 @@ -1395,10 +1402,10 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 { ; CI-LABEL: lshr_mad_i64_2: ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, v1 +; CI-NEXT: v_mov_b32_e32 v1, 0 ; CI-NEXT: s_movk_i32 s4, 0xd1 -; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1 -; CI-NEXT: v_mov_b32_e32 v0, v2 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; CI-NEXT: s_setpc_b64 s[30:31] ; ; SI-LABEL: lshr_mad_i64_2: @@ -1415,20 +1422,28 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 { ; GFX9-LABEL: lshr_mad_i64_2: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: s_movk_i32 s4, 0xd1 -; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1 -; GFX9-NEXT: v_mov_b32_e32 v0, v2 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: lshr_mad_i64_2: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xd1, v1, v[0:1] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX11-NEXT: v_mov_b32_e32 v0, v2 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX1100-LABEL: lshr_mad_i64_2: +; GFX1100: ; %bb.0: +; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[2:3], null, 0xd1, v4, v[0:1] +; GFX1100-NEXT: v_dual_mov_b32 v0, v2 
:: v_dual_mov_b32 v1, v3 +; GFX1100-NEXT: s_setpc_b64 s[30:31] +; +; GFX1150-LABEL: lshr_mad_i64_2: +; GFX1150: ; %bb.0: +; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1150-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xd1, v2, v[0:1] +; GFX1150-NEXT: s_setpc_b64 s[30:31] ; ; GFX12-LABEL: lshr_mad_i64_2: ; GFX12: ; %bb.0: @@ -1437,10 +1452,9 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 { ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xd1, v1, v[0:1] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xd1, v2, v[0:1] ; GFX12-NEXT: s_setpc_b64 s[30:31] %lsh = lshr i64 %arg0, 32 %mul = mul i64 %lsh, s0xffffffff000000d1 @@ -1453,10 +1467,10 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 { ; CI-LABEL: lshr_mad_i64_3: ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, v1 +; CI-NEXT: v_mov_b32_e32 v1, 0 ; CI-NEXT: s_movk_i32 s4, 0xfc88 -; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1 -; CI-NEXT: v_mov_b32_e32 v0, v2 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; CI-NEXT: s_setpc_b64 s[30:31] ; ; SI-LABEL: lshr_mad_i64_3: @@ -1473,20 +1487,28 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 { ; GFX9-LABEL: lshr_mad_i64_3: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: s_movk_i32 s4, 0xfc88 -; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1] -; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1 -; GFX9-NEXT: 
v_mov_b32_e32 v0, v2 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: lshr_mad_i64_3: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX11-NEXT: v_mov_b32_e32 v0, v2 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX1100-LABEL: lshr_mad_i64_3: +; GFX1100: ; %bb.0: +; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc88, v4, v[0:1] +; GFX1100-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3 +; GFX1100-NEXT: s_setpc_b64 s[30:31] +; +; GFX1150-LABEL: lshr_mad_i64_3: +; GFX1150: ; %bb.0: +; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1150-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc88, v2, v[0:1] +; GFX1150-NEXT: s_setpc_b64 s[30:31] ; ; GFX12-LABEL: lshr_mad_i64_3: ; GFX12: ; %bb.0: @@ -1495,10 +1517,9 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 { ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1 -; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v2, v[0:1] ; GFX12-NEXT: s_setpc_b64 s[30:31] %lsh = lshr i64 %arg0, 32 %mul = mul i64 s0xfffffffffffffc88, %lsh @@ -1511,12 
+1532,12 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 { ; CI-LABEL: lshr_mad_i64_4: ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NEXT: v_mul_lo_u32 v3, v2, v0 -; CI-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v1, v0, 0 +; CI-NEXT: v_mul_lo_u32 v2, v2, v0 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, v0, 0 ; CI-NEXT: s_movk_i32 s4, 0xfc88 -; CI-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[1:2] -; CI-NEXT: v_sub_i32_e32 v1, vcc, v1, v2 +; CI-NEXT: v_add_i32_e32 v2, vcc, v1, v2 +; CI-NEXT: v_mov_b32_e32 v1, 0 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1] ; CI-NEXT: s_setpc_b64 s[30:31] ; ; SI-LABEL: lshr_mad_i64_4: @@ -1539,26 +1560,33 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 { ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v6, v5 -; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, v0, v[6:7] -; GFX9-NEXT: v_mov_b32_e32 v5, v2 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, v0, v[6:7] +; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: s_movk_i32 s4, 0xfc88 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[4:5] -; GFX9-NEXT: v_sub_u32_e32 v1, v1, v2 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, s4, v[4:5] ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: lshr_mad_i64_4: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v1, v0, 0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_mov_b32_e32 v1, v4 -; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v2, v0, v[1:2] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_mov_b32_e32 v4, v5 -; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_sub_nc_u32_e32 v1, v1, v5 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; 
GFX1100-LABEL: lshr_mad_i64_4: +; GFX1100: ; %bb.0: +; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1100-NEXT: v_mad_u64_u32 v[3:4], null, v1, v0, 0 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0 +; GFX1100-NEXT: v_mad_u64_u32 v[5:6], null, v2, v0, v[1:2] +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4] +; GFX1100-NEXT: s_setpc_b64 s[30:31] +; +; GFX1150-LABEL: lshr_mad_i64_4: +; GFX1150: ; %bb.0: +; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1150-NEXT: v_mad_u64_u32 v[3:4], null, v1, v0, 0 +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1150-NEXT: v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0 +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, v2, v0, v[1:2] +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc88, v0, v[3:4] +; GFX1150-NEXT: s_setpc_b64 s[30:31] ; ; GFX12-LABEL: lshr_mad_i64_4: ; GFX12: ; %bb.0: @@ -1569,13 +1597,10 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 { ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v0, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_mov_b32_e32 v1, v4 -; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], null, v2, v0, v[1:2] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_mov_b32_e32 v4, v5 -; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4] +; GFX12-NEXT: v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0 +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v2, v0, v[1:2] ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_sub_nc_u32_e32 v1, v1, v5 +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v0, v[3:4] ; GFX12-NEXT: s_setpc_b64 s[30:31] %ext = zext i32 %arg0 to i64 
%mul1 = mul i64 %arg1, %ext @@ -1862,10 +1887,9 @@ define amdgpu_ps i64 @lshr_mad_i64_sgpr(i64 inreg %arg0) #0 { ; CI-LABEL: lshr_mad_i64_sgpr: ; CI: ; %bb.0: ; CI-NEXT: v_mov_b32_e32 v0, s0 +; CI-NEXT: v_mov_b32_e32 v1, 0 ; CI-NEXT: v_mov_b32_e32 v2, 0xffff1c18 -; CI-NEXT: v_mov_b32_e32 v1, s1 -; CI-NEXT: v_mad_u64_u32 v[0:1], s[2:3], s1, v2, v[0:1] -; CI-NEXT: v_subrev_i32_e32 v1, vcc, s1, v1 +; CI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s1, v2, v[0:1] ; CI-NEXT: v_readfirstlane_b32 s0, v0 ; CI-NEXT: v_readfirstlane_b32 s1, v1 ; CI-NEXT: ; return to shader part epilog @@ -1920,14 +1944,16 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 { ; CI-LABEL: lshr_mad_i64_vec: ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v6, v3 +; CI-NEXT: v_mov_b32_e32 v3, v1 +; CI-NEXT: v_mov_b32_e32 v1, 0 ; CI-NEXT: s_mov_b32 s4, 0xffff1c18 -; CI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1] +; CI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v1 ; CI-NEXT: s_mov_b32 s4, 0xffff1118 -; CI-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3] -; CI-NEXT: v_sub_i32_e32 v1, vcc, v5, v1 -; CI-NEXT: v_sub_i32_e32 v3, vcc, v7, v3 +; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3] ; CI-NEXT: v_mov_b32_e32 v0, v4 -; CI-NEXT: v_mov_b32_e32 v2, v6 +; CI-NEXT: v_mov_b32_e32 v1, v5 ; CI-NEXT: s_setpc_b64 s[30:31] ; ; SI-LABEL: lshr_mad_i64_vec: @@ -1950,28 +1976,44 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 { ; GFX9-LABEL: lshr_mad_i64_vec: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v6, v3 +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: s_mov_b32 s4, 0xffff1c18 -; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1] +; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-NEXT: s_mov_b32 s4, 0xffff1118 -; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, 
v[2:3] -; GFX9-NEXT: v_sub_u32_e32 v1, v5, v1 -; GFX9-NEXT: v_sub_u32_e32 v3, v7, v3 +; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3] ; GFX9-NEXT: v_mov_b32_e32 v0, v4 -; GFX9-NEXT: v_mov_b32_e32 v2, v6 +; GFX9-NEXT: v_mov_b32_e32 v1, v5 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-LABEL: lshr_mad_i64_vec: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1] -; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_sub_nc_u32_e32 v1, v5, v1 -; GFX11-NEXT: v_mov_b32_e32 v0, v4 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-NEXT: v_sub_nc_u32_e32 v3, v7, v3 -; GFX11-NEXT: v_mov_b32_e32 v2, v6 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; GFX1100-LABEL: lshr_mad_i64_vec: +; GFX1100: ; %bb.0: +; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1100-NEXT: v_mov_b32_e32 v8, v3 +; GFX1100-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v1, 0 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[4:5], null, 0xffff1c18, v6, v[0:1] +; GFX1100-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v0, v4 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1100-NEXT: v_mad_u64_u32 v[6:7], null, 0xffff1118, v8, v[2:3] +; GFX1100-NEXT: v_dual_mov_b32 v1, v5 :: v_dual_mov_b32 v2, v6 +; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1100-NEXT: v_mov_b32_e32 v3, v7 +; GFX1100-NEXT: s_setpc_b64 s[30:31] +; +; GFX1150-LABEL: lshr_mad_i64_vec: +; GFX1150: ; %bb.0: +; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1150-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1 +; GFX1150-NEXT: v_mov_b32_e32 v1, 0 +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1150-NEXT: 
v_mov_b32_e32 v3, v1 +; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1] +; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1150-NEXT: v_mad_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3] +; GFX1150-NEXT: s_setpc_b64 s[30:31] ; ; GFX12-LABEL: lshr_mad_i64_vec: ; GFX12: ; %bb.0: @@ -1980,14 +2022,13 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 { ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mad_co_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1] -; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_sub_nc_u32_e32 v1, v5, v1 -; GFX12-NEXT: v_mov_b32_e32 v0, v4 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX12-NEXT: v_sub_nc_u32_e32 v3, v7, v3 -; GFX12-NEXT: v_mov_b32_e32 v2, v6 +; GFX12-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: v_mov_b32_e32 v3, v1 +; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1] +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3] ; GFX12-NEXT: s_setpc_b64 s[30:31] %lsh = lshr <2 x i64> %arg0, %mul = mul <2 x i64> %lsh, diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index 6b7eff316fe95..0833dada43e4d 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -549,17 +549,19 @@ bb: ret void } -; GCN-LABEL: {{^}}fadd_fadd_fsub: +; GCN-LABEL: {{^}}fadd_fadd_fsub_0: ; GFX900: v_add_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, 0 ; GFX900: v_add_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} -; PACKED-SDAG: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}} -; PACKED-SDAG: v_pk_add_f32 
v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}} + +; PACKED-SDAG: v_add_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, 0 +; PACKED-SDAG: v_add_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}} + ; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}} ; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}} -define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg) { +define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) { bb: %i12 = fadd <2 x float> zeroinitializer, %arg - %shift8 = shufflevector <2 x float> %i12, <2 x float> undef, <2 x i32> + %shift8 = shufflevector <2 x float> %i12, <2 x float> poison, <2 x i32> %i13 = fadd <2 x float> zeroinitializer, %shift8 %i14 = shufflevector <2 x float> %arg, <2 x float> %i13, <2 x i32> %i15 = fsub <2 x float> %i14, zeroinitializer @@ -567,6 +569,26 @@ bb: ret void } +; GCN-LABEL: {{^}}fadd_fadd_fsub: +; GFX900: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; GFX900: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} + +; PACKED-SDAG: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; PACKED-SDAG: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}] op_sel_hi:[1,0]{{$}} + +; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}} +; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}} +define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, ptr addrspace(1) %ptr) { +bb: + %i12 = fadd <2 x float> %arg, %arg1 + %shift8 = shufflevector <2 x float> %i12, <2 x float> poison, <2 x i32> + %i13 = fadd <2 x float> %arg1, %shift8 + %i14 = shufflevector <2 x float> %arg, <2 x float> %i13, <2 x i32> + %i15 = fsub <2 x float> %i14, %arg1 + store <2 x float> %i15, ptr addrspace(1) %ptr + ret void +} + ; GCN-LABEL: {{^}}fadd_shuffle_v4: ; GFX900-COUNT-4: v_add_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} ; PACKED-SDAG-COUNT-2: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] op_sel_hi:[1,0]{{$}} 
diff --git a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll index aa3e05fdbdb36..15d172eb17688 100644 --- a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll @@ -156,3 +156,335 @@ define <2 x i16> @trunc_v2i64_arg_to_v2i16(<2 x i64> %arg0) #0 { %trunc = trunc <2 x i64> %arg0 to <2 x i16> ret <2 x i16> %trunc } + +; Test for regression where an unnecessary v_alignbit_b32 was inserted +; on the final result, due to losing the fact that the upper half of +; the lhs vector was undef. +define <2 x i16> @vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_lshr_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = lshr <2 x i32> %undef.hi.elt, splat (i32 16) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshr_b32_e32 v0, 16, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e64 v0, v0, 16 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = lshr <2 x i32> splat (i32 16), %undef.hi.elt + %trunc = 
trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_ashr_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %ashr = ashr <2 x i32> %undef.hi.elt, splat (i32 16) + %trunc = trunc <2 x i32> %ashr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_ashr_i32_e32 v0, -4, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_ashr_rhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_ashrrev_i32_e64 v0, v0, -4 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = ashr <2 x i32> splat (i32 -4), %undef.hi.elt + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_add_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_add_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 16, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: 
vector_trunc_high_bits_undef_add_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = add <2 x i32> %undef.hi.elt, splat (i32 16) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_shl_rhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_shl_rhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshl_b32_e32 v0, 2, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xfffe, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_shl_rhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshlrev_b32_e64 v0, v0, 2 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = shl <2 x i32> splat (i32 2), %undef.hi.elt + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_sub_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_sub_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -16, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_sub_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_add_u32_e32 v0, vcc, -16, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = sub <2 x i32> %undef.hi.elt, splat (i32 16) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> 
@vector_trunc_high_bits_undef_or_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_or_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, 0xffff0011, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0xffff +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_or_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_or_b32_e32 v0, 0xffff0011, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = or <2 x i32> %undef.hi.elt, splat (i32 17) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_xor_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_xor_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_xor_b32_e32 v0, 17, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_xor_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_xor_b32_e32 v0, 17, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = xor <2 x i32> %undef.hi.elt, splat (i32 17) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_shl_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_shl_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xfffc, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_shl_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshlrev_b16_e32 v0, 2, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %shl = shl <2 x i32> %undef.hi.elt, splat (i32 2) + %trunc = trunc <2 x i32> %shl to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_mul_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_mul_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_mul_lo_u32 v0, v0, 18 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: v_and_b32_e32 v0, 0xfffe, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_mul_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mul_lo_u32 v0, v0, 18 +; VI-NEXT: v_and_b32_e32 v0, 0xfffe, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = mul <2 x i32> %undef.hi.elt, splat (i32 18) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_mov_b32 s4, 0x38e38e39 +; SI-NEXT: v_mul_hi_i32 v0, v0, s4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 31, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 2, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_sdiv_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_mov_b32 s4, 0x38e38e39 +; VI-NEXT: v_mul_hi_i32 v0, v0, s4 +; VI-NEXT: v_lshrrev_b32_e32 v1, 31, v0 +; VI-NEXT: v_ashrrev_i32_e32 v0, 2, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; VI-NEXT: 
s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = sdiv <2 x i32> %undef.hi.elt, splat (i32 18) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_srem_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_srem_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_mov_b32 s4, 0x38e38e39 +; SI-NEXT: v_mul_hi_i32 v1, v0, s4 +; SI-NEXT: v_lshrrev_b32_e32 v2, 31, v1 +; SI-NEXT: v_lshrrev_b32_e32 v1, 2, v1 +; SI-NEXT: v_add_i32_e32 v1, vcc, v1, v2 +; SI-NEXT: v_mul_lo_u32 v1, v1, 18 +; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_srem_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_mov_b32 s4, 0x38e38e39 +; VI-NEXT: v_mul_hi_i32 v1, v0, s4 +; VI-NEXT: v_lshrrev_b32_e32 v2, 31, v1 +; VI-NEXT: v_ashrrev_i32_e32 v1, 2, v1 +; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v2 +; VI-NEXT: v_mul_lo_u32 v1, v1, 18 +; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = srem <2 x i32> %undef.hi.elt, splat (i32 18) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + + +define <2 x i16> @vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_mov_b32 s4, 0x38e38e39 +; SI-NEXT: v_mul_hi_u32 v0, v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: v_bfe_u32 v0, v0, 2, 16 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_udiv_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_mov_b32 s4, 0x38e38e39 +; VI-NEXT: v_mul_hi_u32 v0, v0, s4 +; VI-NEXT: v_lshrrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = udiv <2 x i32> %undef.hi.elt, splat (i32 18) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} + +define <2 x i16> @vector_trunc_high_bits_undef_urem_lhs_alignbit_regression(i32 %arg0) { +; SI-LABEL: vector_trunc_high_bits_undef_urem_lhs_alignbit_regression: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_mov_b32 s4, 0x38e38e39 +; SI-NEXT: v_mul_hi_u32 v1, v0, s4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 2, v1 +; SI-NEXT: v_mul_lo_u32 v1, v1, 18 +; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: vector_trunc_high_bits_undef_urem_lhs_alignbit_regression: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_mov_b32 s4, 0x38e38e39 +; VI-NEXT: v_mul_hi_u32 v1, v0, s4 +; VI-NEXT: v_lshrrev_b32_e32 v1, 2, v1 +; VI-NEXT: v_mul_lo_u32 v1, v1, 18 +; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] + %undef.hi.elt = insertelement <2 x i32> poison, i32 %arg0, i32 0 + %lshr = urem <2 x i32> %undef.hi.elt, splat (i32 18) + %trunc = trunc <2 x i32> %lshr to <2 x i16> + ret <2 x i16> %trunc +} diff --git a/llvm/test/CodeGen/AMDGPU/v_cndmask.ll b/llvm/test/CodeGen/AMDGPU/v_cndmask.ll index f20c1ccb2d63e..c6cc479b5deb1 100644 --- a/llvm/test/CodeGen/AMDGPU/v_cndmask.ll +++ b/llvm/test/CodeGen/AMDGPU/v_cndmask.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX10 %s ; RUN: llc -mtriple=amdgcn 
-mcpu=gfx1100 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX12 %s declare i32 @llvm.amdgcn.workitem.id.x() #1 declare half @llvm.fabs.f16(half) @@ -90,6 +91,24 @@ define amdgpu_kernel void @v_cnd_nan_nosgpr(ptr addrspace(1) %out, i32 %c, ptr a ; GFX11-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc ; GFX11-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: v_cnd_nan_nosgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v0, v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_eq_u32 s2, 0 +; GFX12-NEXT: s_cselect_b64 vcc, -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc +; GFX12-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX12-NEXT: s_endpgm %idx = call i32 @llvm.amdgcn.workitem.id.x() #1 %f.gep = getelementptr float, ptr addrspace(1) %fptr, i32 %idx %f = load float, ptr addrspace(1) %f.gep @@ -155,6 +174,18 @@ define amdgpu_kernel void @v_cnd_nan(ptr addrspace(1) %out, i32 %c, float %f) #0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, -1, s3, s[4:5] ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: v_cnd_nan: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_mov_b32_e32 v0, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_eq_u32 s2, 0 +; GFX12-NEXT: s_cselect_b32 s2, s3, -1 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: v_mov_b32_e32 v1, s2 +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %setcc = 
icmp ne i32 %c, 0 %select = select i1 %setcc, float 0xFFFFFFFFE0000000, float %f store float %select, ptr addrspace(1) %out @@ -220,6 +251,21 @@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_sgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e64 v1, 1.0, s1, s[4:5] ; GFX11-NEXT: global_store_b32 v0, v1, s[2:3] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k1_sgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x4c +; GFX12-NEXT: s_load_b64 s[2:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s0, 0 +; GFX12-NEXT: s_cselect_b32 s0, s1, 1.0 +; GFX12-NEXT: v_mov_b32_e32 v1, s0 +; GFX12-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext @@ -285,6 +331,19 @@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_sgprX_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e64 v1, 1.0, s6, s[2:3] ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k1_sgprX_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s2, 0 +; GFX12-NEXT: s_cselect_b32 s2, s2, 1.0 +; GFX12-NEXT: v_mov_b32_e32 v1, s2 +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext @@ -350,6 +409,21 
@@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_sgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, s1, s[4:5] ; GFX11-NEXT: global_store_b32 v0, v1, s[2:3] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k0_sgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x4c +; GFX12-NEXT: s_load_b64 s[2:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s0, 0 +; GFX12-NEXT: s_cselect_b32 s0, s1, 0 +; GFX12-NEXT: v_mov_b32_e32 v1, s0 +; GFX12-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext @@ -415,6 +489,19 @@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_sgprX_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, s6, s[2:3] ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k0_sgprX_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s2, 0 +; GFX12-NEXT: s_cselect_b32 s2, s2, 0 +; GFX12-NEXT: v_mov_b32_e32 v1, s2 +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext @@ -498,6 +585,23 @@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_vgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc ; GFX11-NEXT: 
global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k0_vgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s2, 0 +; GFX12-NEXT: s_cselect_b64 vcc, -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %z.gep = getelementptr inbounds float, ptr addrspace(1) %z.ptr, i64 %tid.ext @@ -583,6 +687,23 @@ define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_vgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e32 v1, 1.0, v1, vcc ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_sgprX_k0_select_k1_vgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_nlg_f32 s2, 0 +; GFX12-NEXT: s_cselect_b64 vcc, -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 1.0, v1, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %z.gep = getelementptr inbounds float, ptr addrspace(1) %z.ptr, i64 %tid.ext @@ -661,6 +782,21 @@ define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_sgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: 
v_cndmask_b32_e64 v1, 1.0, s4, vcc ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_select_k1_sgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v1 +; GFX12-NEXT: v_cndmask_b32_e64 v1, 1.0, s4, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -751,6 +887,24 @@ define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_vgprZ_f32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e32 v1, 1.0, v2, vcc ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_select_k1_vgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v1 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 1.0, v2, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -843,6 +997,24 @@ define amdgpu_kernel void 
@icmp_vgprX_k0_select_k1_vgprZ_i32(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e32 v1, 2, v2, vcc ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: icmp_vgprX_k0_select_k1_vgprZ_i32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_lt_i32_e32 vcc, -1, v1 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 2, v2, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds i32, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -939,6 +1111,25 @@ define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i64(ptr addrspace(1) %o ; GFX11-NEXT: v_cndmask_b32_e32 v0, 2, v2, vcc ; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: icmp_vgprX_k0_select_k1_vgprZ_i64: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v4, 3, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b64 v[0:1], v4, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b64 v[2:3], v4, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[0:1] +; GFX12-NEXT: v_cndmask_b32_e32 v1, 0, v3, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 2, v2, vcc +; GFX12-NEXT: 
global_store_b64 v4, v[0:1], s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds i64, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1048,6 +1239,28 @@ define amdgpu_kernel void @fcmp_vgprX_k0_select_vgprZ_k1_v4f32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc ; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_select_vgprZ_k1_v4f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v4, 4, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v5, v1, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b128 v[0:3], v4, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 4.0, v5 +; GFX12-NEXT: v_cndmask_b32_e32 v3, 4.0, v3, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v2, -0.5, v2, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v1, 2.0, v1, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc +; GFX12-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1157,6 +1370,28 @@ define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_vgprZ_v4f32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc ; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_select_k1_vgprZ_v4f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; 
GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v4, 4, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v5, v1, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b128 v[0:3], v4, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 4.0, v5 +; GFX12-NEXT: v_cndmask_b32_e32 v3, 4.0, v3, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v2, -0.5, v2, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v1, 2.0, v1, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc +; GFX12-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1268,6 +1503,28 @@ define amdgpu_kernel void @fcmp_k0_vgprX_select_k1_vgprZ_v4f32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc ; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_k0_vgprX_select_k1_vgprZ_v4f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v4, 4, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v5, v1, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b128 v[0:3], v4, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 4.0, v5 +; GFX12-NEXT: v_cndmask_b32_e32 v3, 4.0, v3, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v2, -0.5, v2, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v1, 2.0, v1, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc +; GFX12-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX12-NEXT: s_endpgm %tid = 
call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1375,6 +1632,29 @@ define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i1(ptr addrspace(1) %ou ; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] ; GFX11-NEXT: global_store_b8 v0, v1, s[8:9] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: icmp_vgprX_k0_select_k1_vgprZ_i1: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v1, s[10:11] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_u8 v2, v0, s[0:1] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1 +; GFX12-NEXT: v_and_b32_e32 v2, 1, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v2 +; GFX12-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX12-NEXT: global_store_b8 v0, v1, s[8:9] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds i32, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1479,6 +1759,26 @@ define amdgpu_kernel void @fcmp_vgprX_k0_selectf64_k1_vgprZ_f32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc ; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_selectf64_k1_vgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu 
instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v2, 3, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v3, v1, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b64 v[0:1], v2, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v3 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 0x3ff00000, v1, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc +; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1581,6 +1881,26 @@ define amdgpu_kernel void @fcmp_vgprX_k0_selecti64_k1_vgprZ_f32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v0, 3, v0, vcc ; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_vgprX_k0_selecti64_k1_vgprZ_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v2, 3, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v3, v1, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b64 v[0:1], v2, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v3 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc +; GFX12-NEXT: v_cndmask_b32_e32 v0, 3, v0, vcc +; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1674,6 +1994,24 @@ define amdgpu_kernel void 
@icmp_vgprX_k0_selectf32_k1_vgprZ_i32(ptr addrspace(1) ; GFX11-NEXT: v_cndmask_b32_e32 v1, 4.0, v2, vcc ; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: icmp_vgprX_k0_selectf32_k1_vgprZ_i32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_gt_u32_e32 vcc, 2, v1 +; GFX12-NEXT: v_cndmask_b32_e32 v1, 4.0, v2, vcc +; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds i32, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1783,6 +2121,28 @@ define amdgpu_kernel void @fcmp_k0_vgprX_select_k1_vgprZ_f32_cond_use_x2(ptr add ; GFX11-NEXT: global_store_b32 v0, v2, s[0:1] dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_k0_vgprX_select_k1_vgprZ_f32_cond_use_x2: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, 4.0, v1 +; GFX12-NEXT: v_cndmask_b32_e64 v1, v2, -1.0, vcc +; GFX12-NEXT: v_cndmask_b32_e64 v2, v2, -2.0, vcc +; GFX12-NEXT: 
global_store_b32 v0, v1, s[0:1] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_storecnt 0x0 +; GFX12-NEXT: global_store_b32 v0, v2, s[0:1] scope:SCOPE_SYS +; GFX12-NEXT: s_wait_storecnt 0x0 +; GFX12-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tid.ext = sext i32 %tid to i64 %x.gep = getelementptr inbounds float, ptr addrspace(1) %x.ptr, i64 %tid.ext @@ -1890,6 +2250,27 @@ define amdgpu_kernel void @v_cndmask_abs_neg_f16(ptr addrspace(1) %out, i32 %c, ; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX11-NEXT: global_store_b16 v2, v0, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: v_cndmask_abs_neg_f16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v2, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 1, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_u16 v0, v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_lg_u32 s2, 0 +; GFX12-NEXT: s_cselect_b64 vcc, -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff, v0 +; GFX12-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GFX12-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX12-NEXT: s_endpgm %idx = call i32 @llvm.amdgcn.workitem.id.x() #1 %f.gep = getelementptr half, ptr addrspace(1) %fptr, i32 %idx %f = load half, ptr addrspace(1) %f.gep @@ -1981,6 +2362,24 @@ define amdgpu_kernel void @v_cndmask_abs_neg_f32(ptr addrspace(1) %out, i32 %c, ; GFX11-NEXT: v_cndmask_b32_e64 v0, -v0, |v0|, s[2:3] ; GFX11-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: v_cndmask_abs_neg_f32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; 
GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b32 v0, v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_lg_u32 s2, 0 +; GFX12-NEXT: s_cselect_b64 s[2:3], -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, -v0, |v0|, s[2:3] +; GFX12-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX12-NEXT: s_endpgm %idx = call i32 @llvm.amdgcn.workitem.id.x() #1 %f.gep = getelementptr float, ptr addrspace(1) %fptr, i32 %idx %f = load float, ptr addrspace(1) %f.gep @@ -2086,6 +2485,28 @@ define amdgpu_kernel void @v_cndmask_abs_neg_f64(ptr addrspace(1) %out, i32 %c, ; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX11-NEXT: global_store_b64 v3, v[0:1], s[0:1] ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: v_cndmask_abs_neg_f64: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v3, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 3, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: global_load_b64 v[0:1], v0, s[0:1] +; GFX12-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_lg_u32 s2, 0 +; GFX12-NEXT: s_cselect_b64 vcc, -1, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_and_b32_e32 v2, 0x7fffffff, v1 +; GFX12-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 +; GFX12-NEXT: v_cndmask_b32_e32 v0, v0, v0, vcc +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX12-NEXT: global_store_b64 v3, v[0:1], s[0:1] +; GFX12-NEXT: s_endpgm %idx = call i32 @llvm.amdgcn.workitem.id.x() #1 %f.gep = getelementptr double, ptr addrspace(1) %fptr, i32 %idx %f = load double, ptr addrspace(1) %f.gep diff --git a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir index c936c13ac6c66..d91ee54215924 100644 --- 
a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir +++ b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir @@ -159,16 +159,16 @@ name: mask_hazard_cndmask_dpp3 body: | bb.0: ; GFX11-LABEL: name: mask_hazard_cndmask_dpp3 - ; GFX11: $vgpr0 = V_CNDMASK_B16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec + ; GFX11: $vgpr0 = V_CNDMASK_B16_fake16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec ; GFX11-NEXT: $sgpr2_sgpr3 = S_CSELECT_B64 -1, 0, implicit $scc ; GFX11-NEXT: S_WAITCNT_DEPCTR 65534 ; GFX11-NEXT: S_ENDPGM 0 ; ; GFX12-LABEL: name: mask_hazard_cndmask_dpp3 - ; GFX12: $vgpr0 = V_CNDMASK_B16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec + ; GFX12: $vgpr0 = V_CNDMASK_B16_fake16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec ; GFX12-NEXT: $sgpr2_sgpr3 = S_CSELECT_B64 -1, 0, implicit $scc ; GFX12-NEXT: S_ENDPGM 0 - $vgpr0 = V_CNDMASK_B16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec + $vgpr0 = V_CNDMASK_B16_fake16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec $sgpr2_sgpr3 = S_CSELECT_B64 -1, 0, implicit $scc S_ENDPGM 0 ... 
diff --git a/llvm/test/CodeGen/ARM/vector-promotion.ll b/llvm/test/CodeGen/ARM/vector-promotion.ll index a9a8f58963a37..344014ad80449 100644 --- a/llvm/test/CodeGen/ARM/vector-promotion.ll +++ b/llvm/test/CodeGen/ARM/vector-promotion.ll @@ -4,7 +4,7 @@ ; IR-BOTH-LABEL: @simpleOneInstructionPromotion ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, ptr %addr1 -; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], +; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[VECTOR_OR]], i32 1 ; IR-BOTH-NEXT: store i32 [[EXTRACT]], ptr %dest ; IR-BOTH-NEXT: ret @@ -71,13 +71,13 @@ end: ; IR-LABEL: @chainOfInstructionsToPromote ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, ptr %addr1 -; IR-BOTH-NEXT: [[VECTOR_OR1:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], -; IR-BOTH-NEXT: [[VECTOR_OR2:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR1]], -; IR-BOTH-NEXT: [[VECTOR_OR3:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR2]], -; IR-BOTH-NEXT: [[VECTOR_OR4:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR3]], -; IR-BOTH-NEXT: [[VECTOR_OR5:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR4]], -; IR-BOTH-NEXT: [[VECTOR_OR6:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR5]], -; IR-BOTH-NEXT: [[VECTOR_OR7:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR6]], +; IR-BOTH-NEXT: [[VECTOR_OR1:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], +; IR-BOTH-NEXT: [[VECTOR_OR2:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR1]], +; IR-BOTH-NEXT: [[VECTOR_OR3:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR2]], +; IR-BOTH-NEXT: [[VECTOR_OR4:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR3]], +; IR-BOTH-NEXT: [[VECTOR_OR5:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR4]], +; IR-BOTH-NEXT: [[VECTOR_OR6:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR5]], +; IR-BOTH-NEXT: [[VECTOR_OR7:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[VECTOR_OR6]], ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[VECTOR_OR7]], i32 0 ; 
IR-BOTH-NEXT: store i32 [[EXTRACT]], ptr %dest ; IR-BOTH-NEXT: ret @@ -276,7 +276,7 @@ define void @undefRemCase(ptr %addr1, ptr %dest) { ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = frem nnan float [[EXTRACT]], 7.0 ; Vector version: -; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = frem nnan <2 x float> [[LOAD]], +; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = frem nnan <2 x float> [[LOAD]], ; IR-STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[DIV]], i32 1 ; ; IR-BOTH-NEXT: store float [[RES]], ptr %dest @@ -297,7 +297,7 @@ define void @undefConstantFRemCaseWithFastMath(ptr %addr1, ptr %dest) { ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = frem nnan float 7.000000e+00, [[EXTRACT]] ; Vector version: -; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = frem nnan <2 x float> , [[LOAD]] +; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = frem nnan <2 x float> , [[LOAD]] ; IR-STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[DIV]], i32 1 ; ; IR-BOTH-NEXT: store float [[RES]], ptr %dest @@ -319,7 +319,7 @@ define void @undefVectorFRemCaseWithFastMath(ptr %addr1, ptr %dest) { ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[LOAD]], i32 1 ; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = fadd float [[EXTRACT]], 1.0 ; Vector version: -; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = fadd <2 x float> [[LOAD]], +; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = fadd <2 x float> [[LOAD]], ; IR-STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <2 x float> [[DIV]], i32 1 ; ; IR-BOTH-NEXT: store float [[RES]], ptr %dest @@ -358,7 +358,7 @@ define void @simpleOneInstructionPromotionVariableIdx(ptr %addr1, ptr %dest, i32 ; Check a vector with more than 2 elements. 
; IR-BOTH-LABEL: @simpleOneInstructionPromotion8x8 ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <8 x i8>, ptr %addr1 -; IR-BOTH-NEXT: [[OR:%[a-zA-Z_0-9-]+]] = or <8 x i8> [[LOAD]], +; IR-BOTH-NEXT: [[OR:%[a-zA-Z_0-9-]+]] = or <8 x i8> [[LOAD]], ; IR-BOTH-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <8 x i8> [[OR]], i32 1 ; IR-BOTH-NEXT: store i8 [[RES]], ptr %dest ; IR-BOTH-NEXT: ret @@ -374,7 +374,7 @@ define void @simpleOneInstructionPromotion8x8(ptr %addr1, ptr %dest) { ; lowered on a Q register. ; IR-BOTH-LABEL: @simpleOneInstructionPromotion ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <4 x i32>, ptr %addr1 -; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <4 x i32> [[LOAD]], +; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <4 x i32> [[LOAD]], ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <4 x i32> [[VECTOR_OR]], i32 1 ; IR-BOTH-NEXT: store i32 [[EXTRACT]], ptr %dest ; IR-BOTH-NEXT: ret diff --git a/llvm/test/CodeGen/DirectX/WaveActiveSum.ll b/llvm/test/CodeGen/DirectX/WaveActiveSum.ll new file mode 100644 index 0000000000000..d5180eb10c699 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/WaveActiveSum.ll @@ -0,0 +1,143 @@ +; RUN: opt -S -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s + +; Test that for scalar values, WaveActiveSum maps down to the DirectX op + +define noundef half @wave_active_sum_half(half noundef %expr) { +entry: +; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr, i8 0, i8 0) + %ret = call half @llvm.dx.wave.reduce.sum.f16(half %expr) + ret half %ret +} + +define noundef float @wave_active_sum_float(float noundef %expr) { +entry: +; CHECK: call float @dx.op.waveActiveOp.f32(i32 119, float %expr, i8 0, i8 0) + %ret = call float @llvm.dx.wave.reduce.sum.f32(float %expr) + ret float %ret +} + +define noundef double @wave_active_sum_double(double noundef %expr) { +entry: +; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr, i8 0, i8 0) + %ret = call double 
@llvm.dx.wave.reduce.sum.f64(double %expr) + ret double %ret +} + +define noundef i16 @wave_active_sum_i16(i16 noundef %expr) { +entry: +; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 0, i8 0) + %ret = call i16 @llvm.dx.wave.reduce.sum.i16(i16 %expr) + ret i16 %ret +} + +define noundef i32 @wave_active_sum_i32(i32 noundef %expr) { +entry: +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 0, i8 0) + %ret = call i32 @llvm.dx.wave.reduce.sum.i32(i32 %expr) + ret i32 %ret +} + +define noundef i64 @wave_active_sum_i64(i64 noundef %expr) { +entry: +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 0, i8 0) + %ret = call i64 @llvm.dx.wave.reduce.sum.i64(i64 %expr) + ret i64 %ret +} + +define noundef i16 @wave_active_usum_i16(i16 noundef %expr) { +entry: +; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr, i8 0, i8 1) + %ret = call i16 @llvm.dx.wave.reduce.usum.i16(i16 %expr) + ret i16 %ret +} + +define noundef i32 @wave_active_usum_i32(i32 noundef %expr) { +entry: +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr, i8 0, i8 1) + %ret = call i32 @llvm.dx.wave.reduce.usum.i32(i32 %expr) + ret i32 %ret +} + +define noundef i64 @wave_active_usum_i64(i64 noundef %expr) { +entry: +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr, i8 0, i8 1) + %ret = call i64 @llvm.dx.wave.reduce.usum.i64(i64 %expr) + ret i64 %ret +} + +declare half @llvm.dx.wave.reduce.sum.f16(half) +declare float @llvm.dx.wave.reduce.sum.f32(float) +declare double @llvm.dx.wave.reduce.sum.f64(double) + +declare i16 @llvm.dx.wave.reduce.sum.i16(i16) +declare i32 @llvm.dx.wave.reduce.sum.i32(i32) +declare i64 @llvm.dx.wave.reduce.sum.i64(i64) + +declare i16 @llvm.dx.wave.reduce.usum.i16(i16) +declare i32 @llvm.dx.wave.reduce.usum.i32(i32) +declare i64 @llvm.dx.wave.reduce.usum.i64(i64) + +; Test that for vector values, WaveActiveSum scalarizes and maps down to the +; DirectX op + +define noundef <2 x half> 
@wave_active_sum_v2half(<2 x half> noundef %expr) { +entry: +; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i0, i8 0, i8 0) +; CHECK: call half @dx.op.waveActiveOp.f16(i32 119, half %expr.i1, i8 0, i8 0) + %ret = call <2 x half> @llvm.dx.wave.reduce.sum.v2f16(<2 x half> %expr) + ret <2 x half> %ret +} + +define noundef <3 x i32> @wave_active_sum_v3i32(<3 x i32> noundef %expr) { +entry: +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 0, i8 0) +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 0, i8 0) +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 0, i8 0) + %ret = call <3 x i32> @llvm.dx.wave.reduce.sum.v3i32(<3 x i32> %expr) + ret <3 x i32> %ret +} + +define noundef <4 x double> @wave_active_sum_v4f64(<4 x double> noundef %expr) { +entry: +; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i0, i8 0, i8 0) +; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i1, i8 0, i8 0) +; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i2, i8 0, i8 0) +; CHECK: call double @dx.op.waveActiveOp.f64(i32 119, double %expr.i3, i8 0, i8 0) + %ret = call <4 x double> @llvm.dx.wave.reduce.sum.v464(<4 x double> %expr) + ret <4 x double> %ret +} + +declare <2 x half> @llvm.dx.wave.reduce.sum.v2f16(<2 x half>) +declare <3 x i32> @llvm.dx.wave.reduce.sum.v3i32(<3 x i32>) +declare <4 x double> @llvm.dx.wave.reduce.sum.v4f64(<4 x double>) + +define noundef <2 x i16> @wave_active_usum_v2i16(<2 x i16> noundef %expr) { +entry: +; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i0, i8 0, i8 1) +; CHECK: call i16 @dx.op.waveActiveOp.i16(i32 119, i16 %expr.i1, i8 0, i8 1) + %ret = call <2 x i16> @llvm.dx.wave.reduce.usum.v2f16(<2 x i16> %expr) + ret <2 x i16> %ret +} + +define noundef <3 x i32> @wave_active_usum_v3i32(<3 x i32> noundef %expr) { +entry: +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i0, i8 0, i8 1) +; CHECK: call i32 
@dx.op.waveActiveOp.i32(i32 119, i32 %expr.i1, i8 0, i8 1) +; CHECK: call i32 @dx.op.waveActiveOp.i32(i32 119, i32 %expr.i2, i8 0, i8 1) + %ret = call <3 x i32> @llvm.dx.wave.reduce.usum.v3i32(<3 x i32> %expr) + ret <3 x i32> %ret +} + +define noundef <4 x i64> @wave_active_usum_v4f64(<4 x i64> noundef %expr) { +entry: +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i0, i8 0, i8 1) +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i1, i8 0, i8 1) +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i2, i8 0, i8 1) +; CHECK: call i64 @dx.op.waveActiveOp.i64(i32 119, i64 %expr.i3, i8 0, i8 1) + %ret = call <4 x i64> @llvm.dx.wave.reduce.usum.v464(<4 x i64> %expr) + ret <4 x i64> %ret +} + +declare <2 x i16> @llvm.dx.wave.reduce.usum.v2f16(<2 x i16>) +declare <3 x i32> @llvm.dx.wave.reduce.usum.v3i32(<3 x i32>) +declare <4 x i64> @llvm.dx.wave.reduce.usum.v4f64(<4 x i64>) diff --git a/llvm/test/CodeGen/NVPTX/atomics-sm90.ll b/llvm/test/CodeGen/NVPTX/atomics-sm90.ll index f81b785f13225..67552b95e0491 100644 --- a/llvm/test/CodeGen/NVPTX/atomics-sm90.ll +++ b/llvm/test/CodeGen/NVPTX/atomics-sm90.ll @@ -46,58 +46,52 @@ define void @test(ptr %dp0, ptr addrspace(1) %dp1, ptr addrspace(3) %dp3, bfloat ; CHECKPTX71-LABEL: test( ; CHECKPTX71: { ; CHECKPTX71-NEXT: .reg .pred %p<5>; -; CHECKPTX71-NEXT: .reg .b16 %rs<22>; +; CHECKPTX71-NEXT: .reg .b16 %rs<26>; ; CHECKPTX71-NEXT: .reg .b32 %r<4>; -; CHECKPTX71-NEXT: .reg .f32 %f<12>; ; CHECKPTX71-EMPTY: ; CHECKPTX71-NEXT: // %bb.0: ; CHECKPTX71-NEXT: ld.param.b16 %rs13, [test_param_3]; ; CHECKPTX71-NEXT: ld.param.u32 %r3, [test_param_2]; ; CHECKPTX71-NEXT: ld.param.u32 %r2, [test_param_1]; ; CHECKPTX71-NEXT: ld.param.u32 %r1, [test_param_0]; -; CHECKPTX71-NEXT: ld.b16 %rs18, [%r1]; -; CHECKPTX71-NEXT: cvt.f32.bf16 %f1, %rs13; +; CHECKPTX71-NEXT: ld.b16 %rs22, [%r1]; ; CHECKPTX71-NEXT: $L__BB0_1: // %atomicrmw.start14 ; CHECKPTX71-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECKPTX71-NEXT: 
cvt.f32.bf16 %f2, %rs18; -; CHECKPTX71-NEXT: add.rn.f32 %f3, %f2, %f1; -; CHECKPTX71-NEXT: cvt.rn.bf16.f32 %rs14, %f3; -; CHECKPTX71-NEXT: atom.cas.b16 %rs3, [%r1], %rs18, %rs14; -; CHECKPTX71-NEXT: setp.ne.s16 %p1, %rs3, %rs18; -; CHECKPTX71-NEXT: mov.u16 %rs18, %rs3; +; CHECKPTX71-NEXT: mov.b16 %rs14, 0x3F80; +; CHECKPTX71-NEXT: fma.rn.bf16 %rs15, %rs22, %rs14, %rs13; +; CHECKPTX71-NEXT: atom.cas.b16 %rs3, [%r1], %rs22, %rs15; +; CHECKPTX71-NEXT: setp.ne.s16 %p1, %rs3, %rs22; +; CHECKPTX71-NEXT: mov.u16 %rs22, %rs3; ; CHECKPTX71-NEXT: @%p1 bra $L__BB0_1; ; CHECKPTX71-NEXT: // %bb.2: // %atomicrmw.end13 -; CHECKPTX71-NEXT: ld.b16 %rs19, [%r1]; +; CHECKPTX71-NEXT: ld.b16 %rs23, [%r1]; ; CHECKPTX71-NEXT: $L__BB0_3: // %atomicrmw.start8 ; CHECKPTX71-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECKPTX71-NEXT: cvt.f32.bf16 %f4, %rs19; -; CHECKPTX71-NEXT: add.rn.f32 %f5, %f4, 0f3F800000; -; CHECKPTX71-NEXT: cvt.rn.bf16.f32 %rs15, %f5; -; CHECKPTX71-NEXT: atom.cas.b16 %rs6, [%r1], %rs19, %rs15; -; CHECKPTX71-NEXT: setp.ne.s16 %p2, %rs6, %rs19; -; CHECKPTX71-NEXT: mov.u16 %rs19, %rs6; +; CHECKPTX71-NEXT: mov.b16 %rs16, 0x3F80; +; CHECKPTX71-NEXT: fma.rn.bf16 %rs17, %rs23, %rs16, %rs16; +; CHECKPTX71-NEXT: atom.cas.b16 %rs6, [%r1], %rs23, %rs17; +; CHECKPTX71-NEXT: setp.ne.s16 %p2, %rs6, %rs23; +; CHECKPTX71-NEXT: mov.u16 %rs23, %rs6; ; CHECKPTX71-NEXT: @%p2 bra $L__BB0_3; ; CHECKPTX71-NEXT: // %bb.4: // %atomicrmw.end7 -; CHECKPTX71-NEXT: ld.global.b16 %rs20, [%r2]; +; CHECKPTX71-NEXT: ld.global.b16 %rs24, [%r2]; ; CHECKPTX71-NEXT: $L__BB0_5: // %atomicrmw.start2 ; CHECKPTX71-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECKPTX71-NEXT: cvt.f32.bf16 %f7, %rs20; -; CHECKPTX71-NEXT: add.rn.f32 %f8, %f7, %f1; -; CHECKPTX71-NEXT: cvt.rn.bf16.f32 %rs16, %f8; -; CHECKPTX71-NEXT: atom.global.cas.b16 %rs9, [%r2], %rs20, %rs16; -; CHECKPTX71-NEXT: setp.ne.s16 %p3, %rs9, %rs20; -; CHECKPTX71-NEXT: mov.u16 %rs20, %rs9; +; CHECKPTX71-NEXT: mov.b16 %rs18, 0x3F80; +; 
CHECKPTX71-NEXT: fma.rn.bf16 %rs19, %rs24, %rs18, %rs13; +; CHECKPTX71-NEXT: atom.global.cas.b16 %rs9, [%r2], %rs24, %rs19; +; CHECKPTX71-NEXT: setp.ne.s16 %p3, %rs9, %rs24; +; CHECKPTX71-NEXT: mov.u16 %rs24, %rs9; ; CHECKPTX71-NEXT: @%p3 bra $L__BB0_5; ; CHECKPTX71-NEXT: // %bb.6: // %atomicrmw.end1 -; CHECKPTX71-NEXT: ld.shared.b16 %rs21, [%r3]; +; CHECKPTX71-NEXT: ld.shared.b16 %rs25, [%r3]; ; CHECKPTX71-NEXT: $L__BB0_7: // %atomicrmw.start ; CHECKPTX71-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECKPTX71-NEXT: cvt.f32.bf16 %f10, %rs21; -; CHECKPTX71-NEXT: add.rn.f32 %f11, %f10, %f1; -; CHECKPTX71-NEXT: cvt.rn.bf16.f32 %rs17, %f11; -; CHECKPTX71-NEXT: atom.shared.cas.b16 %rs12, [%r3], %rs21, %rs17; -; CHECKPTX71-NEXT: setp.ne.s16 %p4, %rs12, %rs21; -; CHECKPTX71-NEXT: mov.u16 %rs21, %rs12; +; CHECKPTX71-NEXT: mov.b16 %rs20, 0x3F80; +; CHECKPTX71-NEXT: fma.rn.bf16 %rs21, %rs25, %rs20, %rs13; +; CHECKPTX71-NEXT: atom.shared.cas.b16 %rs12, [%r3], %rs25, %rs21; +; CHECKPTX71-NEXT: setp.ne.s16 %p4, %rs12, %rs25; +; CHECKPTX71-NEXT: mov.u16 %rs25, %rs12; ; CHECKPTX71-NEXT: @%p4 bra $L__BB0_7; ; CHECKPTX71-NEXT: // %bb.8: // %atomicrmw.end ; CHECKPTX71-NEXT: ret; diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll index 6828bac18cad7..0c1b1e2166928 100644 --- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll @@ -42,17 +42,14 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) { ; ; SM80-LABEL: test_fadd( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<4>; -; SM80-NEXT: .reg .f32 %f<4>; +; SM80-NEXT: .reg .b16 %rs<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b16 %rs1, [test_fadd_param_0]; ; SM80-NEXT: ld.param.b16 %rs2, [test_fadd_param_1]; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs1; -; SM80-NEXT: add.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-NEXT: st.param.b16 [func_retval0], %rs3; +; SM80-NEXT: 
mov.b16 %rs3, 0x3F80; +; SM80-NEXT: fma.rn.bf16 %rs4, %rs1, %rs3, %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs4; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fadd( @@ -113,17 +110,14 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) { ; ; SM80-LABEL: test_fsub( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<4>; -; SM80-NEXT: .reg .f32 %f<4>; +; SM80-NEXT: .reg .b16 %rs<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b16 %rs1, [test_fsub_param_0]; -; SM80-NEXT: ld.param.b16 %rs2, [test_fsub_param_1]; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs1; -; SM80-NEXT: sub.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-NEXT: st.param.b16 [func_retval0], %rs3; +; SM80-NEXT: mov.b16 %rs2, 0xBF80; +; SM80-NEXT: ld.param.b16 %rs3, [test_fsub_param_1]; +; SM80-NEXT: fma.rn.bf16 %rs4, %rs3, %rs2, %rs1; +; SM80-NEXT: st.param.b16 [func_retval0], %rs4; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fsub( @@ -202,23 +196,14 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; ; SM80-LABEL: test_faddx2( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<5>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-NEXT: .reg .f32 %f<7>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: -; SM80-NEXT: ld.param.b32 %r1, [test_faddx2_param_0]; -; SM80-NEXT: ld.param.b32 %r2, [test_faddx2_param_1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs3; -; SM80-NEXT: add.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: add.rn.f32 %f6, %f5, %f4; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f6, %f3; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; +; SM80-NEXT: ld.param.b32 %r1, [test_faddx2_param_1]; +; SM80-NEXT: ld.param.b32 %r2, [test_faddx2_param_0]; +; SM80-NEXT: mov.b32 %r3, 1065369472; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r3, %r1; +; SM80-NEXT: st.param.b32 
[func_retval0], %r4; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_faddx2( @@ -303,23 +288,14 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; ; SM80-LABEL: test_fsubx2( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<5>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-NEXT: .reg .f32 %f<7>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b32 %r1, [test_fsubx2_param_0]; ; SM80-NEXT: ld.param.b32 %r2, [test_fsubx2_param_1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs3; -; SM80-NEXT: sub.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: sub.rn.f32 %f6, %f5, %f4; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f6, %f3; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; +; SM80-NEXT: mov.b32 %r3, -1082081408; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r3, %r1; +; SM80-NEXT: st.param.b32 [func_retval0], %r4; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fsubx2( @@ -404,23 +380,14 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; ; SM80-LABEL: test_fmulx2( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<5>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-NEXT: .reg .f32 %f<7>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: -; SM80-NEXT: ld.param.b32 %r1, [test_fmulx2_param_0]; -; SM80-NEXT: ld.param.b32 %r2, [test_fmulx2_param_1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs3; -; SM80-NEXT: mul.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: mul.rn.f32 %f6, %f5, %f4; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f6, %f3; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; +; SM80-NEXT: ld.param.b32 %r1, [test_fmulx2_param_1]; +; SM80-NEXT: ld.param.b32 %r2, [test_fmulx2_param_0]; +; 
SM80-NEXT: mov.b32 %r3, -2147450880; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r1, %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r4; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fmulx2( @@ -727,15 +694,13 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; ; SM80-LABEL: test_fadd_imm_1( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<3>; -; SM80-NEXT: .reg .f32 %f<3>; +; SM80-NEXT: .reg .b16 %rs<4>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b16 %rs1, [test_fadd_imm_1_param_0]; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: add.rn.f32 %f2, %f1, 0f3F800000; -; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-NEXT: st.param.b16 [func_retval0], %rs2; +; SM80-NEXT: mov.b16 %rs2, 0x3F80; +; SM80-NEXT: fma.rn.bf16 %rs3, %rs1, %rs2, %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fadd_imm_1( diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll index 03cdeb9683aba..e6d35bd5ba536 100644 --- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll @@ -22,19 +22,14 @@ define <2 x bfloat> @test_ret_const() #0 { define <2 x bfloat> @test_fadd_imm_0(<2 x bfloat> %a) #0 { ; SM80-LABEL: test_fadd_imm_0( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<3>; -; SM80-NEXT: .reg .b32 %r<3>; -; SM80-NEXT: .reg .f32 %f<5>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b32 %r1, [test_fadd_imm_0_param_0]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: add.rn.f32 %f2, %f1, 0f3F800000; -; SM80-NEXT: cvt.f32.bf16 %f3, %rs2; -; SM80-NEXT: add.rn.f32 %f4, %f3, 0f40000000; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r2, %f4, %f2; -; SM80-NEXT: st.param.b32 [func_retval0], %r2; +; SM80-NEXT: mov.b32 %r2, 1065369472; +; SM80-NEXT: mov.b32 %r3, 1073758080; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r1, %r2, %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r4; ; SM80-NEXT: ret; ; ; 
SM90-LABEL: test_fadd_imm_0( @@ -54,15 +49,13 @@ define <2 x bfloat> @test_fadd_imm_0(<2 x bfloat> %a) #0 { define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM80-LABEL: test_fadd_imm_1( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<3>; -; SM80-NEXT: .reg .f32 %f<3>; +; SM80-NEXT: .reg .b16 %rs<4>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b16 %rs1, [test_fadd_imm_1_param_0]; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: add.rn.f32 %f2, %f1, 0f3F800000; -; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-NEXT: st.param.b16 [func_retval0], %rs2; +; SM80-NEXT: mov.b16 %rs2, 0x3F80; +; SM80-NEXT: fma.rn.bf16 %rs3, %rs1, %rs2, %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM90-LABEL: test_fadd_imm_1( @@ -82,23 +75,14 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-LABEL: test_fsubx2( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<5>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-NEXT: .reg .f32 %f<7>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b32 %r1, [test_fsubx2_param_0]; ; SM80-NEXT: ld.param.b32 %r2, [test_fsubx2_param_1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs3; -; SM80-NEXT: sub.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: sub.rn.f32 %f6, %f5, %f4; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f6, %f3; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; +; SM80-NEXT: mov.b32 %r3, -1082081408; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r3, %r1; +; SM80-NEXT: st.param.b32 [func_retval0], %r4; ; SM80-NEXT: ret; ; ; SM90-LABEL: test_fsubx2( @@ -118,23 +102,14 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-LABEL: test_fmulx2( ; SM80: { -; 
SM80-NEXT: .reg .b16 %rs<5>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-NEXT: .reg .f32 %f<7>; +; SM80-NEXT: .reg .b32 %r<5>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: -; SM80-NEXT: ld.param.b32 %r1, [test_fmulx2_param_0]; -; SM80-NEXT: ld.param.b32 %r2, [test_fmulx2_param_1]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f2, %rs3; -; SM80-NEXT: mul.rn.f32 %f3, %f2, %f1; -; SM80-NEXT: cvt.f32.bf16 %f4, %rs2; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs4; -; SM80-NEXT: mul.rn.f32 %f6, %f5, %f4; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f6, %f3; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; +; SM80-NEXT: ld.param.b32 %r1, [test_fmulx2_param_1]; +; SM80-NEXT: ld.param.b32 %r2, [test_fmulx2_param_0]; +; SM80-NEXT: mov.b32 %r3, -2147450880; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r1, %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r4; ; SM80-NEXT: ret; ; ; SM90-LABEL: test_fmulx2( @@ -543,30 +518,16 @@ define <2 x bfloat> @test_fabs(<2 x bfloat> %a) #0 { define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-LABEL: test_fabs_add( ; SM80: { -; SM80-NEXT: .reg .b16 %rs<7>; -; SM80-NEXT: .reg .b32 %r<6>; -; SM80-NEXT: .reg .f32 %f<11>; +; SM80-NEXT: .reg .b32 %r<7>; ; SM80-EMPTY: ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b32 %r1, [test_fabs_add_param_1]; ; SM80-NEXT: ld.param.b32 %r2, [test_fabs_add_param_0]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: add.rn.f32 %f2, %f1, %f1; -; SM80-NEXT: cvt.f32.bf16 %f3, %rs2; -; SM80-NEXT: add.rn.f32 %f4, %f3, %f3; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r3, %f4, %f2; -; SM80-NEXT: abs.bf16x2 %r4, %r3; -; SM80-NEXT: mov.b32 {%rs3, %rs4}, %r4; -; SM80-NEXT: cvt.f32.bf16 %f5, %rs3; -; SM80-NEXT: mov.b32 {%rs5, %rs6}, %r1; -; SM80-NEXT: cvt.f32.bf16 %f6, %rs5; -; SM80-NEXT: add.rn.f32 %f7, %f5, %f6; -; SM80-NEXT: cvt.f32.bf16 %f8, %rs4; -; SM80-NEXT: cvt.f32.bf16 %f9, %rs6; -; 
SM80-NEXT: add.rn.f32 %f10, %f8, %f9; -; SM80-NEXT: cvt.rn.bf16x2.f32 %r5, %f10, %f7; -; SM80-NEXT: st.param.b32 [func_retval0], %r5; +; SM80-NEXT: mov.b32 %r3, 1065369472; +; SM80-NEXT: fma.rn.bf16x2 %r4, %r2, %r3, %r2; +; SM80-NEXT: abs.bf16x2 %r5, %r4; +; SM80-NEXT: fma.rn.bf16x2 %r6, %r5, %r3, %r1; +; SM80-NEXT: st.param.b32 [func_retval0], %r6; ; SM80-NEXT: ret; ; ; SM90-LABEL: test_fabs_add( @@ -802,45 +763,18 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 { } define <2 x bfloat> @test_copysign(<2 x bfloat> %a, <2 x bfloat> %b) #0 { -; SM80-LABEL: test_copysign( -; SM80: { -; SM80-NEXT: .reg .pred %p<3>; -; SM80-NEXT: .reg .b16 %rs<15>; -; SM80-NEXT: .reg .b32 %r<4>; -; SM80-EMPTY: -; SM80-NEXT: // %bb.0: -; SM80-NEXT: ld.param.b32 %r1, [test_copysign_param_1]; -; SM80-NEXT: ld.param.b32 %r2, [test_copysign_param_0]; -; SM80-NEXT: mov.b32 {%rs1, %rs2}, %r2; -; SM80-NEXT: abs.bf16 %rs3, %rs2; -; SM80-NEXT: neg.bf16 %rs4, %rs3; -; SM80-NEXT: mov.b32 {%rs5, %rs6}, %r1; -; SM80-NEXT: shr.u16 %rs7, %rs6, 15; -; SM80-NEXT: and.b16 %rs8, %rs7, 1; -; SM80-NEXT: setp.eq.b16 %p1, %rs8, 1; -; SM80-NEXT: selp.b16 %rs9, %rs4, %rs3, %p1; -; SM80-NEXT: abs.bf16 %rs10, %rs1; -; SM80-NEXT: neg.bf16 %rs11, %rs10; -; SM80-NEXT: shr.u16 %rs12, %rs5, 15; -; SM80-NEXT: and.b16 %rs13, %rs12, 1; -; SM80-NEXT: setp.eq.b16 %p2, %rs13, 1; -; SM80-NEXT: selp.b16 %rs14, %rs11, %rs10, %p2; -; SM80-NEXT: mov.b32 %r3, {%rs14, %rs9}; -; SM80-NEXT: st.param.b32 [func_retval0], %r3; -; SM80-NEXT: ret; -; -; SM90-LABEL: test_copysign( -; SM90: { -; SM90-NEXT: .reg .b32 %r<6>; -; SM90-EMPTY: -; SM90-NEXT: // %bb.0: -; SM90-NEXT: ld.param.b32 %r1, [test_copysign_param_0]; -; SM90-NEXT: ld.param.b32 %r2, [test_copysign_param_1]; -; SM90-NEXT: and.b32 %r3, %r2, -2147450880; -; SM90-NEXT: and.b32 %r4, %r1, 2147450879; -; SM90-NEXT: or.b32 %r5, %r4, %r3; -; SM90-NEXT: st.param.b32 [func_retval0], %r5; -; SM90-NEXT: ret; +; CHECK-LABEL: test_copysign( +; CHECK: { +; CHECK-NEXT: .reg .b32 
%r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_copysign_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [test_copysign_param_1]; +; CHECK-NEXT: and.b32 %r3, %r2, -2147450880; +; CHECK-NEXT: and.b32 %r4, %r1, 2147450879; +; CHECK-NEXT: or.b32 %r5, %r4, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; %r = call <2 x bfloat> @llvm.copysign.f16(<2 x bfloat> %a, <2 x bfloat> %b) ret <2 x bfloat> %r } diff --git a/llvm/test/CodeGen/NVPTX/f16-ex2.ll b/llvm/test/CodeGen/NVPTX/f16-ex2.ll index df3a36db52b1a..ae70946b4b1dc 100644 --- a/llvm/test/CodeGen/NVPTX/f16-ex2.ll +++ b/llvm/test/CodeGen/NVPTX/f16-ex2.ll @@ -1,21 +1,37 @@ -; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_75 -mattr=+ptx70 | FileCheck %s -; RUN: %if ptxas-11.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_75 -mattr=+ptx70 | %ptxas-verify -arch=sm_75 %} +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_75 -mattr=+ptx70 | FileCheck --check-prefixes=CHECK-FP16 %s +; RUN: %if ptxas-11.0 %{ llc < %s -mcpu=sm_75 -mattr=+ptx70 | %ptxas-verify -arch=sm_75 %} +target triple = "nvptx64-nvidia-cuda" declare half @llvm.nvvm.ex2.approx.f16(half) declare <2 x half> @llvm.nvvm.ex2.approx.f16x2(<2 x half>) -; CHECK-LABEL: exp2_half -define half @exp2_half(half %0) { - ; CHECK-NOT: call - ; CHECK: ex2.approx.f16 - %res = call half @llvm.nvvm.ex2.approx.f16(half %0); +; CHECK-LABEL: ex2_half +define half @ex2_half(half %0) { +; CHECK-FP16-LABEL: ex2_half( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .b16 %rs<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: +; CHECK-FP16-NEXT: ld.param.b16 %rs1, [ex2_half_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f16 %rs2, %rs1; +; CHECK-FP16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-FP16-NEXT: ret; + %res = call half @llvm.nvvm.ex2.approx.f16(half %0) ret half %res } -; CHECK-LABEL: exp2_2xhalf -define <2 x half> @exp2_2xhalf(<2 x half> %0) { - ; CHECK-NOT: 
call - ; CHECK: ex2.approx.f16x2 - %res = call <2 x half> @llvm.nvvm.ex2.approx.f16x2(<2 x half> %0); +; CHECK-LABEL: ex2_2xhalf +define <2 x half> @ex2_2xhalf(<2 x half> %0) { +; CHECK-FP16-LABEL: ex2_2xhalf( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .b32 %r<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: +; CHECK-FP16-NEXT: ld.param.b32 %r1, [ex2_2xhalf_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f16x2 %r2, %r1; +; CHECK-FP16-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-FP16-NEXT: ret; + %res = call <2 x half> @llvm.nvvm.ex2.approx.f16x2(<2 x half> %0) ret <2 x half> %res } diff --git a/llvm/test/CodeGen/NVPTX/f32-ex2.ll b/llvm/test/CodeGen/NVPTX/f32-ex2.ll new file mode 100644 index 0000000000000..c9eff2a8ff17d --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/f32-ex2.ll @@ -0,0 +1,36 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_50 -mattr=+ptx32 | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas-11.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_50 -mattr=+ptx32 | %ptxas-verify -arch=sm_50 %} +target triple = "nvptx-nvidia-cuda" + +declare float @llvm.nvvm.ex2.approx.f(float) + +; CHECK-LABEL: ex2_float +define float @ex2_float(float %0) { +; CHECK-LABEL: ex2_float( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [ex2_float_param_0]; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; + %res = call float @llvm.nvvm.ex2.approx.f(float %0) + ret float %res +} + +; CHECK-LABEL: ex2_float_ftz +define float @ex2_float_ftz(float %0) { +; CHECK-LABEL: ex2_float_ftz( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [ex2_float_ftz_param_0]; +; CHECK-NEXT: ex2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; + %res = call float 
@llvm.nvvm.ex2.approx.ftz.f(float %0) + ret float %res +} diff --git a/llvm/test/CodeGen/NVPTX/f32-lg2.ll b/llvm/test/CodeGen/NVPTX/f32-lg2.ll new file mode 100644 index 0000000000000..13324c6860926 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/f32-lg2.ll @@ -0,0 +1,37 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_20 -mattr=+ptx32 | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas %{ llc < %s -mcpu=sm_20 -mattr=+ptx32 | %ptxas-verify %} +target triple = "nvptx-nvidia-cuda" + +declare float @llvm.nvvm.lg2.approx.f(float) +declare float @llvm.nvvm.lg2.approx.ftz.f(float) + +; CHECK-LABEL: lg2_float +define float @lg2_float(float %0) { +; CHECK-LABEL: lg2_float( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [lg2_float_param_0]; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; + %res = call float @llvm.nvvm.lg2.approx.f(float %0) + ret float %res +} + +; CHECK-LABEL: lg2_float_ftz +define float @lg2_float_ftz(float %0) { +; CHECK-LABEL: lg2_float_ftz( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [lg2_float_ftz_param_0]; +; CHECK-NEXT: lg2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; + %res = call float @llvm.nvvm.lg2.approx.ftz.f(float %0) + ret float %res +} diff --git a/llvm/test/CodeGen/NVPTX/fexp2.ll b/llvm/test/CodeGen/NVPTX/fexp2.ll new file mode 100644 index 0000000000000..7e485dca65764 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/fexp2.ll @@ -0,0 +1,414 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_50 -mattr=+ptx32 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -mcpu=sm_75 -mattr=+ptx70 | FileCheck --check-prefixes=CHECK-FP16 %s +; RUN: 
llc < %s -mcpu=sm_90 -mattr=+ptx78 | FileCheck --check-prefixes=CHECK-BF16 %s +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_50 -mattr=+ptx32 | %ptxas-verify -arch=sm_50 %} +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_75 -mattr=+ptx70 | %ptxas-verify -arch=sm_75 %} +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_90 -mattr=+ptx78 | %ptxas-verify -arch=sm_90 %} +target triple = "nvptx64-nvidia-cuda" + +; --- f32 --- + +; CHECK-LABEL: exp2_test +define float @exp2_test(float %in) { +; CHECK-LABEL: exp2_test( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.f32 %f1, [exp2_test_param_0]; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_test( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .f32 %f<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.f32 %f1, [exp2_test_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-FP16-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_test( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .f32 %f<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.f32 %f1, [exp2_test_param_0]; +; CHECK-BF16-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-BF16-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call float @llvm.exp2.f32(float %in) + ret float %exp2 +} + +; CHECK-LABEL: exp2_ftz_test +define float @exp2_ftz_test(float %in) #0 { +; CHECK-LABEL: exp2_ftz_test( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0]; +; CHECK-NEXT: ex2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_ftz_test( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .f32 %f<3>; +; CHECK-FP16-EMPTY: +; 
CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0]; +; CHECK-FP16-NEXT: ex2.approx.ftz.f32 %f2, %f1; +; CHECK-FP16-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_ftz_test( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .f32 %f<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0]; +; CHECK-BF16-NEXT: ex2.approx.ftz.f32 %f2, %f1; +; CHECK-BF16-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call float @llvm.exp2.f32(float %in) + ret float %exp2 +} + +; CHECK-LABEL: exp2_test_v +define <2 x float> @exp2_test_v(<2 x float> %in) { +; CHECK-LABEL: exp2_test_v( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0]; +; CHECK-NEXT: ex2.approx.f32 %f3, %f2; +; CHECK-NEXT: ex2.approx.f32 %f4, %f1; +; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f4, %f3}; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_test_v( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .f32 %f<5>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f32 %f3, %f2; +; CHECK-FP16-NEXT: ex2.approx.f32 %f4, %f1; +; CHECK-FP16-NEXT: st.param.v2.f32 [func_retval0], {%f4, %f3}; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_test_v( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .f32 %f<5>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0]; +; CHECK-BF16-NEXT: ex2.approx.f32 %f3, %f2; +; CHECK-BF16-NEXT: ex2.approx.f32 %f4, %f1; +; CHECK-BF16-NEXT: st.param.v2.f32 [func_retval0], {%f4, %f3}; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call <2 x float> @llvm.exp2.v2f32(<2 x float> %in) + ret <2 x float> %exp2 +} + +; --- f16 --- + 
+; CHECK-LABEL: exp2_f16_test +define half @exp2_f16_test(half %in) { +; CHECK-LABEL: exp2_f16_test( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b16 %rs1, [exp2_f16_test_param_0]; +; CHECK-NEXT: cvt.f32.f16 %f1, %rs1; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %f2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_f16_test( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .b16 %rs<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.b16 %rs1, [exp2_f16_test_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f16 %rs2, %rs1; +; CHECK-FP16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_f16_test( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .b16 %rs<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.b16 %rs1, [exp2_f16_test_param_0]; +; CHECK-BF16-NEXT: ex2.approx.f16 %rs2, %rs1; +; CHECK-BF16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call half @llvm.exp2.f16(half %in) + ret half %exp2 +} + +; COM: we should never have .ftz for f16 +; CHECK-LABEL: exp2_f16_ftz_test +define half @exp2_f16_ftz_test(half %in) #0 { +; CHECK-LABEL: exp2_f16_ftz_test( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b16 %rs1, [exp2_f16_ftz_test_param_0]; +; CHECK-NEXT: cvt.ftz.f32.f16 %f1, %rs1; +; CHECK-NEXT: ex2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %f2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_f16_ftz_test( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .b16 %rs<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.b16 %rs1, 
[exp2_f16_ftz_test_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f16 %rs2, %rs1; +; CHECK-FP16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_f16_ftz_test( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .b16 %rs<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.b16 %rs1, [exp2_f16_ftz_test_param_0]; +; CHECK-BF16-NEXT: ex2.approx.f16 %rs2, %rs1; +; CHECK-BF16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call half @llvm.exp2.f16(half %in) + ret half %exp2 +} + +; CHECK-LABEL: exp2_f16_test_v +define <2 x half> @exp2_f16_test_v(<2 x half> %in) { +; CHECK-LABEL: exp2_f16_test_v( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b32 %r1, [exp2_f16_test_v_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.f32.f16 %f1, %rs2; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs3, %f2; +; CHECK-NEXT: cvt.f32.f16 %f3, %rs1; +; CHECK-NEXT: ex2.approx.f32 %f4, %f3; +; CHECK-NEXT: cvt.rn.f16.f32 %rs4, %f4; +; CHECK-NEXT: mov.b32 %r2, {%rs4, %rs3}; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_f16_test_v( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .b32 %r<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.b32 %r1, [exp2_f16_test_v_param_0]; +; CHECK-FP16-NEXT: ex2.approx.f16x2 %r2, %r1; +; CHECK-FP16-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_f16_test_v( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .b32 %r<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.b32 %r1, [exp2_f16_test_v_param_0]; +; CHECK-BF16-NEXT: ex2.approx.f16x2 %r2, %r1; +; CHECK-BF16-NEXT: st.param.b32 [func_retval0], %r2; +; 
CHECK-BF16-NEXT: ret; +entry: + %exp2 = call <2 x half> @llvm.exp2.v2f16(<2 x half> %in) + ret <2 x half> %exp2 +} + +; --- bf16 --- + +; COM: we should always have .ftz for bf16 +; CHECK-LABEL: exp2_bf16_test +define bfloat @exp2_bf16_test(bfloat %in) { +; CHECK-LABEL: exp2_bf16_test( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<2>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.u16 %r1, [exp2_bf16_test_param_0]; +; CHECK-NEXT: shl.b32 %r2, %r1, 16; +; CHECK-NEXT: mov.b32 %f1, %r2; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: mov.b32 %r3, %f2; +; CHECK-NEXT: bfe.u32 %r4, %r3, 16, 1; +; CHECK-NEXT: add.s32 %r5, %r4, %r3; +; CHECK-NEXT: add.s32 %r6, %r5, 32767; +; CHECK-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-NEXT: or.b32 %r7, %r3, 4194304; +; CHECK-NEXT: selp.b32 %r8, %r7, %r6, %p1; +; CHECK-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_bf16_test( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .pred %p<2>; +; CHECK-FP16-NEXT: .reg .b16 %rs<2>; +; CHECK-FP16-NEXT: .reg .b32 %r<9>; +; CHECK-FP16-NEXT: .reg .f32 %f<3>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.u16 %r1, [exp2_bf16_test_param_0]; +; CHECK-FP16-NEXT: shl.b32 %r2, %r1, 16; +; CHECK-FP16-NEXT: mov.b32 %f1, %r2; +; CHECK-FP16-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-FP16-NEXT: mov.b32 %r3, %f2; +; CHECK-FP16-NEXT: bfe.u32 %r4, %r3, 16, 1; +; CHECK-FP16-NEXT: add.s32 %r5, %r4, %r3; +; CHECK-FP16-NEXT: add.s32 %r6, %r5, 32767; +; CHECK-FP16-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-FP16-NEXT: or.b32 %r7, %r3, 4194304; +; CHECK-FP16-NEXT: selp.b32 %r8, %r7, %r6, %p1; +; CHECK-FP16-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } +; CHECK-FP16-NEXT: st.param.b16 [func_retval0], %rs1; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: 
exp2_bf16_test( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .b16 %rs<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.b16 %rs1, [exp2_bf16_test_param_0]; +; CHECK-BF16-NEXT: ex2.approx.ftz.bf16 %rs2, %rs1; +; CHECK-BF16-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call bfloat @llvm.exp2.bf16(bfloat %in) + ret bfloat %exp2 +} + +; CHECK-LABEL: exp2_bf16_test_v +define <2 x bfloat> @exp2_bf16_test_v(<2 x bfloat> %in) { +; CHECK-LABEL: exp2_bf16_test_v( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<19>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b32 %r1, [exp2_bf16_test_v_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-NEXT: shl.b32 %r3, %r2, 16; +; CHECK-NEXT: mov.b32 %f1, %r3; +; CHECK-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-NEXT: mov.b32 %r4, %f2; +; CHECK-NEXT: bfe.u32 %r5, %r4, 16, 1; +; CHECK-NEXT: add.s32 %r6, %r5, %r4; +; CHECK-NEXT: add.s32 %r7, %r6, 32767; +; CHECK-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-NEXT: or.b32 %r8, %r4, 4194304; +; CHECK-NEXT: selp.b32 %r9, %r8, %r7, %p1; +; CHECK-NEXT: cvt.u32.u16 %r10, %rs1; +; CHECK-NEXT: shl.b32 %r11, %r10, 16; +; CHECK-NEXT: mov.b32 %f3, %r11; +; CHECK-NEXT: ex2.approx.f32 %f4, %f3; +; CHECK-NEXT: mov.b32 %r12, %f4; +; CHECK-NEXT: bfe.u32 %r13, %r12, 16, 1; +; CHECK-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-NEXT: add.s32 %r15, %r14, 32767; +; CHECK-NEXT: setp.nan.f32 %p2, %f4, %f4; +; CHECK-NEXT: or.b32 %r16, %r12, 4194304; +; CHECK-NEXT: selp.b32 %r17, %r16, %r15, %p2; +; CHECK-NEXT: prmt.b32 %r18, %r17, %r9, 0x7632U; +; CHECK-NEXT: st.param.b32 [func_retval0], %r18; +; CHECK-NEXT: ret; +; +; CHECK-FP16-LABEL: exp2_bf16_test_v( +; CHECK-FP16: { +; CHECK-FP16-NEXT: .reg .pred %p<3>; +; CHECK-FP16-NEXT: .reg .b16 %rs<3>; +; CHECK-FP16-NEXT: .reg .b32 %r<19>; 
+; CHECK-FP16-NEXT: .reg .f32 %f<5>; +; CHECK-FP16-EMPTY: +; CHECK-FP16-NEXT: // %bb.0: // %entry +; CHECK-FP16-NEXT: ld.param.b32 %r1, [exp2_bf16_test_v_param_0]; +; CHECK-FP16-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-FP16-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-FP16-NEXT: shl.b32 %r3, %r2, 16; +; CHECK-FP16-NEXT: mov.b32 %f1, %r3; +; CHECK-FP16-NEXT: ex2.approx.f32 %f2, %f1; +; CHECK-FP16-NEXT: mov.b32 %r4, %f2; +; CHECK-FP16-NEXT: bfe.u32 %r5, %r4, 16, 1; +; CHECK-FP16-NEXT: add.s32 %r6, %r5, %r4; +; CHECK-FP16-NEXT: add.s32 %r7, %r6, 32767; +; CHECK-FP16-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-FP16-NEXT: or.b32 %r8, %r4, 4194304; +; CHECK-FP16-NEXT: selp.b32 %r9, %r8, %r7, %p1; +; CHECK-FP16-NEXT: cvt.u32.u16 %r10, %rs1; +; CHECK-FP16-NEXT: shl.b32 %r11, %r10, 16; +; CHECK-FP16-NEXT: mov.b32 %f3, %r11; +; CHECK-FP16-NEXT: ex2.approx.f32 %f4, %f3; +; CHECK-FP16-NEXT: mov.b32 %r12, %f4; +; CHECK-FP16-NEXT: bfe.u32 %r13, %r12, 16, 1; +; CHECK-FP16-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-FP16-NEXT: add.s32 %r15, %r14, 32767; +; CHECK-FP16-NEXT: setp.nan.f32 %p2, %f4, %f4; +; CHECK-FP16-NEXT: or.b32 %r16, %r12, 4194304; +; CHECK-FP16-NEXT: selp.b32 %r17, %r16, %r15, %p2; +; CHECK-FP16-NEXT: prmt.b32 %r18, %r17, %r9, 0x7632U; +; CHECK-FP16-NEXT: st.param.b32 [func_retval0], %r18; +; CHECK-FP16-NEXT: ret; +; +; CHECK-BF16-LABEL: exp2_bf16_test_v( +; CHECK-BF16: { +; CHECK-BF16-NEXT: .reg .b32 %r<3>; +; CHECK-BF16-EMPTY: +; CHECK-BF16-NEXT: // %bb.0: // %entry +; CHECK-BF16-NEXT: ld.param.b32 %r1, [exp2_bf16_test_v_param_0]; +; CHECK-BF16-NEXT: ex2.approx.ftz.bf16x2 %r2, %r1; +; CHECK-BF16-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-BF16-NEXT: ret; +entry: + %exp2 = call <2 x bfloat> @llvm.exp2.v2bf16(<2 x bfloat> %in) + ret <2 x bfloat> %exp2 +} + +declare float @llvm.exp2.f32(float %val) + +declare <2 x float> @llvm.exp2.v2f32(<2 x float> %val) + +declare half @llvm.exp2.f16(half %val) + +declare <2 x half> @llvm.exp2.v2f16(<2 x half> %val) + +declare bfloat 
@llvm.exp2.bf16(bfloat %val) + +declare <2 x bfloat> @llvm.exp2.v2bf16(<2 x bfloat> %val) + +attributes #0 = {"denormal-fp-math"="preserve-sign"} diff --git a/llvm/test/CodeGen/NVPTX/flog2.ll b/llvm/test/CodeGen/NVPTX/flog2.ll new file mode 100644 index 0000000000000..ff762dcf74b2f --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/flog2.ll @@ -0,0 +1,234 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_50 -mattr=+ptx32 -nvptx-approx-log2f32 | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_50 -mattr=+ptx32 -nvptx-approx-log2f32 | %ptxas-verify -arch=sm_50 %} +target triple = "nvptx64-nvidia-cuda" + +; CHECK-LABEL: log2_test +define float @log2_test(float %in) { +; CHECK-LABEL: log2_test( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.f32 %f1, [log2_test_param_0]; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; +entry: + %log2 = call float @llvm.log2.f32(float %in) + ret float %log2 +} + +; CHECK-LABEL: log2_ftz_test +define float @log2_ftz_test(float %in) #0 { +; CHECK-LABEL: log2_ftz_test( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.f32 %f1, [log2_ftz_test_param_0]; +; CHECK-NEXT: lg2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; +; CHECK-NEXT: ret; +entry: + %log2 = call float @llvm.log2.f32(float %in) + ret float %log2 +} + +; CHECK-LABEL: log2_test_v +define <2 x float> @log2_test_v(<2 x float> %in) { +; CHECK-LABEL: log2_test_v( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [log2_test_v_param_0]; +; CHECK-NEXT: lg2.approx.f32 %f3, %f2; +; CHECK-NEXT: lg2.approx.f32 %f4, %f1; +; CHECK-NEXT: st.param.v2.f32 [func_retval0], {%f4, %f3}; 
+; CHECK-NEXT: ret; +entry: + %log2 = call <2 x float> @llvm.log2.v2f32(<2 x float> %in) + ret <2 x float> %log2 +} + +; --- f16 --- + +; CHECK-LABEL: log2_f16_test +define half @log2_f16_test(half %in) { +; CHECK-LABEL: log2_f16_test( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b16 %rs1, [log2_f16_test_param_0]; +; CHECK-NEXT: cvt.f32.f16 %f1, %rs1; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %f2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-NEXT: ret; +entry: + %log2 = call half @llvm.log2.f16(half %in) + ret half %log2 +} + +; CHECK-LABEL: log2_f16_ftz_test +define half @log2_f16_ftz_test(half %in) #0 { +; CHECK-LABEL: log2_f16_ftz_test( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b16 %rs1, [log2_f16_ftz_test_param_0]; +; CHECK-NEXT: cvt.ftz.f32.f16 %f1, %rs1; +; CHECK-NEXT: lg2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs2, %f2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs2; +; CHECK-NEXT: ret; +entry: + %log2 = call half @llvm.log2.f16(half %in) + ret half %log2 +} + +; CHECK-LABEL: log2_f16_test_v +define <2 x half> @log2_f16_test_v(<2 x half> %in) { +; CHECK-LABEL: log2_f16_test_v( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b32 %r1, [log2_f16_test_v_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.f32.f16 %f1, %rs2; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: cvt.rn.f16.f32 %rs3, %f2; +; CHECK-NEXT: cvt.f32.f16 %f3, %rs1; +; CHECK-NEXT: lg2.approx.f32 %f4, %f3; +; CHECK-NEXT: cvt.rn.f16.f32 %rs4, %f4; +; CHECK-NEXT: mov.b32 %r2, {%rs4, %rs3}; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; 
+entry: + %log2 = call <2 x half> @llvm.log2.v2f16(<2 x half> %in) + ret <2 x half> %log2 +} + +; --- bf16 --- + +; CHECK-LABEL: log2_bf16_test +define bfloat @log2_bf16_test(bfloat %in) { +; CHECK-LABEL: log2_bf16_test( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<2>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.u16 %r1, [log2_bf16_test_param_0]; +; CHECK-NEXT: shl.b32 %r2, %r1, 16; +; CHECK-NEXT: mov.b32 %f1, %r2; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: mov.b32 %r3, %f2; +; CHECK-NEXT: bfe.u32 %r4, %r3, 16, 1; +; CHECK-NEXT: add.s32 %r5, %r4, %r3; +; CHECK-NEXT: add.s32 %r6, %r5, 32767; +; CHECK-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-NEXT: or.b32 %r7, %r3, 4194304; +; CHECK-NEXT: selp.b32 %r8, %r7, %r6, %p1; +; CHECK-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; +; CHECK-NEXT: ret; +entry: + %log2 = call bfloat @llvm.log2.bf16(bfloat %in) + ret bfloat %log2 +} + +; CHECK-LABEL: log2_bf16_ftz_test +define bfloat @log2_bf16_ftz_test(bfloat %in) #0 { +; CHECK-LABEL: log2_bf16_ftz_test( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<2>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .f32 %f<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.u16 %r1, [log2_bf16_ftz_test_param_0]; +; CHECK-NEXT: shl.b32 %r2, %r1, 16; +; CHECK-NEXT: mov.b32 %f1, %r2; +; CHECK-NEXT: lg2.approx.ftz.f32 %f2, %f1; +; CHECK-NEXT: mov.b32 %r3, %f2; +; CHECK-NEXT: bfe.u32 %r4, %r3, 16, 1; +; CHECK-NEXT: add.s32 %r5, %r4, %r3; +; CHECK-NEXT: add.s32 %r6, %r5, 32767; +; CHECK-NEXT: setp.nan.ftz.f32 %p1, %f2, %f2; +; CHECK-NEXT: or.b32 %r7, %r3, 4194304; +; CHECK-NEXT: selp.b32 %r8, %r7, %r6, %p1; +; CHECK-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; +; CHECK-NEXT: ret; +entry: + %log2 = 
call bfloat @llvm.log2.bf16(bfloat %in) + ret bfloat %log2 +} + +; CHECK-LABEL: log2_bf16_test_v +define <2 x bfloat> @log2_bf16_test_v(<2 x bfloat> %in) { +; CHECK-LABEL: log2_bf16_test_v( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<19>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b32 %r1, [log2_bf16_test_v_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-NEXT: shl.b32 %r3, %r2, 16; +; CHECK-NEXT: mov.b32 %f1, %r3; +; CHECK-NEXT: lg2.approx.f32 %f2, %f1; +; CHECK-NEXT: mov.b32 %r4, %f2; +; CHECK-NEXT: bfe.u32 %r5, %r4, 16, 1; +; CHECK-NEXT: add.s32 %r6, %r5, %r4; +; CHECK-NEXT: add.s32 %r7, %r6, 32767; +; CHECK-NEXT: setp.nan.f32 %p1, %f2, %f2; +; CHECK-NEXT: or.b32 %r8, %r4, 4194304; +; CHECK-NEXT: selp.b32 %r9, %r8, %r7, %p1; +; CHECK-NEXT: cvt.u32.u16 %r10, %rs1; +; CHECK-NEXT: shl.b32 %r11, %r10, 16; +; CHECK-NEXT: mov.b32 %f3, %r11; +; CHECK-NEXT: lg2.approx.f32 %f4, %f3; +; CHECK-NEXT: mov.b32 %r12, %f4; +; CHECK-NEXT: bfe.u32 %r13, %r12, 16, 1; +; CHECK-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-NEXT: add.s32 %r15, %r14, 32767; +; CHECK-NEXT: setp.nan.f32 %p2, %f4, %f4; +; CHECK-NEXT: or.b32 %r16, %r12, 4194304; +; CHECK-NEXT: selp.b32 %r17, %r16, %r15, %p2; +; CHECK-NEXT: prmt.b32 %r18, %r17, %r9, 0x7632U; +; CHECK-NEXT: st.param.b32 [func_retval0], %r18; +; CHECK-NEXT: ret; +entry: + %log2 = call <2 x bfloat> @llvm.log2.v2bf16(<2 x bfloat> %in) + ret <2 x bfloat> %log2 +} + +declare float @llvm.log2.f32(float %val) + +declare <2 x float> @llvm.log2.v2f32(<2 x float> %val) + +declare half @llvm.log2.f16(half %val) + +declare <2 x half> @llvm.log2.v2f16(<2 x half> %val) + +declare bfloat @llvm.log2.bf16(bfloat %val) + +declare <2 x bfloat> @llvm.log2.v2bf16(<2 x bfloat> %val) + +attributes #0 = {"denormal-fp-math"="preserve-sign"} diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll 
b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll index 48c94f275274b..7dce894620e6b 100644 --- a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll +++ b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll @@ -352,9 +352,7 @@ define bfloat @fma_bf16_expanded_no_nans(bfloat %a, bfloat %b, bfloat %c) #0 { define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloat %c) #0 { ; CHECK-LABEL: fma_bf16_expanded_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<9>; -; CHECK-NEXT: .reg .b32 %r<7>; -; CHECK-NEXT: .reg .f32 %f<6>; +; CHECK-NEXT: .reg .b16 %rs<11>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_0]; @@ -363,20 +361,11 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat ; CHECK-NEXT: fma.rn.bf16 %rs4, %rs1, %rs2, %rs3; ; CHECK-NEXT: mov.b16 %rs5, 0x0000; ; CHECK-NEXT: max.bf16 %rs6, %rs4, %rs5; -; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: shl.b32 %r2, %r1, 16; -; CHECK-NEXT: mov.b32 %f1, %r2; -; CHECK-NEXT: add.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs7, %f2; -; CHECK-NEXT: cvt.u32.u16 %r3, %rs6; -; CHECK-NEXT: shl.b32 %r4, %r3, 16; -; CHECK-NEXT: mov.b32 %f3, %r4; -; CHECK-NEXT: cvt.u32.u16 %r5, %rs7; -; CHECK-NEXT: shl.b32 %r6, %r5, 16; -; CHECK-NEXT: mov.b32 %f4, %r6; -; CHECK-NEXT: add.f32 %f5, %f3, %f4; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs8, %f5; -; CHECK-NEXT: st.param.b16 [func_retval0], %rs8; +; CHECK-NEXT: mov.b16 %rs7, 0x3F80; +; CHECK-NEXT: mov.b16 %rs8, 0x40E0; +; CHECK-NEXT: fma.rn.bf16 %rs9, %rs4, %rs7, %rs8; +; CHECK-NEXT: fma.rn.bf16 %rs10, %rs6, %rs7, %rs9; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs10; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16_expanded_no_nans_multiple_uses_of_fma( @@ -959,9 +948,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> % define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, 
<2 x bfloat> %b, <2 x bfloat> %c) #0 { ; CHECK-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<7>; -; CHECK-NEXT: .reg .b32 %r<20>; -; CHECK-NEXT: .reg .f32 %f<11>; +; CHECK-NEXT: .reg .b32 %r<11>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2]; @@ -970,34 +957,11 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa ; CHECK-NEXT: fma.rn.bf16x2 %r4, %r3, %r2, %r1; ; CHECK-NEXT: mov.b32 %r5, 0; ; CHECK-NEXT: max.bf16x2 %r6, %r4, %r5; -; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; -; CHECK-NEXT: cvt.u32.u16 %r7, %rs2; -; CHECK-NEXT: shl.b32 %r8, %r7, 16; -; CHECK-NEXT: mov.b32 %f1, %r8; -; CHECK-NEXT: add.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs3, %f2; -; CHECK-NEXT: cvt.u32.u16 %r9, %rs1; -; CHECK-NEXT: shl.b32 %r10, %r9, 16; -; CHECK-NEXT: mov.b32 %f3, %r10; -; CHECK-NEXT: add.f32 %f4, %f3, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs4, %f4; -; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r6; -; CHECK-NEXT: cvt.u32.u16 %r11, %rs5; -; CHECK-NEXT: shl.b32 %r12, %r11, 16; -; CHECK-NEXT: mov.b32 %f5, %r12; -; CHECK-NEXT: cvt.u32.u16 %r13, %rs4; -; CHECK-NEXT: shl.b32 %r14, %r13, 16; -; CHECK-NEXT: mov.b32 %f6, %r14; -; CHECK-NEXT: add.f32 %f7, %f5, %f6; -; CHECK-NEXT: cvt.u32.u16 %r15, %rs6; -; CHECK-NEXT: shl.b32 %r16, %r15, 16; -; CHECK-NEXT: mov.b32 %f8, %r16; -; CHECK-NEXT: cvt.u32.u16 %r17, %rs3; -; CHECK-NEXT: shl.b32 %r18, %r17, 16; -; CHECK-NEXT: mov.b32 %f9, %r18; -; CHECK-NEXT: add.f32 %f10, %f8, %f9; -; CHECK-NEXT: cvt.rn.bf16x2.f32 %r19, %f10, %f7; -; CHECK-NEXT: st.param.b32 [func_retval0], %r19; +; CHECK-NEXT: mov.b32 %r7, 1065369472; +; CHECK-NEXT: mov.b32 %r8, 1088438496; +; CHECK-NEXT: fma.rn.bf16x2 %r9, %r4, %r7, %r8; +; CHECK-NEXT: fma.rn.bf16x2 %r10, %r6, %r7, %r9; +; CHECK-NEXT: st.param.b32 [func_retval0], %r10; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: 
fma_bf16x2_expanded_no_nans_multiple_uses_of_fma( diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll index 561f2b0cc0673..eb51d7db81372 100644 --- a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll +++ b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll @@ -221,26 +221,18 @@ define bfloat @fma_bf16_no_nans(bfloat %a, bfloat %b, bfloat %c) #0 { define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloat %c) #0 { ; CHECK-LABEL: fma_bf16_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<7>; -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-NEXT: .reg .b16 %rs<9>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [fma_bf16_no_nans_multiple_uses_of_fma_param_0]; ; CHECK-NEXT: ld.param.b16 %rs2, [fma_bf16_no_nans_multiple_uses_of_fma_param_1]; ; CHECK-NEXT: ld.param.b16 %rs3, [fma_bf16_no_nans_multiple_uses_of_fma_param_2]; ; CHECK-NEXT: fma.rn.bf16 %rs4, %rs1, %rs2, %rs3; -; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: shl.b32 %r2, %r1, 16; -; CHECK-NEXT: mov.b32 %f1, %r2; -; CHECK-NEXT: add.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs5, %f2; -; CHECK-NEXT: cvt.u32.u16 %r3, %rs5; -; CHECK-NEXT: shl.b32 %r4, %r3, 16; -; CHECK-NEXT: mov.b32 %f3, %r4; -; CHECK-NEXT: add.f32 %f4, %f3, %f1; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs6, %f4; -; CHECK-NEXT: st.param.b16 [func_retval0], %rs6; +; CHECK-NEXT: mov.b16 %rs5, 0x3F80; +; CHECK-NEXT: mov.b16 %rs6, 0x40E0; +; CHECK-NEXT: fma.rn.bf16 %rs7, %rs4, %rs5, %rs6; +; CHECK-NEXT: fma.rn.bf16 %rs8, %rs7, %rs5, %rs4; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs8; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16_no_nans_multiple_uses_of_fma( @@ -642,36 +634,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) #0 { ; 
CHECK-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<5>; -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-NEXT: .reg .f32 %f<9>; +; CHECK-NEXT: .reg .b32 %r<9>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2]; ; CHECK-NEXT: ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1]; ; CHECK-NEXT: ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0]; ; CHECK-NEXT: fma.rn.bf16x2 %r4, %r3, %r2, %r1; -; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; -; CHECK-NEXT: cvt.u32.u16 %r5, %rs2; -; CHECK-NEXT: shl.b32 %r6, %r5, 16; -; CHECK-NEXT: mov.b32 %f1, %r6; -; CHECK-NEXT: add.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs3, %f2; -; CHECK-NEXT: cvt.u32.u16 %r7, %rs1; -; CHECK-NEXT: shl.b32 %r8, %r7, 16; -; CHECK-NEXT: mov.b32 %f3, %r8; -; CHECK-NEXT: add.f32 %f4, %f3, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs4, %f4; -; CHECK-NEXT: cvt.u32.u16 %r9, %rs4; -; CHECK-NEXT: shl.b32 %r10, %r9, 16; -; CHECK-NEXT: mov.b32 %f5, %r10; -; CHECK-NEXT: add.f32 %f6, %f5, %f3; -; CHECK-NEXT: cvt.u32.u16 %r11, %rs3; -; CHECK-NEXT: shl.b32 %r12, %r11, 16; -; CHECK-NEXT: mov.b32 %f7, %r12; -; CHECK-NEXT: add.f32 %f8, %f7, %f1; -; CHECK-NEXT: cvt.rn.bf16x2.f32 %r13, %f8, %f6; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-NEXT: mov.b32 %r5, 1065369472; +; CHECK-NEXT: mov.b32 %r6, 1088438496; +; CHECK-NEXT: fma.rn.bf16x2 %r7, %r4, %r5, %r6; +; CHECK-NEXT: fma.rn.bf16x2 %r8, %r7, %r5, %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma( diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll index b20ca24dd91a0..a3545f5171425 100644 --- a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll +++ b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll @@ -233,9 +233,7 @@ define bfloat 
@fma_bf16_expanded_no_nans(bfloat %a, bfloat %b, bfloat %c) { define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloat %c) { ; CHECK-LABEL: fma_bf16_expanded_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<9>; -; CHECK-NEXT: .reg .b32 %r<7>; -; CHECK-NEXT: .reg .f32 %f<6>; +; CHECK-NEXT: .reg .b16 %rs<11>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_0]; @@ -244,20 +242,11 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat ; CHECK-NEXT: fma.rn.bf16 %rs4, %rs1, %rs2, %rs3; ; CHECK-NEXT: mov.b16 %rs5, 0x0000; ; CHECK-NEXT: max.bf16 %rs6, %rs4, %rs5; -; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: shl.b32 %r2, %r1, 16; -; CHECK-NEXT: mov.b32 %f1, %r2; -; CHECK-NEXT: add.rn.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs7, %f2; -; CHECK-NEXT: cvt.u32.u16 %r3, %rs6; -; CHECK-NEXT: shl.b32 %r4, %r3, 16; -; CHECK-NEXT: mov.b32 %f3, %r4; -; CHECK-NEXT: cvt.u32.u16 %r5, %rs7; -; CHECK-NEXT: shl.b32 %r6, %r5, 16; -; CHECK-NEXT: mov.b32 %f4, %r6; -; CHECK-NEXT: add.rn.f32 %f5, %f3, %f4; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs8, %f5; -; CHECK-NEXT: st.param.b16 [func_retval0], %rs8; +; CHECK-NEXT: mov.b16 %rs7, 0x3F80; +; CHECK-NEXT: mov.b16 %rs8, 0x40E0; +; CHECK-NEXT: fma.rn.bf16 %rs9, %rs4, %rs7, %rs8; +; CHECK-NEXT: fma.rn.bf16 %rs10, %rs6, %rs7, %rs9; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs10; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16_expanded_no_nans_multiple_uses_of_fma( @@ -694,9 +683,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> % define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) { ; CHECK-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<7>; -; CHECK-NEXT: .reg .b32 %r<20>; -; CHECK-NEXT: .reg .f32 %f<11>; +; CHECK-NEXT: 
.reg .b32 %r<11>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2]; @@ -705,34 +692,11 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa ; CHECK-NEXT: fma.rn.bf16x2 %r4, %r3, %r2, %r1; ; CHECK-NEXT: mov.b32 %r5, 0; ; CHECK-NEXT: max.bf16x2 %r6, %r4, %r5; -; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; -; CHECK-NEXT: cvt.u32.u16 %r7, %rs2; -; CHECK-NEXT: shl.b32 %r8, %r7, 16; -; CHECK-NEXT: mov.b32 %f1, %r8; -; CHECK-NEXT: add.rn.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs3, %f2; -; CHECK-NEXT: cvt.u32.u16 %r9, %rs1; -; CHECK-NEXT: shl.b32 %r10, %r9, 16; -; CHECK-NEXT: mov.b32 %f3, %r10; -; CHECK-NEXT: add.rn.f32 %f4, %f3, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs4, %f4; -; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r6; -; CHECK-NEXT: cvt.u32.u16 %r11, %rs5; -; CHECK-NEXT: shl.b32 %r12, %r11, 16; -; CHECK-NEXT: mov.b32 %f5, %r12; -; CHECK-NEXT: cvt.u32.u16 %r13, %rs4; -; CHECK-NEXT: shl.b32 %r14, %r13, 16; -; CHECK-NEXT: mov.b32 %f6, %r14; -; CHECK-NEXT: add.rn.f32 %f7, %f5, %f6; -; CHECK-NEXT: cvt.u32.u16 %r15, %rs6; -; CHECK-NEXT: shl.b32 %r16, %r15, 16; -; CHECK-NEXT: mov.b32 %f8, %r16; -; CHECK-NEXT: cvt.u32.u16 %r17, %rs3; -; CHECK-NEXT: shl.b32 %r18, %r17, 16; -; CHECK-NEXT: mov.b32 %f9, %r18; -; CHECK-NEXT: add.rn.f32 %f10, %f8, %f9; -; CHECK-NEXT: cvt.rn.bf16x2.f32 %r19, %f10, %f7; -; CHECK-NEXT: st.param.b32 [func_retval0], %r19; +; CHECK-NEXT: mov.b32 %r7, 1065369472; +; CHECK-NEXT: mov.b32 %r8, 1088438496; +; CHECK-NEXT: fma.rn.bf16x2 %r9, %r4, %r7, %r8; +; CHECK-NEXT: fma.rn.bf16x2 %r10, %r6, %r7, %r9; +; CHECK-NEXT: st.param.b32 [func_retval0], %r10; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma( @@ -1204,26 +1168,18 @@ define bfloat @fma_bf16_no_nans(bfloat %a, bfloat %b, bfloat %c) { define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloat %c) { ; CHECK-LABEL: 
fma_bf16_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<7>; -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-NEXT: .reg .b16 %rs<9>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [fma_bf16_no_nans_multiple_uses_of_fma_param_0]; ; CHECK-NEXT: ld.param.b16 %rs2, [fma_bf16_no_nans_multiple_uses_of_fma_param_1]; ; CHECK-NEXT: ld.param.b16 %rs3, [fma_bf16_no_nans_multiple_uses_of_fma_param_2]; ; CHECK-NEXT: fma.rn.bf16 %rs4, %rs1, %rs2, %rs3; -; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: shl.b32 %r2, %r1, 16; -; CHECK-NEXT: mov.b32 %f1, %r2; -; CHECK-NEXT: add.rn.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs5, %f2; -; CHECK-NEXT: cvt.u32.u16 %r3, %rs5; -; CHECK-NEXT: shl.b32 %r4, %r3, 16; -; CHECK-NEXT: mov.b32 %f3, %r4; -; CHECK-NEXT: add.rn.f32 %f4, %f3, %f1; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs6, %f4; -; CHECK-NEXT: st.param.b16 [func_retval0], %rs6; +; CHECK-NEXT: mov.b16 %rs5, 0x3F80; +; CHECK-NEXT: mov.b16 %rs6, 0x40E0; +; CHECK-NEXT: fma.rn.bf16 %rs7, %rs4, %rs5, %rs6; +; CHECK-NEXT: fma.rn.bf16 %rs8, %rs7, %rs5, %rs4; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs8; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16_no_nans_multiple_uses_of_fma( @@ -1629,36 +1585,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) { ; CHECK-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma( ; CHECK: { -; CHECK-NEXT: .reg .b16 %rs<5>; -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-NEXT: .reg .f32 %f<9>; +; CHECK-NEXT: .reg .b32 %r<9>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2]; ; CHECK-NEXT: ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1]; ; CHECK-NEXT: ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0]; ; CHECK-NEXT: fma.rn.bf16x2 %r4, 
%r3, %r2, %r1; -; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; -; CHECK-NEXT: cvt.u32.u16 %r5, %rs2; -; CHECK-NEXT: shl.b32 %r6, %r5, 16; -; CHECK-NEXT: mov.b32 %f1, %r6; -; CHECK-NEXT: add.rn.f32 %f2, %f1, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs3, %f2; -; CHECK-NEXT: cvt.u32.u16 %r7, %rs1; -; CHECK-NEXT: shl.b32 %r8, %r7, 16; -; CHECK-NEXT: mov.b32 %f3, %r8; -; CHECK-NEXT: add.rn.f32 %f4, %f3, 0f40E00000; -; CHECK-NEXT: cvt.rn.bf16.f32 %rs4, %f4; -; CHECK-NEXT: cvt.u32.u16 %r9, %rs4; -; CHECK-NEXT: shl.b32 %r10, %r9, 16; -; CHECK-NEXT: mov.b32 %f5, %r10; -; CHECK-NEXT: add.rn.f32 %f6, %f5, %f3; -; CHECK-NEXT: cvt.u32.u16 %r11, %rs3; -; CHECK-NEXT: shl.b32 %r12, %r11, 16; -; CHECK-NEXT: mov.b32 %f7, %r12; -; CHECK-NEXT: add.rn.f32 %f8, %f7, %f1; -; CHECK-NEXT: cvt.rn.bf16x2.f32 %r13, %f8, %f6; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-NEXT: mov.b32 %r5, 1065369472; +; CHECK-NEXT: mov.b32 %r6, 1088438496; +; CHECK-NEXT: fma.rn.bf16x2 %r7, %r4, %r5, %r6; +; CHECK-NEXT: fma.rn.bf16x2 %r8, %r7, %r5, %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; ; ; CHECK-FTZ-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma( diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll index 2ffb079e83b0b..df1c803ca8850 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -1133,132 +1133,200 @@ define <16 x i32> @shuffle_disjoint_lanes_one_splat(i32 %v, <16 x i32> %w) { define <4 x i128> @shuffle_i128(<4 x i128> %a) { ; RV32-LABEL: shuffle_i128: ; RV32: # %bb.0: -; RV32-NEXT: lw a2, 0(a1) -; RV32-NEXT: lw a3, 4(a1) -; RV32-NEXT: lw a4, 8(a1) -; RV32-NEXT: lw a5, 12(a1) -; RV32-NEXT: lw a6, 48(a1) -; RV32-NEXT: lw a7, 52(a1) -; RV32-NEXT: lw t0, 56(a1) -; RV32-NEXT: lw t1, 60(a1) -; RV32-NEXT: lw t2, 32(a1) -; RV32-NEXT: lw t3, 36(a1) -; RV32-NEXT: lw t4, 40(a1) -; RV32-NEXT: lw a1, 44(a1) -; 
RV32-NEXT: sw t2, 48(a0) -; RV32-NEXT: sw t3, 52(a0) -; RV32-NEXT: sw t4, 56(a0) -; RV32-NEXT: sw a1, 60(a0) -; RV32-NEXT: sw a6, 32(a0) -; RV32-NEXT: sw a7, 36(a0) -; RV32-NEXT: sw t0, 40(a0) -; RV32-NEXT: sw t1, 44(a0) -; RV32-NEXT: sw a2, 16(a0) -; RV32-NEXT: sw a3, 20(a0) -; RV32-NEXT: sw a4, 24(a0) -; RV32-NEXT: sw a5, 28(a0) -; RV32-NEXT: sw a2, 0(a0) -; RV32-NEXT: sw a3, 4(a0) -; RV32-NEXT: sw a4, 8(a0) -; RV32-NEXT: sw a5, 12(a0) +; RV32-NEXT: addi sp, sp, -128 +; RV32-NEXT: .cfi_def_cfa_offset 128 +; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 +; RV32-NEXT: .cfi_offset s0, -8 +; RV32-NEXT: addi s0, sp, 128 +; RV32-NEXT: .cfi_def_cfa s0, 0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: lw a2, 60(a1) +; RV32-NEXT: sw a2, 60(sp) +; RV32-NEXT: lw a2, 56(a1) +; RV32-NEXT: sw a2, 56(sp) +; RV32-NEXT: lw a2, 52(a1) +; RV32-NEXT: sw a2, 52(sp) +; RV32-NEXT: lw a2, 48(a1) +; RV32-NEXT: sw a2, 48(sp) +; RV32-NEXT: lw a2, 44(a1) +; RV32-NEXT: sw a2, 44(sp) +; RV32-NEXT: lw a2, 40(a1) +; RV32-NEXT: sw a2, 40(sp) +; RV32-NEXT: lw a2, 36(a1) +; RV32-NEXT: sw a2, 36(sp) +; RV32-NEXT: lw a2, 32(a1) +; RV32-NEXT: sw a2, 32(sp) +; RV32-NEXT: lw a2, 12(a1) +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: lw a2, 8(a1) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lw a2, 4(a1) +; RV32-NEXT: sw a2, 4(sp) +; RV32-NEXT: lw a1, 0(a1) +; RV32-NEXT: mv a2, sp +; RV32-NEXT: sw a1, 0(sp) +; RV32-NEXT: lui a1, %hi(.LCPI78_0) +; RV32-NEXT: addi a1, a1, %lo(.LCPI78_0) +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vle32.v v8, (a2) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32-NEXT: vle16.v v12, (a1) +; RV32-NEXT: vrgatherei16.vv v16, v8, v12 +; RV32-NEXT: vse64.v v16, (a0) +; RV32-NEXT: addi sp, s0, -128 +; RV32-NEXT: .cfi_def_cfa sp, 128 +; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: 
.cfi_restore s0 +; RV32-NEXT: addi sp, sp, 128 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: shuffle_i128: ; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -128 +; RV64-NEXT: .cfi_def_cfa_offset 128 +; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 +; RV64-NEXT: .cfi_offset s0, -16 +; RV64-NEXT: addi s0, sp, 128 +; RV64-NEXT: .cfi_def_cfa s0, 0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: ld a2, 56(a1) +; RV64-NEXT: sd a2, 56(sp) ; RV64-NEXT: ld a2, 48(a1) -; RV64-NEXT: ld a3, 56(a1) -; RV64-NEXT: ld a4, 0(a1) -; RV64-NEXT: ld a5, 8(a1) -; RV64-NEXT: ld a6, 32(a1) -; RV64-NEXT: ld a1, 40(a1) -; RV64-NEXT: sd a2, 32(a0) -; RV64-NEXT: sd a3, 40(a0) -; RV64-NEXT: sd a6, 48(a0) -; RV64-NEXT: sd a1, 56(a0) -; RV64-NEXT: sd a4, 0(a0) -; RV64-NEXT: sd a5, 8(a0) -; RV64-NEXT: sd a4, 16(a0) -; RV64-NEXT: sd a5, 24(a0) +; RV64-NEXT: sd a2, 48(sp) +; RV64-NEXT: ld a2, 40(a1) +; RV64-NEXT: sd a2, 40(sp) +; RV64-NEXT: ld a2, 32(a1) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: ld a2, 8(a1) +; RV64-NEXT: sd a2, 8(sp) +; RV64-NEXT: ld a1, 0(a1) +; RV64-NEXT: mv a2, sp +; RV64-NEXT: sd a1, 0(sp) +; RV64-NEXT: lui a1, %hi(.LCPI78_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI78_0) +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v8, (a2) +; RV64-NEXT: vle16.v v12, (a1) +; RV64-NEXT: vrgatherei16.vv v16, v8, v12 +; RV64-NEXT: vse64.v v16, (a0) +; RV64-NEXT: addi sp, s0, -128 +; RV64-NEXT: .cfi_def_cfa sp, 128 +; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: addi sp, sp, 128 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> ret <4 x i128> %res } define void @shuffle_i128_ldst(ptr %p) { -; RV32-LABEL: shuffle_i128_ldst: +; CHECK-LABEL: shuffle_i128_ldst: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: lui a1, %hi(.LCPI79_0) +; CHECK-NEXT: addi a1, a1, %lo(.LCPI79_0) +; CHECK-NEXT: vle16.v v12, (a1) +; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 +; CHECK-NEXT: vse64.v v16, (a0) +; CHECK-NEXT: ret + %a = load <4 x i128>, ptr %p + %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> + store <4 x i128> %res, ptr %p + ret void +} + +define void @shuffle_i256_ldst(ptr %p) { +; CHECK-LABEL: shuffle_i256_ldst: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a1, %hi(.LCPI80_0) +; CHECK-NEXT: addi a1, a1, %lo(.LCPI80_0) +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vle64.v v16, (a0) +; CHECK-NEXT: vsext.vf2 v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vrgatherei16.vv v24, v16, v10 +; CHECK-NEXT: vse64.v v24, (a0) +; CHECK-NEXT: ret + %a = load <4 x i256>, ptr %p + %res = shufflevector <4 x i256> %a, <4 x i256> poison, <4 x i32> + store <4 x i256> %res, ptr %p + ret void +} + +define void @shuffle_i64_splat(ptr %p) nounwind { +; RV32-LABEL: shuffle_i64_splat: ; RV32: # %bb.0: -; RV32-NEXT: lw a1, 48(a0) -; RV32-NEXT: lw a2, 52(a0) -; RV32-NEXT: lw a3, 56(a0) -; RV32-NEXT: lw a4, 60(a0) -; RV32-NEXT: lw a5, 0(a0) -; RV32-NEXT: lw a6, 4(a0) -; RV32-NEXT: lw a7, 8(a0) -; RV32-NEXT: lw t0, 12(a0) -; RV32-NEXT: lw t1, 32(a0) -; RV32-NEXT: lw t2, 36(a0) -; RV32-NEXT: lw t3, 40(a0) -; RV32-NEXT: lw t4, 44(a0) -; RV32-NEXT: sw t1, 48(a0) -; RV32-NEXT: sw t2, 52(a0) -; RV32-NEXT: sw t3, 56(a0) -; RV32-NEXT: sw t4, 60(a0) -; RV32-NEXT: sw a5, 16(a0) -; RV32-NEXT: sw a6, 20(a0) -; RV32-NEXT: sw a7, 24(a0) -; RV32-NEXT: sw t0, 28(a0) +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v8, (a0), zero +; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: shuffle_i64_splat: +; RV64: # %bb.0: +; RV64-NEXT: ld a1, 0(a0) +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x 
v8, a1 +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: ret + %a = load <4 x i64>, ptr %p + %res = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> + store <4 x i64> %res, ptr %p + ret void +} + +define void @shuffle_i128_splat(ptr %p) nounwind { +; RV32-LABEL: shuffle_i128_splat: +; RV32: # %bb.0: +; RV32-NEXT: lw a1, 0(a0) +; RV32-NEXT: lw a2, 4(a0) +; RV32-NEXT: lw a3, 8(a0) +; RV32-NEXT: lw a4, 12(a0) +; RV32-NEXT: sw a1, 48(a0) +; RV32-NEXT: sw a2, 52(a0) +; RV32-NEXT: sw a3, 56(a0) +; RV32-NEXT: sw a4, 60(a0) +; RV32-NEXT: sw a1, 16(a0) +; RV32-NEXT: sw a2, 20(a0) +; RV32-NEXT: sw a3, 24(a0) +; RV32-NEXT: sw a4, 28(a0) ; RV32-NEXT: sw a1, 32(a0) ; RV32-NEXT: sw a2, 36(a0) ; RV32-NEXT: sw a3, 40(a0) ; RV32-NEXT: sw a4, 44(a0) ; RV32-NEXT: ret ; -; RV64-LABEL: shuffle_i128_ldst: +; RV64-LABEL: shuffle_i128_splat: ; RV64: # %bb.0: ; RV64-NEXT: ld a1, 0(a0) ; RV64-NEXT: ld a2, 8(a0) -; RV64-NEXT: ld a3, 32(a0) -; RV64-NEXT: ld a4, 40(a0) -; RV64-NEXT: ld a5, 48(a0) -; RV64-NEXT: ld a6, 56(a0) -; RV64-NEXT: sd a3, 48(a0) -; RV64-NEXT: sd a4, 56(a0) +; RV64-NEXT: sd a1, 48(a0) +; RV64-NEXT: sd a2, 56(a0) ; RV64-NEXT: sd a1, 16(a0) ; RV64-NEXT: sd a2, 24(a0) -; RV64-NEXT: sd a5, 32(a0) -; RV64-NEXT: sd a6, 40(a0) +; RV64-NEXT: sd a1, 32(a0) +; RV64-NEXT: sd a2, 40(a0) ; RV64-NEXT: ret %a = load <4 x i128>, ptr %p - %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> + %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> store <4 x i128> %res, ptr %p ret void } -define void @shuffle_i256_ldst(ptr %p) { -; RV32-LABEL: shuffle_i256_ldst: +define void @shuffle_i256_splat(ptr %p) nounwind { +; RV32-LABEL: shuffle_i256_splat: ; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -48 -; RV32-NEXT: .cfi_def_cfa_offset 48 -; RV32-NEXT: sw s0, 44(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s1, 40(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s2, 36(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s3, 32(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s4, 28(sp) # 4-byte 
Folded Spill -; RV32-NEXT: sw s5, 24(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s6, 20(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s7, 16(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s8, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s9, 8(sp) # 4-byte Folded Spill -; RV32-NEXT: .cfi_offset s0, -4 -; RV32-NEXT: .cfi_offset s1, -8 -; RV32-NEXT: .cfi_offset s2, -12 -; RV32-NEXT: .cfi_offset s3, -16 -; RV32-NEXT: .cfi_offset s4, -20 -; RV32-NEXT: .cfi_offset s5, -24 -; RV32-NEXT: .cfi_offset s6, -28 -; RV32-NEXT: .cfi_offset s7, -32 -; RV32-NEXT: .cfi_offset s8, -36 -; RV32-NEXT: .cfi_offset s9, -40 ; RV32-NEXT: lw a1, 0(a0) ; RV32-NEXT: lw a2, 4(a0) ; RV32-NEXT: lw a3, 8(a0) @@ -1267,38 +1335,22 @@ define void @shuffle_i256_ldst(ptr %p) { ; RV32-NEXT: lw a6, 20(a0) ; RV32-NEXT: lw a7, 24(a0) ; RV32-NEXT: lw t0, 28(a0) -; RV32-NEXT: lw t1, 96(a0) -; RV32-NEXT: lw t2, 100(a0) -; RV32-NEXT: lw t3, 104(a0) -; RV32-NEXT: lw t4, 108(a0) -; RV32-NEXT: lw t5, 112(a0) -; RV32-NEXT: lw t6, 116(a0) -; RV32-NEXT: lw s0, 120(a0) -; RV32-NEXT: lw s1, 124(a0) -; RV32-NEXT: lw s2, 64(a0) -; RV32-NEXT: lw s3, 68(a0) -; RV32-NEXT: lw s4, 72(a0) -; RV32-NEXT: lw s5, 76(a0) -; RV32-NEXT: lw s6, 80(a0) -; RV32-NEXT: lw s7, 84(a0) -; RV32-NEXT: lw s8, 88(a0) -; RV32-NEXT: lw s9, 92(a0) -; RV32-NEXT: sw s6, 112(a0) -; RV32-NEXT: sw s7, 116(a0) -; RV32-NEXT: sw s8, 120(a0) -; RV32-NEXT: sw s9, 124(a0) -; RV32-NEXT: sw s2, 96(a0) -; RV32-NEXT: sw s3, 100(a0) -; RV32-NEXT: sw s4, 104(a0) -; RV32-NEXT: sw s5, 108(a0) -; RV32-NEXT: sw t5, 80(a0) -; RV32-NEXT: sw t6, 84(a0) -; RV32-NEXT: sw s0, 88(a0) -; RV32-NEXT: sw s1, 92(a0) -; RV32-NEXT: sw t1, 64(a0) -; RV32-NEXT: sw t2, 68(a0) -; RV32-NEXT: sw t3, 72(a0) -; RV32-NEXT: sw t4, 76(a0) +; RV32-NEXT: sw a5, 112(a0) +; RV32-NEXT: sw a6, 116(a0) +; RV32-NEXT: sw a7, 120(a0) +; RV32-NEXT: sw t0, 124(a0) +; RV32-NEXT: sw a1, 96(a0) +; RV32-NEXT: sw a2, 100(a0) +; RV32-NEXT: sw a3, 104(a0) +; RV32-NEXT: sw a4, 108(a0) +; RV32-NEXT: sw a5, 80(a0) +; 
RV32-NEXT: sw a6, 84(a0) +; RV32-NEXT: sw a7, 88(a0) +; RV32-NEXT: sw t0, 92(a0) +; RV32-NEXT: sw a1, 64(a0) +; RV32-NEXT: sw a2, 68(a0) +; RV32-NEXT: sw a3, 72(a0) +; RV32-NEXT: sw a4, 76(a0) ; RV32-NEXT: sw a5, 48(a0) ; RV32-NEXT: sw a6, 52(a0) ; RV32-NEXT: sw a7, 56(a0) @@ -1307,59 +1359,30 @@ define void @shuffle_i256_ldst(ptr %p) { ; RV32-NEXT: sw a2, 36(a0) ; RV32-NEXT: sw a3, 40(a0) ; RV32-NEXT: sw a4, 44(a0) -; RV32-NEXT: lw s0, 44(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s1, 40(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s2, 36(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s3, 32(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s4, 28(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s5, 24(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s6, 20(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s7, 16(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s8, 12(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s9, 8(sp) # 4-byte Folded Reload -; RV32-NEXT: .cfi_restore s0 -; RV32-NEXT: .cfi_restore s1 -; RV32-NEXT: .cfi_restore s2 -; RV32-NEXT: .cfi_restore s3 -; RV32-NEXT: .cfi_restore s4 -; RV32-NEXT: .cfi_restore s5 -; RV32-NEXT: .cfi_restore s6 -; RV32-NEXT: .cfi_restore s7 -; RV32-NEXT: .cfi_restore s8 -; RV32-NEXT: .cfi_restore s9 -; RV32-NEXT: addi sp, sp, 48 -; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; -; RV64-LABEL: shuffle_i256_ldst: +; RV64-LABEL: shuffle_i256_splat: ; RV64: # %bb.0: -; RV64-NEXT: ld a1, 96(a0) -; RV64-NEXT: ld a2, 104(a0) -; RV64-NEXT: ld a3, 112(a0) -; RV64-NEXT: ld a4, 120(a0) -; RV64-NEXT: ld a5, 0(a0) -; RV64-NEXT: ld a6, 8(a0) -; RV64-NEXT: ld a7, 16(a0) -; RV64-NEXT: ld t0, 24(a0) -; RV64-NEXT: ld t1, 64(a0) -; RV64-NEXT: ld t2, 72(a0) -; RV64-NEXT: ld t3, 80(a0) -; RV64-NEXT: ld t4, 88(a0) -; RV64-NEXT: sd t1, 96(a0) -; RV64-NEXT: sd t2, 104(a0) -; RV64-NEXT: sd t3, 112(a0) -; RV64-NEXT: sd t4, 120(a0) -; RV64-NEXT: sd a5, 32(a0) -; RV64-NEXT: sd a6, 40(a0) -; RV64-NEXT: sd a7, 48(a0) -; RV64-NEXT: sd t0, 56(a0) +; RV64-NEXT: ld a1, 0(a0) +; 
RV64-NEXT: ld a2, 8(a0) +; RV64-NEXT: ld a3, 16(a0) +; RV64-NEXT: ld a4, 24(a0) +; RV64-NEXT: sd a1, 96(a0) +; RV64-NEXT: sd a2, 104(a0) +; RV64-NEXT: sd a3, 112(a0) +; RV64-NEXT: sd a4, 120(a0) +; RV64-NEXT: sd a1, 32(a0) +; RV64-NEXT: sd a2, 40(a0) +; RV64-NEXT: sd a3, 48(a0) +; RV64-NEXT: sd a4, 56(a0) ; RV64-NEXT: sd a1, 64(a0) ; RV64-NEXT: sd a2, 72(a0) ; RV64-NEXT: sd a3, 80(a0) ; RV64-NEXT: sd a4, 88(a0) ; RV64-NEXT: ret %a = load <4 x i256>, ptr %p - %res = shufflevector <4 x i256> %a, <4 x i256> poison, <4 x i32> + %res = shufflevector <4 x i256> %a, <4 x i256> poison, <4 x i32> store <4 x i256> %res, ptr %p ret void } + diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll index 54d0acc3ba8b5..afd560fd74d16 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll @@ -396,49 +396,16 @@ entry: } define void @shuffle_i128_ldst(ptr %p) vscale_range(2,2) { -; RV32-LABEL: shuffle_i128_ldst: -; RV32: # %bb.0: -; RV32-NEXT: lw a1, 48(a0) -; RV32-NEXT: lw a2, 52(a0) -; RV32-NEXT: lw a3, 56(a0) -; RV32-NEXT: lw a4, 60(a0) -; RV32-NEXT: lw a5, 0(a0) -; RV32-NEXT: lw a6, 4(a0) -; RV32-NEXT: lw a7, 8(a0) -; RV32-NEXT: lw t0, 12(a0) -; RV32-NEXT: lw t1, 32(a0) -; RV32-NEXT: lw t2, 36(a0) -; RV32-NEXT: lw t3, 40(a0) -; RV32-NEXT: lw t4, 44(a0) -; RV32-NEXT: sw t1, 48(a0) -; RV32-NEXT: sw t2, 52(a0) -; RV32-NEXT: sw t3, 56(a0) -; RV32-NEXT: sw t4, 60(a0) -; RV32-NEXT: sw a5, 16(a0) -; RV32-NEXT: sw a6, 20(a0) -; RV32-NEXT: sw a7, 24(a0) -; RV32-NEXT: sw t0, 28(a0) -; RV32-NEXT: sw a1, 32(a0) -; RV32-NEXT: sw a2, 36(a0) -; RV32-NEXT: sw a3, 40(a0) -; RV32-NEXT: sw a4, 44(a0) -; RV32-NEXT: ret -; -; RV64-LABEL: shuffle_i128_ldst: -; RV64: # %bb.0: -; RV64-NEXT: ld a1, 0(a0) -; RV64-NEXT: ld a2, 8(a0) -; RV64-NEXT: ld a3, 32(a0) -; RV64-NEXT: ld a4, 40(a0) -; RV64-NEXT: ld a5, 48(a0) -; RV64-NEXT: 
ld a6, 56(a0) -; RV64-NEXT: sd a3, 48(a0) -; RV64-NEXT: sd a4, 56(a0) -; RV64-NEXT: sd a1, 16(a0) -; RV64-NEXT: sd a2, 24(a0) -; RV64-NEXT: sd a5, 32(a0) -; RV64-NEXT: sd a6, 40(a0) -; RV64-NEXT: ret +; CHECK-LABEL: shuffle_i128_ldst: +; CHECK: # %bb.0: +; CHECK-NEXT: vl4re64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vmv4r.v v12, v8 +; CHECK-NEXT: vmv1r.v v14, v11 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vs4r.v v12, (a0) +; CHECK-NEXT: ret %a = load <4 x i128>, ptr %p %res = shufflevector <4 x i128> %a, <4 x i128> poison, <4 x i32> store <4 x i128> %res, ptr %p @@ -446,129 +413,19 @@ define void @shuffle_i128_ldst(ptr %p) vscale_range(2,2) { } define void @shuffle_i256_ldst(ptr %p) vscale_range(2,2) { -; RV32-LABEL: shuffle_i256_ldst: -; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -48 -; RV32-NEXT: .cfi_def_cfa_offset 48 -; RV32-NEXT: sw s0, 44(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s1, 40(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s2, 36(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s3, 32(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s4, 28(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s5, 24(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s6, 20(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s7, 16(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s8, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s9, 8(sp) # 4-byte Folded Spill -; RV32-NEXT: .cfi_offset s0, -4 -; RV32-NEXT: .cfi_offset s1, -8 -; RV32-NEXT: .cfi_offset s2, -12 -; RV32-NEXT: .cfi_offset s3, -16 -; RV32-NEXT: .cfi_offset s4, -20 -; RV32-NEXT: .cfi_offset s5, -24 -; RV32-NEXT: .cfi_offset s6, -28 -; RV32-NEXT: .cfi_offset s7, -32 -; RV32-NEXT: .cfi_offset s8, -36 -; RV32-NEXT: .cfi_offset s9, -40 -; RV32-NEXT: lw a1, 0(a0) -; RV32-NEXT: lw a2, 4(a0) -; RV32-NEXT: lw a3, 8(a0) -; RV32-NEXT: lw a4, 12(a0) -; RV32-NEXT: lw a5, 16(a0) -; RV32-NEXT: lw a6, 20(a0) -; RV32-NEXT: lw a7, 24(a0) -; RV32-NEXT: lw t0, 28(a0) -; RV32-NEXT: lw t1, 96(a0) -; RV32-NEXT: 
lw t2, 100(a0) -; RV32-NEXT: lw t3, 104(a0) -; RV32-NEXT: lw t4, 108(a0) -; RV32-NEXT: lw t5, 112(a0) -; RV32-NEXT: lw t6, 116(a0) -; RV32-NEXT: lw s0, 120(a0) -; RV32-NEXT: lw s1, 124(a0) -; RV32-NEXT: lw s2, 64(a0) -; RV32-NEXT: lw s3, 68(a0) -; RV32-NEXT: lw s4, 72(a0) -; RV32-NEXT: lw s5, 76(a0) -; RV32-NEXT: lw s6, 80(a0) -; RV32-NEXT: lw s7, 84(a0) -; RV32-NEXT: lw s8, 88(a0) -; RV32-NEXT: lw s9, 92(a0) -; RV32-NEXT: sw s6, 112(a0) -; RV32-NEXT: sw s7, 116(a0) -; RV32-NEXT: sw s8, 120(a0) -; RV32-NEXT: sw s9, 124(a0) -; RV32-NEXT: sw s2, 96(a0) -; RV32-NEXT: sw s3, 100(a0) -; RV32-NEXT: sw s4, 104(a0) -; RV32-NEXT: sw s5, 108(a0) -; RV32-NEXT: sw t5, 80(a0) -; RV32-NEXT: sw t6, 84(a0) -; RV32-NEXT: sw s0, 88(a0) -; RV32-NEXT: sw s1, 92(a0) -; RV32-NEXT: sw t1, 64(a0) -; RV32-NEXT: sw t2, 68(a0) -; RV32-NEXT: sw t3, 72(a0) -; RV32-NEXT: sw t4, 76(a0) -; RV32-NEXT: sw a5, 48(a0) -; RV32-NEXT: sw a6, 52(a0) -; RV32-NEXT: sw a7, 56(a0) -; RV32-NEXT: sw t0, 60(a0) -; RV32-NEXT: sw a1, 32(a0) -; RV32-NEXT: sw a2, 36(a0) -; RV32-NEXT: sw a3, 40(a0) -; RV32-NEXT: sw a4, 44(a0) -; RV32-NEXT: lw s0, 44(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s1, 40(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s2, 36(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s3, 32(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s4, 28(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s5, 24(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s6, 20(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s7, 16(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s8, 12(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s9, 8(sp) # 4-byte Folded Reload -; RV32-NEXT: .cfi_restore s0 -; RV32-NEXT: .cfi_restore s1 -; RV32-NEXT: .cfi_restore s2 -; RV32-NEXT: .cfi_restore s3 -; RV32-NEXT: .cfi_restore s4 -; RV32-NEXT: .cfi_restore s5 -; RV32-NEXT: .cfi_restore s6 -; RV32-NEXT: .cfi_restore s7 -; RV32-NEXT: .cfi_restore s8 -; RV32-NEXT: .cfi_restore s9 -; RV32-NEXT: addi sp, sp, 48 -; RV32-NEXT: .cfi_def_cfa_offset 0 -; RV32-NEXT: ret -; -; 
RV64-LABEL: shuffle_i256_ldst: -; RV64: # %bb.0: -; RV64-NEXT: ld a1, 96(a0) -; RV64-NEXT: ld a2, 104(a0) -; RV64-NEXT: ld a3, 112(a0) -; RV64-NEXT: ld a4, 120(a0) -; RV64-NEXT: ld a5, 0(a0) -; RV64-NEXT: ld a6, 8(a0) -; RV64-NEXT: ld a7, 16(a0) -; RV64-NEXT: ld t0, 24(a0) -; RV64-NEXT: ld t1, 64(a0) -; RV64-NEXT: ld t2, 72(a0) -; RV64-NEXT: ld t3, 80(a0) -; RV64-NEXT: ld t4, 88(a0) -; RV64-NEXT: sd t1, 96(a0) -; RV64-NEXT: sd t2, 104(a0) -; RV64-NEXT: sd t3, 112(a0) -; RV64-NEXT: sd t4, 120(a0) -; RV64-NEXT: sd a5, 32(a0) -; RV64-NEXT: sd a6, 40(a0) -; RV64-NEXT: sd a7, 48(a0) -; RV64-NEXT: sd t0, 56(a0) -; RV64-NEXT: sd a1, 64(a0) -; RV64-NEXT: sd a2, 72(a0) -; RV64-NEXT: sd a3, 80(a0) -; RV64-NEXT: sd a4, 88(a0) -; RV64-NEXT: ret +; CHECK-LABEL: shuffle_i256_ldst: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v9 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: vmv1r.v v20, v14 +; CHECK-NEXT: vmv1r.v v21, v15 +; CHECK-NEXT: vmv1r.v v22, v12 +; CHECK-NEXT: vmv1r.v v23, v13 +; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: ret %a = load <4 x i256>, ptr %p %res = shufflevector <4 x i256> %a, <4 x i256> poison, <4 x i32> store <4 x i256> %res, ptr %p diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll index 83a9b23a387d2..84de566e05dff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll @@ -320,8 +320,8 @@ for.cond.cleanup: ; preds = %vector.body define void @gather_unknown_pow2(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, i64 %shift) { ; CHECK-LABEL: @gather_unknown_pow2( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[STEP:%.*]] = shl i64 8, [[SHIFT:%.*]] -; CHECK-NEXT: [[STRIDE:%.*]] = shl i64 1, [[SHIFT]] +; CHECK-NEXT: [[STRIDE:%.*]] = shl i64 1, [[SHIFT:%.*]] +; 
CHECK-NEXT: [[STEP:%.*]] = shl i64 8, [[SHIFT]] ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[STRIDE]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-probing-dynamic.ll b/llvm/test/CodeGen/RISCV/rvv/stack-probing-dynamic.ll new file mode 100644 index 0000000000000..c3c1643e6de01 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/stack-probing-dynamic.ll @@ -0,0 +1,550 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \ +; RUN: | FileCheck %s -check-prefix=RV64I +; RUN: llc -mtriple=riscv32 -mattr=+m,+v -O2 < %s \ +; RUN: | FileCheck %s -check-prefix=RV32I + +; Tests copied from AArch64. + +; Dynamically-sized allocation, needs a loop which can handle any size at +; runtime. The final iteration of the loop will temporarily put SP below the +; target address, but this doesn't break any of the ABI constraints on the +; stack, and also doesn't probe below the target SP value. 
+define void @dynamic(i64 %size, ptr %out) #0 { +; RV64I-LABEL: dynamic: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: addi s0, sp, 16 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB0_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: sd a0, 0(a1) +; RV64I-NEXT: addi sp, s0, -16 +; RV64I-NEXT: .cfi_def_cfa sp, 16 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: dynamic: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: addi s0, sp, 16 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB0_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: sw a0, 0(a2) +; RV32I-NEXT: addi sp, s0, -16 +; RV32I-NEXT: .cfi_def_cfa sp, 16 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; 
RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %v = alloca i8, i64 %size, align 1 + store ptr %v, ptr %out, align 8 + ret void +} + +; This function has a fixed-size stack slot and a dynamic one. The fixed size +; slot isn't large enough that we would normally probe it, but we need to do so +; here otherwise the gap between the CSR save and the first probe of the +; dynamic allocation could be too far apart when the size of the dynamic +; allocation is close to the guard size. +define void @dynamic_fixed(i64 %size, ptr %out1, ptr %out2) #0 { +; RV64I-LABEL: dynamic_fixed: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -80 +; RV64I-NEXT: .cfi_def_cfa_offset 80 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: addi s0, sp, 80 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: addi a3, s0, -80 +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: sd a3, 0(a1) +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: lui a1, 1 +; RV64I-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a1 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB1_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: sd a0, 0(a2) +; RV64I-NEXT: addi sp, s0, -80 +; RV64I-NEXT: .cfi_def_cfa sp, 80 +; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: addi sp, sp, 80 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: dynamic_fixed: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -80 +; RV32I-NEXT: .cfi_def_cfa_offset 80 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 72(sp) # 
4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: addi s0, sp, 80 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: addi a1, s0, -72 +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: sw a1, 0(a2) +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB1_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: sw a0, 0(a3) +; RV32I-NEXT: addi sp, s0, -80 +; RV32I-NEXT: .cfi_def_cfa sp, 80 +; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: addi sp, sp, 80 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %v1 = alloca i8, i64 64, align 1 + store ptr %v1, ptr %out1, align 8 + %v2 = alloca i8, i64 %size, align 1 + store ptr %v2, ptr %out2, align 8 + ret void +} + +; Dynamic allocation, with an alignment requirement greater than the alignment +; of SP. Done by ANDing the target SP with a constant to align it down, then +; doing the loop as normal. Note that we also re-align the stack in the prolog, +; which isn't actually needed because the only aligned allocations are dynamic, +; this is done even without stack probing. 
+define void @dynamic_align_64(i64 %size, ptr %out) #0 { +; RV64I-LABEL: dynamic_align_64: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -64 +; RV64I-NEXT: .cfi_def_cfa_offset 64 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: .cfi_offset s1, -24 +; RV64I-NEXT: addi s0, sp, 64 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: andi sp, sp, -64 +; RV64I-NEXT: mv s1, sp +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: andi a0, a0, -64 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB2_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: sd a0, 0(a1) +; RV64I-NEXT: addi sp, s0, -64 +; RV64I-NEXT: .cfi_def_cfa sp, 64 +; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 +; RV64I-NEXT: addi sp, sp, 64 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: dynamic_align_64: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -64 +; RV32I-NEXT: .cfi_def_cfa_offset 64 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 52(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: .cfi_offset s1, -12 +; RV32I-NEXT: addi s0, sp, 64 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: andi sp, sp, -64 +; RV32I-NEXT: mv s1, sp +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sub a0, sp, 
a0 +; RV32I-NEXT: andi a0, a0, -64 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB2_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: sw a0, 0(a2) +; RV32I-NEXT: addi sp, s0, -64 +; RV32I-NEXT: .cfi_def_cfa sp, 64 +; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 +; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %v = alloca i8, i64 %size, align 64 + store ptr %v, ptr %out, align 8 + ret void +} + +; Dynamic allocation, with an alignment greater than the stack guard size. The +; only difference to the dynamic allocation is the constant used for aligning +; the target SP, the loop will probe the whole allocation without needing to +; know about the alignment padding. 
+define void @dynamic_align_8192(i64 %size, ptr %out) #0 { +; RV64I-LABEL: dynamic_align_8192: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -2032 +; RV64I-NEXT: .cfi_def_cfa_offset 2032 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 2008(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: .cfi_offset s1, -24 +; RV64I-NEXT: addi s0, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: addi sp, sp, -2048 +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: srli a2, sp, 13 +; RV64I-NEXT: slli sp, a2, 13 +; RV64I-NEXT: mv s1, sp +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: lui a2, 1048574 +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: and a0, a0, a2 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB3_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: sd a0, 0(a1) +; RV64I-NEXT: addi sp, s0, -2032 +; RV64I-NEXT: .cfi_def_cfa sp, 2032 +; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 2008(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 +; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: dynamic_align_8192: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -2032 +; RV32I-NEXT: .cfi_def_cfa_offset 2032 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 
2024(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 2020(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: .cfi_offset s1, -12 +; RV32I-NEXT: addi s0, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: addi sp, sp, -2048 +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: srli a1, sp, 13 +; RV32I-NEXT: slli sp, a1, 13 +; RV32I-NEXT: mv s1, sp +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: lui a1, 1048574 +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB3_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: sw a0, 0(a2) +; RV32I-NEXT: addi sp, s0, -2032 +; RV32I-NEXT: .cfi_def_cfa sp, 2032 +; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 +; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %v = alloca i8, i64 %size, align 8192 + store ptr %v, ptr %out, align 8 + ret void +} + +; If a function has variable-sized stack objects, then any function calls which +; need to pass arguments on the stack must allocate the stack space for them +; dynamically, to ensure they are at the bottom of the frame. 
+define void @no_reserved_call_frame(i64 %n, i32 %dummy) #0 { +; RV64I-LABEL: no_reserved_call_frame: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: addi s0, sp, 16 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: slli a0, a0, 2 +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: .LBB4_1: # %entry +; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB4_1 +; RV64I-NEXT: # %bb.2: # %entry +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: call callee_stack_args +; RV64I-NEXT: addi sp, s0, -16 +; RV64I-NEXT: .cfi_def_cfa sp, 16 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: no_reserved_call_frame: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: addi s0, sp, 16 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: slli a0, a0, 2 +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: lui a2, 1 +; RV32I-NEXT: .LBB4_1: # %entry +; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a2 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB4_1 +; RV32I-NEXT: # %bb.2: # %entry +; 
RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: call callee_stack_args +; RV32I-NEXT: addi sp, s0, -16 +; RV32I-NEXT: .cfi_def_cfa sp, 16 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret +entry: + %v = alloca i32, i64 %n + call void @callee_stack_args(ptr %v, i32 %dummy) + ret void +} + +; Same as above but without a variable-sized allocation, so the reserved call +; frame can be folded into the fixed-size allocation in the prologue. +define void @reserved_call_frame(i64 %n, i32 %dummy) #0 { +; RV64I-LABEL: reserved_call_frame: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: addi sp, sp, -416 +; RV64I-NEXT: .cfi_def_cfa_offset 416 +; RV64I-NEXT: sd ra, 408(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: addi a0, sp, 8 +; RV64I-NEXT: call callee_stack_args +; RV64I-NEXT: ld ra, 408(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: addi sp, sp, 416 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: reserved_call_frame: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -416 +; RV32I-NEXT: .cfi_def_cfa_offset 416 +; RV32I-NEXT: sw ra, 412(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: addi a0, sp, 12 +; RV32I-NEXT: call callee_stack_args +; RV32I-NEXT: lw ra, 412(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: addi sp, sp, 416 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret +entry: + %v = alloca i32, i64 100 + call void @callee_stack_args(ptr %v, i32 %dummy) + ret void +} + +declare void @callee_stack_args(ptr, i32) + +; Dynamic allocation of vectors +define void @dynamic_vector(i64 %size, ptr %out) #0 { +; RV64I-LABEL: dynamic_vector: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 
+; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: addi s0, sp, 16 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: csrr a2, vlenb +; RV64I-NEXT: mul a0, a2, a0 +; RV64I-NEXT: slli a0, a0, 1 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: lui a2, 1 +; RV64I-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB6_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: sd a0, 0(a1) +; RV64I-NEXT: addi sp, s0, -16 +; RV64I-NEXT: .cfi_def_cfa sp, 16 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: dynamic_vector: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: addi s0, sp, 16 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: csrr a1, vlenb +; RV32I-NEXT: mul a0, a1, a0 +; RV32I-NEXT: slli a0, a0, 1 +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB6_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: sw a0, 0(a2) +; RV32I-NEXT: addi sp, s0, -16 +; RV32I-NEXT: .cfi_def_cfa sp, 16 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: addi sp, sp, 16 +; 
RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %v = alloca , i64 %size, align 16 + store ptr %v, ptr %out, align 8 + ret void +} + +attributes #0 = { uwtable(async) "probe-stack"="inline-asm" "frame-pointer"="none" } diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll index f777c450bc106..45f158f929ca8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll @@ -105,6 +105,166 @@ for.cond.cleanup: ; preds = %vector.body ret %accum.next } +define @gather_non_invariant_step(ptr %a, ptr %b, i32 %len) { +; CHECK-LABEL: @gather_non_invariant_step( +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR1]], i32 3 +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP0]], i64 16, splat (i1 true), i32 [[TMP1]]) +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.vp.select.nxv1i64( splat (i1 true), [[TMP3]], undef, i32 [[TMP1]]) +; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] +; CHECK-NEXT: [[B:%.*]] = getelementptr i64, ptr [[B1:%.*]], i64 [[VEC_IND_SCALAR]] +; CHECK-NEXT: [[STEP:%.*]] = load i64, ptr [[B]], align 8 +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[STEP]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add i64 [[VEC_IND_SCALAR1]], [[STEP]] +; CHECK-NEXT: 
[[TMP2:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret [[ACCUM_NEXT]] +; +vector.ph: + %wide.trip.count = zext i32 %len to i64 + %1 = tail call @llvm.stepvector.nxv1i64() + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] + %accum = phi [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ] + %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind, i32 3 + %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %2, i32 8, splat (i1 true), undef) + %accum.next = add %accum, %gather + + %b.gep = getelementptr i64, ptr %b, i64 %index + %step = load i64, ptr %b.gep + %index.next = add nuw i64 %index, %step + %.splatinsert = insertelement poison, i64 %step, i64 0 + %.splat = shufflevector %.splatinsert, poison, zeroinitializer + %vec.ind.next = add %vec.ind, %.splat + %3 = icmp ne i64 %index.next, %wide.trip.count + br i1 %3, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret %accum.next +} + +define @gather_non_invariant_step_shl(ptr %a, ptr %b, i32 %len) { +; CHECK-LABEL: @gather_non_invariant_step_shl( +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 168, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR1]], 
i32 3 +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP0]], i64 64, splat (i1 true), i32 [[TMP1]]) +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.vp.select.nxv1i64( splat (i1 true), [[TMP3]], undef, i32 [[TMP1]]) +; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] +; CHECK-NEXT: [[B:%.*]] = getelementptr i64, ptr [[B1:%.*]], i64 [[VEC_IND_SCALAR]] +; CHECK-NEXT: [[STEP:%.*]] = load i64, ptr [[B]], align 8 +; CHECK-NEXT: [[STEP1:%.*]] = shl i64 [[STEP]], 2 +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[STEP]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add i64 [[VEC_IND_SCALAR1]], [[STEP1]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret [[ACCUM_NEXT]] +; +vector.ph: + %wide.trip.count = zext i32 %len to i64 + %1 = tail call @llvm.stepvector.nxv1i64() + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] + %accum = phi [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ] + + %vec.ind.add = add %vec.ind, splat (i64 42) + %vec.ind.shl = shl %vec.ind.add, splat (i64 2) + + %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind.shl, i32 3 + %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %2, i32 8, splat (i1 true), undef) + %accum.next = add %accum, %gather + + %b.gep = getelementptr i64, ptr %b, i64 %index + %step = load i64, ptr %b.gep + %index.next = add nuw i64 %index, %step + %.splatinsert = insertelement poison, i64 %step, i64 0 + %.splat = shufflevector %.splatinsert, poison, zeroinitializer + %vec.ind.next = add %vec.ind, %.splat + %3 = icmp ne i64 %index.next, %wide.trip.count + br i1 %3, label 
%for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret %accum.next +} + +; Check that the operand of the binary op (%scale.splat in shl) always dominates +; the existing step value when we're adjusting it. +define @gather_splat_op_after_step(ptr %a, ptr %b, i32 %len) { +; CHECK-LABEL: @gather_splat_op_after_step( +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[SCALE:%.*]] = load i64, ptr [[B:%.*]], align 8 +; CHECK-NEXT: [[STRIDE:%.*]] = shl i64 1, [[SCALE]] +; CHECK-NEXT: [[STEP:%.*]] = shl i64 [[TMP0]], [[SCALE]] +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 16 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3 +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP2]], i64 [[TMP1]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.vp.select.nxv1i64( splat (i1 true), [[TMP4]], undef, i32 [[TMP3]]) +; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP0]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[STEP]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[INDEX_NEXT]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret [[ACCUM_NEXT]] +; +vector.ph: + 
%wide.trip.count = zext i32 %len to i64 + %0 = tail call i64 @llvm.vscale.i64() + %1 = tail call @llvm.stepvector.nxv1i64() + %.splatinsert = insertelement poison, i64 %0, i64 0 + %.splat = shufflevector %.splatinsert, poison, zeroinitializer + + %scale = load i64, ptr %b + %scale.head = insertelement poison, i64 %scale, i64 0 + %scale.splat = shufflevector %scale.head, poison, zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] + %accum = phi [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ] + %vec.ind.shl = shl %vec.ind, %scale.splat + %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind.shl, i32 3 + %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %2, i32 8, splat (i1 true), undef) + %accum.next = add %accum, %gather + %index.next = add nuw i64 %index, %0 + %vec.ind.next = add %vec.ind, %.splat + %3 = icmp ne i64 %index.next, %wide.trip.count + br i1 %3, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret %accum.next +} + define void @scatter(ptr %a, i32 %len) { ; CHECK-LABEL: @scatter( ; CHECK-NEXT: vector.ph: @@ -146,6 +306,99 @@ for.cond.cleanup: ; preds = %vector.body ret void } +define void @scatter_non_invariant_step(ptr %a, ptr %b, i32 %len) { +; CHECK-LABEL: @scatter_non_invariant_step( +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR1]], i32 3 +; CHECK-NEXT: [[TMP1:%.*]] 
= call i32 @llvm.vscale.i32() +; CHECK-NEXT: call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i64( zeroinitializer, ptr [[TMP0]], i64 16, splat (i1 true), i32 [[TMP1]]) +; CHECK-NEXT: [[B:%.*]] = getelementptr i64, ptr [[B1:%.*]], i64 [[VEC_IND_SCALAR]] +; CHECK-NEXT: [[STEP:%.*]] = load i64, ptr [[B]], align 8 +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[STEP]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add i64 [[VEC_IND_SCALAR1]], [[STEP]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +vector.ph: + %wide.trip.count = zext i32 %len to i64 + %1 = tail call @llvm.stepvector.nxv1i64() + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] + %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind, i32 3 + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %2, i32 8, splat (i1 true)) + + %b.gep = getelementptr i64, ptr %b, i64 %index + %step = load i64, ptr %b.gep + %index.next = add nuw i64 %index, %step + %.splatinsert = insertelement poison, i64 %step, i64 0 + %.splat = shufflevector %.splatinsert, poison, zeroinitializer + %vec.ind.next = add %vec.ind, %.splat + %3 = icmp ne i64 %index.next, %wide.trip.count + br i1 %3, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} + +define void @scatter_non_invariant_step_add_shl(ptr %a, ptr %b, i32 %len) { +; CHECK-LABEL: @scatter_non_invariant_step_add_shl( +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] 
], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 168, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR1]], i32 3 +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i64( zeroinitializer, ptr [[TMP0]], i64 64, splat (i1 true), i32 [[TMP1]]) +; CHECK-NEXT: [[B:%.*]] = getelementptr i64, ptr [[B1:%.*]], i64 [[VEC_IND_SCALAR]] +; CHECK-NEXT: [[STEP:%.*]] = load i64, ptr [[B]], align 8 +; CHECK-NEXT: [[STEP1:%.*]] = shl i64 [[STEP]], 2 +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[STEP]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add i64 [[VEC_IND_SCALAR1]], [[STEP1]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +vector.ph: + %wide.trip.count = zext i32 %len to i64 + %1 = tail call @llvm.stepvector.nxv1i64() + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] + + %vec.ind.add = add %vec.ind, splat (i64 42) + %vec.ind.shl = shl %vec.ind.add, splat (i64 2) + + %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind.shl, i32 3 + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %2, i32 8, splat (i1 true)) + + %b.gep = getelementptr i64, ptr %b, i64 %index + %step = load i64, ptr %b.gep + %index.next = add nuw i64 %index, %step + %.splatinsert = insertelement poison, i64 %step, i64 0 + %.splat = shufflevector %.splatinsert, poison, zeroinitializer + %vec.ind.next = add %vec.ind, %.splat + %3 = icmp ne i64 %index.next, %wide.trip.count + br i1 
%3, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} + define @gather_loopless(ptr %p, i64 %stride) { ; CHECK-LABEL: @gather_loopless( ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE:%.*]], 4 @@ -491,23 +744,20 @@ define @evl_gather(ptr %a, i32 %len) { ; CHECK-LABEL: @evl_gather( ; CHECK-NEXT: vector.ph: ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 -; CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.stepvector.nxv1i64() ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ELEMS:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[INDEX]] +; CHECK-NEXT: [[ELEMS:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[VEC_IND_SCALAR]] ; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[ELEMS]], i32 1, i1 true) -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], ptr [[A:%.*]], [[VEC_IND]], i32 3 -; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.vp.gather.nxv1i64.nxv1p0( [[TMP2]], splat (i1 true), i32 [[EVL]]) +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR1]], i32 3 +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP0]], i64 16, splat (i1 true), i32 [[EVL]]) ; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] ; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64 -; CHECK-NEXT: [[INDEX_NEXT]] = add 
nuw i64 [[INDEX]], [[EVL_ZEXT]] -; CHECK-NEXT: [[EVL_SPLATINSERT:%.*]] = insertelement poison, i64 [[EVL_ZEXT]], i64 0 -; CHECK-NEXT: [[EVL_SPLAT:%.*]] = shufflevector [[EVL_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[EVL_SPLAT]] -; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[INDEX_NEXT]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[EVL_ZEXT]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add i64 [[VEC_IND_SCALAR1]], [[EVL_ZEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] ; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret [[ACCUM_NEXT]] @@ -548,21 +798,18 @@ define void @evl_scatter(ptr %a, i32 %len) { ; CHECK-LABEL: @evl_scatter( ; CHECK-NEXT: vector.ph: ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LEN:%.*]] to i64 -; CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.stepvector.nxv1i64() ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ELEMS:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[VEC_IND_SCALAR]] +; CHECK-NEXT: [[VEC_IND_SCALAR1:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[VEC_IND_NEXT_SCALAR1:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ELEMS:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[VEC_IND_SCALAR1]] ; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[ELEMS]], i32 1, i1 true) -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], ptr [[A:%.*]], [[VEC_IND]], i32 3 -; CHECK-NEXT: tail call void @llvm.vp.scatter.nxv1i64.nxv1p0( 
zeroinitializer, [[TMP1]], splat (i1 true), i32 [[EVL]]) +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3 +; CHECK-NEXT: call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i64( zeroinitializer, ptr [[TMP0]], i64 16, splat (i1 true), i32 [[EVL]]) ; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64 -; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add nuw i64 [[VEC_IND_SCALAR]], [[EVL_ZEXT]] -; CHECK-NEXT: [[EVL_SPLATINSERT:%.*]] = insertelement poison, i64 [[EVL_ZEXT]], i64 0 -; CHECK-NEXT: [[EVL_SPLAT:%.*]] = shufflevector [[EVL_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[EVL_SPLAT]] -; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR]], [[WIDE_TRIP_COUNT]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR1]] = add nuw i64 [[VEC_IND_SCALAR1]], [[EVL_ZEXT]] +; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[EVL_ZEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[VEC_IND_NEXT_SCALAR1]], [[WIDE_TRIP_COUNT]] ; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir index fe0929a6f8745..edcd32c4098bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir @@ -243,6 +243,36 @@ body: | %y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 ... --- +name: tied_vwop_wv_vs1 +body: | + bb.0: + ; CHECK-LABEL: name: tied_vwop_wv_vs1 + ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 +... 
+--- +name: tied_vwop_wv_vs1_incompatible_eew +body: | + bb.0: + ; CHECK-LABEL: name: tied_vwop_wv_vs1_incompatible_eew + ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */ + ; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 + %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 +... +--- +name: tied_vwop_wv_vs1_incompatible_emul +body: | + bb.0: + ; CHECK-LABEL: name: tied_vwop_wv_vs1_incompatible_emul + ; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 +... +--- name: vop_vf2_vd body: | bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir index 56bfe0fd3eb93..027eb8ca3c17f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir @@ -140,4 +140,12 @@ body: | %x:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, $noreg, 7, -1, 4 /* e16 */, 0 /* tu, mu */, implicit $frm %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 ... 
- +--- +name: vwadd_tied_vs1 +body: | + bb.0: + ; CHECK-LABEL: name: vwadd_tied_vs1 + ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll new file mode 100644 index 0000000000000..50e26bd141070 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll @@ -0,0 +1,79 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+f,+v -verify-machineinstrs < %s | FileCheck %s + +define @test_reverse_load_combiner(* %ptr, i32 zeroext %evl) { +; CHECK-LABEL: test_reverse_load_combiner: +; CHECK: # %bb.0: +; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: addi a0, a0, -4 +; CHECK-NEXT: li a2, -4 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a2 +; CHECK-NEXT: ret + %load = call @llvm.vp.load.nxv2f32.p0nxv2f32(* %ptr, splat (i1 true), i32 %evl) + %rev = call @llvm.experimental.vp.reverse.nxv2f32( %load, splat (i1 true), i32 %evl) + ret %rev +} + +define @test_load_mask_is_vp_reverse(* %ptr, %mask, i32 zeroext %evl) { +; CHECK-LABEL: test_load_mask_is_vp_reverse: +; CHECK: # %bb.0: +; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: addi a0, a0, -4 +; CHECK-NEXT: li a2, -4 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t +; CHECK-NEXT: ret + %loadmask = call @llvm.experimental.vp.reverse.nxv2i1( %mask, splat (i1 true), i32 %evl) + %load = call @llvm.vp.load.nxv2f32.p0nxv2f32(* %ptr, %loadmask, i32 %evl) + %rev = call @llvm.experimental.vp.reverse.nxv2f32( 
%load, splat (i1 true), i32 %evl) + ret %rev +} + +define @test_load_mask_not_all_one(* %ptr, %notallones, i32 zeroext %evl) { +; CHECK-LABEL: test_load_mask_not_all_one: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v9, (a0), v0.t +; CHECK-NEXT: vid.v v8, v0.t +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: vrsub.vx v10, v8, a1, v0.t +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %load = call @llvm.vp.load.nxv2f32.p0nxv2f32(* %ptr, %notallones, i32 %evl) + %rev = call @llvm.experimental.vp.reverse.nxv2f32( %load, %notallones, i32 %evl) + ret %rev +} + +define @test_different_evl(* %ptr, %mask, i32 zeroext %evl1, i32 zeroext %evl2) { +; CHECK-LABEL: test_different_evl: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, a1, -1 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma +; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; CHECK-NEXT: vrsub.vx v8, v8, a3 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma +; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 +; CHECK-NEXT: vrgatherei16.vv v10, v9, v8 +; CHECK-NEXT: vmsne.vi v0, v10, 0 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v9, (a0), v0.t +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: vrsub.vx v10, v8, a2 +; CHECK-NEXT: vrgather.vv v8, v9, v10 +; CHECK-NEXT: ret + %loadmask = call @llvm.experimental.vp.reverse.nxv2i1( %mask, splat (i1 true), i32 %evl1) + %load = call @llvm.vp.load.nxv2f32.p0nxv2f32(* %ptr, %loadmask, i32 %evl2) + %rev = call @llvm.experimental.vp.reverse.nxv2f32( %load, splat (i1 true), i32 %evl2) + ret %rev +} + +declare @llvm.vp.load.nxv2f32.p0nxv2f32(* nocapture, , i32) +declare @llvm.experimental.vp.reverse.nxv2f32(, , i32) +declare @llvm.experimental.vp.reverse.nxv2i1(, , i32) diff --git a/llvm/test/CodeGen/RISCV/stack-clash-prologue.ll b/llvm/test/CodeGen/RISCV/stack-clash-prologue.ll index 
843e57a42d926..b1c0755c36ec1 100644 --- a/llvm/test/CodeGen/RISCV/stack-clash-prologue.ll +++ b/llvm/test/CodeGen/RISCV/stack-clash-prologue.ll @@ -606,4 +606,129 @@ define i32 @f10(i64 %i) local_unnamed_addr #0 { ret i32 %c } +define void @f11(i32 %vla_size, i64 %i) #0 { +; RV64I-LABEL: f11: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -2032 +; RV64I-NEXT: .cfi_def_cfa_offset 2032 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 2008(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: .cfi_offset s1, -24 +; RV64I-NEXT: addi s0, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa s0, 0 +; RV64I-NEXT: lui a2, 15 +; RV64I-NEXT: sub t1, sp, a2 +; RV64I-NEXT: lui t2, 1 +; RV64I-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, t2 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: bne sp, t1, .LBB11_1 +; RV64I-NEXT: # %bb.2: +; RV64I-NEXT: addi sp, sp, -2048 +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: srli a2, sp, 15 +; RV64I-NEXT: slli sp, a2, 15 +; RV64I-NEXT: mv s1, sp +; RV64I-NEXT: slli a1, a1, 2 +; RV64I-NEXT: lui a2, 8 +; RV64I-NEXT: add a2, s1, a2 +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: li a2, 1 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: sw a2, 0(a1) +; RV64I-NEXT: addi a0, a0, 15 +; RV64I-NEXT: andi a0, a0, -16 +; RV64I-NEXT: sub a0, sp, a0 +; RV64I-NEXT: andi a0, a0, -2048 +; RV64I-NEXT: lui a1, 1 +; RV64I-NEXT: .LBB11_3: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sub sp, sp, a1 +; RV64I-NEXT: sd zero, 0(sp) +; RV64I-NEXT: blt a0, sp, .LBB11_3 +; RV64I-NEXT: # %bb.4: +; RV64I-NEXT: mv sp, a0 +; RV64I-NEXT: lbu zero, 0(a0) +; RV64I-NEXT: addi sp, s0, -2032 +; RV64I-NEXT: .cfi_def_cfa sp, 2032 +; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; 
RV64I-NEXT: ld s1, 2008(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 +; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 +; RV64I-NEXT: ret +; +; RV32I-LABEL: f11: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -2032 +; RV32I-NEXT: .cfi_def_cfa_offset 2032 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 2020(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: .cfi_offset s1, -12 +; RV32I-NEXT: addi s0, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa s0, 0 +; RV32I-NEXT: lui a2, 15 +; RV32I-NEXT: sub t1, sp, a2 +; RV32I-NEXT: lui t2, 1 +; RV32I-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, t2 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: bne sp, t1, .LBB11_1 +; RV32I-NEXT: # %bb.2: +; RV32I-NEXT: addi sp, sp, -2048 +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: srli a2, sp, 15 +; RV32I-NEXT: slli sp, a2, 15 +; RV32I-NEXT: mv s1, sp +; RV32I-NEXT: slli a1, a1, 2 +; RV32I-NEXT: lui a2, 8 +; RV32I-NEXT: add a2, s1, a2 +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: li a2, 1 +; RV32I-NEXT: addi a0, a0, 15 +; RV32I-NEXT: andi a0, a0, -16 +; RV32I-NEXT: sw a2, 0(a1) +; RV32I-NEXT: sub a0, sp, a0 +; RV32I-NEXT: andi a0, a0, -2048 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: .LBB11_3: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sub sp, sp, a1 +; RV32I-NEXT: sw zero, 0(sp) +; RV32I-NEXT: blt a0, sp, .LBB11_3 +; RV32I-NEXT: # %bb.4: +; RV32I-NEXT: mv sp, a0 +; RV32I-NEXT: lbu zero, 0(a0) +; RV32I-NEXT: addi sp, s0, -2032 +; RV32I-NEXT: .cfi_def_cfa sp, 2032 +; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: 
.cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 +; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 +; RV32I-NEXT: ret + %a = alloca i32, i32 4096, align 32768 + %b = getelementptr inbounds i32, ptr %a, i64 %i + store volatile i32 1, ptr %b + %1 = zext i32 %vla_size to i64 + %vla = alloca i8, i64 %1, align 2048 + %2 = load volatile i8, ptr %vla, align 2048 + ret void +} + attributes #0 = { "probe-stack"="inline-asm" } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveSum.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveSum.ll new file mode 100644 index 0000000000000..739b7bb1d5bd4 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveActiveSum.ll @@ -0,0 +1,41 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %} + +; Test lowering to spir-v backend for various types and scalar/vector + +; CHECK-DAG: %[[#f16:]] = OpTypeFloat 16 +; CHECK-DAG: %[[#f32:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#v4_half:]] = OpTypeVector %[[#f16]] 4 +; CHECK-DAG: %[[#scope:]] = OpConstant %[[#uint]] 3 + +; CHECK-LABEL: Begin function test_float +; CHECK: %[[#fexpr:]] = OpFunctionParameter %[[#f32]] +define float @test_float(float %fexpr) { +entry: +; CHECK: %[[#fret:]] = OpGroupNonUniformFAdd %[[#f32]] %[[#scope]] Reduce %[[#fexpr]] + %0 = call float @llvm.spv.wave.reduce.sum.f32(float %fexpr) + ret float %0 +} + +; CHECK-LABEL: Begin function test_int +; CHECK: %[[#iexpr:]] = OpFunctionParameter %[[#uint]] +define i32 @test_int(i32 %iexpr) { +entry: +; CHECK: %[[#iret:]] = OpGroupNonUniformIAdd %[[#uint]] %[[#scope]] Reduce %[[#iexpr]] + %0 = call i32 @llvm.spv.wave.reduce.sum.i32(i32 %iexpr) + ret i32 %0 +} + +; CHECK-LABEL: Begin function test_vhalf +; CHECK: %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]] +define <4 x half> @test_vhalf(<4 x half> %vbexpr) { 
+entry: +; CHECK: %[[#vhalfret:]] = OpGroupNonUniformFAdd %[[#v4_half]] %[[#scope]] Reduce %[[#vbexpr]] + %0 = call <4 x half> @llvm.spv.wave.reduce.sum.v4half(<4 x half> %vbexpr) + ret <4 x half> %0 +} + +declare float @llvm.spv.wave.reduce.sum.f32(float) +declare i32 @llvm.spv.wave.reduce.sum.i32(i32) +declare <4 x half> @llvm.spv.wave.reduce.sum.v4half(<4 x half>) diff --git a/llvm/test/CodeGen/SPIRV/validate/sycl-tangle-group-algorithms.ll b/llvm/test/CodeGen/SPIRV/validate/sycl-tangle-group-algorithms.ll index a10341bce4859..e1dfecb8bca82 100644 --- a/llvm/test/CodeGen/SPIRV/validate/sycl-tangle-group-algorithms.ll +++ b/llvm/test/CodeGen/SPIRV/validate/sycl-tangle-group-algorithms.ll @@ -8,7 +8,7 @@ ; The only pass criterion is that spirv-val considers output valid. -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.5-unknown-unknown %s -o - -filetype=obj | spirv-val --target-env spv1.4 %} +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.5-unknown-unknown %s -o - -filetype=obj | spirv-val --target-env spv1.5 %} %"nd_item" = type { i8 } %struct.AssertHappened = type { i32, [257 x i8], [257 x i8], [129 x i8], i32, i64, i64, i64, i64, i64, i64 } diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll index 94a1792cb8985..87a948a4f1f7e 100644 --- a/llvm/test/CodeGen/X86/vector-compress.ll +++ b/llvm/test/CodeGen/X86/vector-compress.ll @@ -3,7 +3,7 @@ ; RUN: llc -mtriple=x86_64 -mattr=+avx512f < %s | FileCheck %s --check-prefixes=CHECK,AVX512F ; RUN: llc -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s --check-prefixes=CHECK,AVX512VL -define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { +define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) nounwind { ; AVX2-LABEL: test_compress_v4i32: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 @@ -64,7 +64,7 @@ define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x 
i1> %mask, <4 x i32> ret <4 x i32> %out } -define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { +define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) nounwind { ; AVX2-LABEL: test_compress_v4f32: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 @@ -129,7 +129,7 @@ define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x f ret <4 x float> %out } -define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { +define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) nounwind { ; AVX2-LABEL: test_compress_v2i64: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -181,7 +181,7 @@ define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> ret <2 x i64> %out } -define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { +define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) nounwind { ; AVX2-LABEL: test_compress_v2f64: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllq $63, %xmm1, %xmm1 @@ -236,18 +236,14 @@ define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x ret <2 x double> %out } -define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { +define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) nounwind { ; AVX2-LABEL: test_compress_v8i32: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .cfi_offset %rbp, -16 ; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .cfi_def_cfa_register %rbp ; AVX2-NEXT: pushq %rbx ; AVX2-NEXT: andq $-32, %rsp ; AVX2-NEXT: subq $64, %rsp -; AVX2-NEXT: .cfi_offset %rbx, -24 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX2-NEXT: 
vpslld $31, %ymm1, %ymm1 ; AVX2-NEXT: vpsrad $31, %ymm1, %ymm3 @@ -315,7 +311,6 @@ define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> ; AVX2-NEXT: leaq -8(%rbp), %rsp ; AVX2-NEXT: popq %rbx ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: test_compress_v8i32: @@ -340,14 +335,11 @@ define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> ret <8 x i32> %out } -define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) { +define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) nounwind { ; AVX2-LABEL: test_compress_v8f32: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .cfi_offset %rbp, -16 ; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .cfi_def_cfa_register %rbp ; AVX2-NEXT: andq $-32, %rsp ; AVX2-NEXT: subq $64, %rsp ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero @@ -424,7 +416,6 @@ define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x f ; AVX2-NEXT: vmovaps (%rsp), %ymm0 ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: test_compress_v8f32: @@ -449,14 +440,11 @@ define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x f ret <8 x float> %out } -define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) { +define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) nounwind { ; AVX2-LABEL: test_compress_v4i64: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: .cfi_def_cfa_offset 16 -; AVX2-NEXT: .cfi_offset %rbp, -16 ; AVX2-NEXT: movq %rsp, %rbp -; AVX2-NEXT: .cfi_def_cfa_register %rbp ; AVX2-NEXT: andq $-32, %rsp ; AVX2-NEXT: subq $64, %rsp ; AVX2-NEXT: vpslld $31, %xmm1, 
%xmm1 @@ -499,7 +487,6 @@ define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> ; AVX2-NEXT: vmovaps (%rsp), %ymm0 ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: test_compress_v4i64: @@ -525,7 +512,58 @@ define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> ret <4 x i64> %out } -define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) { +define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) nounwind { +; AVX2-LABEL: test_compress_v4f64: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm3 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vpsrlq $63, %ymm3, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpaddq %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vmovq %xmm1, %rcx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: andl $3, %ecx +; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vmovlpd %xmm0, (%rsp) +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: vmovhpd %xmm0, (%rsp,%rcx,8) +; AVX2-NEXT: vpextrq $1, %xmm3, %rcx +; AVX2-NEXT: subq %rcx, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovlpd %xmm0, (%rsp,%rcx,8) +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 +; AVX2-NEXT: vmovq %xmm2, %rcx +; AVX2-NEXT: subq %rcx, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $3, %ecx +; AVX2-NEXT: vmovhpd %xmm0, (%rsp,%rcx,8) +; AVX2-NEXT: vpextrq $1, %xmm2, %rcx +; AVX2-NEXT: subq %rcx, %rax +; AVX2-NEXT: cmpq $4, %rax +; AVX2-NEXT: jb .LBB7_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; 
AVX2-NEXT: .LBB7_2: +; AVX2-NEXT: cmpq $3, %rax +; AVX2-NEXT: movl $3, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vmovsd %xmm1, (%rsp,%rax,8) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v4f64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 @@ -549,7 +587,141 @@ define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x ret <4 x double> %out } -define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) { +define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) nounwind { +; AVX2-LABEL: test_compress_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $128, %rsp +; AVX2-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm3, (%rsp) +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero +; AVX2-NEXT: vpaddd %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $1, %xmm3, %eax +; AVX2-NEXT: vmovd %xmm3, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrd $2, %xmm3, %eax +; AVX2-NEXT: vpextrd $3, %xmm3, %edx +; AVX2-NEXT: 
addl %eax, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: andl $15, %edx +; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $1, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $2, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $3, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $4, %xmm2, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %rax, %r8 +; AVX2-NEXT: vpextrb $5, %xmm2, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r8, %r9 +; AVX2-NEXT: vpextrb $6, %xmm2, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r9, %r10 +; AVX2-NEXT: vpextrb $7, %xmm2, %r11d +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %r10, %r11 +; AVX2-NEXT: vpextrb $8, %xmm2, %ebx +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: addq %r11, %rbx +; AVX2-NEXT: vpextrb $9, %xmm2, %r14d +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: addq %rbx, %r14 +; AVX2-NEXT: vpextrb $10, %xmm2, %r15d +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: addq %r14, %r15 +; AVX2-NEXT: vpextrb $11, %xmm2, %r12d +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: addq %r15, %r12 +; AVX2-NEXT: vpextrb $12, %xmm2, %r13d +; AVX2-NEXT: andl $1, %r13d +; AVX2-NEXT: addq %r12, %r13 +; AVX2-NEXT: vpextrb $13, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %r13, %rcx +; AVX2-NEXT: vpextrb $14, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrb $15, %xmm2, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rax, %rdx +; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX2-NEXT: cmpq $16, %rdx +; 
AVX2-NEXT: vextractps $3, %xmm2, %esi +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload +; AVX2-NEXT: cmovbl (%rsp,%rdi,4), %esi +; AVX2-NEXT: movl %esi, %edi +; AVX2-NEXT: vmovss %xmm0, (%rsp) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: andl $15, %r8d +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%r8,4) +; AVX2-NEXT: andl $15, %r9d +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%r9,4) +; AVX2-NEXT: andl $15, %r10d +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%r10,4) +; AVX2-NEXT: andl $15, %r11d +; AVX2-NEXT: vmovss %xmm1, (%rsp,%r11,4) +; AVX2-NEXT: andl $15, %ebx +; AVX2-NEXT: vextractps $1, %xmm1, (%rsp,%rbx,4) +; AVX2-NEXT: andl $15, %r14d +; AVX2-NEXT: vextractps $2, %xmm1, (%rsp,%r14,4) +; AVX2-NEXT: andl $15, %r15d +; AVX2-NEXT: vextractps $3, %xmm1, (%rsp,%r15,4) +; AVX2-NEXT: andl $15, %r12d +; AVX2-NEXT: vmovss %xmm2, (%rsp,%r12,4) +; AVX2-NEXT: andl $15, %r13d +; AVX2-NEXT: vextractps $1, %xmm2, (%rsp,%r13,4) +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextractps $2, %xmm2, (%rsp,%rcx,4) +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $3, %xmm2, (%rsp,%rax,4) +; AVX2-NEXT: cmpq $15, %rdx +; AVX2-NEXT: movl $15, %eax +; AVX2-NEXT: cmovbq %rdx, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movl %edi, (%rsp,%rax,4) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; 
AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v16i32: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 @@ -570,7 +742,135 @@ define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x ret <16 x i32> %out } -define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) { +define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) nounwind { +; AVX2-LABEL: test_compress_v16f32: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm3, (%rsp) +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero +; AVX2-NEXT: vpaddd %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vpextrd $1, %xmm3, %eax +; AVX2-NEXT: vmovd %xmm3, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrd $2, %xmm3, %eax +; AVX2-NEXT: vpextrd $3, %xmm3, %edx +; AVX2-NEXT: addl %eax, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: andl $15, %edx +; AVX2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovss %xmm0, (%rsp) +; AVX2-NEXT: vmovd %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $1, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vextractps $2, 
%xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $2, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $3, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $4, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrb $5, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $6, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $7, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $8, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vmovss %xmm1, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $9, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $1, %xmm1, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $10, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextractps $2, %xmm1, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $11, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax 
killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $3, %xmm1, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $12, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $13, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrb $14, %xmm2, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrb $15, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rdx, %rax +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $15, %edx +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: cmpq $16, %rax +; AVX2-NEXT: jae .LBB9_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vmovaps %xmm3, %xmm0 +; AVX2-NEXT: .LBB9_2: +; AVX2-NEXT: cmpq $15, %rax +; AVX2-NEXT: movl $15, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v16f32: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 @@ -591,7 +891,79 @@ define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <1 ret <16 x float> %out } -define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) { +define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 
x i64> %passthru) nounwind { +; AVX2-LABEL: test_compress_v8i64: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm3, (%rsp) +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpaddq %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rcx +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: andl $7, %eax +; AVX2-NEXT: vpextrw $1, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %xmm2, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vpextrw $2, %xmm2, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rcx, %rsi +; AVX2-NEXT: vpextrw $3, %xmm2, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %rsi, %rdi +; AVX2-NEXT: vpextrw $4, %xmm2, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %rdi, %r8 +; AVX2-NEXT: vpextrw $5, %xmm2, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r8, %r9 +; AVX2-NEXT: vpextrw $6, %xmm2, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r9, %r10 +; AVX2-NEXT: vpextrw $7, %xmm2, %r11d +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %r10, %r11 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rbx +; AVX2-NEXT: cmpq $8, %r11 +; AVX2-NEXT: cmovbq (%rsp,%rax,8), %rbx +; AVX2-NEXT: vmovq %xmm0, (%rsp) +; AVX2-NEXT: vpextrq $1, %xmm0, (%rsp,%rdx,8) +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rsp,%rcx,8) +; 
AVX2-NEXT: vpextrq $1, %xmm0, (%rsp,%rsi,8) +; AVX2-NEXT: andl $7, %edi +; AVX2-NEXT: vmovq %xmm1, (%rsp,%rdi,8) +; AVX2-NEXT: andl $7, %r8d +; AVX2-NEXT: vpextrq $1, %xmm1, (%rsp,%r8,8) +; AVX2-NEXT: andl $7, %r9d +; AVX2-NEXT: vmovq %xmm2, (%rsp,%r9,8) +; AVX2-NEXT: andl $7, %r10d +; AVX2-NEXT: vpextrq $1, %xmm2, (%rsp,%r10,8) +; AVX2-NEXT: cmpq $7, %r11 +; AVX2-NEXT: movl $7, %eax +; AVX2-NEXT: cmovbq %r11, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movq %rbx, (%rsp,%rax,8) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: leaq -8(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v8i64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 @@ -612,7 +984,84 @@ define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> ret <8 x i64> %out } -define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) { +define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) nounwind { +; AVX2-LABEL: test_compress_v8f64: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm3, (%rsp) +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpaddq %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: vmovq %xmm3, %rcx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: andl $7, %ecx +; 
AVX2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero +; AVX2-NEXT: vmovlps %xmm0, (%rsp) +; AVX2-NEXT: vmovd %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovhps %xmm0, (%rsp,%rax,8) +; AVX2-NEXT: vpextrw $1, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovlps %xmm0, (%rsp,%rcx,8) +; AVX2-NEXT: vpextrw $2, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vmovhps %xmm0, (%rsp,%rax,8) +; AVX2-NEXT: vpextrw $3, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vpextrw $4, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $7, %ecx +; AVX2-NEXT: vmovlpd %xmm1, (%rsp,%rcx,8) +; AVX2-NEXT: vpextrw $5, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $7, %eax +; AVX2-NEXT: vmovhpd %xmm1, (%rsp,%rax,8) +; AVX2-NEXT: vpextrw $6, %xmm2, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $7, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0 +; AVX2-NEXT: vmovlpd %xmm0, (%rsp,%rcx,8) +; AVX2-NEXT: vpextrw $7, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rdx, %rax +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $7, %edx +; AVX2-NEXT: vmovhpd %xmm0, (%rsp,%rdx,8) +; AVX2-NEXT: cmpq $8, %rax +; AVX2-NEXT: jb .LBB11_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0] +; AVX2-NEXT: .LBB11_2: +; AVX2-NEXT: cmpq $7, %rax +; AVX2-NEXT: movl $7, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vmovsd %xmm3, (%rsp,%rax,8) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; 
AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v8f64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 @@ -633,43 +1082,257 @@ define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x ret <8 x double> %out } -define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) { -; AVX512F-LABEL: test_compress_v16i8: -; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 -; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512F-NEXT: vpcompressd %zmm0, %zmm1 {%k1} -; AVX512F-NEXT: vpmovdb %zmm1, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: test_compress_v16i8: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 -; AVX512VL-NEXT: vpcompressb %xmm0, %xmm2 {%k1} -; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512VL-NEXT: retq - %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) - ret <16 x i8> %out -} - -define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> 
%passthru) { -; AVX512F-LABEL: test_compress_v8i16: -; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 -; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 -; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero -; AVX512F-NEXT: vpcompressq %zmm0, %zmm1 {%k1} -; AVX512F-NEXT: vpmovqw %zmm1, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; +define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) nounwind { +; AVX2-LABEL: test_compress_v16i8: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $1, %xmm1, %r11d +; AVX2-NEXT: vmovd %xmm1, %eax +; AVX2-NEXT: movzbl %al, %edx +; AVX2-NEXT: # kill: def $al killed $al killed $eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: subb %r11b, %al +; AVX2-NEXT: vpextrb $2, %xmm1, %esi +; AVX2-NEXT: subb %sil, %al +; AVX2-NEXT: vpextrb $3, %xmm1, %r13d +; AVX2-NEXT: subb %r13b, %al +; AVX2-NEXT: vpextrb $4, %xmm1, %r12d +; AVX2-NEXT: subb %r12b, %al +; AVX2-NEXT: vpextrb $5, %xmm1, %r15d +; AVX2-NEXT: subb %r15b, %al +; AVX2-NEXT: vpextrb $6, %xmm1, %r14d +; AVX2-NEXT: subb %r14b, %al +; AVX2-NEXT: vpextrb $7, %xmm1, %ebp +; AVX2-NEXT: subb %bpl, %al +; AVX2-NEXT: vpextrb $8, %xmm1, %ebx +; 
AVX2-NEXT: subb %bl, %al +; AVX2-NEXT: vpextrb $9, %xmm1, %r10d +; AVX2-NEXT: subb %r10b, %al +; AVX2-NEXT: vpextrb $10, %xmm1, %r9d +; AVX2-NEXT: subb %r9b, %al +; AVX2-NEXT: vpextrb $11, %xmm1, %r8d +; AVX2-NEXT: subb %r8b, %al +; AVX2-NEXT: vpextrb $12, %xmm1, %edi +; AVX2-NEXT: subb %dil, %al +; AVX2-NEXT: vpextrb $13, %xmm1, %ecx +; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: subb %cl, %al +; AVX2-NEXT: vpextrb $14, %xmm1, %ecx +; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: subb %cl, %al +; AVX2-NEXT: vpextrb $15, %xmm1, %ecx +; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: subb %cl, %al +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: movzbl -40(%rsp,%rax), %eax +; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vpextrb $1, %xmm0, -40(%rsp,%rdx) +; AVX2-NEXT: movzbl %r11b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rdx, %rax +; AVX2-NEXT: vpextrb $2, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl %sil, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vpextrb $3, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl %r13b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrb $4, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl %r12b, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movzbl %r15b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $5, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl %r14b, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $6, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl %bpl, %eax +; 
AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $7, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl %bl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $8, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl %r10b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $9, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl %r9b, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $10, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl %r8b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $11, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl %dil, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $12, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $13, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $14, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload +; AVX2-NEXT: andl 
$1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $15, %xmm0, -40(%rsp,%rcx) +; AVX2-NEXT: cmpq $15, %rax +; AVX2-NEXT: movl $15, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: vpextrb $15, %xmm0, %eax +; AVX2-NEXT: cmovbel {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload +; AVX2-NEXT: movb %al, -40(%rsp,%rcx) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v16i8: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm1 {%k1} +; AVX512F-NEXT: vpmovdb %zmm1, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressb %xmm0, %xmm2 {%k1} +; 
AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq + %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) + ret <16 x i8> %out +} + +define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) nounwind { +; AVX2-LABEL: test_compress_v8i16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovd %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: leal (%rcx,%rax), %esi +; AVX2-NEXT: vpextrw $2, %xmm1, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: vpextrw $3, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: leal (%rdi,%rdx), %r10d +; AVX2-NEXT: addl %esi, %r10d +; AVX2-NEXT: vpextrw $4, %xmm1, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: vpextrw $5, %xmm1, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: leal (%r9,%rsi), %r11d +; AVX2-NEXT: vpextrw $6, %xmm1, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addl %r8d, %r11d +; AVX2-NEXT: addl %r10d, %r11d +; AVX2-NEXT: vpextrw $7, %xmm1, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addl %r10d, %r11d +; AVX2-NEXT: andl $7, %r11d +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: addq %rax, %rdi +; AVX2-NEXT: addq %rdi, %rdx +; AVX2-NEXT: addq %rdx, %r9 +; AVX2-NEXT: addq %r9, %rsi +; AVX2-NEXT: addq %rsi, %r8 +; AVX2-NEXT: addq %r8, %r10 +; AVX2-NEXT: vpextrw $7, %xmm0, %ebx +; AVX2-NEXT: cmpq $8, %r10 +; AVX2-NEXT: cmovbw -16(%rsp,%r11,2), %bx +; AVX2-NEXT: vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $1, %xmm0, -16(%rsp,%rcx,2) +; AVX2-NEXT: vpextrw $2, %xmm0, -16(%rsp,%rax,2) +; AVX2-NEXT: vpextrw $3, %xmm0, -16(%rsp,%rdi,2) +; AVX2-NEXT: andl $7, %edx +; AVX2-NEXT: vpextrw $4, %xmm0, -16(%rsp,%rdx,2) +; AVX2-NEXT: andl $7, %r9d +; AVX2-NEXT: vpextrw $5, %xmm0, -16(%rsp,%r9,2) +; AVX2-NEXT: andl $7, %esi +; 
AVX2-NEXT: vpextrw $6, %xmm0, -16(%rsp,%rsi,2) +; AVX2-NEXT: andl $7, %r8d +; AVX2-NEXT: vpextrw $7, %xmm0, -16(%rsp,%r8,2) +; AVX2-NEXT: cmpq $7, %r10 +; AVX2-NEXT: movl $7, %eax +; AVX2-NEXT: cmovbq %r10, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movw %bx, -16(%rsp,%rax,2) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v8i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero +; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX512F-NEXT: vpcompressq %zmm0, %zmm1 {%k1} +; AVX512F-NEXT: vpmovqw %zmm1, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; ; AVX512VL-LABEL: test_compress_v8i16: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 @@ -681,14 +1344,252 @@ define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> ret <8 x i16> %out } -define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) { +define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) nounwind { +; AVX2-LABEL: test_compress_v32i8: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm3 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vextracti128 
$1, %ymm3, %xmm1 +; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm4 +; AVX2-NEXT: vpand %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpaddb %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpextrb $1, %xmm2, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $2, %xmm2, %eax +; AVX2-NEXT: vpextrb $3, %xmm2, %edx +; AVX2-NEXT: addb %al, %dl +; AVX2-NEXT: addb %cl, %dl +; AVX2-NEXT: vpextrb $4, %xmm2, %eax +; AVX2-NEXT: vpextrb $5, %xmm2, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $6, %xmm2, %eax +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: addb %dl, %al +; AVX2-NEXT: vpextrb $7, %xmm2, %ecx +; AVX2-NEXT: vpextrb $8, %xmm2, %edx +; AVX2-NEXT: addb %cl, %dl +; AVX2-NEXT: vpextrb $9, %xmm2, %ecx +; AVX2-NEXT: addb %dl, %cl +; AVX2-NEXT: vpextrb $10, %xmm2, %edx +; AVX2-NEXT: addb %cl, %dl +; AVX2-NEXT: addb %al, %dl +; AVX2-NEXT: vpextrb $11, %xmm2, %eax +; AVX2-NEXT: vpextrb $12, %xmm2, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $13, %xmm2, %eax +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: vpextrb $14, %xmm2, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $15, %xmm2, %eax +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: addb %dl, %al +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $31, %eax +; AVX2-NEXT: movzbl (%rsp,%rax), %eax +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp) +; AVX2-NEXT: vmovd %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $1, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $2, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $3, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $4, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq 
%rdx, %rcx +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $5, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: vpextrb $6, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $7, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $8, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $9, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $10, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $11, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $12, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $13, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: 
vpextrb $13, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $14, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $15, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vmovd %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $1, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $2, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $3, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $4, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $5, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $6, %xmm1, %ecx +; AVX2-NEXT: 
andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $7, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $8, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $9, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $10, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $11, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $12, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $13, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: vpextrb $14, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; 
AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rdx) +; AVX2-NEXT: vpextrb $15, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: cmpq $31, %rdx +; AVX2-NEXT: movl $31, %ecx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: vpextrb $15, %xmm0, %edx +; AVX2-NEXT: cmovbel %eax, %edx +; AVX2-NEXT: movb %dl, (%rsp,%rcx) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v32i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: pushq %rbp -; AVX512F-NEXT: .cfi_def_cfa_offset 16 -; AVX512F-NEXT: .cfi_offset %rbp, -16 ; AVX512F-NEXT: movq %rsp, %rbp -; AVX512F-NEXT: .cfi_def_cfa_register %rbp ; AVX512F-NEXT: andq $-32, %rsp ; AVX512F-NEXT: subq $64, %rsp ; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3 @@ -719,7 +1620,6 @@ define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> ; AVX512F-NEXT: vpblendvb %ymm0, (%rsp), %ymm2, %ymm0 ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: test_compress_v32i8: @@ -733,7 +1633,147 @@ define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> ret <32 x i8> %out } -define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) { +define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) nounwind { +; AVX2-LABEL: test_compress_v16i16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = 
xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX2-NEXT: vpsllw $15, %ymm1, %ymm3 +; AVX2-NEXT: vpsraw $15, %ymm3, %ymm1 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vpsrlw $15, %ymm3, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpaddw %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpextrw $1, %xmm2, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrw $2, %xmm2, %eax +; AVX2-NEXT: vpextrw $3, %xmm2, %edx +; AVX2-NEXT: addl %eax, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: vpextrw $4, %xmm2, %eax +; AVX2-NEXT: vpextrw $5, %xmm2, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrw $6, %xmm2, %eax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: addl %edx, %eax +; AVX2-NEXT: vpextrw $7, %xmm2, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrw $1, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovd %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrw $2, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrw $3, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrw $4, %xmm1, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %rax, %r8 +; AVX2-NEXT: vpextrw $5, %xmm1, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r8, %r9 +; AVX2-NEXT: vpextrw $6, %xmm1, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r9, %r10 +; AVX2-NEXT: vpextrw $7, %xmm1, %r11d +; 
AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %r10, %r11 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovd %xmm1, %ebx +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: addq %r11, %rbx +; AVX2-NEXT: vpextrw $1, %xmm1, %r14d +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: addq %rbx, %r14 +; AVX2-NEXT: vpextrw $2, %xmm1, %r15d +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: addq %r14, %r15 +; AVX2-NEXT: vpextrw $3, %xmm1, %r12d +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: addq %r15, %r12 +; AVX2-NEXT: vpextrw $4, %xmm1, %r13d +; AVX2-NEXT: andl $1, %r13d +; AVX2-NEXT: addq %r12, %r13 +; AVX2-NEXT: vpextrw $5, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %r13, %rdx +; AVX2-NEXT: vpextrw $6, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vpextrw $7, %xmm1, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %rcx, %rdi +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: cmpq $16, %rdi +; AVX2-NEXT: vpextrw $7, %xmm1, %eax +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: cmovbw (%rsp,%rsi,2), %ax +; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: vpextrw $0, %xmm0, (%rsp) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vpextrw $1, %xmm0, (%rsp,%rsi,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vpextrw $2, %xmm0, (%rsp,%rsi,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: vpextrw $3, %xmm0, (%rsp,%rsi,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: vpextrw $4, %xmm0, (%rsp,%rax,2) +; AVX2-NEXT: andl $15, %r8d +; AVX2-NEXT: vpextrw $5, %xmm0, (%rsp,%r8,2) +; AVX2-NEXT: andl $15, %r9d +; AVX2-NEXT: vpextrw $6, %xmm0, (%rsp,%r9,2) +; AVX2-NEXT: andl $15, %r10d +; AVX2-NEXT: vpextrw $7, %xmm0, (%rsp,%r10,2) +; AVX2-NEXT: andl $15, %r11d +; AVX2-NEXT: vpextrw $0, %xmm1, (%rsp,%r11,2) +; AVX2-NEXT: andl $15, %ebx +; AVX2-NEXT: vpextrw 
$1, %xmm1, (%rsp,%rbx,2) +; AVX2-NEXT: andl $15, %r14d +; AVX2-NEXT: vpextrw $2, %xmm1, (%rsp,%r14,2) +; AVX2-NEXT: andl $15, %r15d +; AVX2-NEXT: vpextrw $3, %xmm1, (%rsp,%r15,2) +; AVX2-NEXT: andl $15, %r12d +; AVX2-NEXT: vpextrw $4, %xmm1, (%rsp,%r12,2) +; AVX2-NEXT: andl $15, %r13d +; AVX2-NEXT: vpextrw $5, %xmm1, (%rsp,%r13,2) +; AVX2-NEXT: andl $15, %edx +; AVX2-NEXT: vpextrw $6, %xmm1, (%rsp,%rdx,2) +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrw $7, %xmm1, (%rsp,%rcx,2) +; AVX2-NEXT: cmpq $15, %rdi +; AVX2-NEXT: movl $15, %eax +; AVX2-NEXT: cmovbq %rdi, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; AVX2-NEXT: movw %cx, (%rsp,%rax,2) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_v16i16: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 @@ -745,97 +1785,2541 @@ define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x ; AVX512F-NEXT: vpmovdw %zmm1, %ymm0 ; AVX512F-NEXT: retq ; -; AVX512VL-LABEL: test_compress_v16i16: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 -; AVX512VL-NEXT: vpcompressw %ymm0, %ymm2 {%k1} -; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512VL-NEXT: retq - %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) - ret <16 x i16> %out -} - -define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) { -; AVX512VL-LABEL: test_compress_v64i8: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsllw $7, %zmm1, %zmm1 -; AVX512VL-NEXT: vpmovb2m %zmm1, %k1 -; AVX512VL-NEXT: vpcompressb %zmm0, %zmm2 {%k1} -; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512VL-NEXT: retq - %out = call <64 x i8> 
@llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) - ret <64 x i8> %out -} - -define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) { -; AVX512F-LABEL: test_compress_v32i16: +; AVX512VL-LABEL: test_compress_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressw %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq + %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) + ret <16 x i16> %out +} + +define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) nounwind { +; AVX2-LABEL: test_compress_v64i8: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $128, %rsp +; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9 +; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8 +; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movl %ecx, %r13d +; AVX2-NEXT: movl %edx, %r15d +; AVX2-NEXT: movl %esi, %ebx +; AVX2-NEXT: # kill: def $edi killed $edi def $rdi +; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movl 360(%rbp), %eax +; AVX2-NEXT: movl 352(%rbp), %ecx +; AVX2-NEXT: vmovd %ecx, %xmm4 +; AVX2-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 368(%rbp), %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 376(%rbp), %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 384(%rbp), %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 392(%rbp), %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 400(%rbp), %eax +; 
AVX2-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 408(%rbp), %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 416(%rbp), %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 424(%rbp), %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 432(%rbp), %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 440(%rbp), %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 448(%rbp), %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 456(%rbp), %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 464(%rbp), %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 472(%rbp), %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4 +; AVX2-NEXT: movl 224(%rbp), %eax +; AVX2-NEXT: vmovd %eax, %xmm5 +; AVX2-NEXT: movl 232(%rbp), %eax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 240(%rbp), %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 248(%rbp), %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 256(%rbp), %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 264(%rbp), %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 272(%rbp), %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 280(%rbp), %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 288(%rbp), %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 296(%rbp), %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 304(%rbp), %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 312(%rbp), %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 320(%rbp), %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 328(%rbp), %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 336(%rbp), %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 344(%rbp), %eax +; 
AVX2-NEXT: vpinsrb $15, %eax, %xmm5, %xmm5 +; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 +; AVX2-NEXT: movl 96(%rbp), %eax +; AVX2-NEXT: vmovd %eax, %xmm5 +; AVX2-NEXT: movl 104(%rbp), %eax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 112(%rbp), %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 120(%rbp), %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 128(%rbp), %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 136(%rbp), %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 144(%rbp), %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 152(%rbp), %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 160(%rbp), %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 168(%rbp), %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 176(%rbp), %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 184(%rbp), %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 192(%rbp), %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 200(%rbp), %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 208(%rbp), %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5 +; AVX2-NEXT: movl 216(%rbp), %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm5, %xmm5 +; AVX2-NEXT: vmovd %edi, %xmm6 +; AVX2-NEXT: vpinsrb $1, %esi, %xmm6, %xmm6 +; AVX2-NEXT: vpinsrb $2, %edx, %xmm6, %xmm6 +; AVX2-NEXT: vpinsrb $3, %r13d, %xmm6, %xmm6 +; AVX2-NEXT: vpinsrb $4, %r8d, %xmm6, %xmm6 +; AVX2-NEXT: vpinsrb $5, %r9d, %xmm6, %xmm6 +; AVX2-NEXT: movl 16(%rbp), %esi +; AVX2-NEXT: vpinsrb $6, %esi, %xmm6, %xmm6 +; AVX2-NEXT: movl 24(%rbp), %edi +; AVX2-NEXT: vpinsrb $7, %edi, %xmm6, %xmm6 +; AVX2-NEXT: movl 32(%rbp), %r8d +; AVX2-NEXT: vpinsrb $8, %r8d, %xmm6, %xmm6 +; AVX2-NEXT: movl 40(%rbp), %r9d +; AVX2-NEXT: vpinsrb $9, %r9d, %xmm6, %xmm6 +; AVX2-NEXT: movl 48(%rbp), %r10d +; AVX2-NEXT: vpinsrb 
$10, %r10d, %xmm6, %xmm6 +; AVX2-NEXT: movl 56(%rbp), %r11d +; AVX2-NEXT: vpinsrb $11, %r11d, %xmm6, %xmm6 +; AVX2-NEXT: movl 64(%rbp), %r14d +; AVX2-NEXT: vpinsrb $12, %r14d, %xmm6, %xmm6 +; AVX2-NEXT: movl 72(%rbp), %r12d +; AVX2-NEXT: vpinsrb $13, %r12d, %xmm6, %xmm6 +; AVX2-NEXT: movl 80(%rbp), %eax +; AVX2-NEXT: vpinsrb $14, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movl 88(%rbp), %eax +; AVX2-NEXT: vpinsrb $15, %eax, %xmm6, %xmm6 +; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm5 +; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %ymm6, %ymm5, %ymm5 +; AVX2-NEXT: vpand %ymm6, %ymm4, %ymm4 +; AVX2-NEXT: vpaddb %ymm4, %ymm5, %ymm4 +; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm5 +; AVX2-NEXT: vpaddb %xmm5, %xmm4, %xmm4 +; AVX2-NEXT: vpextrb $1, %xmm4, %eax +; AVX2-NEXT: vmovd %xmm4, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $2, %xmm4, %edx +; AVX2-NEXT: vpextrb $3, %xmm4, %eax +; AVX2-NEXT: addb %dl, %al +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: vpextrb $4, %xmm4, %ecx +; AVX2-NEXT: vpextrb $5, %xmm4, %edx +; AVX2-NEXT: addb %cl, %dl +; AVX2-NEXT: vpextrb $6, %xmm4, %ecx +; AVX2-NEXT: addb %dl, %cl +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $7, %xmm4, %eax +; AVX2-NEXT: vpextrb $8, %xmm4, %edx +; AVX2-NEXT: addb %al, %dl +; AVX2-NEXT: vpextrb $9, %xmm4, %eax +; AVX2-NEXT: addb %dl, %al +; AVX2-NEXT: vpextrb $10, %xmm4, %edx +; AVX2-NEXT: addb %al, %dl +; AVX2-NEXT: addb %cl, %dl +; AVX2-NEXT: vpextrb $11, %xmm4, %eax +; AVX2-NEXT: vpextrb $12, %xmm4, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $13, %xmm4, %eax +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: vpextrb $14, %xmm4, %ecx +; AVX2-NEXT: addb %al, %cl +; AVX2-NEXT: vpextrb $15, %xmm4, %eax +; AVX2-NEXT: addb %cl, %al +; AVX2-NEXT: addb %dl, %al +; AVX2-NEXT: vmovaps %ymm3, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: movzbl (%rsp,%rax), 
%eax +; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: addq %rax, %rbx +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rbx) +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: addq %rbx, %r15 +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%r15) +; AVX2-NEXT: andl $1, %r13d +; AVX2-NEXT: addq %r15, %r13 +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%r13) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %r13, %rcx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rax, %rsi +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %rsi, %rdi +; AVX2-NEXT: # kill: def $esi killed $esi killed $rsi def $rsi +; AVX2-NEXT: andl $63, %esi +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rsi) +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %rdi, %r8 +; AVX2-NEXT: # kill: def $edi killed $edi killed $rdi def $rdi +; AVX2-NEXT: andl $63, %edi +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rdi) +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r8, %r9 +; AVX2-NEXT: # kill: def $r8d killed $r8d killed $r8 def $r8 +; AVX2-NEXT: andl $63, %r8d +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%r8) +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r9, %r10 +; AVX2-NEXT: # kill: def $r9d killed $r9d killed $r9 def $r9 +; AVX2-NEXT: andl $63, %r9d +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%r9) +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %r10, %r11 +; AVX2-NEXT: # kill: def $r10d killed $r10d killed $r10 def $r10 +; AVX2-NEXT: andl 
$63, %r10d +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%r10) +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: addq %r11, %r14 +; AVX2-NEXT: # kill: def $r11d killed $r11d killed $r11 def $r11 +; AVX2-NEXT: andl $63, %r11d +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%r11) +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: addq %r14, %r12 +; AVX2-NEXT: # kill: def $r14d killed $r14d killed $r14 def $r14 +; AVX2-NEXT: andl $63, %r14d +; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%r14) +; AVX2-NEXT: movl 80(%rbp), %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %r12, %rax +; AVX2-NEXT: # kill: def $r12d killed $r12d killed $r12 def $r12 +; AVX2-NEXT: andl $63, %r12d +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%r12) +; AVX2-NEXT: movl 88(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 96(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 104(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 112(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 120(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 128(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 136(%rbp), %ecx +; 
AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 144(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 152(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 160(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 168(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 176(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 184(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 192(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 200(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 208(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 216(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx 
+; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 224(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $0, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 232(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $1, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 240(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $2, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 248(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $3, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 256(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $4, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 264(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $5, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 272(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $6, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 280(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $7, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 288(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $8, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 296(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, 
%rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $9, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 304(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $10, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 312(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $11, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 320(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $12, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 328(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $13, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 336(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $14, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 344(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $15, %xmm1, (%rsp,%rax) +; AVX2-NEXT: movl 352(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0 +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 360(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 368(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 376(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; 
AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 384(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 392(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 400(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 408(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 416(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 424(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 432(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 440(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 448(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 456(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx 
+; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 464(%rbp), %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movl 472(%rbp), %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) +; AVX2-NEXT: vpextrb $15, %xmm0, %eax +; AVX2-NEXT: cmpq $64, %rcx +; AVX2-NEXT: cmovbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload +; AVX2-NEXT: cmpq $63, %rcx +; AVX2-NEXT: movq %rcx, %rdx +; AVX2-NEXT: movl $63, %ecx +; AVX2-NEXT: cmovbq %rdx, %rcx +; AVX2-NEXT: movb %al, (%rsp,%rcx) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v64i8: +; AVX512F: # %bb.0: +; AVX512F-NEXT: pushq %rbp +; AVX512F-NEXT: movq %rsp, %rbp +; AVX512F-NEXT: andq $-64, %rsp +; AVX512F-NEXT: subq $256, %rsp # imm = 0x100 +; AVX512F-NEXT: movzbl 352(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: movzbl 360(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-5, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k1, %k3 +; AVX512F-NEXT: movzbl 368(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-9, %ax +; AVX512F-NEXT: kmovw %eax, %k7 +; AVX512F-NEXT: kandw %k7, %k0, %k0 
+; AVX512F-NEXT: kmovw %k7, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 376(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $12, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-17, %ax +; AVX512F-NEXT: kmovw %eax, %k5 +; AVX512F-NEXT: kandw %k5, %k0, %k0 +; AVX512F-NEXT: movzbl 384(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $11, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-33, %ax +; AVX512F-NEXT: kmovw %eax, %k6 +; AVX512F-NEXT: kandw %k6, %k0, %k0 +; AVX512F-NEXT: movzbl 392(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $10, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-65, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 400(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-129, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 408(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $8, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-257, %ax # imm = 0xFEFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 416(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $7, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-513, %ax # imm = 0xFDFF +; AVX512F-NEXT: 
kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 424(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-1025, %ax # imm = 0xFBFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 432(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $5, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-2049, %ax # imm = 0xF7FF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 440(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $4, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-4097, %ax # imm = 0xEFFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 448(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $3, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-8193, %ax # imm = 0xDFFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 456(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $2, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k1 +; AVX512F-NEXT: movw $-16385, %ax # imm = 0xBFFF +; AVX512F-NEXT: kmovw %eax, %k4 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: kmovw %k4, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; 
AVX512F-NEXT: movzbl 464(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, %k1 +; AVX512F-NEXT: movzbl 472(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 224(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: movzbl 232(%rbp), %r10d +; AVX512F-NEXT: kmovw %r10d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw %k3, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 240(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 248(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: movzbl 256(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: movzbl 264(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 272(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, 
%k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 280(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $8, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k0, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 288(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k0, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 296(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $6, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 304(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $5, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 312(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $4, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; 
AVX512F-NEXT: movzbl 320(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $3, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 328(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $2, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: movzbl 336(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, %k1 +; AVX512F-NEXT: movzbl 344(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 96(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: movzbl 104(%rbp), %r10d +; AVX512F-NEXT: kmovw %r10d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 112(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: movzbl 120(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw 
$12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k5, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: movzbl 128(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: movzbl 136(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 144(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 152(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $8, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 160(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 168(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $6, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 176(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $5, %k2, 
%k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 184(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $4, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 192(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $3, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 200(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $2, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 208(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, %k1 +; AVX512F-NEXT: movzbl 216(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: andl $1, %edi +; AVX512F-NEXT: kmovw %esi, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %edi, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %edx, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: kmovw %ecx, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: 
kshiftrw $12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: kmovw %r8d, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: kmovw %r9d, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 16(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kandw %k1, %k2, %k1 +; AVX512F-NEXT: movzbl 24(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $8, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 32(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 40(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $6, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 48(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k5 +; AVX512F-NEXT: kshiftrw $5, %k5, %k2 +; 
AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 56(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k4 +; AVX512F-NEXT: kshiftrw $4, %k4, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 64(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k3 +; AVX512F-NEXT: kshiftrw $3, %k3, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 72(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $2, %k2, %k0 +; AVX512F-NEXT: korw %k0, %k1, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 80(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $14, %k1, %k7 +; AVX512F-NEXT: korw %k7, %k0, %k0 +; AVX512F-NEXT: kshiftlw $1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k7 +; AVX512F-NEXT: movzbl 88(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: kshiftlw $15, %k0, %k6 +; AVX512F-NEXT: korw %k6, %k7, %k6 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movw $-3, %ax +; AVX512F-NEXT: kmovw %eax, %k6 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k6, %k7, %k6 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $14, %k7, %k7 +; AVX512F-NEXT: korw %k7, %k6, %k6 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k6, %k6 +; AVX512F-NEXT: kshiftrw $13, %k5, %k5 +; AVX512F-NEXT: korw %k5, %k6, %k5 +; AVX512F-NEXT: kmovw 
{{[-0-9]+}}(%r{{[sb]}}p), %k6 # 2-byte Reload +; AVX512F-NEXT: kandw %k6, %k5, %k5 +; AVX512F-NEXT: kshiftrw $12, %k4, %k4 +; AVX512F-NEXT: korw %k4, %k5, %k4 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k5 # 2-byte Reload +; AVX512F-NEXT: kandw %k5, %k4, %k4 +; AVX512F-NEXT: kshiftrw $11, %k3, %k3 +; AVX512F-NEXT: korw %k3, %k4, %k3 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload +; AVX512F-NEXT: kandw %k4, %k3, %k3 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k3, %k2 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload +; AVX512F-NEXT: kandw %k3, %k2, %k2 +; AVX512F-NEXT: kshiftlw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kshiftlw $7, %k0, %k0 +; AVX512F-NEXT: korw %k0, %k1, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kxorw %k0, %k1, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k7, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k6, %k0, %k0 +; 
AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $12, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k5, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $11, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k4, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $10, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k3, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftlw $9, %k0, %k0 +; AVX512F-NEXT: kshiftrw $9, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kxorw %k0, %k1, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %ecx +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload +; AVX512F-NEXT: kxorw %k2, %k3, %k0 +; AVX512F-NEXT: kshiftrw $8, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw 
%k0, %edx +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm3, %zmm3 {%k1} {z} +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm4 {%k1} {z} = -1 +; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm2, %zmm2 {%k1} {z} +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm5 {%k1} {z} = -1 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm6, %zmm6 {%k3} {z} +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm7 {%k3} {z} = -1 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = 
xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k2} {z} +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm8 {%k2} {z} = -1 +; AVX512F-NEXT: vpmovdb %zmm6, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: andl $31, %eax +; AVX512F-NEXT: vpmovdb %zmm0, 64(%rsp,%rax) +; AVX512F-NEXT: vpmovdb %zmm3, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: andl $31, %ecx +; AVX512F-NEXT: vpmovdb %zmm2, 96(%rsp,%rcx) +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm0 +; AVX512F-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: andl $63, %edx +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm0 +; AVX512F-NEXT: vmovaps %ymm0, 128(%rsp,%rdx) +; AVX512F-NEXT: vpmovdb %zmm4, %xmm0 +; AVX512F-NEXT: vpmovdb %zmm5, %xmm2 +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; AVX512F-NEXT: vpblendvb %ymm0, {{[0-9]+}}(%rsp), %ymm2, %ymm0 +; AVX512F-NEXT: vpmovdb %zmm7, %xmm2 +; AVX512F-NEXT: vpmovdb %zmm8, %xmm3 +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpblendvb %ymm2, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512F-NEXT: movq %rbp, %rsp +; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v64i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %zmm1, %zmm1 +; AVX512VL-NEXT: vpmovb2m %zmm1, %k1 +; AVX512VL-NEXT: vpcompressb %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq + %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) + ret <64 x i8> %out +} + +define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> 
%mask, <32 x i16> %passthru) nounwind { +; AVX2-LABEL: test_compress_v32i16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $256, %rsp # imm = 0x100 +; AVX2-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps %ymm3, (%rsp) +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX2-NEXT: vpand %ymm5, %ymm6, %ymm5 +; AVX2-NEXT: vpaddw %ymm4, %ymm5, %ymm4 +; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm5 +; AVX2-NEXT: vpaddw %xmm5, %xmm4, %xmm4 +; AVX2-NEXT: vpextrw $1, %xmm4, %eax +; AVX2-NEXT: vmovd %xmm4, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrw $2, %xmm4, %eax +; AVX2-NEXT: vpextrw $3, %xmm4, %edx +; AVX2-NEXT: addl %eax, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: vpextrw $4, %xmm4, %eax +; AVX2-NEXT: vpextrw $5, %xmm4, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrw $6, %xmm4, %eax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: addl %edx, %eax +; AVX2-NEXT: vpextrw $7, %xmm4, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $1, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; 
AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $2, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $3, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $4, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $5, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $6, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $7, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $8, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $9, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $10, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $11, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $12, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $13, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax 
+; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $14, %xmm2, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $15, %xmm2, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vmovd %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $1, %xmm3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $2, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vpextrb $3, %xmm3, %r12d +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: addq %rcx, %r12 +; AVX2-NEXT: vpextrb $4, %xmm3, %r15d +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: addq %r12, %r15 +; AVX2-NEXT: vpextrb $5, %xmm3, %r14d +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: addq %r15, %r14 +; AVX2-NEXT: vpextrb $6, %xmm3, %ebx +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: addq %r14, %rbx +; AVX2-NEXT: vpextrb $7, %xmm3, %r11d +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %rbx, %r11 +; AVX2-NEXT: vpextrb $8, %xmm3, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r11, %r10 +; AVX2-NEXT: vpextrb $9, %xmm3, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r10, %r9 +; AVX2-NEXT: vpextrb $10, %xmm3, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %r9, %r8 +; AVX2-NEXT: vpextrb $11, %xmm3, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %r8, %rdi +; AVX2-NEXT: vpextrb $12, %xmm3, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rdi, %rsi +; AVX2-NEXT: vpextrb $13, %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rsi, %rdx +; AVX2-NEXT: vpextrb $14, %xmm3, 
%ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vpextrb $15, %xmm3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: cmpq $32, %rax +; AVX2-NEXT: vpextrw $7, %xmm2, %eax +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: cmovbw (%rsp,%r13,2), %ax +; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: vpextrw $0, %xmm0, (%rsp) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: vpextrw $1, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: vpextrw $2, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: vpextrw $3, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: vpextrw $4, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: vpextrw $5, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $6, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $7, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpextrw $0, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $1, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $2, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $3, %xmm0, 
(%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $4, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $5, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $6, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $7, %xmm0, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $0, %xmm1, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $1, %xmm1, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload +; AVX2-NEXT: andl $31, %r13d +; AVX2-NEXT: vpextrw $2, %xmm1, (%rsp,%r13,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; AVX2-NEXT: andl $31, %eax +; AVX2-NEXT: vpextrw $3, %xmm1, (%rsp,%rax,2) +; AVX2-NEXT: andl $31, %r12d +; AVX2-NEXT: vpextrw $4, %xmm1, (%rsp,%r12,2) +; AVX2-NEXT: andl $31, %r15d +; AVX2-NEXT: vpextrw $5, %xmm1, (%rsp,%r15,2) +; AVX2-NEXT: andl $31, %r14d +; AVX2-NEXT: vpextrw $6, %xmm1, (%rsp,%r14,2) +; AVX2-NEXT: andl $31, %ebx +; AVX2-NEXT: vpextrw $7, %xmm1, (%rsp,%rbx,2) +; AVX2-NEXT: andl $31, %r11d +; AVX2-NEXT: vpextrw $0, %xmm2, (%rsp,%r11,2) +; AVX2-NEXT: andl $31, %r10d +; AVX2-NEXT: vpextrw $1, %xmm2, (%rsp,%r10,2) +; AVX2-NEXT: andl $31, %r9d +; AVX2-NEXT: vpextrw $2, %xmm2, (%rsp,%r9,2) +; AVX2-NEXT: andl $31, %r8d +; AVX2-NEXT: vpextrw $3, %xmm2, (%rsp,%r8,2) +; AVX2-NEXT: andl $31, %edi +; AVX2-NEXT: vpextrw $4, %xmm2, (%rsp,%rdi,2) +; AVX2-NEXT: andl $31, %esi +; AVX2-NEXT: vpextrw $5, %xmm2, (%rsp,%rsi,2) +; AVX2-NEXT: andl $31, %edx +; AVX2-NEXT: vpextrw $6, %xmm2, (%rsp,%rdx,2) +; 
AVX2-NEXT: andl $31, %ecx +; AVX2-NEXT: vpextrw $7, %xmm2, (%rsp,%rcx,2) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload +; AVX2-NEXT: cmpq $31, %rcx +; AVX2-NEXT: movl $31, %eax +; AVX2-NEXT: cmovbq %rcx, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; AVX2-NEXT: movw %cx, (%rsp,%rax,2) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: leaq -40(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v32i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: pushq %rbp +; AVX512F-NEXT: movq %rsp, %rbp +; AVX512F-NEXT: andq $-64, %rsp +; AVX512F-NEXT: subq $128, %rsp +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm5 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero +; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5 +; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5 +; AVX512F-NEXT: vptestmd %zmm5, %zmm5, %k1 +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k2 +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512F-NEXT: vpcompressd %zmm1, %zmm1 {%k2} {z} +; AVX512F-NEXT: vpmovdw %zmm1, (%rsp) +; 
AVX512F-NEXT: kshiftrw $8, %k2, %k0 +; AVX512F-NEXT: kxorw %k0, %k2, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: andl $31, %eax +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: vpmovdw %zmm0, (%rsp,%rax,2) +; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 +; AVX512F-NEXT: vpsllw $15, %ymm4, %ymm1 +; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 +; AVX512F-NEXT: vpblendvb %ymm1, {{[0-9]+}}(%rsp), %ymm0, %ymm0 +; AVX512F-NEXT: vpsllw $15, %ymm3, %ymm1 +; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 +; AVX512F-NEXT: vpblendvb %ymm1, (%rsp), %ymm2, %ymm1 +; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512F-NEXT: movq %rbp, %rsp +; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v32i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512VL-NEXT: vpmovb2m %ymm1, %k1 +; AVX512VL-NEXT: vpcompressw %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq + %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) + ret <32 x i16> %out +} + +define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) nounwind { +; AVX2-LABEL: test_compress_large: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $288, %rsp # imm = 0x120 +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: 
# kill: def $edx killed $edx def $rdx +; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9 +; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8 +; AVX2-NEXT: # kill: def $esi killed $esi def $rsi +; AVX2-NEXT: movq %rdi, %rax +; AVX2-NEXT: vmovss %xmm0, (%rsp) +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %esi, %edx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addl %ecx, %r8d +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%r8,4) +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addl %r8d, %r9d +; AVX2-NEXT: movzbl 16(%rbp), %ecx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%r9,4) +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %r9d, %ecx +; AVX2-NEXT: movzbl 24(%rbp), %edx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: movzbl 32(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 40(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm1, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 48(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm1, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 56(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: 
# kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm1, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 64(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm1, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 72(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 80(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 88(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 96(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 104(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm2, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 112(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm2, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 120(%rbp), %edx +; AVX2-NEXT: movzbl 
%dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm2, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 128(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm2, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 136(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm2, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 144(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 152(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 160(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 168(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm3, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 176(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: 
vextractps $1, %xmm3, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 184(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm3, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 192(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm3, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 200(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm3, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 208(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 216(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 224(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 232(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm4, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 240(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; 
AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm4, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 248(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm4, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 256(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm4, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 264(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm4, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 272(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 280(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 288(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 296(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm5, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 304(%rbp), %ecx +; 
AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm5, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 312(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm5, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 320(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm5, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 328(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm5, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 336(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 344(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 352(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 360(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, 
%ecx +; AVX2-NEXT: vmovss %xmm6, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 368(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm6, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 376(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm6, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 384(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm6, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 392(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm6, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 400(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 408(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 416(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 424(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl 
%ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vmovss %xmm7, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 432(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm7, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 440(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm7, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 448(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm7, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 456(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm7, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: movzbl 464(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addl %edx, %ecx +; AVX2-NEXT: # kill: def $edx killed $edx def $rdx +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: movzbl 472(%rbp), %edx +; AVX2-NEXT: movzbl %dl, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: andl $63, %edx +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm2 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm3 +; AVX2-NEXT: vmovaps 
{{[0-9]+}}(%rsp), %ymm4 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm5 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm6 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm7 +; AVX2-NEXT: vmovaps %ymm7, 224(%rdi) +; AVX2-NEXT: vmovaps %ymm6, 192(%rdi) +; AVX2-NEXT: vmovaps %ymm5, 160(%rdi) +; AVX2-NEXT: vmovaps %ymm4, 128(%rdi) +; AVX2-NEXT: vmovaps %ymm3, 96(%rdi) +; AVX2-NEXT: vmovaps %ymm2, 64(%rdi) +; AVX2-NEXT: vmovaps %ymm1, 32(%rdi) +; AVX2-NEXT: vmovaps %ymm0, (%rdi) +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_large: ; AVX512F: # %bb.0: ; AVX512F-NEXT: pushq %rbp -; AVX512F-NEXT: .cfi_def_cfa_offset 16 -; AVX512F-NEXT: .cfi_offset %rbp, -16 ; AVX512F-NEXT: movq %rsp, %rbp -; AVX512F-NEXT: .cfi_def_cfa_register %rbp ; AVX512F-NEXT: andq $-64, %rsp -; AVX512F-NEXT: subq $128, %rsp -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm5 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero -; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5 -; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5 -; AVX512F-NEXT: vptestmd %zmm5, %zmm5, %k1 -; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 -; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k2 -; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512F-NEXT: vpcompressd %zmm1, %zmm1 {%k2} {z} -; 
AVX512F-NEXT: vpmovdw %zmm1, (%rsp) -; AVX512F-NEXT: kshiftrw $8, %k2, %k0 -; AVX512F-NEXT: kxorw %k0, %k2, %k0 +; AVX512F-NEXT: subq $640, %rsp # imm = 0x280 +; AVX512F-NEXT: movzbl 352(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: movzbl 360(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-5, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k1, %k3 +; AVX512F-NEXT: movzbl 368(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-9, %ax +; AVX512F-NEXT: kmovw %eax, %k7 +; AVX512F-NEXT: kandw %k7, %k0, %k0 +; AVX512F-NEXT: kmovw %k7, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 376(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $12, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-17, %ax +; AVX512F-NEXT: kmovw %eax, %k5 +; AVX512F-NEXT: kandw %k5, %k0, %k0 +; AVX512F-NEXT: movzbl 384(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $11, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-33, %ax +; AVX512F-NEXT: kmovw %eax, %k6 +; AVX512F-NEXT: kandw %k6, %k0, %k0 +; AVX512F-NEXT: movzbl 392(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $10, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-65, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 400(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; 
AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-129, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 408(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $8, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-257, %ax # imm = 0xFEFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 416(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $7, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-513, %ax # imm = 0xFDFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 424(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-1025, %ax # imm = 0xFBFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 432(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $5, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-2049, %ax # imm = 0xF7FF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 440(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $4, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-4097, %ax # imm = 0xEFFF +; AVX512F-NEXT: 
kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 448(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $3, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-8193, %ax # imm = 0xDFFF +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 456(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $2, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k1 +; AVX512F-NEXT: movw $-16385, %ax # imm = 0xBFFF +; AVX512F-NEXT: kmovw %eax, %k4 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: kmovw %k4, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 464(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, %k1 +; AVX512F-NEXT: movzbl 472(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 224(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: movzbl 232(%rbp), %r10d +; AVX512F-NEXT: kmovw %r10d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw %k3, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 240(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 248(%rbp), %eax +; AVX512F-NEXT: 
kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: movzbl 256(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: movzbl 264(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 272(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 280(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $8, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k0, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 288(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k0, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 296(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $6, %k0, %k2 +; 
AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 304(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $5, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 312(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill ; AVX512F-NEXT: kshiftrw $4, %k0, %k2 -; AVX512F-NEXT: kxorw %k2, %k0, %k0 -; AVX512F-NEXT: kshiftrw $2, %k0, %k2 -; AVX512F-NEXT: kxorw %k2, %k0, %k0 -; AVX512F-NEXT: kshiftrw $1, %k0, %k2 -; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 320(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k0 +; AVX512F-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $3, %k0, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 328(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $2, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: movzbl 336(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, 
%k1 +; AVX512F-NEXT: movzbl 344(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movzbl 96(%rbp), %eax +; AVX512F-NEXT: andl $1, %eax +; AVX512F-NEXT: movzbl 104(%rbp), %r10d +; AVX512F-NEXT: kmovw %r10d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 112(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: movzbl 120(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k5, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: movzbl 128(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: movzbl 136(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 144(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; 
AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 152(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $8, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 160(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 168(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $6, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 176(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $5, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 184(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $4, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 192(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $3, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 200(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $2, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; 
AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 208(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $14, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kshiftlw $1, %k1, %k1 +; AVX512F-NEXT: kshiftrw $1, %k1, %k1 +; AVX512F-NEXT: movzbl 216(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: andl $1, %edi +; AVX512F-NEXT: kmovw %esi, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: kmovw %edi, %k2 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw %edx, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $13, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k4, %k1, %k1 +; AVX512F-NEXT: kmovw %ecx, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $12, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k5, %k1, %k1 +; AVX512F-NEXT: kmovw %r8d, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $11, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k6, %k1, %k1 +; AVX512F-NEXT: kmovw %r9d, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 16(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $9, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kandw %k1, %k2, %k1 +; AVX512F-NEXT: movzbl 
24(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $8, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k3, %k1, %k1 +; AVX512F-NEXT: movzbl 32(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k7, %k1, %k1 +; AVX512F-NEXT: movzbl 40(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kshiftrw $6, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 48(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k5 +; AVX512F-NEXT: kshiftrw $5, %k5, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 56(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k4 +; AVX512F-NEXT: kshiftrw $4, %k4, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kandw %k2, %k1, %k1 +; AVX512F-NEXT: movzbl 64(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k3 +; AVX512F-NEXT: kshiftrw $3, %k3, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kandw %k0, %k1, %k1 +; AVX512F-NEXT: movzbl 72(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k2 +; AVX512F-NEXT: kshiftlw $15, %k2, %k2 +; AVX512F-NEXT: kshiftrw $2, %k2, %k0 +; AVX512F-NEXT: korw %k0, %k1, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; 
AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: movzbl 80(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kshiftlw $14, %k1, %k7 +; AVX512F-NEXT: korw %k7, %k0, %k0 +; AVX512F-NEXT: kshiftlw $1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k7 +; AVX512F-NEXT: movzbl 88(%rbp), %eax +; AVX512F-NEXT: kmovw %eax, %k0 +; AVX512F-NEXT: kshiftlw $15, %k0, %k6 +; AVX512F-NEXT: korw %k6, %k7, %k6 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: movw $-3, %ax +; AVX512F-NEXT: kmovw %eax, %k6 +; AVX512F-NEXT: kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k6, %k7, %k6 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $14, %k7, %k7 +; AVX512F-NEXT: korw %k7, %k6, %k6 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k7 # 2-byte Reload +; AVX512F-NEXT: kandw %k7, %k6, %k6 +; AVX512F-NEXT: kshiftrw $13, %k5, %k5 +; AVX512F-NEXT: korw %k5, %k6, %k5 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k6 # 2-byte Reload +; AVX512F-NEXT: kandw %k6, %k5, %k5 +; AVX512F-NEXT: kshiftrw $12, %k4, %k4 +; AVX512F-NEXT: korw %k4, %k5, %k4 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k5 # 2-byte Reload +; AVX512F-NEXT: kandw %k5, %k4, %k4 +; AVX512F-NEXT: kshiftrw $11, %k3, %k3 +; AVX512F-NEXT: korw %k3, %k4, %k3 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k4 # 2-byte Reload +; AVX512F-NEXT: kandw %k4, %k3, %k3 +; AVX512F-NEXT: kshiftrw $10, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k3, %k2 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload +; AVX512F-NEXT: kandw %k3, %k2, %k2 +; AVX512F-NEXT: kshiftlw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k2, %k1 +; AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kshiftlw $7, %k0, %k0 +; AVX512F-NEXT: korw %k0, %k1, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; 
AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kxorw %k0, %k1, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 ; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm2, %zmm2 {%k1} {z} +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 2-byte Reload +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k7, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k6, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $12, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k5, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $11, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k4, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftrw $10, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kandw %k3, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $6, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftlw $9, %k0, %k0 +; AVX512F-NEXT: kshiftrw $9, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 
2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $9, %k1, %k1 +; AVX512F-NEXT: kshiftrw $9, %k1, %k1 +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: kshiftlw $7, %k2, %k2 +; AVX512F-NEXT: korw %k2, %k1, %k1 +; AVX512F-NEXT: kxorw %k0, %k1, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %ecx +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm3, %zmm3 {%k1} {z} +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k2} {z} +; AVX512F-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512F-NEXT: vpcompressd %zmm1, %zmm1 {%k1} {z} +; AVX512F-NEXT: kxorw %k1, %k2, %k0 +; AVX512F-NEXT: kshiftrw $8, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k1 +; AVX512F-NEXT: kxorw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %edx +; AVX512F-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) ; AVX512F-NEXT: andl $31, %eax -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 -; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} -; AVX512F-NEXT: vpmovdw %zmm0, (%rsp,%rax,2) -; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 -; AVX512F-NEXT: vpsllw $15, %ymm4, %ymm1 -; 
AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 -; AVX512F-NEXT: vpblendvb %ymm1, {{[0-9]+}}(%rsp), %ymm0, %ymm0 -; AVX512F-NEXT: vpsllw $15, %ymm3, %ymm1 -; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 -; AVX512F-NEXT: vpblendvb %ymm1, (%rsp), %ymm2, %ymm1 -; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512F-NEXT: vmovdqa64 %zmm1, 64(%rsp,%rax,4) +; AVX512F-NEXT: vmovdqa64 %zmm2, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: andl $31, %ecx +; AVX512F-NEXT: vmovdqa64 %zmm3, 192(%rsp,%rcx,4) +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512F-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: andl $63, %edx +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512F-NEXT: vmovaps %zmm0, 320(%rsp,%rdx,4) +; AVX512F-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) +; AVX512F-NEXT: vmovaps %zmm2, 384(%rsp,%rdx,4) +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: retq ; -; AVX512VL-LABEL: test_compress_v32i16: -; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm1 -; AVX512VL-NEXT: vpmovb2m %ymm1, %k1 -; AVX512VL-NEXT: vpcompressw %zmm0, %zmm2 {%k1} -; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512VL-NEXT: retq - %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) - ret <32 x i16> %out -} - -define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) { ; AVX512VL-LABEL: test_compress_large: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: pushq %rbp -; AVX512VL-NEXT: .cfi_def_cfa_offset 16 -; AVX512VL-NEXT: .cfi_offset %rbp, -16 ; AVX512VL-NEXT: movq %rsp, %rbp -; AVX512VL-NEXT: .cfi_def_cfa_register %rbp ; AVX512VL-NEXT: andq $-64, %rsp ; 
AVX512VL-NEXT: subq $576, %rsp # imm = 0x240 ; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0 @@ -896,13 +4380,12 @@ define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i ; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 ; AVX512VL-NEXT: movq %rbp, %rsp ; AVX512VL-NEXT: popq %rbp -; AVX512VL-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512VL-NEXT: retq %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef) ret <64 x i32> %out } -define <4 x i32> @test_compress_all_const() { +define <4 x i32> @test_compress_all_const() nounwind { ; AVX2-LABEL: test_compress_all_const: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] @@ -923,7 +4406,7 @@ define <4 x i32> @test_compress_all_const() { ret <4 x i32> %out } -define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { +define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_const_mask: ; CHECK: # %bb.0: ; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] @@ -932,7 +4415,7 @@ define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { ret <4 x i32> %out } -define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { +define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) nounwind { ; CHECK-LABEL: test_compress_const_mask_passthrough: ; CHECK: # %bb.0: ; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] @@ -941,7 +4424,7 @@ define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> ret <4 x i32> %out } -define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { +define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_const_mask_const_passthrough: ; CHECK: # %bb.0: ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] @@ -956,7 +4439,7 @@ define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { ; We pass a 
placeholder value for the const_mask* tests to check that they are converted to a no-op by simply copying ; the second vector input register to the return register or doing nothing. -define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) { +define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_const_splat1_mask: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm1, %xmm0 @@ -964,21 +4447,21 @@ define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> % %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 -1), <4 x i32> undef) ret <4 x i32> %out } -define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) { +define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_const_splat0_mask: ; CHECK: # %bb.0: ; CHECK-NEXT: retq %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) ret <4 x i32> %out } -define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) { +define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_undef_mask: ; CHECK: # %bb.0: ; CHECK-NEXT: retq %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> undef, <4 x i32> undef) ret <4 x i32> %out } -define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) { +define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) nounwind { ; CHECK-LABEL: test_compress_const_splat0_mask_with_passthru: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -986,7 +4469,7 @@ define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignor %out = call <4 x i32> 
@llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> %passthru) ret <4 x i32> %out } -define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) { +define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) nounwind { ; CHECK-LABEL: test_compress_const_splat0_mask_without_passthru: ; CHECK: # %bb.0: ; CHECK-NEXT: retq @@ -994,7 +4477,98 @@ define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ig ret <4 x i32> %out } -define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { +define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind { +; AVX2-LABEL: test_compress_small: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vpextrb $1, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $1, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vpextrb $2, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $2, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrb $3, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $3, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vpextrb $4, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $4, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrb $5, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $5, %xmm0, -24(%rsp,%rax) +; 
AVX2-NEXT: vpextrb $6, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $6, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $7, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $7, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $8, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $8, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $9, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $9, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $10, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $10, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $11, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $11, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $12, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $12, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrb $13, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $13, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vpextrb $14, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addl %ecx, %eax 
+; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $15, %ecx +; AVX2-NEXT: vpextrb $14, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: andl $15, %eax +; AVX2-NEXT: vpextrb $15, %xmm0, -24(%rsp,%rax) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; ; AVX512F-LABEL: test_compress_small: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 @@ -1017,7 +4591,7 @@ define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { ret <4 x i8> %out } -define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) { +define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) nounwind { ; AVX2-LABEL: test_compress_illegal_element_type: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 @@ -1059,7 +4633,7 @@ define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mas ret <4 x i4> %out } -define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { +define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) nounwind { ; AVX2-LABEL: test_compress_narrow: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %edi, %xmm1 @@ -1132,7 +4706,7 @@ define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { ret <3 x i32> %out } -define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) { +define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) nounwind { ; AVX2-LABEL: test_compress_narrow_illegal_element_type: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %ecx, %xmm0 @@ -1222,7 +4796,7 @@ define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i ret <3 x i3> %out } -define <4 x i32> @test_compress_v4i32_zero_passthru(<4 x i32> %vec, <4 x i1> %mask) { +define <4 x i32> @test_compress_v4i32_zero_passthru(<4 x i32> %vec, <4 x i1> %mask) nounwind { ; AVX2-LABEL: test_compress_v4i32_zero_passthru: ; AVX2: # %bb.0: ; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 diff --git 
a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s index 6bc92bc29ea8a..40e3fbda47787 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s @@ -899,104 +899,131 @@ v_bfm_b32 v5, src_scc, vcc_lo v_bfm_b32 v255, 0xaf123456, vcc_hi // GFX11: v_bfm_b32 v255, 0xaf123456, vcc_hi ; encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cndmask_b16 v5, v1, src_scc, s3 -// W32: v_cndmask_b16 v5, v1, src_scc, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00] -// W64-ERR: :[[@LINE-2]]:32: error: invalid operand for instruction +v_cndmask_b16 v5.l, v1.l, src_scc, s3 +// W32: v_cndmask_b16 v5.l, v1.l, src_scc, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:36: error: invalid operand for instruction -v_cndmask_b16 v5, v255, 0.5, s3 -// W32: v_cndmask_b16 v5, v255, 0.5, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00] -// W64-ERR: :[[@LINE-2]]:30: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.l, 0.5, s3 +// W32: v_cndmask_b16 v5.l, v255.l, 0.5, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction -v_cndmask_b16 v5, s105, s105, s3 -// W32: v_cndmask_b16 v5, s105, s105, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction - -v_cndmask_b16 v5, vcc_hi, v2, s3 -// W32: v_cndmask_b16 v5, vcc_hi, v2, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, s105, s105, s3 +// W32: v_cndmask_b16 v5.l, s105, s105, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, ttmp15, ttmp15, s3 -// W32: v_cndmask_b16 v5, ttmp15, ttmp15, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: 
error: invalid operand for instruction +v_cndmask_b16 v5.l, vcc_hi, v2.l, s3 +// W32: v_cndmask_b16 v5.l, vcc_hi, v2.l, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, m0, v255, s3 -// W32: v_cndmask_b16 v5, m0, v255, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, ttmp15, ttmp15, s3 +// W32: v_cndmask_b16 v5.l, ttmp15, ttmp15, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_lo, exec_lo, s3 -// W32: v_cndmask_b16 v5, exec_lo, exec_lo, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, m0, v255.l, s3 +// W32: v_cndmask_b16 v5.l, m0, v255.l, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_hi, exec_hi, s3 -// W32: v_cndmask_b16 v5, exec_hi, exec_hi, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_lo, exec_lo, s3 +// W32: v_cndmask_b16 v5.l, exec_lo, exec_lo, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, null, m0, s105 -// W32: v_cndmask_b16 v5, null, m0, s105 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_hi, exec_hi, s3 +// W32: v_cndmask_b16 v5.l, exec_hi, exec_hi, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo -// W32: v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo ; 
encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, null, m0, s105 +// W32: v_cndmask_b16 v5.l, null, m0, s105 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, 0.5, -1, vcc_hi -// W32: v_cndmask_b16 v5, 0.5, -1, vcc_hi ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, -1, -|vcc_lo|, vcc_lo +// W32: v_cndmask_b16 v5.l, -1, -|vcc_lo|, vcc_lo ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -|src_scc|, null, ttmp15 -// W32: v_cndmask_b16 v5, -|src_scc|, null, ttmp15 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21] +v_cndmask_b16 v5.l, 0.5, -1, vcc_hi +// W32: v_cndmask_b16 v5.l, 0.5, -1, vcc_hi ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01] // W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, v1, src_scc, s[6:7] -// W64: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] -// W32-ERR: :[[@LINE-2]]:32: error: invalid operand for instruction +v_cndmask_b16 v5.l, -|src_scc|, null, ttmp15 +// W32: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp15 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21] +// W64-ERR: :[[@LINE-2]]:23: error: invalid operand for instruction -v_cndmask_b16 v5, v255, 0.5, s[6:7] -// W64: v_cndmask_b16 v5, v255, 0.5, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00] -// W32-ERR: :[[@LINE-2]]:30: error: invalid operand for instruction +v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] +// W64: v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:36: error: invalid operand for instruction -v_cndmask_b16 v5, s105, s105, s[6:7] -// W64: 
v_cndmask_b16 v5, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.l, 0.5, s[6:7] +// W64: v_cndmask_b16 v5.l, v255.l, 0.5, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction -v_cndmask_b16 v5, vcc_hi, v2, s[6:7] -// W64: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, s105, s105, s[6:7] +// W64: v_cndmask_b16 v5.l, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] -// W64: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] +// W64: v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, m0, v255, s[6:7] -// W64: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] +// W64: v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] -// W64: v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, m0, v255.l, s[6:7] +// W64: v_cndmask_b16 v5.l, m0, v255.l, s[6:7] ; encoding: 
[0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] -// W64: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] +// W64: v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, null, m0, s[6:7] -// W64: v_cndmask_b16 v5, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] +// W64: v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] -// W64: v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, null, m0, s[6:7] +// W64: v_cndmask_b16 v5.l, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] +// W64: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, 0.5, -1, vcc +// W64: v_cndmask_b16 v5.l, 0.5, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] +// W64: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] ; encoding: 
[0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +// W32-ERR: :[[@LINE-2]]:23: error: invalid operand for instruction + +v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null +// GFX11: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] + +v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo +// W32: v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, 0x3800, -1, vcc +// W64: v_cndmask_b16 v5.l, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, v255.h, 0.5, s3 +// W32: v_cndmask_b16 v5.l, v255.h, 0.5, s3 ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xe1,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction + +v_cndmask_b16 v5.l, m0, v255.h, s3 +// W32: v_cndmask_b16 v5.l, m0, v255.h, s3 ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, 0.5, -1, vcc -// W64: v_cndmask_b16 v5, 0.5, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.h, 0.5, s[6:7] +// W64: v_cndmask_b16 v5.l, v255.h, 0.5, s[6:7] ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xe1,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction -v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] -// W64: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +v_cndmask_b16 v5.l, m0, v255.h, s[6:7] +// W64: v_cndmask_b16 v5.l, m0, v255.h, s[6:7] ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] // W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null -// 
GFX11: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null +// GFX11: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] v_cubeid_f32 v5, v1, v2, s3 // GFX11: v_cubeid_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s index 5fa1334aa6e95..2bff644605ff6 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s @@ -765,112 +765,139 @@ v_bfm_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX11: v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30] -v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[0,1,2,3] +// 
W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_half_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:1 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15 -// 
W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:15 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:1 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:15 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_ror:1 +// W32: v_cndmask_b16_e64_dpp v5.l, 
v1.l, v2.l, s3 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s105 row_ror:15 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s105 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 row_ror:15 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 -// W32: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// 
W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// W32: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for 
instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] -// W32-ERR: 
:[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 -// W64: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for 
instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// W64: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX11: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] +v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX11: v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] + +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_share:0 row_mask:0xf bank_mask:0xf +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x50,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xae,0x41,0x01,0x5f,0x01,0x01] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction 
+ +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xae,0x41,0x01,0x5f,0x01,0x01] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX11: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] // GFX11: v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s 
b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s index 2fc02061c59de..2f9b5efca9e17 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s @@ -424,44 +424,71 @@ v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX11: v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x1d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00] -v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// W32: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] 
dpp8:[7,6,5,4,3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 -// W64: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX11: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] 
+v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX11: v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] + +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xae,0x41,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xae,0x41,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: 
[0x05,0x12,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX11: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x43,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] // GFX11: v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s index 3e7b7d28c2e97..cd4ed2b9458e6 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s @@ -857,104 +857,131 @@ v_bfm_b32 v5, src_scc, vcc_lo v_bfm_b32 v255, 0xaf123456, vcc_hi // GFX12: v_bfm_b32 v255, 0xaf123456, vcc_hi ; encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cndmask_b16 v5, v1, src_scc, s3 -// W32: v_cndmask_b16 v5, v1, src_scc, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00] -// W64-ERR: :[[@LINE-2]]:32: error: invalid operand for instruction +v_cndmask_b16 v5.l, v1.l, src_scc, s3 +// W32: v_cndmask_b16 v5.l, v1.l, src_scc, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:36: error: invalid operand for instruction -v_cndmask_b16 v5, v255, 0.5, s3 -// W32: v_cndmask_b16 v5, v255, 0.5, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00] -// W64-ERR: :[[@LINE-2]]:30: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.l, 0.5, s3 +// W32: v_cndmask_b16 v5.l, v255.l, 0.5, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction -v_cndmask_b16 v5, s105, s105, s3 -// W32: v_cndmask_b16 v5, s105, s105, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid 
operand for instruction - -v_cndmask_b16 v5, vcc_hi, v2, s3 -// W32: v_cndmask_b16 v5, vcc_hi, v2, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, s105, s105, s3 +// W32: v_cndmask_b16 v5.l, s105, s105, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, ttmp15, ttmp15, s3 -// W32: v_cndmask_b16 v5, ttmp15, ttmp15, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, vcc_hi, v2.l, s3 +// W32: v_cndmask_b16 v5.l, vcc_hi, v2.l, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, m0, v255, s3 -// W32: v_cndmask_b16 v5, m0, v255, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, ttmp15, ttmp15, s3 +// W32: v_cndmask_b16 v5.l, ttmp15, ttmp15, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_lo, exec_lo, s3 -// W32: v_cndmask_b16 v5, exec_lo, exec_lo, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, m0, v255.l, s3 +// W32: v_cndmask_b16 v5.l, m0, v255.l, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_hi, exec_hi, s3 -// W32: v_cndmask_b16 v5, exec_hi, exec_hi, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_lo, exec_lo, s3 +// W32: v_cndmask_b16 v5.l, exec_lo, exec_lo, s3 ; encoding: 
[0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, null, m0, s105 -// W32: v_cndmask_b16 v5, null, m0, s105 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_hi, exec_hi, s3 +// W32: v_cndmask_b16 v5.l, exec_hi, exec_hi, s3 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo -// W32: v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, null, m0, s105 +// W32: v_cndmask_b16 v5.l, null, m0, s105 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, 0.5, -1, vcc_hi -// W32: v_cndmask_b16 v5, 0.5, -1, vcc_hi ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01] -// W64-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, -1, -|vcc_lo|, vcc_lo +// W32: v_cndmask_b16 v5.l, -1, -|vcc_lo|, vcc_lo ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -|src_scc|, null, ttmp15 -// W32: v_cndmask_b16 v5, -|src_scc|, null, ttmp15 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21] +v_cndmask_b16 v5.l, 0.5, -1, vcc_hi +// W32: v_cndmask_b16 v5.l, 0.5, -1, vcc_hi ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01] // W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, v1, src_scc, s[6:7] -// W64: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] -// W32-ERR: :[[@LINE-2]]:32: error: invalid operand for instruction +v_cndmask_b16 v5.l, -|src_scc|, null, ttmp15 +// W32: v_cndmask_b16 v5.l, 
-|src_scc|, null, ttmp15 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21] +// W64-ERR: :[[@LINE-2]]:23: error: invalid operand for instruction -v_cndmask_b16 v5, v255, 0.5, s[6:7] -// W64: v_cndmask_b16 v5, v255, 0.5, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00] -// W32-ERR: :[[@LINE-2]]:30: error: invalid operand for instruction +v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] +// W64: v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:36: error: invalid operand for instruction -v_cndmask_b16 v5, s105, s105, s[6:7] -// W64: v_cndmask_b16 v5, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.l, 0.5, s[6:7] +// W64: v_cndmask_b16 v5.l, v255.l, 0.5, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction -v_cndmask_b16 v5, vcc_hi, v2, s[6:7] -// W64: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, s105, s105, s[6:7] +// W64: v_cndmask_b16 v5.l, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] -// W64: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] +// W64: v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, m0, v255, s[6:7] -// W64: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] -// W32-ERR: 
:[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] +// W64: v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] -// W64: v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, m0, v255.l, s[6:7] +// W64: v_cndmask_b16 v5.l, m0, v255.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] -// W64: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] +// W64: v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, null, m0, s[6:7] -// W64: v_cndmask_b16 v5, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] +// W64: v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] -// W64: v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, null, m0, s[6:7] +// W64: v_cndmask_b16 v5.l, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for 
instruction + +v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] +// W64: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, 0.5, -1, vcc +// W64: v_cndmask_b16 v5.l, 0.5, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] +// W64: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +// W32-ERR: :[[@LINE-2]]:23: error: invalid operand for instruction + +v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null +// GFX12: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] -v_cndmask_b16 v5, 0.5, -1, vcc -// W64: v_cndmask_b16 v5, 0.5, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01] -// W32-ERR: :[[@LINE-2]]:19: error: invalid operand for instruction +v_cndmask_b16 v5.l, v255.h, 0.5, s3 +// W32: v_cndmask_b16 v5.l, v255.h, 0.5, s3 ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xe1,0x0d,0x00] +// W64-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction + +v_cndmask_b16 v5.l, m0, v255.h, s3 +// W32: v_cndmask_b16 v5.l, m0, v255.h, s3 ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x0f,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, v255.h, 0.5, s[6:7] +// W64: v_cndmask_b16 v5.l, v255.h, 0.5, s[6:7] ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xe1,0x19,0x00] +// W32-ERR: :[[@LINE-2]]:34: error: invalid operand for instruction + +v_cndmask_b16 v5.l, m0, v255.h, s[6:7] +// W64: v_cndmask_b16 v5.l, m0, v255.h, s[6:7] ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +// W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction + +v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo +// W32: v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo ; encoding: 
[0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +// W64-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] -// W64: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +v_cndmask_b16 v5.l, 0x3800, -1, vcc +// W64: v_cndmask_b16 v5.l, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] // W32-ERR: :[[@LINE-2]]:21: error: invalid operand for instruction -v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null -// GFX12: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null +// GFX12: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] v_cubeid_f32 v5, v1, v2, s3 // GFX12: v_cubeid_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s index aa804cc302bf0..78ce7451c1ba7 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s @@ -869,128 +869,147 @@ v_bfm_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX12: v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30] -v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, 
v1.l, v2.l, s3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[0,1,2,3] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, s2, s3 row_mirror -// W32: v_cndmask_b16_e64_dpp v5, v1, s2, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0c,0x00,0x01,0x40,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s3 row_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0c,0x00,0x01,0x40,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, 10, s3 row_mirror -// W32: 
v_cndmask_b16_e64_dpp v5, v1, 10, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x14,0x0d,0x00,0x01,0x40,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s3 row_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x14,0x0d,0x00,0x01,0x40,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_half_mirror +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:1 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:15 +// W32: 
v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:1 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:15 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1 -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_ror:1 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s105 row_ror:15 -// W32: v_cndmask_b16_e64_dpp v5, v1, 
v2, s105 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 row_ror:15 +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 -// W32: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// W32: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: 
[0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for 
instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, s2, s[6:7] row_half_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, s2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x18,0x00,0x01,0x41,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s[6:7] row_half_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x18,0x00,0x01,0x41,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, 10, s[6:7] row_half_mirror -// W64: v_cndmask_b16_e64_dpp v5, v1, 10, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x14,0x19,0x00,0x01,0x41,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s[6:7] row_half_mirror +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x14,0x19,0x00,0x01,0x41,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf 
bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 
row_mask:0xf bank_mask:0xf +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 -// W64: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// W64: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX12: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] +v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null 
row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX12: v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xae,0x41,0x01,0x5f,0x01,0x01] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX12: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30] v_cubeid_f32_e64_dpp v5, v1, v2, v3 
quad_perm:[3,2,1,0] // GFX12: v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s index e93a65ec92e73..b41f92b889368 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s @@ -516,56 +516,75 @@ v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX12: v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x1d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00] -v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s3 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, 10, s3 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, 10, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x5d,0xd6,0xe9,0x14,0x0d,0x00,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s3 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, 10, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x14,0x0d,0x00,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] -// W32: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; 
encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// W32: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] -// W64-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] -// W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:35: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:41: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] -// W64: 
v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 -// W64: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v5, -v1, |s2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 -// W64: v_cndmask_b16_e64_dpp v5, -v1, |s2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xe8,0x21,0x01,0x77,0x39,0x05] -// W32-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction +v_cndmask_b16_e64_dpp v5.l, -v1.l, |s2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |s2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xe8,0x21,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction -v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX12: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] 
+v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX12: v_cndmask_b16_e64_dpp v255.l, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] +// W32: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xae,0x41,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W32: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05] +// W64-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] +// W64: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 +// W64: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x12,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +// W32-ERR: :[[@LINE-2]]:44: error: invalid operand for instruction + +v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX12: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x43,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] // GFX12: v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt 
b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt index adcca58776100..05174e3128919 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt @@ -1054,55 +1054,100 @@ # GFX11: v_bfm_b32 v255, 0xaf123456, vcc_hi ; encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf] 0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00 -# W32: v_cndmask_b16 v5, v1, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] -# W64: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, v1.l, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v1, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] 0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00 -# W32: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] -# W64: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, v255.l, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v255.l, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] 0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00 -# W32: v_cndmask_b16 v5, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] -# W64: v_cndmask_b16 v5, s105, s105, s[6:7] ; 
encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00 -# W32: v_cndmask_b16 v5, vcc_hi, v2, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] -# W64: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, vcc_hi, v2.l, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W32-FAKE16: v_cndmask_b16 v5, vcc_hi, v2, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W64-FAKE16: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] 0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00 -# W32: v_cndmask_b16 v5, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] -# W64: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00 -# W32: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] -# W64: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: 
[0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, m0, v255.l, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-FAKE16: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, m0, v255.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-FAKE16: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] 0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00 -# W32: v_cndmask_b16 v5, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] -# W64: v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00 -# W32: v_cndmask_b16 v5, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] -# W64: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00 -# W32: v_cndmask_b16 v5, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] -# W64: v_cndmask_b16 v5, 
null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] 0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41 -# W32: v_cndmask_b16 v5, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] -# W64: v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W32-REAL16: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W32-FAKE16: v_cndmask_b16 v5, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W64-REAL16: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W64-FAKE16: v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] 0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01 -# W32: v_cndmask_b16 v5, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] -# W64: v_cndmask_b16 v5, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] 0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21 -# W32: 
v_cndmask_b16 v5, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] -# W64: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W32-REAL16: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W32-FAKE16: v_cndmask_b16 v5, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W64-REAL16: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W64-FAKE16: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] 0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00 -# GFX11: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] + +0x05,0x08,0x5d,0xd6,0xff,0xe1,0x19,0x00 +# W32-REAL16: v_cndmask_b16 v5.l, v255.h, 0x3800, s6 ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v255.h, 0x3800, s[6:7] ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] + 
+0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00 +# W32-REAL16: v_cndmask_b16 v5.l, m0, v255.h, s6 ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-FAKE16: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, m0, v255.h, s[6:7] ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-FAKE16: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] + +0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00 +# W32-REAL16: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] 0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00 # GFX11: v_cubeid_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt index 2964360a77fd2..c9ef3c714213d 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt @@ -738,65 +738,118 @@ # GFX11: v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] 
row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff -# W32: 
v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; 
encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# 
W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:15 
row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_share:15 row_mask:0x0 
bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] 0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30 -# GFX11: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, null row_xmask:15 
row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xf2,0x21,0x01,0x6f,0x09,0x30] 0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30 -# GFX11: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x6f,0x09,0x30] 0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30 -# GFX11: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W32-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null row_xmask:15 row_mask:0x3 
bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] + +0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] + +0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: 
[0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] + +0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30 +# W32-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff # GFX11: v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt index 7a81ba23afa35..1e74b5aec0cf3 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt @@ -396,29 +396,64 @@ # GFX11: v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] 0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00 -# GFX11: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: 
[0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.l|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xf2,0x21,0x01,0x00,0x00,0x00] 0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00 -# GFX11: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, null dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x00,0x00,0x00] 0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00 -# GFX11: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W32-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 
; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] + +0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] + +0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] + +0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00 +# W32-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: 
[0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] 0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 # GFX11: v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt index 633d3a48634fa..4108fd9c8be62 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt @@ -1018,55 +1018,100 @@ # GFX12: v_bfm_b32 v255, 0xaf123456, vcc_hi ; encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf] 0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00 -# W32: v_cndmask_b16 v5, v1, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] -# W64: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, v1.l, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v1, src_scc, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v1.l, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v1, src_scc, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00] 0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00 -# W32: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: 
[0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] -# W64: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, v255.l, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v255.l, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] 0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00 -# W32: v_cndmask_b16 v5, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] -# W64: v_cndmask_b16 v5, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, s105, s105, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, s105, s105, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00 -# W32: v_cndmask_b16 v5, vcc_hi, v2, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] -# W64: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, vcc_hi, v2.l, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W32-FAKE16: v_cndmask_b16 v5, vcc_hi, v2, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, vcc_hi, v2.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] +# W64-FAKE16: v_cndmask_b16 v5, vcc_hi, v2, s[6:7] ; encoding: 
[0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00] 0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00 -# W32: v_cndmask_b16 v5, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] -# W64: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, ttmp15, ttmp15, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00 -# W32: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] -# W64: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, m0, v255.l, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-FAKE16: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, m0, v255.l, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-FAKE16: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] 0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00 -# W32: v_cndmask_b16 v5, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] -# W64: v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, exec_lo, exec_lo, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, exec_lo, exec_lo, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, exec_lo, exec_lo, 
s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00 -# W32: v_cndmask_b16 v5, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] -# W64: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, exec_hi, exec_hi, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00] 0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00 -# W32: v_cndmask_b16 v5, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] -# W64: v_cndmask_b16 v5, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W32-FAKE16: v_cndmask_b16 v5, null, m0, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] +# W64-FAKE16: v_cndmask_b16 v5, null, m0, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00] 0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41 -# W32: v_cndmask_b16 v5, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] -# W64: v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W32-REAL16: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W32-FAKE16: v_cndmask_b16 v5, -1, -|vcc_lo|, s104 ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W64-REAL16: v_cndmask_b16 v5.l, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] +# W64-FAKE16: v_cndmask_b16 
v5, -1, -|vcc_lo|, s[104:105] ; encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41] 0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01 -# W32: v_cndmask_b16 v5, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] -# W64: v_cndmask_b16 v5, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v5.l, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, 0x3800, -1, vcc_lo ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, 0x3800, -1, vcc ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0x82,0xa9,0x01,0x00,0x38,0x00,0x00] 0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21 -# W32: v_cndmask_b16 v5, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] -# W64: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W32-REAL16: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W32-FAKE16: v_cndmask_b16 v5, -|src_scc|, null, ttmp14 ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W64-REAL16: v_cndmask_b16 v5.l, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] +# W64-FAKE16: v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15] ; encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21] 0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00 -# GFX12: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-REAL16: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: 
[0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v255.l, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] + +0x05,0x08,0x5d,0xd6,0xff,0xe1,0x19,0x00 +# W32-REAL16: v_cndmask_b16 v5.l, v255.h, 0x3800, s6 ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, v255.h, 0x3800, s[6:7] ; encoding: [0x05,0x08,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v5, v255, 0x3800, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0xff,0xff,0x19,0x00,0x00,0x38,0x00,0x00] + +0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00 +# W32-REAL16: v_cndmask_b16 v5.l, m0, v255.h, s6 ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W32-FAKE16: v_cndmask_b16 v5, m0, v255, s6 ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-REAL16: v_cndmask_b16 v5.l, m0, v255.h, s[6:7] ; encoding: [0x05,0x10,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] +# W64-FAKE16: v_cndmask_b16 v5, m0, v255, s[6:7] ; encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00] + +0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00 +# W32-REAL16: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W32-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-REAL16: v_cndmask_b16 v255.h, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x43,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] +# W64-FAKE16: v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null ; encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00] 
0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00 # GFX12: v_cubeid_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt index 7e30a4a2096b1..0be540da8287b 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt @@ -789,59 +789,106 @@ # GFX12: v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] -# W64: v_cndmask_b16_e64_dpp 
v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, 
v1, v2, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] +# 
W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:1 
row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s104 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x5f,0x01,0x01] 0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: 
[0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xea,0x01,0x01,0x60,0x01,0x13] 0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30 -# GFX12: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W32-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xfa,0xfe,0xf3,0x01,0xff,0x6f,0x0d,0x30] + +0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: 
[0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x09,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01] + +0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp14 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x12,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x01,0x13] + +0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30 +# W32-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: 
[0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff # GFX12: v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt index 2aaba2a17fae6..343a71abb27d0 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt @@ -447,23 +447,52 @@ # GFX12: v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x1d,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s104 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05] 0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05 -# W32: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] -# W64: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, v1.l, v2.l, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, v1, v2, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xea,0x01,0x01,0x77,0x39,0x05] 0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00 -# GFX12: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W32-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.l, v255.l, v255.l, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, v255, v255, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x5d,0xd6,0xea,0xfe,0xf3,0x01,0xff,0x00,0x00,0x00] + +0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, |v1.h|, -v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, 
|v1.h|, -v2.l, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x09,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05] + +0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05 +# W32-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp14 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W64-REAL16: v_cndmask_b16_e64_dpp v5.l, -v1.l, |v2.h|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x12,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x5d,0xd6,0xe9,0x04,0xea,0x21,0x01,0x77,0x39,0x05] + +0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00 +# W32-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W32-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W64-REAL16: v_cndmask_b16_e64_dpp v255.h, -|v255.l|, -|v255.l|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x43,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] +# W64-FAKE16: v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x03,0x5d,0xd6,0xea,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00] 0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 # GFX12: v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] diff --git a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td 
b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td index 7c8181410d400..b7132bf2bcd8c 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td +++ b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td @@ -950,8 +950,8 @@ def MOVcimm8 : I<(outs GPR32:$dst), (ins i32imm:$imm), [(set GPR32:$dst, cimm8:$ // NOOPT-NEXT: GIM_Try, /*On fail goto*//*Label [[LABEL_NUM:[0-9]+]]*/ GIMT_Encode4([[LABEL:[0-9]+]]), // NOOPT-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // NOOPT-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// NOOPT-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // NOOPT-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// NOOPT-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // NOOPT-NEXT: // MIs[0] DstI[dst] // NOOPT-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, // NOOPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), @@ -973,8 +973,8 @@ def LOAD : I<(outs GPR32:$dst), (ins GPR32:$src1), // NOOPT-NEXT: GIM_Try, /*On fail goto*//*Label [[LABEL_NUM:[0-9]+]]*/ GIMT_Encode4([[LABEL:[0-9]+]]), // NOOPT-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // NOOPT-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// NOOPT-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // NOOPT-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// NOOPT-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // NOOPT-NEXT: // MIs[0] DstI[dst] // NOOPT-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_p0s32, // NOOPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), @@ -996,8 +996,8 @@ def : Pat<(load GPR32:$src), // NOOPT-NEXT: GIM_Try, /*On fail goto*//*Label [[LABEL_NUM:[0-9]+]]*/ GIMT_Encode4([[LABEL:[0-9]+]]), // NOOPT-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // 
NOOPT-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_SEXTLOAD), -// NOOPT-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/GIMT_Encode4(2), // NOOPT-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// NOOPT-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/GIMT_Encode4(2), // NOOPT-NEXT: // MIs[0] DstI[dst] // NOOPT-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, // NOOPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), diff --git a/llvm/test/TableGen/GlobalISelEmitter/HwModes.td b/llvm/test/TableGen/GlobalISelEmitter/HwModes.td index 3588ba3979411..510368516739d 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/HwModes.td +++ b/llvm/test/TableGen/GlobalISelEmitter/HwModes.td @@ -131,8 +131,8 @@ class I Pat> // CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HwMode0), // CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: // MIs[0] DstI[dst] // CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s64, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPRRegClassID), @@ -149,8 +149,8 @@ class I Pat> // CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HwMode1), // CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: // MIs[0] 
DstI[dst] // CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPRRegClassID), @@ -173,8 +173,8 @@ def LOAD : I<(outs GPR:$dst), (ins GPR:$src1), // CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HwMode0), // CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: // MIs[0] DstI[dst] // CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_p0s64, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPRRegClassID), @@ -191,8 +191,8 @@ def LOAD : I<(outs GPR:$dst), (ins GPR:$src1), // CHECK-NEXT: GIM_CheckFeatures, GIMT_Encode2(GIFBS_HwMode1), // CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: // MIs[0] DstI[dst] // CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_p0s32, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPRRegClassID), diff --git a/llvm/test/TableGen/GlobalISelEmitter/MatchTableOptimizer.td b/llvm/test/TableGen/GlobalISelEmitter/MatchTableOptimizer.td index c4307258aae9a..6ac6703991c2d 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/MatchTableOptimizer.td +++ b/llvm/test/TableGen/GlobalISelEmitter/MatchTableOptimizer.td @@ -9,8 +9,8 @@ def LOAD8 : I<(outs GPR8:$dst), (ins GPR8:$src), []>; def LOAD32 : 
I<(outs GPR8:$dst), (ins GPR32:$src), []>; // CHECK: Label 1: @{{[0-9]+}} // CHECK-NEXT: GIM_Try, /*On fail goto*//*Label [[L1_ID:[0-9]+]]*/ GIMT_Encode4([[L1_AT:[0-9]+]]), +// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, -// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR8RegClassID), // CHECK-NEXT: GIM_Try, /*On fail goto*//*Label [[L2_ID:[0-9]+]]*/ GIMT_Encode4([[L2_AT:[0-9]+]]), // CHECK-NEXT: // MIs[0] src @@ -47,8 +47,8 @@ def LOAD16 : I<(outs GPR16:$dst), (ins GPR16:$src), []>; def LOAD16Imm : I<(outs GPR16:$dst), (ins GPR16:$src), []>; // CHECK: // Label 2: @{{[0-9]+}} // CHECK-NEXT: GIM_Try, /*On fail goto*//*Label [[L1_ID:[0-9]+]]*/ GIMT_Encode4([[L1_AT:[0-9]+]]), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR16RegClassID), // CHECK-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/16, // CHECK-NEXT: GIM_Try, /*On fail goto*//*Label [[L2_ID:[0-9]+]]*/ GIMT_Encode4([[L2_AT:[0-9]+]]), diff --git a/llvm/test/TableGen/GlobalISelEmitter/OverloadedPtr.td b/llvm/test/TableGen/GlobalISelEmitter/OverloadedPtr.td index 31accba8b1847..43a121f94bd6c 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/OverloadedPtr.td +++ b/llvm/test/TableGen/GlobalISelEmitter/OverloadedPtr.td @@ -13,8 +13,8 @@ let TargetPrefix = "mytarget" in { // Check that iPTR in the destination DAG doesn't prevent the pattern from being imported. 
// CHECK: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), // CHECK-NEXT: // MIs[0] src1 // CHECK-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/0, diff --git a/llvm/test/TableGen/GlobalISelEmitter/atomic-store.td b/llvm/test/TableGen/GlobalISelEmitter/atomic-store.td index 53b8670f47e63..99869cc4e8ef0 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/atomic-store.td +++ b/llvm/test/TableGen/GlobalISelEmitter/atomic-store.td @@ -6,8 +6,8 @@ include "GlobalISelEmitterCommon.td" def ST_ATOM_B32 : I<(outs), (ins GPR32Op:$val, GPR32Op:$ptr), []>; // GISEL: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), -// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/GIMT_Encode4(1), // GISEL-NEXT: GIM_CheckAtomicOrderingOrStrongerThan, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::Unordered, +// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/GIMT_Encode4(1), // GISEL-NEXT: // MIs[0] val // GISEL-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, // GISEL-NEXT: // MIs[0] ptr diff --git a/llvm/test/TableGen/GlobalISelEmitter/predicated-pattern-order.td b/llvm/test/TableGen/GlobalISelEmitter/predicated-pattern-order.td new file mode 100644 index 0000000000000..ce420dbe01a27 --- /dev/null +++ b/llvm/test/TableGen/GlobalISelEmitter/predicated-pattern-order.td @@ -0,0 +1,82 @@ +// RUN: llvm-tblgen -gen-global-isel -optimize-match-table=false -I %p/../../../include -I %p/../Common %s | FileCheck %s +// RUN: llvm-tblgen -gen-global-isel -optimize-match-table=true -I %p/../../../include -I %p/../Common %s | FileCheck -check-prefix=OPT %s + +include "llvm/Target/Target.td" +include 
"GlobalISelEmitterCommon.td" + +// Check that IPM_GenericPredicate doesn't influence the final order of patterns. +// https://github.com/llvm/llvm-project/issues/121446 + +def aligned_store: PatFrag<(ops node:$v, node:$a), (store $v, $a), [{ + return true; +}]>{ + let GISelPredicateCode = [{ return true; }]; +} + +// CHECK: GIM_Try +// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, +// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), +// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, +// CHECK-NEXT: // MIs[0] src0 +// CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, +// CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// CHECK-NEXT: // MIs[0] src1 +// CHECK-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/32, +// CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/1, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// CHECK-NEXT: GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_aligned_store), +// CHECK-NEXT: // (st GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1)<><><> => (MOVALIGNED GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1) +// CHECK-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/0, /*Opcode*/GIMT_Encode2(MyTarget::MOVALIGNED), +// CHECK-NEXT: GIR_RootConstrainSelectedInstOperands, +// CHECK-NEXT: // GIR_Coverage + +// CHECK: GIM_Try +// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, +// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), +// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, +// CHECK-NEXT: // MIs[0] src0 +// CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, +// CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, 
/*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// CHECK-NEXT: // MIs[0] src1 +// CHECK-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/32, +// CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/1, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// CHECK-NEXT: // (st GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1)<><> => (MOVUNALIGNED GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1) +// CHECK-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/0, /*Opcode*/GIMT_Encode2(MyTarget::MOVUNALIGNED), +// CHECK-NEXT: GIR_RootConstrainSelectedInstOperands, +// CHECK-NEXT: // GIR_Coverage + +// OPT: GIM_Try +// OPT-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), +// OPT-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, +// OPT-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// OPT-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, + +// OPT-NEXT: GIM_Try +// OPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// OPT-NEXT: // MIs[0] src1 +// OPT-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/32, +// OPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/1, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// OPT-NEXT: GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_aligned_store), +// OPT-NEXT: // (st GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1)<><><> => (MOVALIGNED GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1) +// OPT-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/0, /*Opcode*/GIMT_Encode2(MyTarget::MOVALIGNED), +// OPT-NEXT: GIR_RootConstrainSelectedInstOperands, +// OPT-NEXT: // GIR_Coverage + +// OPT: GIM_Try +// OPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// OPT-NEXT: // MIs[0] src1 +// OPT-NEXT: GIM_CheckPointerToAny, /*MI*/0, /*Op*/1, /*SizeInBits*/32, +// OPT-NEXT: GIM_RootCheckRegBankForClass, /*Op*/1, 
/*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), +// OPT-NEXT: // (st GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1)<><> => (MOVUNALIGNED GPR32:{ *:[i32] }:$src0, GPR32:{ *:[i32] }:$src1) +// OPT-NEXT: GIR_MutateOpcode, /*InsnID*/0, /*RecycleInsnID*/0, /*Opcode*/GIMT_Encode2(MyTarget::MOVUNALIGNED), +// OPT-NEXT: GIR_RootConstrainSelectedInstOperands, +// OPT-NEXT: // GIR_Coverage + +def MOVALIGNED : I<(outs), (ins GPR32:$src0, GPR32:$src1), + [(aligned_store GPR32:$src0, GPR32:$src1)]>; + + +def MOVUNALIGNED : I<(outs), (ins GPR32:$src0, GPR32:$src1), + [(store GPR32:$src0, GPR32:$src1)]>; + diff --git a/llvm/test/TableGen/GlobalISelEmitter/zero-reg.td b/llvm/test/TableGen/GlobalISelEmitter/zero-reg.td index 87e5432093377..dfbe7f902c011 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/zero-reg.td +++ b/llvm/test/TableGen/GlobalISelEmitter/zero-reg.td @@ -22,8 +22,8 @@ def INST : PredI<(outs GPR32:$dst), (ins GPR32:$src), []>; // CHECK: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), -// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// CHECK-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // CHECK-NEXT: // MIs[0] DstI[dst] // CHECK-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, // CHECK-NEXT: GIM_RootCheckRegBankForClass, /*Op*/0, /*RC*/GIMT_Encode2(MyTarget::GPR32RegClassID), diff --git a/llvm/test/TableGen/address-space-patfrags.td b/llvm/test/TableGen/address-space-patfrags.td index 582b97d55a518..a2611df048b06 100644 --- a/llvm/test/TableGen/address-space-patfrags.td +++ b/llvm/test/TableGen/address-space-patfrags.td @@ -60,9 +60,9 @@ def inst_d : Instruction { // GISEL: GIM_Try, /*On fail goto*//*Label 0*/ GIMT_Encode4({{[0-9]+}}), // Rule ID 0 // // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // GISEL-NEXT: GIM_CheckOpcode, 
/*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), +// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*//* 455(*/0xC7, 0x03/*)*/, -// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, def : Pat < (pat_frag_b GPR32:$src), (inst_b GPR32:$src) @@ -80,9 +80,9 @@ def : Pat < // GISEL: GIM_Try, /*On fail goto*//*Label 1*/ GIMT_Encode4({{[0-9]+}}), // Rule ID 1 // // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_LOAD), +// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // GISEL-NEXT: GIM_CheckMemoryAlignment, /*MI*/0, /*MMO*/0, /*MinAlign*/2, -// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, def : Pat < (pat_frag_a GPR32:$src), (inst_a GPR32:$src) @@ -99,8 +99,8 @@ def truncstorei16_addrspace : PatFrag<(ops node:$val, node:$ptr), // GISEL: GIM_Try, /*On fail goto*//*Label 2*/ GIMT_Encode4({{[0-9]+}}), // Rule ID 2 // // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), -// GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, +// GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // GISEL-NEXT: // MIs[0] src0 // GISEL-NEXT: GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_s32, def : Pat < @@ -112,6 +112,7 @@ def : Pat < // GISEL: GIM_Try, /*On fail goto*//*Label 3*/ GIMT_Encode4({{[0-9]+}}), // Rule ID 3 // // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, 
/*Expected*/2, // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), +// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, def : Pat < (store GPR32:$src0, GPR32:$src1), @@ -122,6 +123,7 @@ def : Pat < // GISEL: GIM_Try, /*On fail goto*//*Label 4*/ GIMT_Encode4({{[0-9]+}}), // Rule ID 4 // // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2, // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, GIMT_Encode2(TargetOpcode::G_STORE), +// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(uint8_t)AtomicOrdering::NotAtomic, // GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0, // GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/GIMT_Encode4(2), // GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*//* 455(*/0xC7, 0x03/*)*/, diff --git a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll index 54649251e4cb1..5a977882504ce 100644 --- a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll +++ b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll @@ -186,21 +186,36 @@ entry: ret i32 %c } -; FIXME: Should preserve metadata on loads, except !noundef and !invariant.load. +; Preserve none-UB metadata on loads. 
define ptr @preserve_load_metadata_after_select_transform1(i1 %c, ptr dereferenceable(8) %a, ptr dereferenceable(8) %b) { ; CHECK-LABEL: @preserve_load_metadata_after_select_transform1( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[B_VAL:%.*]] = load ptr, ptr [[B:%.*]], align 1 -; CHECK-NEXT: [[A_VAL:%.*]] = load ptr, ptr [[A:%.*]], align 1 +; CHECK-NEXT: [[B_VAL:%.*]] = load ptr, ptr [[B:%.*]], align 1, !nonnull [[META6]], !align [[META8]] +; CHECK-NEXT: [[A_VAL:%.*]] = load ptr, ptr [[A:%.*]], align 1, !nonnull [[META6]], !align [[META8]] ; CHECK-NEXT: [[L_SEL:%.*]] = select i1 [[C:%.*]], ptr [[B_VAL]], ptr [[A_VAL]] ; CHECK-NEXT: ret ptr [[L_SEL]] ; entry: %ptr.sel = select i1 %c, ptr %b, ptr %a - %l.sel = load ptr, ptr %ptr.sel, align 1, !tbaa !0, !llvm.access.group !7, !dereferenceable !9, !noundef !{}, !invariant.load !7 + %l.sel = load ptr, ptr %ptr.sel, align 1, !tbaa !0, !llvm.access.group !7, !dereferenceable !9, !noundef !{}, !invariant.load !7, !align !9, !nonnull !{} ret ptr %l.sel } +; Preserve none-UB metadata on loads. 
+define i32 @preserve_load_metadata_after_select_transform_range(i1 %c, ptr dereferenceable(8) %a, ptr dereferenceable(8) %b) { +; CHECK-LABEL: @preserve_load_metadata_after_select_transform_range( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[B_VAL:%.*]] = load i32, ptr [[B:%.*]], align 1, !range [[RNG10:![0-9]+]] +; CHECK-NEXT: [[A_VAL:%.*]] = load i32, ptr [[A:%.*]], align 1, !range [[RNG10]] +; CHECK-NEXT: [[L_SEL:%.*]] = select i1 [[C:%.*]], i32 [[B_VAL]], i32 [[A_VAL]] +; CHECK-NEXT: ret i32 [[L_SEL]] +; +entry: + %ptr.sel = select i1 %c, ptr %b, ptr %a + %l.sel = load i32, ptr %ptr.sel, align 1, !tbaa !0, !llvm.access.group !7, !invariant.load !7, !noundef !{}, !range !6 + ret i32 %l.sel +} + define double @preserve_load_metadata_after_select_transform2(ptr %a, ptr %b) { ; CHECK-LABEL: @preserve_load_metadata_after_select_transform2( ; CHECK-NEXT: entry: @@ -279,7 +294,7 @@ define double @preserve_load_metadata_after_select_transform_metadata_missing_4( ; CHECK-LABEL: @preserve_load_metadata_after_select_transform_metadata_missing_4( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[L_A:%.*]] = load double, ptr [[A:%.*]], align 8, !tbaa [[TBAA0]], !alias.scope [[META3]], !noalias [[META3]], !llvm.access.group [[META6]] -; CHECK-NEXT: [[L_B:%.*]] = load double, ptr [[B:%.*]], align 8, !tbaa [[TBAA0]], !alias.scope [[META10:![0-9]+]], !noalias [[META10]], !llvm.access.group [[ACC_GRP13:![0-9]+]] +; CHECK-NEXT: [[L_B:%.*]] = load double, ptr [[B:%.*]], align 8, !tbaa [[TBAA0]], !alias.scope [[META11:![0-9]+]], !noalias [[META11]], !llvm.access.group [[ACC_GRP14:![0-9]+]] ; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast olt double [[L_A]], [[L_B]] ; CHECK-NEXT: [[L_SEL:%.*]] = select i1 [[CMP_I]], double [[L_B]], double [[L_A]] ; CHECK-NEXT: ret double [[L_SEL]] @@ -322,8 +337,9 @@ entry: ; CHECK: [[META7]] = !{i32 1} ; CHECK: [[META8]] = !{i64 8} ; CHECK: [[ACC_GRP9]] = distinct !{} -; CHECK: [[META10]] = !{[[META11:![0-9]+]]} -; CHECK: [[META11]] = distinct !{[[META11]], 
[[META12:![0-9]+]]} -; CHECK: [[META12]] = distinct !{[[META12]]} -; CHECK: [[ACC_GRP13]] = distinct !{} +; CHECK: [[RNG10]] = !{i32 0, i32 42} +; CHECK: [[META11]] = !{[[META12:![0-9]+]]} +; CHECK: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]} +; CHECK: [[META13]] = distinct !{[[META13]]} +; CHECK: [[ACC_GRP14]] = distinct !{} ;. diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-fmin-fmax.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-fmin-fmax.ll new file mode 100644 index 0000000000000..4ab6b3cf295bf --- /dev/null +++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-fmin-fmax.ll @@ -0,0 +1,918 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=instsimplify -march=nvptx64 --mcpu=sm_86 --mattr=+ptx72 -S | FileCheck %s + +; Check constant-folding for NVVM fmin fmax intrinsics + +;############################################################### +;# FMax(1.25, -2.0) # +;############################################################### + +define double @test_fmax_1_25_neg_2_d() { +; CHECK-LABEL: define double @test_fmax_1_25_neg_2_d() { +; CHECK-NEXT: ret double 1.250000e+00 +; + %res = call double @llvm.nvvm.fmax.d(double 1.25, double -2.0) + ret double %res +} + +define float @test_fmax_1_25_neg_2_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_f() { +; CHECK-NEXT: ret float 1.250000e+00 +; + %res = call float @llvm.nvvm.fmax.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_ftz_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_ftz_f() { +; CHECK-NEXT: ret float 1.250000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_ftz_nan_f() { +; CHECK-NEXT: ret float 1.250000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 1.25, float -2.0) + ret float %res +} + 
+define float @test_fmax_1_25_neg_2_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_nan_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_nan_f() { +; CHECK-NEXT: ret float 1.250000e+00 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmax_1_25_neg_2_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_1_25_neg_2_xorsign_abs_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +;############################################################### +;# FMax(+Subnormal, 0.0) # +;############################################################### + +define double @test_fmax_pos_subnorm_zero_d() { +; CHECK-LABEL: define double @test_fmax_pos_subnorm_zero_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmax.d(double 0x380FFFFFC0000000, double 0.0) + ret double %res +} + +define float @test_fmax_pos_subnorm_zero_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.f(float 0x380FFFFFC0000000, float 0.0) + ret float 
%res +} + +define float @test_fmax_pos_subnorm_zero_ftz_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_ftz_nan_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmax_pos_subnorm_zero_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_zero_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 0x380FFFFFC0000000, float 0.0) + ret 
float %res +} + +;############################################################### +;# FMax(+Subnormal, -Subnormal) # +;############################################################### + +define double @test_fmax_pos_subnorm_neg_subnorm_d() { +; CHECK-LABEL: define double @test_fmax_pos_subnorm_neg_subnorm_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmax.d(double 0x380FFFFFC0000000, double 0xB80FFFFFC0000000) + ret double %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_ftz_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_ftz_nan_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + 
ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_neg_subnorm_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_neg_subnorm_xorsign_abs_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +;############################################################### +;# FMax(+Subnormal, NaN) # +;############################################################### + +define double @test_fmax_pos_subnorm_nan_d() { +; CHECK-LABEL: define double @test_fmax_pos_subnorm_nan_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmax.d(double 0x380FFFFFC0000000, double 0x7fff444400000000) + ret double %res +} + +define float @test_fmax_pos_subnorm_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_ftz_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_ftz_nan_f() 
{ +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_ftz_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_nan_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmax_pos_subnorm_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_pos_subnorm_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +;############################################################### +;# FMax(+Subnormal, undef) # +;############################################################### + +define double @test_fmax_subnorm_undef_d() { +; 
CHECK-LABEL: define double @test_fmax_subnorm_undef_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmax.d(double 0x380FFFFFC0000000, double undef) + ret double %res +} + +define float @test_fmax_subnorm_undef_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_ftz_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_ftz_nan_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_nan_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_nan_xorsign_abs_f() { +; CHECK-NEXT: 
ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmax_subnorm_undef_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_subnorm_undef_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +;############################################################### +;# FMax(NaN, undef) # +;############################################################### +; Ensure we canonicalize the NaNs for f32 + +define double @test_fmax_nan_undef_d() { +; CHECK-LABEL: define double @test_fmax_nan_undef_d() { +; CHECK-NEXT: ret double 0x7FF4444400000000 +; + %res = call double @llvm.nvvm.fmax.d(double 0x7ff4444400000000, double undef) + ret double %res +} + +define float @test_fmax_nan_undef_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_ftz_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_ftz_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_ftz_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float 
@test_fmax_nan_undef_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.ftz.xorsign.abs.f(float 0x7ffff4ff00000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_nan_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.nan.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.nan.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmax_nan_undef_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmax_nan_undef_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmax.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} + +;############################################################### +;# FMin(1.25, -2.0) # +;############################################################### + +define double @test_fmin_1_25_neg_2_d() { +; CHECK-LABEL: define double @test_fmin_1_25_neg_2_d() { +; CHECK-NEXT: ret double -2.000000e+00 +; + %res = call double @llvm.nvvm.fmin.d(double 1.25, double -2.0) + ret double %res +} + +define float @test_fmin_1_25_neg_2_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmin.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_ftz_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_ftz_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 1.25, float -2.0) + ret float %res +} + +define float 
@test_fmin_1_25_neg_2_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_ftz_nan_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -1.250000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float -1.250000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_nan_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_nan_f() { +; CHECK-NEXT: ret float -2.000000e+00 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -1.250000e+00 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +define float @test_fmin_1_25_neg_2_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_1_25_neg_2_xorsign_abs_f() { +; CHECK-NEXT: ret float -1.250000e+00 +; + %res = call float @llvm.nvvm.fmin.xorsign.abs.f(float 1.25, float -2.0) + ret float %res +} + +;############################################################### +;# FMin(-Subnormal, 0.0) # +;############################################################### + +define double @test_fmin_neg_subnorm_zero_d() { +; CHECK-LABEL: define double @test_fmin_neg_subnorm_zero_d() { +; CHECK-NEXT: ret double 0xB80FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmin.d(double 0xB80FFFFFC0000000, double 0.0) + ret double %res +} + +define 
float @test_fmin_neg_subnorm_zero_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_ftz_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_ftz_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_ftz_nan_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_nan_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_nan_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float @test_fmin_neg_subnorm_zero_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +define float 
@test_fmin_neg_subnorm_zero_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_neg_subnorm_zero_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.xorsign.abs.f(float 0xB80FFFFFC0000000, float 0.0) + ret float %res +} + +;############################################################### +;# FMin(+Subnormal, -Subnormal) # +;############################################################### + +define double @test_fmin_pos_subnorm_neg_subnorm_d() { +; CHECK-LABEL: define double @test_fmin_pos_subnorm_neg_subnorm_d() { +; CHECK-NEXT: ret double 0xB80FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmin.d(double 0x380FFFFFC0000000, double 0xB80FFFFFC0000000) + ret double %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_ftz_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_ftz_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_ftz_nan_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float 
@test_fmin_pos_subnorm_neg_subnorm_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float -0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_nan_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_nan_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_neg_subnorm_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_neg_subnorm_xorsign_abs_f() { +; CHECK-NEXT: ret float 0xB80FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.xorsign.abs.f(float 0x380FFFFFC0000000, float 0xB80FFFFFC0000000) + ret float %res +} + +;############################################################### +;# FMin(+Subnormal, NaN) # +;############################################################### + +define double @test_fmin_pos_subnorm_nan_d() { +; CHECK-LABEL: define double @test_fmin_pos_subnorm_nan_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmin.d(double 0x380FFFFFC0000000, double 0x7fff444400000000) + ret double %res +} + +define float @test_fmin_pos_subnorm_nan_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float 
@test_fmin_pos_subnorm_nan_ftz_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_ftz_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_nan_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +define float @test_fmin_pos_subnorm_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_pos_subnorm_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float 
@llvm.nvvm.fmin.xorsign.abs.f(float 0x380FFFFFC0000000, float 0x7fff444400000000) + ret float %res +} + +;############################################################### +;# FMin(+Subnormal, undef) # +;############################################################### + +define double @test_fmin_subnorm_undef_d() { +; CHECK-LABEL: define double @test_fmin_subnorm_undef_d() { +; CHECK-NEXT: ret double 0x380FFFFFC0000000 +; + %res = call double @llvm.nvvm.fmin.d(double 0x380FFFFFC0000000, double undef) + ret double %res +} + +define float @test_fmin_subnorm_undef_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_ftz_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_ftz_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_ftz_nan_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0.000000e+00 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_nan_f() { +; CHECK-LABEL: define float 
@test_fmin_subnorm_undef_nan_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +define float @test_fmin_subnorm_undef_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_subnorm_undef_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x380FFFFFC0000000 +; + %res = call float @llvm.nvvm.fmin.xorsign.abs.f(float 0x380FFFFFC0000000, float undef) + ret float %res +} + +;############################################################### +;# FMin(NaN, undef) # +;############################################################### +; Ensure we canonicalize the NaNs for f32 + +define double @test_fmin_nan_undef_d() { +; CHECK-LABEL: define double @test_fmin_nan_undef_d() { +; CHECK-NEXT: ret double 0x7FF4444400000000 +; + %res = call double @llvm.nvvm.fmin.d(double 0x7ff4444400000000, double undef) + ret double %res +} + +define float @test_fmin_nan_undef_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_ftz_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_ftz_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_ftz_nan_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_ftz_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float 
@test_fmin_nan_undef_ftz_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_ftz_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_ftz_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_ftz_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.ftz.xorsign.abs.f(float 0x7ffff4ff00000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_nan_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_nan_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.nan.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_nan_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_nan_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.nan.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} + +define float @test_fmin_nan_undef_xorsign_abs_f() { +; CHECK-LABEL: define float @test_fmin_nan_undef_xorsign_abs_f() { +; CHECK-NEXT: ret float 0x7FFFFFFFE0000000 +; + %res = call float @llvm.nvvm.fmin.xorsign.abs.f(float 0x7fff444400000000, float undef) + ret float %res +} diff --git a/llvm/test/Transforms/LICM/hoist-alloc.ll b/llvm/test/Transforms/LICM/hoist-alloc.ll index 76047ec8c2438..fe4f03713926f 100644 --- a/llvm/test/Transforms/LICM/hoist-alloc.ll +++ b/llvm/test/Transforms/LICM/hoist-alloc.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -passes=licm -use-dereferenceable-at-point-semantics=0 < %s | FileCheck %s -; RUN: opt -S -passes=licm -use-dereferenceable-at-point-semantics=1 < %s | FileCheck %s +; RUN: opt -S -passes=licm -use-dereferenceable-at-point-semantics=false < %s | FileCheck %s 
+; RUN: opt -S -passes=licm -use-dereferenceable-at-point-semantics < %s | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopInterchange/gh54176-scalar-deps.ll b/llvm/test/Transforms/LoopInterchange/gh54176-scalar-deps.ll index b338365566898..bc9f16fbe58d6 100644 --- a/llvm/test/Transforms/LoopInterchange/gh54176-scalar-deps.ll +++ b/llvm/test/Transforms/LoopInterchange/gh54176-scalar-deps.ll @@ -1,5 +1,5 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=loop-interchange -S | FileCheck %s +; RUN: opt < %s -passes=loop-interchange -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s @f = dso_local local_unnamed_addr global [4 x [9 x i32]] [[9 x i32] [i32 5, i32 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0], [9 x i32] zeroinitializer, [9 x i32] zeroinitializer, [9 x i32] zeroinitializer], align 4 @g = common dso_local local_unnamed_addr global i32 0, align 4 @@ -23,54 +23,18 @@ ; return g; ; } ; +define dso_local i32 @test1(i1 %cond) { +; ; FIXME: if there's an output dependency inside the loop and Src doesn't ; dominate Dst, we should not interchange. Thus, this currently miscompiles. 
; -define dso_local i32 @test1(i1 %cond) { -; CHECK-LABEL: define dso_local i32 @test1( -; CHECK-SAME: i1 [[COND:%.*]]) { -; CHECK-NEXT: [[FOR_PREHEADER:.*:]] -; CHECK-NEXT: br label %[[INNERLOOP_PREHEADER:.*]] -; CHECK: [[OUTERLOOP_PREHEADER:.*]]: -; CHECK-NEXT: br label %[[OUTERLOOP:.*]] -; CHECK: [[OUTERLOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INDVARS_IV_NEXT21_I:%.*]], %[[FOR_LATCH:.*]] ], [ 0, %[[OUTERLOOP_PREHEADER]] ] -; CHECK-NEXT: br label %[[INNERLOOP_SPLIT:.*]] -; CHECK: [[INNERLOOP_PREHEADER]]: -; CHECK-NEXT: br label %[[INNERLOOP:.*]] -; CHECK: [[INNERLOOP]]: -; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP0:%.*]], %[[IF_END_SPLIT:.*]] ], [ 0, %[[INNERLOOP_PREHEADER]] ] -; CHECK-NEXT: br label %[[OUTERLOOP_PREHEADER]] -; CHECK: [[INNERLOOP_SPLIT]]: -; CHECK-NEXT: [[ARRAYIDX6_I:%.*]] = getelementptr inbounds [4 x [9 x i32]], ptr @f, i64 0, i64 [[J]], i64 [[I]] -; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX6_I]], align 4 -; CHECK-NEXT: [[TOBOOL_I:%.*]] = icmp eq i32 [[I1]], 0 -; CHECK-NEXT: br i1 [[TOBOOL_I]], label %[[LAND_END:.*]], label %[[LAND_RHS:.*]] -; CHECK: [[LAND_RHS]]: -; CHECK-NEXT: store i32 3, ptr @g, align 4 -; CHECK-NEXT: br label %[[LAND_END]] -; CHECK: [[LAND_END]]: -; CHECK-NEXT: br i1 [[COND]], label %[[IF_END:.*]], label %[[IF_THEN:.*]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[I2:%.*]] = load i32, ptr @g, align 4 -; CHECK-NEXT: [[INC_I:%.*]] = add i32 [[I2]], 1 -; CHECK-NEXT: store i32 [[INC_I]], ptr @g, align 4 -; CHECK-NEXT: br label %[[IF_END]] -; CHECK: [[IF_END]]: -; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[EXITCOND_I:%.*]] = icmp eq i64 [[J_NEXT]], 3 -; CHECK-NEXT: br label %[[FOR_LATCH]] -; CHECK: [[IF_END_SPLIT]]: -; CHECK-NEXT: [[TMP0]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT:.*]], label %[[INNERLOOP]] -; CHECK: [[FOR_LATCH]]: -; CHECK-NEXT: [[INDVARS_IV_NEXT21_I]] = add nsw i64 [[I]], 1 -; CHECK-NEXT: 
[[CMP_I:%.*]] = icmp slt i64 [[I]], 2 -; CHECK-NEXT: br i1 [[CMP_I]], label %[[OUTERLOOP]], label %[[IF_END_SPLIT]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: [[I3:%.*]] = load i32, ptr @g, align 4 -; CHECK-NEXT: ret i32 [[I3]] +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test1 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... ; for.preheader: br label %outerloop @@ -133,54 +97,18 @@ exit: ; return g; ; } ; +define dso_local i32 @test2(i1 %cond) { +; ; FIXME: if there's an output dependency inside the loop and Src doesn't ; dominate Dst, we should not interchange. Thus, this currently miscompiles. ; -define dso_local i32 @test2(i1 %cond) { -; CHECK-LABEL: define dso_local i32 @test2( -; CHECK-SAME: i1 [[COND:%.*]]) { -; CHECK-NEXT: [[FOR_PREHEADER:.*:]] -; CHECK-NEXT: br label %[[INNERLOOP_PREHEADER:.*]] -; CHECK: [[OUTERLOOP_PREHEADER:.*]]: -; CHECK-NEXT: br label %[[OUTERLOOP:.*]] -; CHECK: [[OUTERLOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INDVARS_IV_NEXT21_I:%.*]], %[[FOR_LATCH:.*]] ], [ 0, %[[OUTERLOOP_PREHEADER]] ] -; CHECK-NEXT: br label %[[INNERLOOP_SPLIT:.*]] -; CHECK: [[INNERLOOP_PREHEADER]]: -; CHECK-NEXT: br label %[[INNERLOOP:.*]] -; CHECK: [[INNERLOOP]]: -; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP0:%.*]], %[[IF_END_SPLIT:.*]] ], [ 0, %[[INNERLOOP_PREHEADER]] ] -; CHECK-NEXT: br label %[[OUTERLOOP_PREHEADER]] -; CHECK: [[INNERLOOP_SPLIT]]: -; CHECK-NEXT: [[ARRAYIDX6_I:%.*]] = getelementptr inbounds [4 x [9 x i32]], ptr @f, i64 0, i64 [[J]], i64 [[I]] -; CHECK-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX6_I]], align 4 -; CHECK-NEXT: [[TOBOOL_I:%.*]] = icmp eq i32 [[I1]], 0 -; CHECK-NEXT: store i32 3, ptr @g, align 4 -; CHECK-NEXT: br i1 [[TOBOOL_I]], label %[[LAND_END:.*]], label %[[LAND_RHS:.*]] -; CHECK: [[LAND_RHS]]: -; CHECK-NEXT: br label %[[LAND_END]] -; CHECK: [[LAND_END]]: -; CHECK-NEXT: br i1 [[COND]], label %[[IF_END:.*]], 
label %[[IF_THEN:.*]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[I2:%.*]] = load i32, ptr @g, align 4 -; CHECK-NEXT: [[INC_I:%.*]] = add i32 [[I2]], 1 -; CHECK-NEXT: store i32 [[INC_I]], ptr @g, align 4 -; CHECK-NEXT: br label %[[IF_END]] -; CHECK: [[IF_END]]: -; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[EXITCOND_I:%.*]] = icmp eq i64 [[J_NEXT]], 3 -; CHECK-NEXT: br label %[[FOR_LATCH]] -; CHECK: [[IF_END_SPLIT]]: -; CHECK-NEXT: [[TMP0]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT:.*]], label %[[INNERLOOP]] -; CHECK: [[FOR_LATCH]]: -; CHECK-NEXT: [[INDVARS_IV_NEXT21_I]] = add nsw i64 [[I]], 1 -; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i64 [[I]], 2 -; CHECK-NEXT: br i1 [[CMP_I]], label %[[OUTERLOOP]], label %[[IF_END_SPLIT]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: [[I3:%.*]] = load i32, ptr @g, align 4 -; CHECK-NEXT: ret i32 [[I3]] +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test2 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... 
; for.preheader: br label %outerloop diff --git a/llvm/test/Transforms/LoopInterchange/innermost-latch-uses-values-in-middle-header.ll b/llvm/test/Transforms/LoopInterchange/innermost-latch-uses-values-in-middle-header.ll index bad84224d445a..230f7dc2bcfad 100644 --- a/llvm/test/Transforms/LoopInterchange/innermost-latch-uses-values-in-middle-header.ll +++ b/llvm/test/Transforms/LoopInterchange/innermost-latch-uses-values-in-middle-header.ll @@ -1,5 +1,5 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=loop-interchange -verify-dom-info -verify-loop-info -S 2>&1 | FileCheck %s +; RUN: opt < %s -passes=loop-interchange -verify-dom-info -verify-loop-info -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s @a = common global i32 0, align 4 @d = common dso_local local_unnamed_addr global [1 x [6 x i32]] zeroinitializer, align 4 @@ -9,53 +9,29 @@ ; values defined in the new innermost loop not available in the exiting block of ; the entire loop nest. 
; -define void @innermost_latch_uses_values_in_middle_header() { -; CHECK-LABEL: define void @innermost_latch_uses_values_in_middle_header() { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 -; CHECK-NEXT: [[B:%.*]] = add i32 80, 1 -; CHECK-NEXT: br label %[[OUTERMOST_HEADER:.*]] -; CHECK: [[OUTERMOST_HEADER]]: -; CHECK-NEXT: [[INDVAR_OUTERMOST:%.*]] = phi i32 [ 10, %[[ENTRY]] ], [ [[INDVAR_OUTERMOST_NEXT:%.*]], %[[OUTERMOST_LATCH:.*]] ] -; CHECK-NEXT: [[TOBOOL71_I:%.*]] = icmp eq i32 [[TMP0]], 0 -; CHECK-NEXT: br i1 [[TOBOOL71_I]], label %[[INNERMOST_HEADER_PREHEADER:.*]], label %[[OUTERMOST_LATCH]] -; CHECK: [[MIDDLE_HEADER_PREHEADER:.*]]: -; CHECK-NEXT: br label %[[MIDDLE_HEADER:.*]] -; CHECK: [[MIDDLE_HEADER]]: -; CHECK-NEXT: [[INDVAR_MIDDLE:%.*]] = phi i64 [ [[INDVAR_MIDDLE_NEXT:%.*]], %[[MIDDLE_LATCH:.*]] ], [ 4, %[[MIDDLE_HEADER_PREHEADER]] ] -; CHECK-NEXT: [[INDVAR_MIDDLE_WIDE:%.*]] = zext i32 [[B]] to i64 -; CHECK-NEXT: br label %[[INNERMOST_BODY:.*]] -; CHECK: [[INNERMOST_HEADER_PREHEADER]]: -; CHECK-NEXT: br label %[[INNERMOST_HEADER:.*]] -; CHECK: [[INNERMOST_HEADER]]: -; CHECK-NEXT: [[INDVAR_INNERMOST:%.*]] = phi i64 [ [[TMP1:%.*]], %[[INNERMOST_LATCH_SPLIT:.*]] ], [ 4, %[[INNERMOST_HEADER_PREHEADER]] ] -; CHECK-NEXT: br label %[[MIDDLE_HEADER_PREHEADER]] -; CHECK: [[INNERMOST_BODY]]: -; CHECK-NEXT: [[ARRAYIDX9_I:%.*]] = getelementptr inbounds [1 x [6 x i32]], ptr @d, i64 0, i64 [[INDVAR_INNERMOST]], i64 [[INDVAR_MIDDLE]] -; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX9_I]], align 4 -; CHECK-NEXT: br label %[[INNERMOST_LATCH:.*]] -; CHECK: [[INNERMOST_LATCH]]: -; CHECK-NEXT: [[INDVAR_INNERMOST_NEXT:%.*]] = add nsw i64 [[INDVAR_INNERMOST]], 1 -; CHECK-NEXT: [[TOBOOL5_I:%.*]] = icmp eq i64 [[INDVAR_INNERMOST_NEXT]], [[INDVAR_MIDDLE_WIDE]] -; CHECK-NEXT: br label %[[MIDDLE_LATCH]] -; CHECK: [[INNERMOST_LATCH_SPLIT]]: -; CHECK-NEXT: [[INDVAR_MIDDLE_WIDE_LCSSA:%.*]] = phi i64 [ [[INDVAR_MIDDLE_WIDE]], %[[MIDDLE_LATCH]] ] -; 
CHECK-NEXT: [[TMP1]] = add nsw i64 [[INDVAR_INNERMOST]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], [[INDVAR_MIDDLE_WIDE_LCSSA]] -; CHECK-NEXT: br i1 [[TMP2]], label %[[OUTERMOST_LATCH_LOOPEXIT:.*]], label %[[INNERMOST_HEADER]] -; CHECK: [[MIDDLE_LATCH]]: -; CHECK-NEXT: [[INDVAR_MIDDLE_NEXT]] = add nsw i64 [[INDVAR_MIDDLE]], -1 -; CHECK-NEXT: [[TOBOOL2_I:%.*]] = icmp eq i64 [[INDVAR_MIDDLE_NEXT]], 0 -; CHECK-NEXT: br i1 [[TOBOOL2_I]], label %[[INNERMOST_LATCH_SPLIT]], label %[[MIDDLE_HEADER]] -; CHECK: [[OUTERMOST_LATCH_LOOPEXIT]]: -; CHECK-NEXT: br label %[[OUTERMOST_LATCH]] -; CHECK: [[OUTERMOST_LATCH]]: -; CHECK-NEXT: [[INDVAR_OUTERMOST_NEXT]] = add nsw i32 [[INDVAR_OUTERMOST]], -5 -; CHECK-NEXT: [[TOBOOL_I:%.*]] = icmp eq i32 [[INDVAR_OUTERMOST_NEXT]], 0 -; CHECK-NEXT: br i1 [[TOBOOL_I]], label %[[OUTERMOST_EXIT:.*]], label %[[OUTERMOST_HEADER]] -; CHECK: [[OUTERMOST_EXIT]]: -; CHECK-NEXT: ret void +; CHECK: --- !Passed +; CHECK: Pass: loop-interchange +; CHECK: Name: Interchanged +; CHECK: Function: innermost_latch_uses_values_in_middle_header +; CHECK: Args: +; CHECK: - String: Loop interchanged with enclosing loop. +; CHECK: ... +; CHECK: --- !Missed +; CHECK: Pass: loop-interchange +; CHECK: Name: UnsupportedInnerLatchPHI +; CHECK: Function: innermost_latch_uses_values_in_middle_header +; CHECK: Args: +; CHECK: - String: Cannot interchange loops because unsupported PHI nodes found in inner loop latch. +; CHECK: ... +; CHECK: --- !Missed +; CHECK: Pass: loop-interchange +; CHECK: Name: UnsupportedExitPHI +; CHECK: Function: innermost_latch_uses_values_in_middle_header +; CHECK: Args: +; CHECK: - String: Found unsupported PHI node in loop exit. +; CHECK: ... 
; +define void @innermost_latch_uses_values_in_middle_header() { entry: %0 = load i32, ptr @a, align 4 %b = add i32 80, 1 diff --git a/llvm/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll b/llvm/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll index 6daf61a4ec007..a208c1f46a705 100644 --- a/llvm/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll +++ b/llvm/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll @@ -1,5 +1,5 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=loop-interchange -cache-line-size=64 -verify-dom-info -verify-loop-info -S 2>&1 | FileCheck %s +; RUN: opt < %s -passes=loop-interchange -cache-line-size=64 -verify-dom-info -verify-loop-info -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" @@ -24,53 +24,29 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" ; So, loops InnerLoopId = 2 and OuterLoopId = 1 should be interchanged, ; but not InnerLoopId = 1 and OuterLoopId = 0. ; +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: interchange_09 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... +; CHECK-NEXT: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: NotTightlyNested +; CHECK-NEXT: Function: interchange_09 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Cannot interchange loops because they are not tightly nested. +; CHECK-NEXT: ... +; CHECK-NEXT: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: InterchangeNotProfitable +; CHECK-NEXT: Function: interchange_09 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Interchanging loops is not considered to improve cache locality nor vectorization. +; CHECK-NEXT: ... 
+ define void @interchange_09(i32 %k) { -; CHECK-LABEL: define void @interchange_09( -; CHECK-SAME: i32 [[K:%.*]]) { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_COND_CLEANUP:.*]]: -; CHECK-NEXT: ret void -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV45:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT46:%.*]], %[[FOR_COND_CLEANUP4:.*]] ] -; CHECK-NEXT: [[CALL:%.*]] = call double @fn1() -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x double], ptr @T, i64 0, i64 [[INDVARS_IV45]] -; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: br label %[[FOR_BODY9_PREHEADER:.*]] -; CHECK: [[FOR_COND6_PREHEADER_PREHEADER:.*]]: -; CHECK-NEXT: br label %[[FOR_COND6_PREHEADER:.*]] -; CHECK: [[FOR_COND6_PREHEADER]]: -; CHECK-NEXT: [[INDVARS_IV42:%.*]] = phi i64 [ [[INDVARS_IV_NEXT43:%.*]], %[[FOR_COND_CLEANUP8:.*]] ], [ 0, %[[FOR_COND6_PREHEADER_PREHEADER]] ] -; CHECK-NEXT: br label %[[FOR_BODY9_SPLIT1:.*]] -; CHECK: [[FOR_BODY9_PREHEADER]]: -; CHECK-NEXT: br label %[[FOR_BODY9:.*]] -; CHECK: [[FOR_COND_CLEANUP4]]: -; CHECK-NEXT: [[TMP:%.*]] = load double, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: call void @fn2(double [[TMP]]) -; CHECK-NEXT: [[INDVARS_IV_NEXT46]] = add nuw nsw i64 [[INDVARS_IV45]], 1 -; CHECK-NEXT: [[EXITCOND47:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT46]], 100 -; CHECK-NEXT: br i1 [[EXITCOND47]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] -; CHECK: [[FOR_COND_CLEANUP8]]: -; CHECK-NEXT: [[INDVARS_IV_NEXT43]] = add nuw nsw i64 [[INDVARS_IV42]], 1 -; CHECK-NEXT: [[EXITCOND44:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT43]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND44]], label %[[FOR_COND6_PREHEADER]], label %[[FOR_BODY9_SPLIT:.*]] -; CHECK: [[FOR_BODY9]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP0:%.*]], %[[FOR_BODY9_SPLIT]] ], [ 1, %[[FOR_BODY9_PREHEADER]] ] -; CHECK-NEXT: br label %[[FOR_COND6_PREHEADER_PREHEADER]] -; CHECK: [[FOR_BODY9_SPLIT1]]: -; CHECK-NEXT: 
[[ARRAYIDX13:%.*]] = getelementptr inbounds [1000 x [1000 x i32]], ptr @Arr, i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV42]] -; CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[ARRAYIDX13]], align 4 -; CHECK-NEXT: [[T2:%.*]] = trunc i64 [[INDVARS_IV45]] to i32 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T1]], [[T2]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX13]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 1000 -; CHECK-NEXT: br label %[[FOR_COND_CLEANUP8]] -; CHECK: [[FOR_BODY9_SPLIT]]: -; CHECK-NEXT: [[TMP0]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[TMP0]], 1000 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_BODY9]], label %[[FOR_COND_CLEANUP4]] -; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopInterchange/outer-header-jump-to-inner-latch.ll b/llvm/test/Transforms/LoopInterchange/outer-header-jump-to-inner-latch.ll index 6db95c09b175f..aaf8b1daf0414 100644 --- a/llvm/test/Transforms/LoopInterchange/outer-header-jump-to-inner-latch.ll +++ b/llvm/test/Transforms/LoopInterchange/outer-header-jump-to-inner-latch.ll @@ -1,4 +1,5 @@ -; RUN: opt -passes=loop-interchange -cache-line-size=64 -verify-dom-info -verify-loop-info -verify-loop-lcssa -S %s | FileCheck %s +; RUN: opt -passes=loop-interchange -cache-line-size=64 -verify-dom-info -verify-loop-info -verify-loop-lcssa %s -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s @b = global [3 x [5 x [8 x i16]]] [[5 x [8 x i16]] zeroinitializer, [5 x [8 x i16]] [[8 x i16] zeroinitializer, [8 x i16] [i16 0, i16 0, i16 0, i16 6, i16 1, i16 6, i16 0, i16 0], [8 x i16] zeroinitializer, [8 x i16] zeroinitializer, [8 x i16] zeroinitializer], [5 x [8 x i16]] zeroinitializer], align 2 @a = common global i32 0, align 4 @@ -19,47 +20,16 @@ ;; a |= b[d][d][c + 5]; ;; } ;; } - +; +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: 
Name: Interchanged +; CHECK-NEXT: Function: test1 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... +; define void @test1() { -;CHECK-LABEL: @test1( -;CHECK: entry: -;CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]] -;CHECK: for.body.preheader: -;CHECK-NEXT: br label [[FOR_BODY:%.*]] -;CHECK: for.body: -;CHECK-NEXT: [[INDVARS_IV22:%.*]] = phi i64 [ [[INDVARS_IV_NEXT23:%.*]], [[FOR_INC8:%.*]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ] -;CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[INDVARS_IV22:%.*]], 0 -;CHECK-NEXT: br i1 [[TOBOOL]], label [[FOR_BODY3_SPLIT1:%.*]], label [[FOR_BODY3_SPLIT:%.*]] -;CHECK: for.cond1.preheader: -;CHECK-NEXT: br label [[FOR_BODY3:%.*]] -;CHECK: for.body3: -;CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ %3, [[FOR_BODY3_SPLIT]] ] -;CHECK-NEXT: br label [[FOR_BODY_PREHEADER]] -;CHECK: for.body3.split1: -;CHECK-NEXT: [[TMP0:%.*]] = add nuw nsw i64 [[INDVARS_IV22]], 5 -;CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [3 x [5 x [8 x i16]]], ptr @b, i64 0, i64 [[INDVARS_IV]], i64 [[INDVARS_IV]], i64 [[TMP0]] -;CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX7]] -;CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 -;CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @a -;CHECK-NEXT: [[TMP_OR:%.*]] = or i32 [[TMP2]], [[CONV]] -;CHECK-NEXT: store i32 [[TMP_OR]], ptr @a -;CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1 -;CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 3 -;CHECK-NEXT: br label [[FOR_INC8_LOOPEXIT:%.*]] -;CHECK: for.body3.split: -;CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1 -;CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 3 -;CHECK-NEXT: br i1 %4, label [[FOR_BODY3]], label [[FOR_END10:%.*]] -;CHECK: for.inc8.loopexit: -;CHECK-NEXT: br label [[FOR_INC8]] -;CHECK: for.inc8: -;CHECK-NEXT: [[INDVARS_IV_NEXT23]] = add nuw nsw i64 [[INDVARS_IV22]], 1 -;CHECK-NEXT: [[EXITCOND25:%.*]] = icmp ne i64 
[[INDVARS_IV_NEXT23]], 3 -;CHECK-NEXT: br i1 [[EXITCOND25]], label [[FOR_BODY]], label [[FOR_BODY3_SPLIT]] -;CHECK: for.end10: -;CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @a -;CHECK-NEXT: ret void - entry: br label %for.body @@ -100,6 +70,7 @@ for.end10: ; preds = %for.inc8 ; Triply nested loop ; The innermost and the middle loop are interchanged. ; C test case: +; ;; a; ;; d[][6]; ;; void test2() { @@ -116,50 +87,16 @@ for.end10: ; preds = %for.inc8 ;; } ;; } ;; } - -define void @test2() { -; CHECK-LABEL: @test2( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[OUTERMOST_HEADER:%.*]] -; CHECK: outermost.header: -; CHECK-NEXT: [[INDVAR_OUTERMOST:%.*]] = phi i32 [ 10, [[ENTRY:%.*]] ], [ [[INDVAR_OUTERMOST_NEXT:%.*]], [[OUTERMOST_LATCH:%.*]] ] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 -; CHECK-NEXT: [[TOBOOL71_I:%.*]] = icmp eq i32 [[TMP0]], 0 -; CHECK-NEXT: br label [[INNERMOST_PREHEADER:%.*]] -; CHECK: middle.header.preheader: -; CHECK-NEXT: br label [[MIDDLE_HEADER:%.*]] -; CHECK: middle.header: -; CHECK-NEXT: [[INDVAR_MIDDLE:%.*]] = phi i64 [ [[INDVAR_MIDDLE_NEXT:%.*]], [[MIDDLE_LATCH:%.*]] ], [ 4, [[MIDDLE_HEADER_PREHEADER:%.*]] ] -; CHECK-NEXT: br i1 [[TOBOOL71_I]], label [[INNERMOST_BODY_SPLIT1:%.*]], label [[INNERMOST_BODY_SPLIT:%.*]] -; CHECK: innermost.preheader: -; CHECK-NEXT: br label [[INNERMOST_BODY:%.*]] -; CHECK: innermost.body: -; CHECK-NEXT: [[INDVAR_INNERMOST:%.*]] = phi i64 [ [[TMP1:%.*]], [[INNERMOST_BODY_SPLIT]] ], [ 4, [[INNERMOST_PREHEADER]] ] -; CHECK-NEXT: br label [[MIDDLE_HEADER_PREHEADER]] -; CHECK: innermost.body.split1: -; CHECK-NEXT: [[ARRAYIDX9_I:%.*]] = getelementptr inbounds [1 x [6 x i32]], ptr @d, i64 0, i64 [[INDVAR_INNERMOST]], i64 [[INDVAR_MIDDLE]] -; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX9_I]], align 4 -; CHECK-NEXT: [[INDVAR_INNERMOST_NEXT:%.*]] = add nsw i64 [[INDVAR_INNERMOST]], -1 -; CHECK-NEXT: [[TOBOOL5_I:%.*]] = icmp eq i64 [[INDVAR_INNERMOST_NEXT]], 0 -; CHECK-NEXT: br label [[MIDDLE_LATCH_LOOPEXIT:%.*]] 
-; CHECK: innermost.body.split: -; CHECK-NEXT: [[TMP1]] = add nsw i64 [[INDVAR_INNERMOST]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[TMP2]], label [[OUTERMOST_LATCH]], label [[INNERMOST_BODY]] -; CHECK: innermost.loopexit: -; CHECK-NEXT: br label [[MIDDLE_LATCH]] -; CHECK: middle.latch: -; CHECK-NEXT: [[INDVAR_MIDDLE_NEXT]] = add nsw i64 [[INDVAR_MIDDLE]], -1 -; CHECK-NEXT: [[TOBOOL2_I:%.*]] = icmp eq i64 [[INDVAR_MIDDLE_NEXT]], 0 -; CHECK-NEXT: br i1 [[TOBOOL2_I]], label [[INNERMOST_BODY_SPLIT]], label [[MIDDLE_HEADER]] -; CHECK: outermost.latch: -; CHECK-NEXT: [[INDVAR_OUTERMOST_NEXT]] = add nsw i32 [[INDVAR_OUTERMOST]], -5 -; CHECK-NEXT: [[TOBOOL_I:%.*]] = icmp eq i32 [[INDVAR_OUTERMOST_NEXT]], 0 -; CHECK-NEXT: br i1 [[TOBOOL_I]], label [[OUTERMOST_EXIT:%.*]], label [[OUTERMOST_HEADER]] -; CHECK: outermost.exit: -; CHECK-NEXT: ret void ; - +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test2 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... +; +define void @test2() { entry: br label %outermost.header diff --git a/llvm/test/Transforms/LoopInterchange/pr43797-lcssa-for-multiple-outer-loop-blocks.ll b/llvm/test/Transforms/LoopInterchange/pr43797-lcssa-for-multiple-outer-loop-blocks.ll index a0d0543075ffc..38970354c3d1c 100644 --- a/llvm/test/Transforms/LoopInterchange/pr43797-lcssa-for-multiple-outer-loop-blocks.ll +++ b/llvm/test/Transforms/LoopInterchange/pr43797-lcssa-for-multiple-outer-loop-blocks.ll @@ -1,48 +1,19 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-interchange -cache-line-size=64 -verify-loop-lcssa -S %s | FileCheck %s +; RUN: opt -passes=loop-interchange -cache-line-size=64 -verify-loop-lcssa %s -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s ; Tests for PR43797. 
@wdtdr = external dso_local global [5 x [5 x double]], align 16 +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test1 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... + define void @test1() { -; CHECK-LABEL: @test1( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[INNER_HEADER_PREHEADER:%.*]] -; CHECK: outer.header.preheader: -; CHECK-NEXT: br label [[OUTER_HEADER:%.*]] -; CHECK: outer.header: -; CHECK-NEXT: [[OUTER_IDX:%.*]] = phi i64 [ [[OUTER_IDX_INC:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, [[OUTER_HEADER_PREHEADER:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]] -; CHECK-NEXT: br label [[INNER_HEADER_SPLIT:%.*]] -; CHECK: inner.header.preheader: -; CHECK-NEXT: br label [[INNER_HEADER:%.*]] -; CHECK: inner.header: -; CHECK-NEXT: [[INNER_IDX:%.*]] = phi i64 [ [[TMP3:%.*]], [[INNER_LATCH_SPLIT:%.*]] ], [ 0, [[INNER_HEADER_PREHEADER]] ] -; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]] -; CHECK: inner.header.split: -; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX8]], align 8 -; CHECK-NEXT: store double undef, ptr [[ARRAYIDX8]], align 8 -; CHECK-NEXT: br label [[INNER_LATCH:%.*]] -; CHECK: inner.latch: -; CHECK-NEXT: [[INNER_IDX_INC:%.*]] = add nsw i64 [[INNER_IDX]], 1 -; CHECK-NEXT: br label [[INNER_EXIT:%.*]] -; CHECK: inner.latch.split: -; CHECK-NEXT: [[TMP1:%.*]] = phi i64 [ [[OUTER_V:%.*]], [[OUTER_LATCH]] ] -; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ [[OUTER_IDX_INC]], [[OUTER_LATCH]] ] -; CHECK-NEXT: [[TMP3]] = add nsw i64 [[INNER_IDX]], 1 -; CHECK-NEXT: br i1 false, label [[INNER_HEADER]], label [[OUTER_EXIT:%.*]] -; CHECK: inner.exit: -; CHECK-NEXT: [[OUTER_V]] = add nsw i64 [[OUTER_IDX]], 1 -; CHECK-NEXT: br label [[OUTER_LATCH]] -; CHECK: outer.latch: -; CHECK-NEXT: [[OUTER_IDX_INC]] = add nsw i64 [[OUTER_IDX]], 1 -; CHECK-NEXT: br i1 false, label 
[[OUTER_HEADER]], label [[INNER_LATCH_SPLIT]] -; CHECK: outer.exit: -; CHECK-NEXT: [[EXIT1_LCSSA:%.*]] = phi i64 [ [[TMP1]], [[INNER_LATCH_SPLIT]] ] -; CHECK-NEXT: [[EXIT2_LCSSA:%.*]] = phi i64 [ [[TMP2]], [[INNER_LATCH_SPLIT]] ] -; CHECK-NEXT: ret void -; entry: br label %outer.header @@ -75,48 +46,15 @@ outer.exit: ; preds = %for.inc27 ret void } +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test2 +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... + define void @test2(i1 %cond) { -; CHECK-LABEL: @test2( -; CHECK-NEXT: entry: -; CHECK-NEXT: br i1 [[COND:%.*]], label [[INNER_HEADER_PREHEADER:%.*]], label [[OUTER_EXIT:%.*]] -; CHECK: outer.header.preheader: -; CHECK-NEXT: br label [[OUTER_HEADER:%.*]] -; CHECK: outer.header: -; CHECK-NEXT: [[OUTER_IDX:%.*]] = phi i64 [ [[OUTER_IDX_INC:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, [[OUTER_HEADER_PREHEADER:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x [5 x double]], ptr @wdtdr, i64 0, i64 0, i64 [[OUTER_IDX]] -; CHECK-NEXT: br label [[INNER_HEADER_SPLIT:%.*]] -; CHECK: inner.header.preheader: -; CHECK-NEXT: br label [[INNER_HEADER:%.*]] -; CHECK: inner.header: -; CHECK-NEXT: [[INNER_IDX:%.*]] = phi i64 [ [[TMP3:%.*]], [[INNER_LATCH_SPLIT:%.*]] ], [ 0, [[INNER_HEADER_PREHEADER]] ] -; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]] -; CHECK: inner.header.split: -; CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[ARRAYIDX8]], align 8 -; CHECK-NEXT: store double undef, ptr [[ARRAYIDX8]], align 8 -; CHECK-NEXT: br label [[INNER_LATCH:%.*]] -; CHECK: inner.latch: -; CHECK-NEXT: [[INNER_IDX_INC:%.*]] = add nsw i64 [[INNER_IDX]], 1 -; CHECK-NEXT: br label [[INNER_EXIT:%.*]] -; CHECK: inner.latch.split: -; CHECK-NEXT: [[TMP1:%.*]] = phi i64 [ [[OUTER_IDX_INC]], [[OUTER_LATCH]] ] -; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ [[OUTER_V:%.*]], [[OUTER_LATCH]] ] -; CHECK-NEXT: [[TMP3]] = add nsw i64 
[[INNER_IDX]], 1 -; CHECK-NEXT: br i1 false, label [[INNER_HEADER]], label [[OUTER_EXIT_LOOPEXIT:%.*]] -; CHECK: inner.exit: -; CHECK-NEXT: [[OUTER_V]] = add nsw i64 [[OUTER_IDX]], 1 -; CHECK-NEXT: br label [[OUTER_LATCH]] -; CHECK: outer.latch: -; CHECK-NEXT: [[OUTER_IDX_INC]] = add nsw i64 [[OUTER_IDX]], 1 -; CHECK-NEXT: br i1 false, label [[OUTER_HEADER]], label [[INNER_LATCH_SPLIT]] -; CHECK: outer.exit.loopexit: -; CHECK-NEXT: [[OUTER_IDX_INC_LCSSA:%.*]] = phi i64 [ [[TMP1]], [[INNER_LATCH_SPLIT]] ] -; CHECK-NEXT: [[OUTER_V_LCSSA:%.*]] = phi i64 [ [[TMP2]], [[INNER_LATCH_SPLIT]] ] -; CHECK-NEXT: br label [[OUTER_EXIT]] -; CHECK: outer.exit: -; CHECK-NEXT: [[EXIT1_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[OUTER_V_LCSSA]], [[OUTER_EXIT_LOOPEXIT]] ] -; CHECK-NEXT: [[EXIT2_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OUTER_IDX_INC_LCSSA]], [[OUTER_EXIT_LOOPEXIT]] ] -; CHECK-NEXT: ret void -; entry: br i1 %cond, label %outer.header, label %outer.exit diff --git a/llvm/test/Transforms/LoopInterchange/vector-gep-operand.ll b/llvm/test/Transforms/LoopInterchange/vector-gep-operand.ll index 03e3b4b7408b5..022cdd44b7f50 100644 --- a/llvm/test/Transforms/LoopInterchange/vector-gep-operand.ll +++ b/llvm/test/Transforms/LoopInterchange/vector-gep-operand.ll @@ -1,43 +1,17 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-interchange -cache-line-size=64 -loop-interchange-threshold=-10 -S %s | FileCheck %s +; RUN: opt -passes=loop-interchange -cache-line-size=64 -loop-interchange-threshold=-10 %s -pass-remarks-output=%t -disable-output +; RUN: FileCheck -input-file %t %s ; The test contains a GEP with an operand that is not SCEV-able. Make sure ; loop-interchange does not crash. 
-define void @test(ptr noalias %src, ptr %dst) { -; CHECK-LABEL: @test( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[INNER_PREHEADER:%.*]] -; CHECK: outer.header.preheader: -; CHECK-NEXT: br label [[OUTER_HEADER:%.*]] -; CHECK: outer.header: -; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 0, [[OUTER_HEADER_PREHEADER:%.*]] ] -; CHECK-NEXT: br label [[INNER_SPLIT1:%.*]] -; CHECK: inner.preheader: -; CHECK-NEXT: br label [[INNER:%.*]] -; CHECK: inner: -; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[TMP0:%.*]], [[INNER_SPLIT:%.*]] ], [ 0, [[INNER_PREHEADER]] ] -; CHECK-NEXT: br label [[OUTER_HEADER_PREHEADER]] -; CHECK: inner.split1: -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds [256 x float], ptr [[SRC:%.*]], <2 x i64> , i64 [[J]] -; CHECK-NEXT: [[SRC_0:%.*]] = extractelement <2 x ptr> [[SRC_GEP]], i32 0 -; CHECK-NEXT: [[LV_0:%.*]] = load float, ptr [[SRC_0]], align 4 -; CHECK-NEXT: [[ADD_0:%.*]] = fadd float [[LV_0]], 1.000000e+00 -; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 [[J]] -; CHECK-NEXT: store float [[ADD_0]], ptr [[DST_GEP]], align 4 -; CHECK-NEXT: [[J_NEXT:%.*]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[INNER_EXITCOND:%.*]] = icmp eq i64 [[J_NEXT]], 100 -; CHECK-NEXT: br label [[OUTER_LATCH]] -; CHECK: inner.split: -; CHECK-NEXT: [[TMP0]] = add nuw nsw i64 [[J]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[TMP0]], 100 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT:%.*]], label [[INNER]] -; CHECK: outer.latch: -; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[OUTER_EXITCOND:%.*]] = icmp eq i32 [[I_NEXT]], 100 -; CHECK-NEXT: br i1 [[OUTER_EXITCOND]], label [[INNER_SPLIT]], label [[OUTER_HEADER]] -; CHECK: exit: -; CHECK-NEXT: ret void ; +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: test +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. 
+ +define void @test(ptr noalias %src, ptr %dst) { entry: br label %outer.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll index 0ff98d2abe776..6b55f5291efd8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll @@ -56,9 +56,59 @@ exit: %res = phi i64 [ %red.next, %loop ] ret i64 %res } + +define i32 @add_reduction_select_operand_constant_but_non_uniform() { +; CHECK-LABEL: define i32 @add_reduction_select_operand_constant_but_non_uniform() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ splat (i32 42), %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ splat (i32 42), %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 64 +; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> splat (i32 84)) +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 64, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ 42, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD2_REASS:%.*]], %[[LOOP]] ] +; CHECK-NEXT: 
[[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[ADD2_REASS]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[RDX_NEXT]] = add i32 0, [[RDX]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD2_REASS]], 64 +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP1]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 42, %entry ], [ %rdx.next, %loop ] + + %iv.next = add i32 %iv, 1 + %rdx.next = add i32 0, %rdx + + %cmp = icmp ult i32 %iv.next, 64 + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %rdx.next +} ;. ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll index 815e6bce52c0a..572511a5ffb92 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll @@ -1,11 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -p loop-vectorize -force-vector-width=2 -S %s | FileCheck %s +; RUN: opt -p loop-vectorize -force-vector-width=2 -use-dereferenceable-at-point-semantics=1 -S %s | FileCheck %s declare void @llvm.assume(i1) -define void @deref_assumption_in_header_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_header_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -104,9 +104,9 @@ exit: ret void } -define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant_ptr(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant_ptr(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant_ptr( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: 
[[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -181,9 +181,9 @@ exit: ret void } -define void @deref_assumption_too_small_in_header_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_too_small_in_header_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_too_small_in_header_constant_trip_count( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -282,9 +282,9 @@ exit: ret void } -define void @deref_assumption_in_header_constant_trip_count_align_1(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_header_constant_trip_count_align_1(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_align_1( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -383,9 +383,9 @@ exit: ret void } -define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attribute(ptr noalias align 4 %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attribute(ptr noalias align 4 %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attribute( -; 
CHECK-SAME: ptr noalias align 4 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias align 4 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -484,9 +484,9 @@ exit: ret void } -define void @deref_assumption_in_header_constant_trip_count_align_not_known(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_header_constant_trip_count_align_not_known(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_align_not_known( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -585,9 +585,9 @@ exit: ret void } -define void @deref_assumption_in_then_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_then_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_then_constant_trip_count( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -682,9 +682,9 @@ exit: ret void } -define void @deref_assumption_in_latch_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_latch_constant_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void 
@deref_assumption_in_latch_constant_trip_count( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -785,9 +785,9 @@ exit: ret void } -define void @deref_assumption_in_header_variable_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +define void @deref_assumption_in_header_variable_trip_count(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) nofree { ; CHECK-LABEL: define void @deref_assumption_in_header_variable_trip_count( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -890,9 +890,9 @@ exit: ret void } -define void @deref_assumption_in_preheader_constant_trip_count_align_1(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_preheader_constant_trip_count_align_1(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_preheader_constant_trip_count_align_1( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 4000) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -968,9 +968,9 @@ exit: ret void } -define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1(ptr noalias %a, ptr 
noalias %b, ptr noalias %c) { +define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 3999) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -1063,9 +1063,9 @@ exit: ret void } -define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -1142,9 +1142,9 @@ exit: } -define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via_argument_attr(ptr noalias align 4 %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via_argument_attr(ptr noalias align 4 %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via_argument_attr( -; CHECK-SAME: ptr noalias align 4 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) 
{ +; CHECK-SAME: ptr noalias align 4 [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 4000) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -1220,9 +1220,9 @@ exit: ret void } -define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 4000) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -1315,9 +1315,9 @@ exit: ret void } -define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4(ptr noalias %a, ptr noalias %b, ptr noalias %c) nofree { ; CHECK-LABEL: define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 3999) ] ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] @@ -1409,6 +1409,90 @@ loop.latch: exit: ret void } + +; %a may be freed between the dereferenceable assumption and accesses.
+; FIXME: It is not safe to use with -use-dereferenceable-at-point-semantics. +define void @may_free_align_deref_assumption_in_header_constant_trip_count_loop_invariant_ptr(ptr noalias %a, ptr noalias %b, ptr noalias %c) { +; CHECK-LABEL: define void @may_free_align_deref_assumption_in_header_constant_trip_count_loop_invariant_ptr( +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4) ] +; CHECK-NEXT: call void @may_free() +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP15]], i32 1 +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP11]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP9]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br 
i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 +; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 +; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] +; CHECK: [[LOOP_THEN]]: +; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] +; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP33:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] + call void @may_free() + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds 
i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +declare void @may_free() + ;. ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} @@ -1442,4 +1526,6 @@ exit: ; CHECK: [[LOOP29]] = distinct !{[[LOOP29]], [[META2]], [[META1]]} ; CHECK: [[LOOP30]] = distinct !{[[LOOP30]], [[META1]], [[META2]]} ; CHECK: [[LOOP31]] = distinct !{[[LOOP31]], [[META2]], [[META1]]} +; CHECK: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]], [[META2]]} +; CHECK: [[LOOP33]] = distinct !{[[LOOP33]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/no_outside_user.ll b/llvm/test/Transforms/LoopVectorize/no_outside_user.ll index 713b8f8d97951..81912b078b3b7 100644 --- a/llvm/test/Transforms/LoopVectorize/no_outside_user.ll +++ b/llvm/test/Transforms/LoopVectorize/no_outside_user.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --prefix-filecheck-ir-name unnamed --version 5 ; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s 2>&1 | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128" @@ -23,11 +23,11 @@ define i32 @test1() { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 4) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I1:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: 
[[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[B_PROMOTED]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -35,32 +35,32 @@ define i32 @test1() { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[_LR_PH_I1]] -; 
CHECK: [[_LR_PH_I1]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: [[_LR_PH_I:.*:]] -; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I1]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP8]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP8:%.*]] = phi i32 [ [[UNNAMEDTMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ 0, %[[BB10]] ], [ 1, %[[DOTLR_PH_I]] ] -; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP8]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[TMP18]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ 0, %[[UNNAMEDBB10]] ], [ 1, %[[DOTLR_PH_I]] ] +; CHECK-NEXT: [[UNNAMEDTMP18]] = add nsw i32 [[UNNAMEDTMP8]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[UNNAMEDTMP18]], 4 +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[DOTLCSSA]] ; bb: @@ -96,11 +96,11 @@ define i32 @test2() { 
; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 4) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I1:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[B_PROMOTED]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -108,32 +108,32 @@ define i32 @test2() { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> splat (i32 1), <2 x i32> [[VEC_IND]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> splat (i32 1), <2 x i32> [[VEC_IND]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 
[[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[_LR_PH_I1]] -; CHECK: [[_LR_PH_I1]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: [[_LR_PH_I:.*:]] -; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I1]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP8]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP8:%.*]] = phi i32 [ [[UNNAMEDTMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ [[TMP8]], %[[BB10]] ], [ 1, %[[DOTLR_PH_I]] ] -; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP8]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[TMP18]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ [[UNNAMEDTMP8]], %[[UNNAMEDBB10]] ], [ 1, %[[DOTLR_PH_I]] ] +; CHECK-NEXT: [[UNNAMEDTMP18]] = add nsw i32 [[UNNAMEDTMP8]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[UNNAMEDTMP18]], 4 +; 
CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[DOTLCSSA]] ; bb: @@ -169,11 +169,11 @@ define i32 @test3(i32 %N) { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 4) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I1:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[B_PROMOTED]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -183,39 +183,39 @@ define i32 @test3(i32 %N) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) -; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) -; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP5:%.*]] = select <2 x i1> [[TMP3]], <2 x i1> [[TMP4]], <2 x i1> 
zeroinitializer -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer -; CHECK-NEXT: [[PREDPHI1:%.*]] = select <2 x i1> [[TMP5]], <2 x i32> splat (i32 2), <2 x i32> [[PREDPHI]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) +; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP3]], splat (i1 true) +; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP6:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[TMP5]], <2 x i1> zeroinitializer +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI1:%.*]] = select <2 x i1> [[TMP6]], <2 x i32> splat (i32 2), <2 x i32> [[PREDPHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[PREDPHI1]], i32 1 +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[PREDPHI1]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[_LR_PH_I1]] -; CHECK: [[_LR_PH_I1]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: 
[[_LR_PH_I:.*:]] -; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I1]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP8]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[N]] -; CHECK-NEXT: br i1 [[CMP]], label %[[BB12:.*]], label %[[BB16]] -; CHECK: [[BB12]]: +; CHECK-NEXT: [[UNNAMEDTMP8:%.*]] = phi i32 [ [[UNNAMEDTMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[UNNAMEDBB12:.*]], label %[[BB16]] +; CHECK: [[UNNAMEDBB12]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ 0, %[[BB10]] ], [ 1, %[[DOTLR_PH_I]] ], [ 2, %[[BB12]] ] -; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP8]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[TMP18]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ 0, %[[UNNAMEDBB10]] ], [ 1, %[[DOTLR_PH_I]] ], [ 2, %[[UNNAMEDBB12]] ] +; CHECK-NEXT: [[UNNAMEDTMP18]] = add nsw i32 [[UNNAMEDTMP8]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[UNNAMEDTMP18]], 4 +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[DOTLCSSA]] ; bb: @@ -258,11 +258,11 @@ define i32 @test4(i32 %N) { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 
@llvm.smax.i32(i32 [[TMP0]], i32 4) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I1:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[B_PROMOTED]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -270,32 +270,32 @@ define i32 @test4(i32 %N) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP4]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[PREDPHI]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT_LOOPEXIT:.*]], label %[[_LR_PH_I1]] -; CHECK: [[_LR_PH_I1]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[DOTLR_PH_I_PREHEADER]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[DOTLR_PH_I_PREHEADER]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: [[_LR_PH_I:.*:]] -; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I1]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP8]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP8:%.*]] = phi i32 [ [[UNNAMEDTMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ 0, %[[BB10]] ], [ 1, %[[DOTLR_PH_I]] ] -; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP8]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[TMP18]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT_LOOPEXIT]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ 0, %[[UNNAMEDBB10]] ], [ 1, %[[DOTLR_PH_I]] ] +; CHECK-NEXT: [[UNNAMEDTMP18]] = add nsw i32 [[UNNAMEDTMP8]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt 
i32 [[UNNAMEDTMP18]], 4 +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT_LOOPEXIT]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT_LOOPEXIT]]: -; CHECK-NEXT: [[TMP17_LCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[TMP17_LCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label %[[F1_EXIT_LOOPEXIT]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ 2, %[[BB]] ], [ [[TMP17_LCSSA]], %[[F1_EXIT_LOOPEXIT_LOOPEXIT]] ] @@ -343,12 +343,12 @@ define i32 @reduction_sum(i32 %n, ptr noalias nocapture %A, ptr noalias nocaptur ; CHECK-NEXT: [[C3:%.*]] = load i32, ptr [[C2]], align 4 ; CHECK-NEXT: [[C4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[C5:%.*]] = load i32, ptr [[C4]], align 4 -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[SUM_02]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[SUM_02]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ [[SUM_02]], %[[BB10]] ], [ 1, %[[HEADER]] ] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ [[SUM_02]], %[[UNNAMEDBB10]] ], [ 1, %[[HEADER]] ] ; CHECK-NEXT: [[C6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 ; CHECK-NEXT: [[C7:%.*]] = add i32 [[SUM_02]], [[C6]] ; CHECK-NEXT: [[C8:%.*]] = add i32 [[C7]], [[C3]] @@ -358,7 +358,7 @@ define i32 @reduction_sum(i32 %n, ptr noalias nocapture %A, ptr noalias nocaptur ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] ; CHECK-NEXT: br i1 [[EXITCOND]], label %[[DOT_CRIT_EDGE_LOOPEXIT:.*]], label %[[HEADER]] ; CHECK: [[__CRIT_EDGE_LOOPEXIT:.*:]] -; CHECK-NEXT: [[TMP17_LCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ] +; CHECK-NEXT: 
[[TMP17_LCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ] ; CHECK-NEXT: [[C9_LCSSA:%.*]] = phi i32 [ [[C9]], %[[BB16]] ] ; CHECK-NEXT: br [[DOT_CRIT_EDGE]] ; CHECK: [[__CRIT_EDGE:.*:]] @@ -410,17 +410,17 @@ define i32 @cyclic_dep_with_indvar() { ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: [[_LR_PH_I:.*:]] ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IVNEXT:%.*]], %[[BB16:.*]] ], [ [[B_PROMOTED]], %[[BB]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[IV]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[IV]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ 0, %[[BB10]] ], [ [[IV]], %[[DOTLR_PH_I]] ] -; CHECK-NEXT: [[IVNEXT]] = add nsw i32 [[TMP17]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[IVNEXT]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT:.*]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ 0, %[[UNNAMEDBB10]] ], [ [[IV]], %[[DOTLR_PH_I]] ] +; CHECK-NEXT: [[IVNEXT]] = add nsw i32 [[UNNAMEDTMP17]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[IVNEXT]], 4 +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT:.*]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[BB16]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[BB16]] ] ; CHECK-NEXT: ret i32 [[DOTLCSSA]] ; bb: @@ -458,16 +458,16 @@ define i32 @not_valid_reduction(i32 %n, ptr noalias nocapture %A) nounwind uwtab ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[LATCH:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[TMP17:%.*]], %[[LATCH]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: 
[[X_05:%.*]] = phi i32 [ [[UNNAMEDTMP17:%.*]], %[[LATCH]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[INDVARS_IV]], 10 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[TMP0]] -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16:.*]], label %[[BB10:.*]] -; CHECK: [[BB10]]: -; CHECK-NEXT: br label %[[BB16]] -; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17]] = phi i32 [ 1, %[[BB10]] ], [ [[SUB]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[UNNAMEDTMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i64 [[INDVARS_IV]], 10 +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[UNNAMEDTMP0]] +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[UNNAMEDBB16:.*]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: +; CHECK-NEXT: br label %[[UNNAMEDBB16]] +; CHECK: [[UNNAMEDBB16]]: +; CHECK-NEXT: [[UNNAMEDTMP17]] = phi i32 [ 1, %[[UNNAMEDBB10]] ], [ [[SUB]], %[[FOR_BODY]] ] ; CHECK-NEXT: br label %[[LATCH]] ; CHECK: [[LATCH]]: ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 @@ -475,7 +475,7 @@ define i32 @not_valid_reduction(i32 %n, ptr noalias nocapture %A) nounwind uwtab ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] ; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT:.*]], label %[[FOR_BODY]] ; CHECK: [[FOR_END_LOOPEXIT]]: -; CHECK-NEXT: [[TMP17_LCSSA:%.*]] = phi i32 [ [[TMP17]], %[[LATCH]] ] +; CHECK-NEXT: [[TMP17_LCSSA:%.*]] = phi i32 [ [[UNNAMEDTMP17]], %[[LATCH]] ] ; CHECK-NEXT: br label %[[FOR_END]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP17_LCSSA]], %[[FOR_END_LOOPEXIT]] ] @@ -520,11 +520,11 @@ define i8 @outside_user_non_phi() { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 4) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; 
CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I1:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[B_PROMOTED]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -532,34 +532,34 @@ define i8 @outside_user_non_phi() { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i32> [[PREDPHI]] to <2 x i8> +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <2 x i32> [[VEC_IND]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> splat (i32 1), <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = trunc <2 x i32> [[PREDPHI]] to <2 x i8> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 
[[N_VEC]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP3]], i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP4]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[_LR_PH_I1]] -; CHECK: [[_LR_PH_I1]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[BB]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] ; CHECK: [[_LR_PH_I:.*:]] -; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I1]] ] -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP8]], 10 -; CHECK-NEXT: br i1 [[TMP2]], label %[[BB16]], label %[[BB10:.*]] -; CHECK: [[BB10]]: +; CHECK-NEXT: [[UNNAMEDTMP8:%.*]] = phi i32 [ [[UNNAMEDTMP18:%.*]], %[[BB16:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[UNNAMEDTMP2:%.*]] = icmp sgt i32 [[UNNAMEDTMP8]], 10 +; CHECK-NEXT: br i1 [[UNNAMEDTMP2]], label %[[BB16]], label %[[UNNAMEDBB10:.*]] +; CHECK: [[UNNAMEDBB10]]: ; CHECK-NEXT: br label %[[BB16]] ; CHECK: [[BB16]]: -; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ 0, %[[BB10]] ], [ 1, %[[DOTLR_PH_I]] ] -; CHECK-NEXT: [[TMP17_TRUNC:%.*]] = trunc i32 [[TMP17]] to i8 -; CHECK-NEXT: [[TMP18]] = add nsw i32 [[TMP8]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[TMP18]], 4 -; CHECK-NEXT: br i1 [[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP17:%.*]] = phi i32 [ 0, %[[UNNAMEDBB10]] ], [ 1, %[[DOTLR_PH_I]] ] +; CHECK-NEXT: [[TMP17_TRUNC:%.*]] = trunc i32 [[UNNAMEDTMP17]] to i8 +; 
CHECK-NEXT: [[UNNAMEDTMP18]] = add nsw i32 [[UNNAMEDTMP8]], 1 +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[UNNAMEDTMP18]], 4 +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i8 [ [[TMP17_TRUNC]], %[[BB16]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i8 [ [[TMP17_TRUNC]], %[[BB16]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i8 [[DOTLCSSA]] ; bb: @@ -651,46 +651,46 @@ define i32 @sum_arrays_outside_use(ptr %B, ptr %A, ptr %C, i32 %N) { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N]], i32 [[TMP0]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[SMAX]], [[B_PROMOTED]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[_LR_PH_I:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[C1]], [[B2]] ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP2]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[C1]], [[A3]] ; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i32 [[TMP3]], 8 ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] -; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[_LR_PH_I]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[B_PROMOTED]], [[N_VEC]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: 
[[OFFSET_IDX5:%.*]] = add i32 [[B_PROMOTED]], [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX5]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP7]], align 4 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i32>, ptr [[TMP9]], align 4 -; CHECK-NEXT: [[TMP10:%.*]] = add nsw <2 x i32> [[WIDE_LOAD]], [[WIDE_LOAD6]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0 -; CHECK-NEXT: store <2 x i32> [[TMP10]], ptr [[TMP12]], align 4 +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[B_PROMOTED]], [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x i32>, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = add nsw <2 x i32> [[WIDE_LOAD]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0 +; CHECK-NEXT: store <2 x i32> [[TMP11]], ptr [[TMP13]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 
[[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i32> [[TMP10]], i32 1 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i32> [[TMP11]], i32 1 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[_LR_PH_I]] -; CHECK: [[_LR_PH_I]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[OFFSET_IDX]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[VECTOR_MEMCHECK]] ], [ [[B_PROMOTED]], %[[BB]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[F1_EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[B_PROMOTED]], %[[VECTOR_MEMCHECK]] ], [ [[B_PROMOTED]], %[[BB]] ] ; CHECK-NEXT: br label %[[DOTLR_PH_I:.*]] -; CHECK: [[_LR_PH_I1:.*:]] -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IVNEXT:%.*]], %[[DOTLR_PH_I]] ], [ [[BC_RESUME_VAL]], %[[_LR_PH_I]] ] +; CHECK: [[_LR_PH_I:.*:]] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IVNEXT:%.*]], %[[DOTLR_PH_I]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; CHECK-NEXT: [[INDVARS_IV:%.*]] = sext i32 [[IV]] to i64 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[BLOAD:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 @@ -700,10 +700,10 @@ define i32 @sum_arrays_outside_use(ptr %B, ptr %A, ptr %C, i32 %N) { ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i32 [[SUM]], ptr [[ARRAYIDX3]], align 4 ; CHECK-NEXT: [[IVNEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp slt i32 [[IVNEXT]], [[N]] -; CHECK-NEXT: br i1 
[[TMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: [[UNNAMEDTMP19:%.*]] = icmp slt i32 [[IVNEXT]], [[N]] +; CHECK-NEXT: br i1 [[UNNAMEDTMP19]], label %[[DOTLR_PH_I]], label %[[F1_EXIT_LOOPEXIT]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[F1_EXIT_LOOPEXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[SUM]], %[[DOTLR_PH_I]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[SUM]], %[[DOTLR_PH_I]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[DOTLCSSA]] ; bb: diff --git a/llvm/test/Transforms/MemCpyOpt/store-to-memset-is-nonzero-type.ll b/llvm/test/Transforms/MemCpyOpt/store-to-memset-is-nonzero-type.ll index 0455d65fe7521..6b53138342ebf 100644 --- a/llvm/test/Transforms/MemCpyOpt/store-to-memset-is-nonzero-type.ll +++ b/llvm/test/Transforms/MemCpyOpt/store-to-memset-is-nonzero-type.ll @@ -5,7 +5,7 @@ define void @array_zero(ptr %p) { ; CHECK-LABEL: @array_zero( -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 undef, i64 0, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 poison, i64 0, i1 false) ; CHECK-NEXT: ret void ; store [0 x i8] zeroinitializer, ptr %p @@ -25,7 +25,7 @@ define void @array_nonzero(ptr %p) { define void @struct_zero(ptr %p) { ; CHECK-LABEL: @struct_zero( -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 undef, i64 0, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[P:%.*]], i8 poison, i64 0, i1 false) ; CHECK-NEXT: ret void ; store { } zeroinitializer, ptr %p diff --git a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll index d34c8f88e4b3c..7bc6e5ac3d760 100644 --- a/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll +++ b/llvm/test/Transforms/SandboxVectorizer/bottomup_basic.ll @@ -201,3 +201,23 @@ define void @pack_vectors(ptr %ptr, ptr %ptr2) { store float %ld1, ptr %ptr1 ret 
void } + +define void @diamond(ptr %ptr) { +; CHECK-LABEL: define void @diamond( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[PTR0:%.*]] = getelementptr float, ptr [[PTR]], i32 0 +; CHECK-NEXT: [[VECL:%.*]] = load <2 x float>, ptr [[PTR0]], align 4 +; CHECK-NEXT: [[VEC:%.*]] = fsub <2 x float> [[VECL]], [[VECL]] +; CHECK-NEXT: store <2 x float> [[VEC]], ptr [[PTR0]], align 4 +; CHECK-NEXT: ret void +; + %ptr0 = getelementptr float, ptr %ptr, i32 0 + %ptr1 = getelementptr float, ptr %ptr, i32 1 + %ld0 = load float, ptr %ptr0 + %ld1 = load float, ptr %ptr1 + %sub0 = fsub float %ld0, %ld0 + %sub1 = fsub float %ld1, %ld1 + store float %sub0, ptr %ptr0 + store float %sub1, ptr %ptr1 + ret void +} diff --git a/llvm/tools/dxil-dis/CMakeLists.txt b/llvm/tools/dxil-dis/CMakeLists.txt index d0541fcf802e9..1e77530106420 100644 --- a/llvm/tools/dxil-dis/CMakeLists.txt +++ b/llvm/tools/dxil-dis/CMakeLists.txt @@ -38,7 +38,7 @@ ExternalProject_Add(DXC ${GIT_SETTINGS} SOURCE_DIR ${SOURCE_DIR} BINARY_DIR ${BINARY_DIR} - CMAKE_ARGS -C ${SOURCE_DIR}/cmake/caches/PredefinedParams.cmake -DLLVM_INCLUDE_TESTS=On + CMAKE_ARGS -C ${SOURCE_DIR}/cmake/caches/PredefinedParams.cmake -DLLVM_INCLUDE_TESTS=Off -DCLANG_INCLUDE_TESTS=Off -DHLSL_INCLUDE_TESTS=Off BUILD_COMMAND ${CMAKE_COMMAND} --build ${BINARY_DIR} --target llvm-dis BUILD_BYPRODUCTS ${BINARY_DIR}/bin/llvm-dis INSTALL_COMMAND "" diff --git a/llvm/tools/llvm-exegesis/lib/AArch64/Target.cpp b/llvm/tools/llvm-exegesis/lib/AArch64/Target.cpp index 51846862f0a73..5a7cc6f5e30d3 100644 --- a/llvm/tools/llvm-exegesis/lib/AArch64/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/AArch64/Target.cpp @@ -26,7 +26,7 @@ static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) { } // Generates instruction to load an immediate value into a register. 
-static MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, +static MCInst loadImmediate(MCRegister Reg, unsigned RegBitWidth, const APInt &Value) { if (Value.getBitWidth() > RegBitWidth) llvm_unreachable("Value must fit in the Register"); @@ -45,7 +45,7 @@ class ExegesisAArch64Target : public ExegesisTarget { : ExegesisTarget(AArch64CpuPfmCounters, AArch64_MC::isOpcodeAvailable) {} private: - std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, MCRegister Reg, const APInt &Value) const override { if (AArch64::GPR32RegClass.contains(Reg)) return {loadImmediate(Reg, 32, Value)}; diff --git a/llvm/tools/llvm-exegesis/lib/Assembler.cpp b/llvm/tools/llvm-exegesis/lib/Assembler.cpp index 13c8c2048a5c0..7a53b626c177c 100644 --- a/llvm/tools/llvm-exegesis/lib/Assembler.cpp +++ b/llvm/tools/llvm-exegesis/lib/Assembler.cpp @@ -81,7 +81,7 @@ static bool generateSnippetSetupCode(const ExegesisTarget &ET, // If we're generating memory instructions, don't load in the value for // the register with the stack pointer as it will be used later to finish // the setup. - if (RV.Register == StackPointerRegister) + if (Register(RV.Register) == StackPointerRegister) continue; } // Load a constant in the register. @@ -98,7 +98,7 @@ static bool generateSnippetSetupCode(const ExegesisTarget &ET, // Load in the stack register now as we're done using it elsewhere // and need to set the value in preparation for executing the // snippet. 
- if (RV.Register != StackPointerRegister) + if (Register(RV.Register) != StackPointerRegister) continue; const auto SetRegisterCode = ET.setRegTo(*MSI, RV.Register, RV.Value); if (SetRegisterCode.empty()) @@ -208,7 +208,7 @@ void BasicBlockFiller::addReturn(const ExegesisTarget &ET, } FunctionFiller::FunctionFiller(MachineFunction &MF, - std::vector RegistersSetUp) + std::vector RegistersSetUp) : MF(MF), MCII(MF.getTarget().getMCInstrInfo()), Entry(addBasicBlock()), RegistersSetUp(std::move(RegistersSetUp)) {} @@ -218,7 +218,7 @@ BasicBlockFiller FunctionFiller::addBasicBlock() { return BasicBlockFiller(MF, MBB, MCII); } -ArrayRef FunctionFiller::getRegistersSetUp() const { +ArrayRef FunctionFiller::getRegistersSetUp() const { return RegistersSetUp; } @@ -241,7 +241,7 @@ BitVector getFunctionReservedRegs(const TargetMachine &TM) { Error assembleToStream(const ExegesisTarget &ET, std::unique_ptr TM, - ArrayRef LiveIns, const FillFunction &Fill, + ArrayRef LiveIns, const FillFunction &Fill, raw_pwrite_stream &AsmStream, const BenchmarkKey &Key, bool GenerateMemoryInstructions) { auto Context = std::make_unique(); @@ -259,19 +259,19 @@ Error assembleToStream(const ExegesisTarget &ET, Properties.reset(MachineFunctionProperties::Property::IsSSA); Properties.set(MachineFunctionProperties::Property::NoPHIs); - for (const unsigned Reg : LiveIns) + for (const MCRegister Reg : LiveIns) MF.getRegInfo().addLiveIn(Reg); if (GenerateMemoryInstructions) { - for (const unsigned Reg : ET.getArgumentRegisters()) + for (const MCRegister Reg : ET.getArgumentRegisters()) MF.getRegInfo().addLiveIn(Reg); // Add a live in for registers that need saving so that the machine verifier // doesn't fail if the register is never defined. 
- for (const unsigned Reg : ET.getRegistersNeedSaving()) + for (const MCRegister Reg : ET.getRegistersNeedSaving()) MF.getRegInfo().addLiveIn(Reg); } - std::vector RegistersSetUp; + std::vector RegistersSetUp; RegistersSetUp.reserve(Key.RegisterInitialValues.size()); for (const auto &InitValue : Key.RegisterInitialValues) { RegistersSetUp.push_back(InitValue.Register); @@ -279,15 +279,15 @@ Error assembleToStream(const ExegesisTarget &ET, FunctionFiller Sink(MF, std::move(RegistersSetUp)); auto Entry = Sink.getEntry(); - for (const unsigned Reg : LiveIns) + for (const MCRegister Reg : LiveIns) Entry.MBB->addLiveIn(Reg); if (GenerateMemoryInstructions) { - for (const unsigned Reg : ET.getArgumentRegisters()) + for (const MCRegister Reg : ET.getArgumentRegisters()) Entry.MBB->addLiveIn(Reg); // Add a live in for registers that need saving so that the machine verifier // doesn't fail if the register is never defined. - for (const unsigned Reg : ET.getRegistersNeedSaving()) + for (const MCRegister Reg : ET.getRegistersNeedSaving()) Entry.MBB->addLiveIn(Reg); } diff --git a/llvm/tools/llvm-exegesis/lib/Assembler.h b/llvm/tools/llvm-exegesis/lib/Assembler.h index 4d241e0281b5a..1c8854c21b9a7 100644 --- a/llvm/tools/llvm-exegesis/lib/Assembler.h +++ b/llvm/tools/llvm-exegesis/lib/Assembler.h @@ -61,7 +61,7 @@ class BasicBlockFiller { // Helper to fill in a function. class FunctionFiller { public: - FunctionFiller(MachineFunction &MF, std::vector RegistersSetUp); + FunctionFiller(MachineFunction &MF, std::vector RegistersSetUp); // Adds a basic block to the function. BasicBlockFiller addBasicBlock(); @@ -73,12 +73,12 @@ class FunctionFiller { const MCInstrInfo *const MCII; // Returns the set of registers in the snippet setup code. - ArrayRef getRegistersSetUp() const; + ArrayRef getRegistersSetUp() const; private: BasicBlockFiller Entry; // The set of registers that are set up in the basic block. 
- std::vector RegistersSetUp; + std::vector RegistersSetUp; }; // A callback that fills a function. @@ -90,7 +90,7 @@ using FillFunction = std::function; // AsmStream, the temporary function is eventually discarded. Error assembleToStream(const ExegesisTarget &ET, std::unique_ptr TM, - ArrayRef LiveIns, const FillFunction &Fill, + ArrayRef LiveIns, const FillFunction &Fill, raw_pwrite_stream &AsmStreamm, const BenchmarkKey &Key, bool GenerateMemoryInstructions); diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkCode.h b/llvm/tools/llvm-exegesis/lib/BenchmarkCode.h index 1db8472e99f7c..5e3c10decf723 100644 --- a/llvm/tools/llvm-exegesis/lib/BenchmarkCode.h +++ b/llvm/tools/llvm-exegesis/lib/BenchmarkCode.h @@ -23,7 +23,7 @@ struct BenchmarkCode { // We also need to provide the registers that are live on entry for the // assembler to generate proper prologue/epilogue. - std::vector LiveIns; + std::vector LiveIns; // Informations about how this configuration was built. std::string Info; diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkResult.cpp b/llvm/tools/llvm-exegesis/lib/BenchmarkResult.cpp index 84dc23b343c6c..1823a534a301a 100644 --- a/llvm/tools/llvm-exegesis/lib/BenchmarkResult.cpp +++ b/llvm/tools/llvm-exegesis/lib/BenchmarkResult.cpp @@ -65,17 +65,17 @@ struct YamlContext { raw_string_ostream &getErrorStream() { return ErrorStream; } - StringRef getRegName(unsigned RegNo) { - // Special case: RegNo 0 is NoRegister. We have to deal with it explicitly. - if (RegNo == 0) + StringRef getRegName(MCRegister Reg) { + // Special case: Reg may be invalid. We have to deal with it explicitly. 
+ if (!Reg.isValid()) return kNoRegister; - const StringRef RegName = State->getRegInfo().getName(RegNo); + const StringRef RegName = State->getRegInfo().getName(Reg); if (RegName.empty()) - ErrorStream << "No register with enum value '" << RegNo << "'\n"; + ErrorStream << "No register with enum value '" << Reg.id() << "'\n"; return RegName; } - std::optional getRegNo(StringRef RegName) { + std::optional getRegNo(StringRef RegName) { std::optional RegisterNumber = State->getRegisterNumberFromName(RegName); if (!RegisterNumber.has_value()) @@ -261,7 +261,7 @@ template <> struct ScalarTraits { String.split(Pieces, "=0x", /* MaxSplit */ -1, /* KeepEmpty */ false); YamlContext &Context = getTypedContext(Ctx); - std::optional RegNo; + std::optional RegNo; if (Pieces.size() == 2 && (RegNo = Context.getRegNo(Pieces[0]))) { RV.Register = *RegNo; const unsigned BitsNeeded = APInt::getBitsNeeded(Pieces[1], kRadix); diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkResult.h b/llvm/tools/llvm-exegesis/lib/BenchmarkResult.h index 3c09a8380146e..7984c8805cadc 100644 --- a/llvm/tools/llvm-exegesis/lib/BenchmarkResult.h +++ b/llvm/tools/llvm-exegesis/lib/BenchmarkResult.h @@ -75,7 +75,7 @@ struct BenchmarkKey { // being used supports it. uintptr_t SnippetAddress = 0; // The register that should be used to hold the loop counter. - unsigned LoopRegister; + MCRegister LoopRegister; }; struct BenchmarkMeasure { diff --git a/llvm/tools/llvm-exegesis/lib/CodeTemplate.h b/llvm/tools/llvm-exegesis/lib/CodeTemplate.h index 7aca224302a1f..a65015b45b786 100644 --- a/llvm/tools/llvm-exegesis/lib/CodeTemplate.h +++ b/llvm/tools/llvm-exegesis/lib/CodeTemplate.h @@ -131,7 +131,7 @@ struct CodeTemplate { std::vector Instructions; // If the template uses the provided scratch memory, the register in which // the pointer to this memory is passed in to the function. 
- unsigned ScratchSpacePointerInReg = 0; + MCRegister ScratchSpacePointerInReg; #if defined(__GNUC__) && (defined(__clang__) || LLVM_GNUC_PREREQ(8, 0, 0)) // FIXME: GCC7 bug workaround. Drop #if after GCC7 no longer supported. diff --git a/llvm/tools/llvm-exegesis/lib/LlvmState.cpp b/llvm/tools/llvm-exegesis/lib/LlvmState.cpp index 4c44c59286ccf..00d0d2cfd1cd3 100644 --- a/llvm/tools/llvm-exegesis/lib/LlvmState.cpp +++ b/llvm/tools/llvm-exegesis/lib/LlvmState.cpp @@ -83,7 +83,7 @@ LLVMState::LLVMState(std::unique_ptr TM, OpcodeNameToOpcodeIdxMapping(createOpcodeNameToOpcodeIdxMapping()), RegNameToRegNoMapping(createRegNameToRegNoMapping()) { BitVector ReservedRegs = getFunctionReservedRegs(getTargetMachine()); - for (const unsigned Reg : TheExegesisTarget->getUnavailableRegisters()) + for (const MCPhysReg Reg : TheExegesisTarget->getUnavailableRegisters()) ReservedRegs.set(Reg); RATC.reset( new RegisterAliasingTrackerCache(getRegInfo(), std::move(ReservedRegs))); diff --git a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp index c9225e51213e5..c002f68b427f7 100644 --- a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp +++ b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp @@ -38,7 +38,7 @@ bool Operand::isExplicit() const { return Info; } bool Operand::isImplicit() const { return !Info; } -bool Operand::isImplicitReg() const { return ImplicitReg; } +bool Operand::isImplicitReg() const { return ImplicitReg.isValid(); } bool Operand::isDef() const { return IsDef; } @@ -64,7 +64,7 @@ unsigned Operand::getTiedToIndex() const { return *TiedToIndex; } unsigned Operand::getVariableIndex() const { return *VariableIndex; } -unsigned Operand::getImplicitReg() const { +MCRegister Operand::getImplicitReg() const { assert(ImplicitReg); return ImplicitReg; } diff --git a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.h b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.h index d7712e21c32c1..c1af10fa460a3 100644 --- 
a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.h +++ b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.h @@ -75,7 +75,7 @@ struct Operand { unsigned getIndex() const; unsigned getTiedToIndex() const; unsigned getVariableIndex() const; - unsigned getImplicitReg() const; + MCRegister getImplicitReg() const; const RegisterAliasingTracker &getRegisterAliasing() const; const MCOperandInfo &getExplicitOperandInfo() const; @@ -85,7 +85,7 @@ struct Operand { const RegisterAliasingTracker *Tracker = nullptr; // Set for Register Op. const MCOperandInfo *Info = nullptr; // Set for Explicit Op. std::optional TiedToIndex; // Set for Reg&Explicit Op. - MCPhysReg ImplicitReg = 0; // Non-0 for Implicit Op. + MCRegister ImplicitReg; // Non-0 for Implicit Op. std::optional VariableIndex; // Set for Explicit Op. }; diff --git a/llvm/tools/llvm-exegesis/lib/Mips/Target.cpp b/llvm/tools/llvm-exegesis/lib/Mips/Target.cpp index 731e037c240df..f9666d98e1e81 100644 --- a/llvm/tools/llvm-exegesis/lib/Mips/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/Mips/Target.cpp @@ -58,12 +58,12 @@ class ExegesisMipsTarget : public ExegesisTarget { : ExegesisTarget(MipsCpuPfmCounters, Mips_MC::isOpcodeAvailable) {} private: - unsigned getScratchMemoryRegister(const Triple &TT) const override; + MCRegister getScratchMemoryRegister(const Triple &TT) const override; unsigned getMaxMemoryAccessSize() const override { return 64; } - void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg, + void fillMemoryOperands(InstructionTemplate &IT, MCRegister Reg, unsigned Offset) const override; - std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, MCRegister Reg, const APInt &Value) const override; bool matchesArch(Triple::ArchType Arch) const override { return Arch == Triple::mips || Arch == Triple::mipsel || @@ -73,7 +73,7 @@ class ExegesisMipsTarget : public ExegesisTarget { } // end anonymous namespace // Generates instructions to load an immediate 
value into a register. -static std::vector loadImmediate(unsigned Reg, bool IsGPR32, +static std::vector loadImmediate(MCRegister Reg, bool IsGPR32, const APInt &Value) { unsigned ZeroReg; unsigned ORi, LUi, SLL; @@ -134,12 +134,13 @@ static std::vector loadImmediate(unsigned Reg, bool IsGPR32, llvm_unreachable("Not implemented for values wider than 32 bits"); } -unsigned ExegesisMipsTarget::getScratchMemoryRegister(const Triple &TT) const { +MCRegister +ExegesisMipsTarget::getScratchMemoryRegister(const Triple &TT) const { return TT.isArch64Bit() ? Mips::A0_64 : Mips::A0; } void ExegesisMipsTarget::fillMemoryOperands(InstructionTemplate &IT, - unsigned Reg, + MCRegister Reg, unsigned Offset) const { assert(!isInvalidMemoryInstr(IT.getInstr()) && "fillMemoryOperands requires a valid memory instruction"); @@ -149,7 +150,7 @@ void ExegesisMipsTarget::fillMemoryOperands(InstructionTemplate &IT, } std::vector ExegesisMipsTarget::setRegTo(const MCSubtargetInfo &STI, - unsigned Reg, + MCRegister Reg, const APInt &Value) const { if (Mips::GPR32RegClass.contains(Reg)) return loadImmediate(Reg, true, Value); diff --git a/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp b/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp index 114e274845e53..03506a2dd757c 100644 --- a/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp +++ b/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.cpp @@ -90,9 +90,9 @@ static bool hasVariablesWithTiedOperands(const Instruction &Instr) { ParallelSnippetGenerator::~ParallelSnippetGenerator() = default; void ParallelSnippetGenerator::instantiateMemoryOperands( - const unsigned ScratchSpacePointerInReg, + const MCRegister ScratchSpacePointerInReg, std::vector &Instructions) const { - if (ScratchSpacePointerInReg == 0) + if (!ScratchSpacePointerInReg) return; // no memory operands. 
const auto &ET = State.getExegesisTarget(); const unsigned MemStep = ET.getMaxMemoryAccessSize(); @@ -261,10 +261,10 @@ generateSnippetForInstrAvoidingDefUseOverlap( if (Op.isReg() && Op.isImplicit() && !Op.isMemory()) { assert(Op.isImplicitReg() && "Not an implicit register operand?"); if (Op.isUse()) - ImplicitUses.set(Op.getImplicitReg()); + ImplicitUses.set(Op.getImplicitReg().id()); else { assert(Op.isDef() && "Not a use and not a def?"); - ImplicitDefs.set(Op.getImplicitReg()); + ImplicitDefs.set(Op.getImplicitReg().id()); } } } @@ -300,7 +300,7 @@ ParallelSnippetGenerator::generateCodeTemplates( Instr.hasMemoryOperands() ? State.getExegesisTarget().getScratchMemoryRegister( State.getTargetMachine().getTargetTriple()) - : 0; + : MCRegister(); const AliasingConfigurations SelfAliasing(Instr, Instr, ForbiddenRegisters); if (SelfAliasing.empty()) { CT.Info = "instruction is parallel, repeating a random one."; diff --git a/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.h b/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.h index 94eb4e26eb588..8a6b8569c5d4c 100644 --- a/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.h +++ b/llvm/tools/llvm-exegesis/lib/ParallelSnippetGenerator.h @@ -55,7 +55,7 @@ class ParallelSnippetGenerator : public SnippetGenerator { // add eax, [rdi + 192] // mov eax, [rdi + 256] void instantiateMemoryOperands( - unsigned ScratchSpaceReg, + MCRegister ScratchSpaceReg, std::vector &SnippetTemplate) const; }; diff --git a/llvm/tools/llvm-exegesis/lib/PowerPC/Target.cpp b/llvm/tools/llvm-exegesis/lib/PowerPC/Target.cpp index 5c944c90384e3..0e576fa593fb4 100644 --- a/llvm/tools/llvm-exegesis/lib/PowerPC/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/PowerPC/Target.cpp @@ -33,13 +33,13 @@ class ExegesisPowerPCTarget : public ExegesisTarget { : ExegesisTarget(PPCCpuPfmCounters, PPC_MC::isOpcodeAvailable) {} private: - std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, 
MCRegister Reg, const APInt &Value) const override; bool matchesArch(Triple::ArchType Arch) const override { return Arch == Triple::ppc64le; } - unsigned getScratchMemoryRegister(const Triple &) const override; - void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg, + MCRegister getScratchMemoryRegister(const Triple &) const override; + void fillMemoryOperands(InstructionTemplate &IT, MCRegister Reg, unsigned Offset) const override; }; } // end anonymous namespace @@ -55,7 +55,7 @@ static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) { } // Generates instruction to load an immediate value into a register. -static MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, +static MCInst loadImmediate(MCRegister Reg, unsigned RegBitWidth, const APInt &Value) { if (Value.getBitWidth() > RegBitWidth) llvm_unreachable("Value must fit in the Register"); @@ -67,7 +67,7 @@ static MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, .addImm(Value.getZExtValue()); } -unsigned +MCRegister ExegesisPowerPCTarget::getScratchMemoryRegister(const Triple &TT) const { // R13 is reserved as Thread Pointer, we won't use threading in benchmark, so // use it as scratch memory register @@ -75,7 +75,7 @@ ExegesisPowerPCTarget::getScratchMemoryRegister(const Triple &TT) const { } void ExegesisPowerPCTarget::fillMemoryOperands(InstructionTemplate &IT, - unsigned Reg, + MCRegister Reg, unsigned Offset) const { int MemOpIdx = 0; if (IT.getInstr().hasTiedRegisters()) @@ -93,7 +93,7 @@ void ExegesisPowerPCTarget::fillMemoryOperands(InstructionTemplate &IT, } std::vector ExegesisPowerPCTarget::setRegTo(const MCSubtargetInfo &STI, - unsigned Reg, + MCRegister Reg, const APInt &Value) const { // X11 is optional use in function linkage, should be the least used one // Use it as scratch reg to load immediate. 
diff --git a/llvm/tools/llvm-exegesis/lib/RISCV/Target.cpp b/llvm/tools/llvm-exegesis/lib/RISCV/Target.cpp index 217b423d7b3f3..d70f609c5e080 100644 --- a/llvm/tools/llvm-exegesis/lib/RISCV/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/RISCV/Target.cpp @@ -29,8 +29,8 @@ namespace exegesis { namespace { // Stores constant value to a general-purpose (integer) register. -static std::vector loadIntReg(const MCSubtargetInfo &STI, unsigned Reg, - const APInt &Value) { +static std::vector loadIntReg(const MCSubtargetInfo &STI, + MCRegister Reg, const APInt &Value) { SmallVector MCInstSeq; MCRegister DestReg = Reg; @@ -40,11 +40,11 @@ static std::vector loadIntReg(const MCSubtargetInfo &STI, unsigned Reg, return MatIntInstrs; } -const unsigned ScratchIntReg = RISCV::X30; // t5 +const MCPhysReg ScratchIntReg = RISCV::X30; // t5 // Stores constant bits to a floating-point register. static std::vector loadFPRegBits(const MCSubtargetInfo &STI, - unsigned Reg, const APInt &Bits, + MCRegister Reg, const APInt &Bits, unsigned FmvOpcode) { std::vector Instrs = loadIntReg(STI, ScratchIntReg, Bits); Instrs.push_back(MCInstBuilder(FmvOpcode).addReg(Reg).addReg(ScratchIntReg)); @@ -57,7 +57,8 @@ static std::vector loadFPRegBits(const MCSubtargetInfo &STI, // and then do FCVT this is only reliable thing in 32-bit mode, otherwise we // need to use __floatsidf static std::vector loadFP64RegBits32(const MCSubtargetInfo &STI, - unsigned Reg, const APInt &Bits) { + MCRegister Reg, + const APInt &Bits) { double D = Bits.bitsToDouble(); double IPart; double FPart = std::modf(D, &IPart); @@ -82,7 +83,7 @@ static MCInst nop() { .addImm(0); } -static bool isVectorRegList(unsigned Reg) { +static bool isVectorRegList(MCRegister Reg) { return RISCV::VRM2RegClass.contains(Reg) || RISCV::VRM4RegClass.contains(Reg) || RISCV::VRM8RegClass.contains(Reg) || @@ -105,22 +106,22 @@ class ExegesisRISCVTarget : public ExegesisTarget { bool matchesArch(Triple::ArchType Arch) const override; - std::vector 
setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, MCRegister Reg, const APInt &Value) const override; - unsigned getDefaultLoopCounterRegister(const Triple &) const override; + MCRegister getDefaultLoopCounterRegister(const Triple &) const override; void decrementLoopCounterAndJump(MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB, const MCInstrInfo &MII, - unsigned LoopRegister) const override; + MCRegister LoopRegister) const override; - unsigned getScratchMemoryRegister(const Triple &TT) const override; + MCRegister getScratchMemoryRegister(const Triple &TT) const override; - void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg, + void fillMemoryOperands(InstructionTemplate &IT, MCRegister Reg, unsigned Offset) const override; - ArrayRef getUnavailableRegisters() const override; + ArrayRef getUnavailableRegisters() const override; bool allowAsBackToBack(const Instruction &Instr) const override { return !Instr.Description.isPseudo(); @@ -143,7 +144,7 @@ bool ExegesisRISCVTarget::matchesArch(Triple::ArchType Arch) const { } std::vector ExegesisRISCVTarget::setRegTo(const MCSubtargetInfo &STI, - unsigned Reg, + MCRegister Reg, const APInt &Value) const { if (RISCV::GPRRegClass.contains(Reg)) return loadIntReg(STI, Reg, Value); @@ -173,17 +174,17 @@ std::vector ExegesisRISCVTarget::setRegTo(const MCSubtargetInfo &STI, return {}; } -const unsigned DefaultLoopCounterReg = RISCV::X31; // t6 -const unsigned ScratchMemoryReg = RISCV::X10; // a0 +const MCPhysReg DefaultLoopCounterReg = RISCV::X31; // t6 +const MCPhysReg ScratchMemoryReg = RISCV::X10; // a0 -unsigned +MCRegister ExegesisRISCVTarget::getDefaultLoopCounterRegister(const Triple &) const { return DefaultLoopCounterReg; } void ExegesisRISCVTarget::decrementLoopCounterAndJump( MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB, - const MCInstrInfo &MII, unsigned LoopRegister) const { + const MCInstrInfo &MII, MCRegister LoopRegister) const { 
BuildMI(&MBB, DebugLoc(), MII.get(RISCV::ADDI)) .addDef(LoopRegister) .addUse(LoopRegister) @@ -194,12 +195,13 @@ void ExegesisRISCVTarget::decrementLoopCounterAndJump( .addMBB(&TargetMBB); } -unsigned ExegesisRISCVTarget::getScratchMemoryRegister(const Triple &TT) const { +MCRegister +ExegesisRISCVTarget::getScratchMemoryRegister(const Triple &TT) const { return ScratchMemoryReg; // a0 } void ExegesisRISCVTarget::fillMemoryOperands(InstructionTemplate &IT, - unsigned Reg, + MCRegister Reg, unsigned Offset) const { // TODO: for now we ignore Offset because have no way // to detect it in instruction. @@ -217,10 +219,10 @@ void ExegesisRISCVTarget::fillMemoryOperands(InstructionTemplate &IT, IT.getValueFor(MemOp) = MCOperand::createReg(Reg); } -const unsigned UnavailableRegisters[4] = {RISCV::X0, DefaultLoopCounterReg, - ScratchIntReg, ScratchMemoryReg}; +const MCPhysReg UnavailableRegisters[4] = {RISCV::X0, DefaultLoopCounterReg, + ScratchIntReg, ScratchMemoryReg}; -ArrayRef ExegesisRISCVTarget::getUnavailableRegisters() const { +ArrayRef ExegesisRISCVTarget::getUnavailableRegisters() const { return UnavailableRegisters; } diff --git a/llvm/tools/llvm-exegesis/lib/RegisterAliasing.cpp b/llvm/tools/llvm-exegesis/lib/RegisterAliasing.cpp index ee612fb0dd6af..96040bbf588e5 100644 --- a/llvm/tools/llvm-exegesis/lib/RegisterAliasing.cpp +++ b/llvm/tools/llvm-exegesis/lib/RegisterAliasing.cpp @@ -39,9 +39,9 @@ RegisterAliasingTracker::RegisterAliasingTracker( } RegisterAliasingTracker::RegisterAliasingTracker(const MCRegisterInfo &RegInfo, - const MCPhysReg PhysReg) + const MCRegister PhysReg) : RegisterAliasingTracker(RegInfo) { - SourceBits.set(PhysReg); + SourceBits.set(PhysReg.id()); FillOriginAndAliasedBits(RegInfo, SourceBits); } @@ -63,8 +63,8 @@ RegisterAliasingTrackerCache::RegisterAliasingTrackerCache( EmptyRegisters(RegInfo.getNumRegs()) {} const RegisterAliasingTracker & -RegisterAliasingTrackerCache::getRegister(MCPhysReg PhysReg) const { - auto &Found = 
Registers[PhysReg]; +RegisterAliasingTrackerCache::getRegister(MCRegister PhysReg) const { + auto &Found = Registers[PhysReg.id()]; if (!Found) Found.reset(new RegisterAliasingTracker(RegInfo, PhysReg)); return *Found; diff --git a/llvm/tools/llvm-exegesis/lib/RegisterAliasing.h b/llvm/tools/llvm-exegesis/lib/RegisterAliasing.h index b2980854ba2d1..00e699d4c69b9 100644 --- a/llvm/tools/llvm-exegesis/lib/RegisterAliasing.h +++ b/llvm/tools/llvm-exegesis/lib/RegisterAliasing.h @@ -44,9 +44,9 @@ struct RegisterAliasingTracker { const BitVector &ReservedReg, const MCRegisterClass &RegClass); - // Construct a tracker from an MCPhysReg. + // Construct a tracker from an MCRegister. RegisterAliasingTracker(const MCRegisterInfo &RegInfo, - const MCPhysReg Register); + const MCRegister Register); const BitVector &sourceBits() const { return SourceBits; } @@ -88,7 +88,7 @@ struct RegisterAliasingTrackerCache { const MCRegisterInfo ®Info() const { return RegInfo; } // Retrieves the RegisterAliasingTracker for this particular register. - const RegisterAliasingTracker &getRegister(MCPhysReg Reg) const; + const RegisterAliasingTracker &getRegister(MCRegister Reg) const; // Retrieves the RegisterAliasingTracker for this particular register class. const RegisterAliasingTracker &getRegisterClass(unsigned RegClassIndex) const; diff --git a/llvm/tools/llvm-exegesis/lib/RegisterValue.h b/llvm/tools/llvm-exegesis/lib/RegisterValue.h index 3429783a48a30..d0f111b9e40e3 100644 --- a/llvm/tools/llvm-exegesis/lib/RegisterValue.h +++ b/llvm/tools/llvm-exegesis/lib/RegisterValue.h @@ -18,14 +18,15 @@ #include #include +#include namespace llvm { namespace exegesis { // A simple object storing the value for a particular register. 
struct RegisterValue { - static RegisterValue zero(unsigned Reg) { return {Reg, APInt()}; } - unsigned Register; + static RegisterValue zero(MCRegister Reg) { return {Reg, APInt()}; } + MCRegister Register; APInt Value; }; diff --git a/llvm/tools/llvm-exegesis/lib/SnippetFile.cpp b/llvm/tools/llvm-exegesis/lib/SnippetFile.cpp index b37999ab017f5..01a6e94e76147 100644 --- a/llvm/tools/llvm-exegesis/lib/SnippetFile.cpp +++ b/llvm/tools/llvm-exegesis/lib/SnippetFile.cpp @@ -80,7 +80,7 @@ class BenchmarkCodeStreamer : public MCStreamer, public AsmCommentConsumer { if (CommentText.consume_front("LIVEIN")) { // LLVM-EXEGESIS-LIVEIN const auto RegName = CommentText.ltrim(); - if (unsigned Reg = findRegisterByName(RegName)) + if (MCRegister Reg = findRegisterByName(RegName)) Result->LiveIns.push_back(Reg); else { errs() << "unknown register '" << RegName @@ -179,7 +179,7 @@ class BenchmarkCodeStreamer : public MCStreamer, public AsmCommentConsumer { } if (CommentText.consume_front("LOOP-REGISTER")) { // LLVM-EXEGESIS-LOOP-REGISTER - unsigned LoopRegister; + MCRegister LoopRegister; if (!(LoopRegister = findRegisterByName(CommentText.trim()))) { errs() << "unknown register '" << CommentText @@ -207,13 +207,13 @@ class BenchmarkCodeStreamer : public MCStreamer, public AsmCommentConsumer { void emitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size, Align ByteAlignment, SMLoc Loc) override {} - unsigned findRegisterByName(const StringRef RegName) const { + MCRegister findRegisterByName(const StringRef RegName) const { std::optional RegisterNumber = State.getRegisterNumberFromName(RegName); if (!RegisterNumber.has_value()) { errs() << "'" << RegName << "' is not a valid register name for the target\n"; - return MCRegister::NoRegister; + return MCRegister(); } return *RegisterNumber; } diff --git a/llvm/tools/llvm-exegesis/lib/SnippetGenerator.cpp b/llvm/tools/llvm-exegesis/lib/SnippetGenerator.cpp index 48357d443f713..04064ae1d8441 100644 --- 
a/llvm/tools/llvm-exegesis/lib/SnippetGenerator.cpp +++ b/llvm/tools/llvm-exegesis/lib/SnippetGenerator.cpp @@ -47,9 +47,9 @@ Error SnippetGenerator::generateConfigurations( // using the scratch register and its aliasing registers. if (Variant.getInstr().hasMemoryOperands()) { const auto &ET = State.getExegesisTarget(); - unsigned ScratchSpacePointerInReg = + MCRegister ScratchSpacePointerInReg = ET.getScratchMemoryRegister(State.getTargetMachine().getTargetTriple()); - if (ScratchSpacePointerInReg == 0) + if (!ScratchSpacePointerInReg.isValid()) return make_error( "Infeasible : target does not support memory instructions"); const auto &ScratchRegAliases = @@ -58,7 +58,7 @@ Error SnippetGenerator::generateConfigurations( // FIXME: We could make a copy of the scratch register. for (const auto &Op : Variant.getInstr().Operands) { if (Op.isDef() && Op.isImplicitReg() && - ScratchRegAliases.test(Op.getImplicitReg())) + ScratchRegAliases.test(Op.getImplicitReg().id())) return make_error( "Infeasible : memory instruction uses scratch memory register"); } @@ -114,38 +114,38 @@ std::vector SnippetGenerator::computeRegisterInitialValues( // If target always expects a scratch memory register as live input, // mark it as defined. const ExegesisTarget &Target = State.getExegesisTarget(); - unsigned ScratchMemoryReg = Target.getScratchMemoryRegister( + MCRegister ScratchMemoryReg = Target.getScratchMemoryRegister( State.getTargetMachine().getTargetTriple()); - DefinedRegs.set(ScratchMemoryReg); + DefinedRegs.set(ScratchMemoryReg.id()); std::vector RIV; for (const InstructionTemplate &IT : Instructions) { // Returns the register that this Operand sets or uses, or 0 if this is not // a register. 
- const auto GetOpReg = [&IT](const Operand &Op) -> unsigned { + const auto GetOpReg = [&IT](const Operand &Op) -> MCRegister { if (Op.isMemory()) - return 0; + return MCRegister(); if (Op.isImplicitReg()) return Op.getImplicitReg(); if (Op.isExplicit() && IT.getValueFor(Op).isReg()) return IT.getValueFor(Op).getReg(); - return 0; + return MCRegister(); }; // Collect used registers that have never been def'ed. for (const Operand &Op : IT.getInstr().Operands) { if (Op.isUse()) { - const unsigned Reg = GetOpReg(Op); - if (Reg > 0 && !DefinedRegs.test(Reg)) { + const MCRegister Reg = GetOpReg(Op); + if (Reg && !DefinedRegs.test(Reg.id())) { RIV.push_back(RegisterValue::zero(Reg)); - DefinedRegs.set(Reg); + DefinedRegs.set(Reg.id()); } } } // Mark defs as having been def'ed. for (const Operand &Op : IT.getInstr().Operands) { if (Op.isDef()) { - const unsigned Reg = GetOpReg(Op); - if (Reg > 0) - DefinedRegs.set(Reg); + const MCRegister Reg = GetOpReg(Op); + if (Reg) + DefinedRegs.set(Reg.id()); } } } diff --git a/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.cpp b/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.cpp index 0bab30d158200..e4fe27f010c2f 100644 --- a/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.cpp +++ b/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.cpp @@ -48,7 +48,7 @@ class DuplicateSnippetRepetitor : public SnippetRepetitor { class LoopSnippetRepetitor : public SnippetRepetitor { public: - explicit LoopSnippetRepetitor(const LLVMState &State, unsigned LoopRegister) + explicit LoopSnippetRepetitor(const LLVMState &State, MCRegister LoopRegister) : SnippetRepetitor(State), LoopCounter(LoopRegister) {} // Loop over the snippet ceil(MinInstructions / Instructions.Size()) times. @@ -102,7 +102,7 @@ class LoopSnippetRepetitor : public SnippetRepetitor { // The live ins are: the loop counter, the registers that were setup by // the entry block, and entry block live ins. 
Loop.MBB->addLiveIn(LoopCounter); - for (unsigned Reg : Filler.getRegistersSetUp()) + for (MCRegister Reg : Filler.getRegistersSetUp()) Loop.MBB->addLiveIn(Reg); for (const auto &LiveIn : Entry.MBB->liveins()) Loop.MBB->addLiveIn(LiveIn); @@ -127,7 +127,7 @@ class LoopSnippetRepetitor : public SnippetRepetitor { } private: - const unsigned LoopCounter; + const MCRegister LoopCounter; }; } // namespace @@ -136,7 +136,7 @@ SnippetRepetitor::~SnippetRepetitor() {} std::unique_ptr SnippetRepetitor::Create(Benchmark::RepetitionModeE Mode, - const LLVMState &State, unsigned LoopRegister) { + const LLVMState &State, MCRegister LoopRegister) { switch (Mode) { case Benchmark::Duplicate: case Benchmark::MiddleHalfDuplicate: diff --git a/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.h b/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.h index c62e80f161f12..88dd0f3cb2dbd 100644 --- a/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.h +++ b/llvm/tools/llvm-exegesis/lib/SnippetRepetitor.h @@ -30,7 +30,7 @@ class SnippetRepetitor { public: static std::unique_ptr Create(Benchmark::RepetitionModeE Mode, const LLVMState &State, - unsigned LoopRegister); + MCRegister LoopRegister); virtual ~SnippetRepetitor(); diff --git a/llvm/tools/llvm-exegesis/lib/Target.cpp b/llvm/tools/llvm-exegesis/lib/Target.cpp index 29e58692f0e92..5ea5b4c2c002f 100644 --- a/llvm/tools/llvm-exegesis/lib/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/Target.cpp @@ -212,7 +212,7 @@ class ExegesisDefaultTarget : public ExegesisTarget { ExegesisDefaultTarget() : ExegesisTarget({}, opcodeIsNotAvailable) {} private: - std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, MCRegister Reg, const APInt &Value) const override { llvm_unreachable("Not yet implemented"); } diff --git a/llvm/tools/llvm-exegesis/lib/Target.h b/llvm/tools/llvm-exegesis/lib/Target.h index 92cc1cb248a1c..f3fbe3780616f 100644 --- a/llvm/tools/llvm-exegesis/lib/Target.h +++ 
b/llvm/tools/llvm-exegesis/lib/Target.h @@ -91,7 +91,8 @@ class ExegesisTarget { // Generates code to move a constant into a the given register. // Precondition: Value must fit into Reg. - virtual std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + virtual std::vector setRegTo(const MCSubtargetInfo &STI, + MCRegister Reg, const APInt &Value) const = 0; // Generates the code for the lower munmap call. The code generated by this @@ -177,14 +178,14 @@ class ExegesisTarget { // Gets the ABI dependent registers that are used to pass arguments in a // function call. - virtual std::vector getArgumentRegisters() const { + virtual std::vector getArgumentRegisters() const { report_fatal_error( "getArgumentRegisters is not implemented on the current architecture"); }; // Gets the registers that might potentially need to be saved by while // the setup in the test harness executes. - virtual std::vector getRegistersNeedSaving() const { + virtual std::vector getRegistersNeedSaving() const { report_fatal_error("getRegistersNeedSaving is not implemented on the " "current architecture"); }; @@ -192,25 +193,27 @@ class ExegesisTarget { // Returns the register pointing to scratch memory, or 0 if this target // does not support memory operands. The benchmark function uses the // default calling convention. - virtual unsigned getScratchMemoryRegister(const Triple &) const { return 0; } + virtual MCRegister getScratchMemoryRegister(const Triple &) const { + return MCRegister(); + } // Fills memory operands with references to the address at [Reg] + Offset. - virtual void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg, + virtual void fillMemoryOperands(InstructionTemplate &IT, MCRegister Reg, unsigned Offset) const { llvm_unreachable( "fillMemoryOperands() requires getScratchMemoryRegister() > 0"); } // Returns a counter usable as a loop counter. 
- virtual unsigned getDefaultLoopCounterRegister(const Triple &) const { - return 0; + virtual MCRegister getDefaultLoopCounterRegister(const Triple &) const { + return MCRegister(); } // Adds the code to decrement the loop counter and virtual void decrementLoopCounterAndJump(MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB, const MCInstrInfo &MII, - unsigned LoopRegister) const { + MCRegister LoopRegister) const { llvm_unreachable("decrementLoopCounterAndBranch() requires " "getLoopCounterRegister() > 0"); } @@ -218,7 +221,7 @@ class ExegesisTarget { // Returns a list of unavailable registers. // Targets can use this to prevent some registers to be automatically selected // for use in snippets. - virtual ArrayRef getUnavailableRegisters() const { return {}; } + virtual ArrayRef getUnavailableRegisters() const { return {}; } // Returns the maximum number of bytes a load/store instruction can access at // once. This is typically the size of the largest register available on the diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp index 3c3bff76fb681..1659cfb31f117 100644 --- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp +++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp @@ -468,7 +468,7 @@ static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) { } // Generates instruction to load an immediate value into a register. -static MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth, +static MCInst loadImmediate(MCRegister Reg, unsigned RegBitWidth, const APInt &Value) { if (Value.getBitWidth() > RegBitWidth) llvm_unreachable("Value must fit in the Register"); @@ -500,7 +500,7 @@ static MCInst fillStackSpace(unsigned MovOpcode, unsigned OffsetBytes, } // Loads scratch memory into register `Reg` using opcode `RMOpcode`. 
-static MCInst loadToReg(unsigned Reg, unsigned RMOpcode) { +static MCInst loadToReg(MCRegister Reg, unsigned RMOpcode) { return MCInstBuilder(RMOpcode) .addReg(Reg) // Address = ESP @@ -525,12 +525,12 @@ namespace { struct ConstantInliner { explicit ConstantInliner(const APInt &Constant) : Constant_(Constant) {} - std::vector loadAndFinalize(unsigned Reg, unsigned RegBitWidth, + std::vector loadAndFinalize(MCRegister Reg, unsigned RegBitWidth, unsigned Opcode); - std::vector loadX87STAndFinalize(unsigned Reg); + std::vector loadX87STAndFinalize(MCRegister Reg); - std::vector loadX87FPAndFinalize(unsigned Reg); + std::vector loadX87FPAndFinalize(MCRegister Reg); std::vector popFlagAndFinalize(); @@ -554,7 +554,7 @@ struct ConstantInliner { }; } // namespace -std::vector ConstantInliner::loadAndFinalize(unsigned Reg, +std::vector ConstantInliner::loadAndFinalize(MCRegister Reg, unsigned RegBitWidth, unsigned Opcode) { assert((RegBitWidth & 7) == 0 && "RegBitWidth must be a multiple of 8 bits"); @@ -564,7 +564,7 @@ std::vector ConstantInliner::loadAndFinalize(unsigned Reg, return std::move(Instructions); } -std::vector ConstantInliner::loadX87STAndFinalize(unsigned Reg) { +std::vector ConstantInliner::loadX87STAndFinalize(MCRegister Reg) { initStack(kF80Bytes); add(MCInstBuilder(X86::LD_F80m) // Address = ESP @@ -579,7 +579,7 @@ std::vector ConstantInliner::loadX87STAndFinalize(unsigned Reg) { return std::move(Instructions); } -std::vector ConstantInliner::loadX87FPAndFinalize(unsigned Reg) { +std::vector ConstantInliner::loadX87FPAndFinalize(MCRegister Reg) { initStack(kF80Bytes); add(MCInstBuilder(X86::LD_Fp80m) .addReg(Reg) @@ -729,9 +729,9 @@ class ExegesisX86Target : public ExegesisTarget { private: void addTargetSpecificPasses(PassManagerBase &PM) const override; - unsigned getScratchMemoryRegister(const Triple &TT) const override; + MCRegister getScratchMemoryRegister(const Triple &TT) const override; - unsigned getDefaultLoopCounterRegister(const Triple &) 
const override; + MCRegister getDefaultLoopCounterRegister(const Triple &) const override; unsigned getMaxMemoryAccessSize() const override { return 64; } @@ -739,15 +739,15 @@ class ExegesisX86Target : public ExegesisTarget { MCOperand &AssignedValue, const BitVector &ForbiddenRegs) const override; - void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg, + void fillMemoryOperands(InstructionTemplate &IT, MCRegister Reg, unsigned Offset) const override; void decrementLoopCounterAndJump(MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB, const MCInstrInfo &MII, - unsigned LoopRegister) const override; + MCRegister LoopRegister) const override; - std::vector setRegTo(const MCSubtargetInfo &STI, unsigned Reg, + std::vector setRegTo(const MCSubtargetInfo &STI, MCRegister Reg, const APInt &Value) const override; #ifdef __linux__ @@ -773,12 +773,12 @@ class ExegesisX86Target : public ExegesisTarget { std::vector configurePerfCounter(long Request, bool SaveRegisters) const override; - std::vector getArgumentRegisters() const override; + std::vector getArgumentRegisters() const override; - std::vector getRegistersNeedSaving() const override; + std::vector getRegistersNeedSaving() const override; #endif // __linux__ - ArrayRef getUnavailableRegisters() const override { + ArrayRef getUnavailableRegisters() const override { if (DisableUpperSSERegisters) return ArrayRef(kUnavailableRegistersSSE); @@ -844,25 +844,25 @@ class ExegesisX86Target : public ExegesisTarget { return std::make_unique(); } - static const unsigned kUnavailableRegisters[4]; - static const unsigned kUnavailableRegistersSSE[12]; + static const MCPhysReg kUnavailableRegisters[4]; + static const MCPhysReg kUnavailableRegistersSSE[12]; }; // We disable a few registers that cannot be encoded on instructions with a REX // prefix. 
-const unsigned ExegesisX86Target::kUnavailableRegisters[4] = {X86::AH, X86::BH, - X86::CH, X86::DH}; +const MCPhysReg ExegesisX86Target::kUnavailableRegisters[4] = { + X86::AH, X86::BH, X86::CH, X86::DH}; // Optionally, also disable the upper (x86_64) SSE registers to reduce frontend // decoder load. -const unsigned ExegesisX86Target::kUnavailableRegistersSSE[12] = { +const MCPhysReg ExegesisX86Target::kUnavailableRegistersSSE[12] = { X86::AH, X86::BH, X86::CH, X86::DH, X86::XMM8, X86::XMM9, X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15}; // We're using one of R8-R15 because these registers are never hardcoded in // instructions (e.g. MOVS writes to EDI, ESI, EDX), so they have less // conflicts. -constexpr const unsigned kDefaultLoopCounterReg = X86::R8; +constexpr const MCPhysReg kDefaultLoopCounterReg = X86::R8; } // namespace @@ -871,19 +871,19 @@ void ExegesisX86Target::addTargetSpecificPasses(PassManagerBase &PM) const { PM.add(createX86FloatingPointStackifierPass()); } -unsigned ExegesisX86Target::getScratchMemoryRegister(const Triple &TT) const { +MCRegister ExegesisX86Target::getScratchMemoryRegister(const Triple &TT) const { if (!TT.isArch64Bit()) { // FIXME: This would require popping from the stack, so we would have to // add some additional setup code. - return 0; + return MCRegister(); } return TT.isOSWindows() ? 
X86::RCX : X86::RDI; } -unsigned +MCRegister ExegesisX86Target::getDefaultLoopCounterRegister(const Triple &TT) const { if (!TT.isArch64Bit()) { - return 0; + return MCRegister(); } return kDefaultLoopCounterReg; } @@ -910,7 +910,7 @@ Error ExegesisX86Target::randomizeTargetMCOperand( } void ExegesisX86Target::fillMemoryOperands(InstructionTemplate &IT, - unsigned Reg, + MCRegister Reg, unsigned Offset) const { assert(!isInvalidMemoryInstr(IT.getInstr()) && "fillMemoryOperands requires a valid memory instruction"); @@ -927,7 +927,7 @@ void ExegesisX86Target::fillMemoryOperands(InstructionTemplate &IT, void ExegesisX86Target::decrementLoopCounterAndJump( MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB, - const MCInstrInfo &MII, unsigned LoopRegister) const { + const MCInstrInfo &MII, MCRegister LoopRegister) const { BuildMI(&MBB, DebugLoc(), MII.get(X86::ADD64ri8)) .addDef(LoopRegister) .addUse(LoopRegister) @@ -988,7 +988,7 @@ static void restoreSyscallRegisters(std::vector &GeneratedCode, } #endif // __linux__ -static std::vector loadImmediateSegmentRegister(unsigned Reg, +static std::vector loadImmediateSegmentRegister(MCRegister Reg, const APInt &Value) { #if defined(__x86_64__) && defined(__linux__) assert(Value.getBitWidth() <= 64 && "Value must fit in the register."); @@ -1021,7 +1021,7 @@ static std::vector loadImmediateSegmentRegister(unsigned Reg, } std::vector ExegesisX86Target::setRegTo(const MCSubtargetInfo &STI, - unsigned Reg, + MCRegister Reg, const APInt &Value) const { if (X86::SEGMENT_REGRegClass.contains(Reg)) return loadImmediateSegmentRegister(Reg, Value); @@ -1298,11 +1298,11 @@ ExegesisX86Target::configurePerfCounter(long Request, bool SaveRegisters) const return ConfigurePerfCounterCode; } -std::vector ExegesisX86Target::getArgumentRegisters() const { +std::vector ExegesisX86Target::getArgumentRegisters() const { return {X86::RDI, X86::RSI}; } -std::vector ExegesisX86Target::getRegistersNeedSaving() const { +std::vector 
ExegesisX86Target::getRegistersNeedSaving() const { return {X86::RAX, X86::RDI, X86::RSI, X86::RCX, X86::R11}; } diff --git a/llvm/tools/llvm-exegesis/llvm-exegesis.cpp b/llvm/tools/llvm-exegesis/llvm-exegesis.cpp index fa37e05956be8..b9938a92855a4 100644 --- a/llvm/tools/llvm-exegesis/llvm-exegesis.cpp +++ b/llvm/tools/llvm-exegesis/llvm-exegesis.cpp @@ -520,7 +520,7 @@ void benchmarkMain() { const auto Opcodes = getOpcodesOrDie(State); std::vector Configurations; - unsigned LoopRegister = + MCRegister LoopRegister = State.getExegesisTarget().getDefaultLoopCounterRegister( State.getTargetMachine().getTargetTriple()); diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp index 0145ee70a14c1..ee44aac45594d 100644 --- a/llvm/unittests/Analysis/ValueTrackingTest.cpp +++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp @@ -2869,7 +2869,7 @@ const std::pair IsBytewiseValueTests[] = { "ptr inttoptr (i96 -1 to ptr)", }, { - "i8 undef", + "i8 poison", "[0 x i8] zeroinitializer", }, { @@ -2877,7 +2877,7 @@ const std::pair IsBytewiseValueTests[] = { "[0 x i8] undef", }, { - "i8 undef", + "i8 poison", "[5 x [0 x i8]] zeroinitializer", }, { @@ -2959,7 +2959,7 @@ const std::pair IsBytewiseValueTests[] = { "[2 x i16] [i16 -21836, i16 -21846]]", }, { - "i8 undef", + "i8 poison", "{ } zeroinitializer", }, { @@ -2967,7 +2967,7 @@ const std::pair IsBytewiseValueTests[] = { "{ } undef", }, { - "i8 undef", + "i8 poison", "{ {}, {} } zeroinitializer", }, { diff --git a/llvm/unittests/CodeGen/AsmPrinterDwarfTest.cpp b/llvm/unittests/CodeGen/AsmPrinterDwarfTest.cpp index dc738d85547bb..6c08173f78622 100644 --- a/llvm/unittests/CodeGen/AsmPrinterDwarfTest.cpp +++ b/llvm/unittests/CodeGen/AsmPrinterDwarfTest.cpp @@ -384,10 +384,13 @@ class AsmPrinterHandlerTest : public AsmPrinterFixtureBase { public: TestHandler(AsmPrinterHandlerTest &Test) : Test(Test) {} virtual ~TestHandler() {} + virtual void setSymbolSize(const MCSymbol *Sym, 
uint64_t Size) override {} virtual void beginModule(Module *M) override { Test.BeginCount++; } virtual void endModule() override { Test.EndCount++; } virtual void beginFunction(const MachineFunction *MF) override {} virtual void endFunction(const MachineFunction *MF) override {} + virtual void beginInstruction(const MachineInstr *MI) override {} + virtual void endInstruction() override {} }; protected: @@ -424,54 +427,4 @@ TEST_F(AsmPrinterHandlerTest, Basic) { ASSERT_EQ(EndCount, 3); } -class AsmPrinterDebugHandlerTest : public AsmPrinterFixtureBase { - class TestDebugHandler : public DebugHandlerBase { - AsmPrinterDebugHandlerTest &Test; - - public: - TestDebugHandler(AsmPrinterDebugHandlerTest &Test, AsmPrinter *AP) - : DebugHandlerBase(AP), Test(Test) {} - virtual ~TestDebugHandler() {} - virtual void beginModule(Module *M) override { Test.BeginCount++; } - virtual void endModule() override { Test.EndCount++; } - virtual void beginFunctionImpl(const MachineFunction *MF) override {} - virtual void endFunctionImpl(const MachineFunction *MF) override {} - virtual void beginInstruction(const MachineInstr *MI) override {} - virtual void endInstruction() override {} - }; - -protected: - bool init(const std::string &TripleStr, unsigned DwarfVersion, - dwarf::DwarfFormat DwarfFormat) { - if (!AsmPrinterFixtureBase::init(TripleStr, DwarfVersion, DwarfFormat)) - return false; - - auto *AP = TestPrinter->getAP(); - AP->addDebugHandler(std::make_unique(*this, AP)); - TargetMachine *TM = &AP->TM; - legacy::PassManager PM; - PM.add(new MachineModuleInfoWrapperPass(TM)); - PM.add(TestPrinter->releaseAP()); // Takes ownership of destroying AP - LLVMContext Context; - std::unique_ptr M(new Module("TestModule", Context)); - M->setDataLayout(TM->createDataLayout()); - PM.run(*M); - // Now check that we can run it twice. 
- AP->addDebugHandler(std::make_unique(*this, AP)); - PM.run(*M); - return true; - } - - int BeginCount = 0; - int EndCount = 0; -}; - -TEST_F(AsmPrinterDebugHandlerTest, Basic) { - if (!init("x86_64-pc-linux", /*DwarfVersion=*/4, dwarf::DWARF32)) - GTEST_SKIP(); - - ASSERT_EQ(BeginCount, 3); - ASSERT_EQ(EndCount, 3); -} - } // end namespace diff --git a/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt b/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt index bbf6b1bf1e0ed..a1882ea73c35c 100644 --- a/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt +++ b/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt @@ -17,6 +17,7 @@ add_llvm_unittest(JITLinkTests MachOLinkGraphTests.cpp MemoryManagerErrorTests.cpp StubsTests.cpp + X86_64Tests.cpp ) target_link_libraries(JITLinkTests PRIVATE LLVMTestingSupport) diff --git a/llvm/unittests/ExecutionEngine/JITLink/X86_64Tests.cpp b/llvm/unittests/ExecutionEngine/JITLink/X86_64Tests.cpp new file mode 100644 index 0000000000000..8c79f0a8a9ee1 --- /dev/null +++ b/llvm/unittests/ExecutionEngine/JITLink/X86_64Tests.cpp @@ -0,0 +1,90 @@ +//===-------- X86_64Tests.cpp - Unit tests for the AArch64 backend --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include +#include + +#include "gtest/gtest.h" + +using namespace llvm; +using namespace llvm::jitlink; +using namespace llvm::jitlink::x86_64; + +TEST(X86_64, EmptyLinkGraph) { + LinkGraph G("foo", std::make_shared(), + Triple("x86_64-apple-darwin"), SubtargetFeatures(), + getEdgeKindName); + EXPECT_EQ(G.getName(), "foo"); + EXPECT_EQ(G.getTargetTriple().str(), "x86_64-apple-darwin"); + EXPECT_EQ(G.getPointerSize(), 8U); + EXPECT_EQ(G.getEndianness(), llvm::endianness::little); + EXPECT_TRUE(G.external_symbols().empty()); + EXPECT_TRUE(G.absolute_symbols().empty()); + EXPECT_TRUE(G.defined_symbols().empty()); + EXPECT_TRUE(G.blocks().empty()); +} + +TEST(X86_64, GOTAndStubs) { + LinkGraph G("foo", std::make_shared(), + Triple("x86_64-apple-darwin"), SubtargetFeatures(), + getEdgeKindName); + + auto &External = G.addExternalSymbol("external", 0, false); + + // First table accesses. We expect the graph to be empty: + EXPECT_EQ(G.findSectionByName(GOTTableManager::getSectionName()), nullptr); + EXPECT_EQ(G.findSectionByName(PLTTableManager::getSectionName()), nullptr); + + { + // Create first GOT and PLT table managers and request a PLT stub. This + // should force creation of both a PLT stub and GOT entry. + GOTTableManager GOT(G); + PLTTableManager PLT(G, GOT); + + PLT.getEntryForTarget(G, External); + } + + auto *GOTSec = G.findSectionByName(GOTTableManager::getSectionName()); + EXPECT_NE(GOTSec, nullptr); + if (GOTSec) { + // Expect one entry in the GOT now. + EXPECT_EQ(GOTSec->symbols_size(), 1U); + EXPECT_EQ(GOTSec->blocks_size(), 1U); + } + + auto *PLTSec = G.findSectionByName(PLTTableManager::getSectionName()); + EXPECT_NE(PLTSec, nullptr); + if (PLTSec) { + // Expect one entry in the PLT. 
+ EXPECT_EQ(PLTSec->symbols_size(), 1U); + EXPECT_EQ(PLTSec->blocks_size(), 1U); + } + + { + // Create second GOT and PLT table managers and request a PLT stub. This + // should force creation of both a PLT stub and GOT entry. + GOTTableManager GOT(G); + PLTTableManager PLT(G, GOT); + + PLT.getEntryForTarget(G, External); + } + + EXPECT_EQ(G.findSectionByName(GOTTableManager::getSectionName()), GOTSec); + if (GOTSec) { + // Expect the same one entry in the GOT. + EXPECT_EQ(GOTSec->symbols_size(), 1U); + EXPECT_EQ(GOTSec->blocks_size(), 1U); + } + + EXPECT_EQ(G.findSectionByName(PLTTableManager::getSectionName()), PLTSec); + if (PLTSec) { + // Expect the same one entry in the GOT. + EXPECT_EQ(PLTSec->symbols_size(), 1U); + EXPECT_EQ(PLTSec->blocks_size(), 1U); + } +} diff --git a/llvm/unittests/Support/CrashRecoveryTest.cpp b/llvm/unittests/Support/CrashRecoveryTest.cpp index a22e532ec4c83..ceafba5b36f11 100644 --- a/llvm/unittests/Support/CrashRecoveryTest.cpp +++ b/llvm/unittests/Support/CrashRecoveryTest.cpp @@ -26,10 +26,8 @@ #endif #ifdef LLVM_ON_UNIX -#ifdef HAVE_SIGNAL_H #include #endif -#endif using namespace llvm; using namespace llvm::sys; diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/CMakeLists.txt b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/CMakeLists.txt index df689767b7724..bbfbcc730a4cb 100644 --- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/CMakeLists.txt +++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/CMakeLists.txt @@ -9,6 +9,7 @@ set(LLVM_LINK_COMPONENTS add_llvm_unittest(SandboxVectorizerTests DependencyGraphTest.cpp + InstrMapsTest.cpp IntervalTest.cpp LegalityTest.cpp SchedulerTest.cpp diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/InstrMapsTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/InstrMapsTest.cpp new file mode 100644 index 0000000000000..1c8ef7e351aba --- /dev/null +++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/InstrMapsTest.cpp @@ 
-0,0 +1,78 @@ +//===- InstrMapsTest.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/AsmParser/Parser.h" +#include "llvm/SandboxIR/Function.h" +#include "llvm/SandboxIR/Instruction.h" +#include "llvm/Support/SourceMgr.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace llvm; + +struct InstrMapsTest : public testing::Test { + LLVMContext C; + std::unique_ptr M; + + void parseIR(LLVMContext &C, const char *IR) { + SMDiagnostic Err; + M = parseAssemblyString(IR, Err, C); + if (!M) + Err.print("InstrMapsTest", errs()); + } +}; + +TEST_F(InstrMapsTest, Basic) { + parseIR(C, R"IR( +define void @foo(i8 %v0, i8 %v1, i8 %v2, i8 %v3, <2 x i8> %vec) { + %add0 = add i8 %v0, %v0 + %add1 = add i8 %v1, %v1 + %add2 = add i8 %v2, %v2 + %add3 = add i8 %v3, %v3 + %vadd0 = add <2 x i8> %vec, %vec + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + + auto *Add0 = cast(&*It++); + auto *Add1 = cast(&*It++); + auto *Add2 = cast(&*It++); + auto *Add3 = cast(&*It++); + auto *VAdd0 = cast(&*It++); + [[maybe_unused]] auto *Ret = cast(&*It++); + + sandboxir::InstrMaps IMaps; + // Check with empty IMaps. + EXPECT_EQ(IMaps.getVectorForOrig(Add0), nullptr); + EXPECT_EQ(IMaps.getVectorForOrig(Add1), nullptr); + EXPECT_FALSE(IMaps.getOrigLane(Add0, Add0)); + // Check with 1 match. 
+ IMaps.registerVector({Add0, Add1}, VAdd0); + EXPECT_EQ(IMaps.getVectorForOrig(Add0), VAdd0); + EXPECT_EQ(IMaps.getVectorForOrig(Add1), VAdd0); + EXPECT_FALSE(IMaps.getOrigLane(VAdd0, VAdd0)); // Bad Orig value + EXPECT_FALSE(IMaps.getOrigLane(Add0, Add0)); // Bad Vector value + EXPECT_EQ(*IMaps.getOrigLane(VAdd0, Add0), 0U); + EXPECT_EQ(*IMaps.getOrigLane(VAdd0, Add1), 1U); + // Check when the same vector maps to different original values (which is + // common for vector constants). + IMaps.registerVector({Add2, Add3}, VAdd0); + EXPECT_EQ(*IMaps.getOrigLane(VAdd0, Add2), 0U); + EXPECT_EQ(*IMaps.getOrigLane(VAdd0, Add3), 1U); + // Check when we register for a second time. +#ifndef NDEBUG + EXPECT_DEATH(IMaps.registerVector({Add1, Add0}, VAdd0), ".*exists.*"); +#endif // NDEBUG +} diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/LegalityTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/LegalityTest.cpp index b5e2c302f5901..2e90462a633c1 100644 --- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/LegalityTest.cpp +++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/LegalityTest.cpp @@ -18,6 +18,7 @@ #include "llvm/SandboxIR/Function.h" #include "llvm/SandboxIR/Instruction.h" #include "llvm/Support/SourceMgr.h" +#include "llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h" #include "gtest/gtest.h" using namespace llvm; @@ -110,7 +111,8 @@ define void @foo(ptr %ptr, <2 x float> %vec2, <3 x float> %vec3, i8 %arg, float auto *CmpSLT = cast(&*It++); auto *CmpSGT = cast(&*It++); - sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx); + llvm::sandboxir::InstrMaps IMaps; + sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx, IMaps); const auto &Result = Legality.canVectorize({St0, St1}, /*SkipScheduling=*/true); EXPECT_TRUE(isa(Result)); @@ -228,7 +230,8 @@ define void @foo(ptr %ptr) { auto *St0 = cast(&*It++); auto *St1 = cast(&*It++); - sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx); + 
llvm::sandboxir::InstrMaps IMaps; + sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx, IMaps); { // Can vectorize St0,St1. const auto &Result = Legality.canVectorize({St0, St1}); @@ -263,7 +266,8 @@ define void @foo() { }; sandboxir::Context Ctx(C); - sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx); + llvm::sandboxir::InstrMaps IMaps; + sandboxir::LegalityAnalysis Legality(*AA, *SE, DL, Ctx, IMaps); EXPECT_TRUE( Matches(Legality.createLegalityResult(), "Widen")); EXPECT_TRUE(Matches(Legality.createLegalityResult( @@ -283,3 +287,68 @@ define void @foo() { "Pack Reason: DiffWrapFlags")); } #endif // NDEBUG + +TEST_F(LegalityTest, CollectDescr) { + parseIR(C, R"IR( +define void @foo(ptr %ptr) { + %gep0 = getelementptr float, ptr %ptr, i32 0 + %gep1 = getelementptr float, ptr %ptr, i32 1 + %ld0 = load float, ptr %gep0 + %ld1 = load float, ptr %gep1 + %vld = load <4 x float>, ptr %ptr + ret void +} +)IR"); + llvm::Function *LLVMF = &*M->getFunction("foo"); + getAnalyses(*LLVMF); + sandboxir::Context Ctx(C); + auto *F = Ctx.createFunction(LLVMF); + auto *BB = &*F->begin(); + auto It = BB->begin(); + [[maybe_unused]] auto *Gep0 = cast(&*It++); + [[maybe_unused]] auto *Gep1 = cast(&*It++); + auto *Ld0 = cast(&*It++); + [[maybe_unused]] auto *Ld1 = cast(&*It++); + auto *VLd = cast(&*It++); + + sandboxir::CollectDescr::DescrVecT Descrs; + using EEDescr = sandboxir::CollectDescr::ExtractElementDescr; + + { + // Check single input, no shuffle. + Descrs.push_back(EEDescr(VLd, 0)); + Descrs.push_back(EEDescr(VLd, 1)); + sandboxir::CollectDescr CD(std::move(Descrs)); + EXPECT_TRUE(CD.getSingleInput()); + EXPECT_EQ(CD.getSingleInput()->first, VLd); + EXPECT_EQ(CD.getSingleInput()->second, false); + EXPECT_TRUE(CD.hasVectorInputs()); + } + { + // Check single input, shuffle. 
+ Descrs.push_back(EEDescr(VLd, 1)); + Descrs.push_back(EEDescr(VLd, 0)); + sandboxir::CollectDescr CD(std::move(Descrs)); + EXPECT_TRUE(CD.getSingleInput()); + EXPECT_EQ(CD.getSingleInput()->first, VLd); + EXPECT_EQ(CD.getSingleInput()->second, true); + EXPECT_TRUE(CD.hasVectorInputs()); + } + { + // Check multiple inputs. + Descrs.push_back(EEDescr(Ld0)); + Descrs.push_back(EEDescr(VLd, 0)); + Descrs.push_back(EEDescr(VLd, 1)); + sandboxir::CollectDescr CD(std::move(Descrs)); + EXPECT_FALSE(CD.getSingleInput()); + EXPECT_TRUE(CD.hasVectorInputs()); + } + { + // Check multiple inputs only scalars. + Descrs.push_back(EEDescr(Ld0)); + Descrs.push_back(EEDescr(Ld1)); + sandboxir::CollectDescr CD(std::move(Descrs)); + EXPECT_FALSE(CD.getSingleInput()); + EXPECT_FALSE(CD.hasVectorInputs()); + } +} diff --git a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp index f098ba0bce497..f818b49fdbe7f 100644 --- a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp +++ b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp @@ -20,14 +20,17 @@ using VPVerifierTest = VPlanTestBase; namespace { TEST_F(VPVerifierTest, VPInstructionUseBeforeDefSameBB) { VPlan &Plan = getPlan(); - VPInstruction *DefI = new VPInstruction(Instruction::Add, {}); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0)); + VPInstruction *DefI = new VPInstruction(Instruction::Add, {Zero}); VPInstruction *UseI = new VPInstruction(Instruction::Sub, {DefI}); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); VPBasicBlock *VPBB1 = Plan.getEntry(); VPBB1->appendRecipe(UseI); VPBB1->appendRecipe(DefI); VPBasicBlock *VPBB2 = Plan.createVPBasicBlock(""); + VPBB2->appendRecipe(CanIV); VPRegionBlock *R1 = Plan.createVPRegionBlock(VPBB2, VPBB2, "R1"); VPBlockUtils::connectBlocks(VPBB1, R1); VPBlockUtils::connectBlocks(R1, Plan.getScalarHeader()); @@ -44,9 +47,10 @@ TEST_F(VPVerifierTest, 
VPInstructionUseBeforeDefSameBB) { TEST_F(VPVerifierTest, VPInstructionUseBeforeDefDifferentBB) { VPlan &Plan = getPlan(); - VPInstruction *DefI = new VPInstruction(Instruction::Add, {}); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0)); + VPInstruction *DefI = new VPInstruction(Instruction::Add, {Zero}); VPInstruction *UseI = new VPInstruction(Instruction::Sub, {DefI}); - auto *CanIV = new VPCanonicalIVPHIRecipe(UseI, {}); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); VPInstruction *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV}); @@ -73,23 +77,22 @@ TEST_F(VPVerifierTest, VPInstructionUseBeforeDefDifferentBB) { } TEST_F(VPVerifierTest, VPBlendUseBeforeDefDifferentBB) { + VPlan &Plan = getPlan(); IntegerType *Int32 = IntegerType::get(C, 32); auto *Phi = PHINode::Create(Int32, 1); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 0)); - VPInstruction *I1 = new VPInstruction(Instruction::Add, {}); - VPInstruction *DefI = new VPInstruction(Instruction::Add, {}); - auto *CanIV = new VPCanonicalIVPHIRecipe(I1, {}); + VPInstruction *DefI = new VPInstruction(Instruction::Add, {Zero}); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); VPInstruction *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV}); auto *Blend = new VPBlendRecipe(Phi, {DefI}); - VPlan &Plan = getPlan(); VPBasicBlock *VPBB1 = Plan.getEntry(); VPBasicBlock *VPBB2 = Plan.createVPBasicBlock(""); VPBasicBlock *VPBB3 = Plan.createVPBasicBlock(""); VPBasicBlock *VPBB4 = Plan.createVPBasicBlock(""); - VPBB1->appendRecipe(I1); VPBB2->appendRecipe(CanIV); VPBB3->appendRecipe(Blend); VPBB4->appendRecipe(DefI); @@ -116,14 +119,15 @@ TEST_F(VPVerifierTest, VPBlendUseBeforeDefDifferentBB) { } TEST_F(VPVerifierTest, DuplicateSuccessorsOutsideRegion) { - VPInstruction *I1 = new VPInstruction(Instruction::Add, {}); - auto *CanIV = new VPCanonicalIVPHIRecipe(I1, {}); + VPlan &Plan = getPlan(); + VPValue *Zero = 
Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0)); + VPInstruction *I1 = new VPInstruction(Instruction::Add, {Zero}); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); VPInstruction *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV}); VPInstruction *BranchOnCond2 = new VPInstruction(VPInstruction::BranchOnCond, {I1}); - VPlan &Plan = getPlan(); VPBasicBlock *VPBB1 = Plan.getEntry(); VPBasicBlock *VPBB2 = Plan.createVPBasicBlock(""); @@ -149,14 +153,15 @@ TEST_F(VPVerifierTest, DuplicateSuccessorsOutsideRegion) { } TEST_F(VPVerifierTest, DuplicateSuccessorsInsideRegion) { - VPInstruction *I1 = new VPInstruction(Instruction::Add, {}); - auto *CanIV = new VPCanonicalIVPHIRecipe(I1, {}); + VPlan &Plan = getPlan(); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0)); + VPInstruction *I1 = new VPInstruction(Instruction::Add, {Zero}); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); VPInstruction *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV}); VPInstruction *BranchOnCond2 = new VPInstruction(VPInstruction::BranchOnCond, {I1}); - VPlan &Plan = getPlan(); VPBasicBlock *VPBB1 = Plan.getEntry(); VPBasicBlock *VPBB2 = Plan.createVPBasicBlock(""); VPBasicBlock *VPBB3 = Plan.createVPBasicBlock(""); @@ -186,10 +191,15 @@ TEST_F(VPVerifierTest, DuplicateSuccessorsInsideRegion) { TEST_F(VPVerifierTest, BlockOutsideRegionWithParent) { VPlan &Plan = getPlan(); + VPBasicBlock *VPBB1 = Plan.getEntry(); VPBasicBlock *VPBB2 = Plan.createVPBasicBlock(""); - VPInstruction *DefI = new VPInstruction(Instruction::Add, {}); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0)); + auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {}); + VPBB2->appendRecipe(CanIV); + + VPInstruction *DefI = new VPInstruction(Instruction::Add, {Zero}); VPInstruction *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {DefI}); diff --git 
a/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp index bf110c8ad76bb..60c726212062d 100644 --- a/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp @@ -183,7 +183,7 @@ TEST_F(X86SerialSnippetGeneratorTest, ASSERT_THAT(IT.getVariableValues(), SizeIs(3)); for (const auto &Var : IT.getVariableValues()) { if (Var.isReg()) { - EXPECT_FALSE(ForbiddenRegisters[Var.getReg()]); + EXPECT_FALSE(ForbiddenRegisters[Var.getReg().id()]); } } } @@ -288,8 +288,8 @@ TEST_F(X86ParallelSnippetGeneratorTest, ReadAfterWrite_CMOV32rr) { EXPECT_THAT(CT.Info, HasSubstr("avoiding Read-After-Write issue")); EXPECT_THAT(CT.Execution, ExecutionMode::UNKNOWN); ASSERT_GT(CT.Instructions.size(), 1U); - std::unordered_set AllDefRegisters; - std::unordered_set AllUseRegisters; + std::set AllDefRegisters; + std::set AllUseRegisters; for (const auto &IT : CT.Instructions) { ASSERT_THAT(IT.getVariableValues(), SizeIs(3)); AllDefRegisters.insert(IT.getVariableValues()[0].getReg()); @@ -328,8 +328,8 @@ TEST_F(X86ParallelSnippetGeneratorTest, ReadAfterWrite_VFMADD132PDr) { EXPECT_THAT(CT.Info, HasSubstr("avoiding Read-After-Write issue")); EXPECT_THAT(CT.Execution, ExecutionMode::UNKNOWN); ASSERT_GT(CT.Instructions.size(), 1U); - std::unordered_set AllDefRegisters; - std::unordered_set AllUseRegisters; + std::set AllDefRegisters; + std::set AllUseRegisters; for (const auto &IT : CT.Instructions) { ASSERT_THAT(IT.getVariableValues(), SizeIs(3)); AllDefRegisters.insert(IT.getVariableValues()[0].getReg()); @@ -412,9 +412,9 @@ TEST_F(X86ParallelSnippetGeneratorTest, MemoryUse) { EXPECT_THAT(IT.getOpcode(), Opcode); ASSERT_THAT(IT.getVariableValues(), SizeIs(6)); EXPECT_EQ(IT.getVariableValues()[2].getImm(), 1); - EXPECT_EQ(IT.getVariableValues()[3].getReg(), 0u); + EXPECT_FALSE(IT.getVariableValues()[3].getReg().isValid()); 
EXPECT_EQ(IT.getVariableValues()[4].getImm(), 0); - EXPECT_EQ(IT.getVariableValues()[5].getReg(), 0u); + EXPECT_FALSE(IT.getVariableValues()[5].getReg().isValid()); } } diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp index f2f6066538e1a..139b1f9d897fa 100644 --- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp +++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp @@ -1299,7 +1299,7 @@ void AsmMatcherInfo::buildRegisterClasses( if (!ContainingSet.empty()) { RegisterSets.insert(ContainingSet); - RegisterMap.insert(std::pair(CGR.TheDef, ContainingSet)); + RegisterMap.try_emplace(CGR.TheDef, ContainingSet); } } @@ -1320,7 +1320,7 @@ void AsmMatcherInfo::buildRegisterClasses( CI->DiagnosticType = ""; CI->IsOptional = false; CI->DefaultMethod = ""; // unused - RegisterSetClasses.insert(std::pair(RS, CI)); + RegisterSetClasses.try_emplace(RS, CI); ++Index; } @@ -1362,7 +1362,7 @@ void AsmMatcherInfo::buildRegisterClasses( if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty()) CI->DiagnosticType = RC.getName(); - RegisterClassClasses.insert(std::pair(Def, CI)); + RegisterClassClasses.try_emplace(Def, CI); } // Populate the map for individual registers. 
@@ -2823,7 +2823,7 @@ emitMnemonicAliasVariant(raw_ostream &OS, const AsmMatcherInfo &Info, MatchCode += "return;"; - Cases.push_back(std::pair(AliasEntry.first, MatchCode)); + Cases.emplace_back(AliasEntry.first, MatchCode); } StringMatcher("Mnemonic", Cases, OS).Emit(Indent); } diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp index 9880214a37368..e7606b9df4626 100644 --- a/llvm/utils/TableGen/AsmWriterEmitter.cpp +++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp @@ -144,14 +144,14 @@ static void EmitInstructions(std::vector &Insts, raw_ostream &O, O << " switch (MI->getOpcode()) {\n"; O << " default: llvm_unreachable(\"Unexpected opcode.\");\n"; std::vector> OpsToPrint; - OpsToPrint.push_back(std::pair(FirstInst.CGI->Namespace.str() + "::" + - FirstInst.CGI->TheDef->getName().str(), - FirstInst.Operands[i])); + OpsToPrint.emplace_back(FirstInst.CGI->Namespace.str() + + "::" + FirstInst.CGI->TheDef->getName().str(), + FirstInst.Operands[i]); for (const AsmWriterInst &AWI : SimilarInsts) { - OpsToPrint.push_back(std::pair( - AWI.CGI->Namespace.str() + "::" + AWI.CGI->TheDef->getName().str(), - AWI.Operands[i])); + OpsToPrint.emplace_back(AWI.CGI->Namespace.str() + + "::" + AWI.CGI->TheDef->getName().str(), + AWI.Operands[i]); } std::reverse(OpsToPrint.begin(), OpsToPrint.end()); while (!OpsToPrint.empty()) @@ -722,7 +722,7 @@ class IAPrinter { void addOperand(StringRef Op, int OpIdx, int PrintMethodIdx = -1) { assert(OpIdx >= 0 && OpIdx < 0xFE && "Idx out of range"); assert(PrintMethodIdx >= -1 && PrintMethodIdx < 0xFF && "Idx out of range"); - OpMap[Op] = std::pair(OpIdx, PrintMethodIdx); + OpMap[Op] = {OpIdx, PrintMethodIdx}; } unsigned getNumMIOps() { return NumMIOps; } @@ -753,7 +753,7 @@ class IAPrinter { Next = I; } - return std::pair(StringRef(Start, I - Start), Next); + return {StringRef(Start, I - Start), Next}; } std::string formatAliasString(uint32_t &UnescapedSize) { @@ -854,8 +854,8 @@ void 
AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) { continue; // Aliases with priority 0 are never emitted. const DagInit *DI = R->getValueAsDag("ResultInst"); - AliasMap[getQualifiedName(DI->getOperatorAsDef(R->getLoc()))].insert( - std::pair(CodeGenInstAlias(R, Target), Priority)); + AliasMap[getQualifiedName(DI->getOperatorAsDef(R->getLoc()))].emplace( + CodeGenInstAlias(R, Target), Priority); } // A map of which conditions need to be met for each instruction operand diff --git a/llvm/utils/TableGen/Basic/DirectiveEmitter.cpp b/llvm/utils/TableGen/Basic/DirectiveEmitter.cpp index fd815f4a31dad..ab68e028f1e96 100644 --- a/llvm/utils/TableGen/Basic/DirectiveEmitter.cpp +++ b/llvm/utils/TableGen/Basic/DirectiveEmitter.cpp @@ -495,7 +495,7 @@ static void emitLeafTable(const DirectiveLanguage &DirLang, raw_ostream &OS, DenseMap DirId; // Record * -> llvm::omp::Directive for (auto [Idx, Rec] : enumerate(Directives)) - DirId.insert(std::make_pair(Rec, Idx)); + DirId.try_emplace(Rec, Idx); using LeafList = std::vector; int MaxLeafCount = getMaxLeafCount(DirLang); @@ -675,7 +675,7 @@ static void generateGetDirectiveAssociation(const DirectiveLanguage &DirLang, D.getAssociation()->getName() + "'"); } if (AS != Association::FromLeaves) { - AsMap.insert(std::make_pair(R, AS)); + AsMap.try_emplace(R, AS); return AS; } // Compute the association from leaf constructs. 
@@ -701,7 +701,7 @@ static void generateGetDirectiveAssociation(const DirectiveLanguage &DirLang, assert(Result != Association::Invalid); assert(Result != Association::FromLeaves); - AsMap.insert(std::make_pair(R, Result)); + AsMap.try_emplace(R, Result); return Result; }; diff --git a/llvm/utils/TableGen/Basic/RISCVTargetDefEmitter.cpp b/llvm/utils/TableGen/Basic/RISCVTargetDefEmitter.cpp index 723f1d72b5159..8e4acf96c3da9 100644 --- a/llvm/utils/TableGen/Basic/RISCVTargetDefEmitter.cpp +++ b/llvm/utils/TableGen/Basic/RISCVTargetDefEmitter.cpp @@ -241,8 +241,7 @@ static void emitRISCVExtensionBitmask(const RecordKeeper &RK, raw_ostream &OS) { ExtName.consume_front("experimental-"); #ifndef NDEBUG - assert(Seen.insert(std::make_pair(GroupIDVal, BitPosVal)).second && - "duplicated bitmask"); + assert(Seen.insert({GroupIDVal, BitPosVal}).second && "duplicated bitmask"); #endif OS.indent(4) << "{" diff --git a/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h index c918365b2289b..35a9abdc37c82 100644 --- a/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h +++ b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h @@ -93,7 +93,7 @@ class SequenceToOffsetTable { if (I != Seqs.end() && isSuffix(Seq, I->first)) return; - I = Seqs.insert(I, std::pair(Seq, 0u)); + I = Seqs.insert(I, {Seq, 0u}); // The entry before I may be a suffix of Seq that can now be erased. 
if (I != Seqs.begin() && isSuffix((--I)->first, Seq)) diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp index 407ee81b7e0b6..475699ae3e78e 100644 --- a/llvm/utils/TableGen/CodeEmitterGen.cpp +++ b/llvm/utils/TableGen/CodeEmitterGen.cpp @@ -338,11 +338,11 @@ CodeEmitterGen::getInstructionCases(const Record *R, Append(" }\n"); } Append(" }\n"); - return std::pair(std::move(Case), std::move(BitOffsetCase)); + return {std::move(Case), std::move(BitOffsetCase)}; } } addInstructionCasesForEncoding(R, R, Target, Case, BitOffsetCase); - return std::pair(std::move(Case), std::move(BitOffsetCase)); + return {std::move(Case), std::move(BitOffsetCase)}; } void CodeEmitterGen::addInstructionCasesForEncoding( diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp index 1a61d32b4869a..013135a9def1f 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp @@ -3006,7 +3006,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(const Init *TheInit, // Check that the ComplexPattern uses are consistent: "(MY_PAT $a, $b)" // and "(MY_PAT $b, $a)" should not be allowed in the same pattern; // neither should "(MY_PAT_1 $a, $b)" and "(MY_PAT_2 $a, $b)". - auto OperandId = std::make_pair(Operator, i); + auto OperandId = std::pair(Operator, i); auto [PrevOp, Inserted] = ComplexPatternOperands.try_emplace(Child->getName(), OperandId); if (!Inserted && PrevOp->getValue() != OperandId) { @@ -3218,7 +3218,7 @@ void CodeGenDAGPatterns::ParseNodeInfo() { const CodeGenHwModes &CGH = getTargetInfo().getHwModes(); for (const Record *R : reverse(Records.getAllDerivedDefinitions("SDNode"))) - SDNodes.insert(std::pair(R, SDNodeInfo(R, CGH))); + SDNodes.try_emplace(R, SDNodeInfo(R, CGH)); // Get the builtin intrinsic nodes. 
intrinsic_void_sdnode = getSDNodeNamed("intrinsic_void"); @@ -3348,8 +3348,7 @@ void CodeGenDAGPatterns::ParseDefaultOperands() { // SomeSDnode so that we can parse this. std::vector> Ops; for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op) - Ops.push_back( - std::pair(DefaultInfo->getArg(op), DefaultInfo->getArgName(op))); + Ops.emplace_back(DefaultInfo->getArg(op), DefaultInfo->getArgName(op)); const DagInit *DI = DagInit::get(SomeSDNode, nullptr, Ops); // Create a TreePattern to parse this. diff --git a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp index f5b5d3feed7c3..c744691ae9e08 100644 --- a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp +++ b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp @@ -51,7 +51,7 @@ HwModeSelect::HwModeSelect(const Record *R, CodeGenHwModes &CGH) { } for (auto [Mode, Object] : zip_equal(Modes, Objects)) { unsigned ModeId = CGH.getHwModeId(Mode); - Items.push_back(std::pair(ModeId, Object)); + Items.emplace_back(ModeId, Object); } } @@ -70,13 +70,13 @@ CodeGenHwModes::CodeGenHwModes(const RecordKeeper &RK) : Records(RK) { if (R->getName() == DefaultModeName) continue; Modes.emplace_back(R); - ModeIds.insert(std::pair(R, Modes.size())); + ModeIds.try_emplace(R, Modes.size()); } assert(Modes.size() <= 32 && "number of HwModes exceeds maximum of 32"); for (const Record *R : Records.getAllDerivedDefinitions("HwModeSelect")) { - auto P = ModeSelects.emplace(std::pair(R, HwModeSelect(R, *this))); + auto P = ModeSelects.emplace(R, HwModeSelect(R, *this)); assert(P.second); (void)P; } diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp index 30694ac2bb213..5537a2fa8b980 100644 --- a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp +++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp @@ -229,7 +229,7 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T) InstOpRec->getValueAsDef("ParserMatchClass") 
->getValueAsString("Name") != "Imm")) { ResultOperands.push_back(std::move(ResOp)); - ResultInstOperandIndex.push_back(std::pair(i, -1)); + ResultInstOperandIndex.emplace_back(i, -1); ++AliasOpNo; // Otherwise, we need to match each of the suboperands individually. @@ -244,7 +244,7 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T) Result->getArgName(AliasOpNo)->getAsUnquotedString() + "." + MIOI->getArgName(SubOp)->getAsUnquotedString(), SubRec); - ResultInstOperandIndex.push_back(std::pair(i, SubOp)); + ResultInstOperandIndex.emplace_back(i, SubOp); } ++AliasOpNo; } @@ -262,7 +262,7 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T) if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false, R->getLoc(), T, ResOp)) { ResultOperands.push_back(ResOp); - ResultInstOperandIndex.push_back(std::pair(i, SubOp)); + ResultInstOperandIndex.emplace_back(i, SubOp); ++AliasOpNo; } else { PrintFatalError( diff --git a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp index 344c4c15e2ebd..ecef9caa9c3d8 100644 --- a/llvm/utils/TableGen/Common/CodeGenInstruction.cpp +++ b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp @@ -175,7 +175,7 @@ CGIOperandList::CGIOperandList(const Record *R) : TheDef(R) { } OpInfo.SubOpNames[j] = SubArgName; - SubOpAliases[SubArgName] = std::pair(i, j); + SubOpAliases[SubArgName] = {i, j}; } } else if (!EncoderMethod.empty()) { // If we have no explicit sub-op dag, but have an top-level encoder @@ -276,7 +276,7 @@ CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) { Op + "'"); // Otherwise, return the operand. - return std::pair(OpIdx, 0U); + return {OpIdx, 0U}; } // Find the suboperand number involved. @@ -289,13 +289,13 @@ CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) { // Find the operand with the right name. 
for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i) if (MIOpInfo->getArgNameStr(i) == SubOpName) - return std::pair(OpIdx, i); + return {OpIdx, i}; // Otherwise, didn't find it! PrintFatalError(TheDef->getLoc(), TheDef->getName() + ": unknown suboperand name in '" + Op + "'"); - return std::pair(0U, 0U); + return {0U, 0U}; } static void ParseConstraint(StringRef CStr, CGIOperandList &Ops, diff --git a/llvm/utils/TableGen/Common/CodeGenInstruction.h b/llvm/utils/TableGen/Common/CodeGenInstruction.h index a799d023b1af4..44c0ab70dc615 100644 --- a/llvm/utils/TableGen/Common/CodeGenInstruction.h +++ b/llvm/utils/TableGen/Common/CodeGenInstruction.h @@ -204,7 +204,7 @@ class CGIOperandList { for (unsigned i = 0;; ++i) { assert(i < OperandList.size() && "Invalid flat operand #"); if (OperandList[i].MIOperandNo + OperandList[i].MINumOperands > Op) - return std::pair(i, Op - OperandList[i].MIOperandNo); + return {i, Op - OperandList[i].MIOperandNo}; } } diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp index 2dbee94d7e540..973c86c6e5a55 100644 --- a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp +++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp @@ -287,13 +287,13 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i]; if (!SR->Artificial) Idx->Artificial = false; - if (!SubRegs.insert(std::pair(Idx, SR)).second) + if (!SubRegs.try_emplace(Idx, SR).second) PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() + " appears twice in Register " + getName()); // Map explicit sub-registers first, so the names take precedence. // The inherited sub-registers are mapped below. - SubReg2Idx.insert(std::pair(SR, Idx)); + SubReg2Idx.try_emplace(SR, Idx); } // Keep track of inherited subregs and how they can be reached. 
@@ -333,7 +333,7 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { if (SubRegs.count(Comp.second) || !Orphans.erase(SRI->second)) continue; // We found a new name for the orphaned sub-register. - SubRegs.insert(std::pair(Comp.second, SRI->second)); + SubRegs.try_emplace(Comp.second, SRI->second); Indices.push_back(Comp.second); } } @@ -380,7 +380,7 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { // Ensure that every sub-register has a unique name. DenseMap::iterator Ins = - SubReg2Idx.insert(std::pair(SubReg.second, SubReg.first)).first; + SubReg2Idx.try_emplace(SubReg.second, SubReg.first).first; if (Ins->second == SubReg.first) continue; // Trouble: Two different names for SubReg.second. @@ -532,8 +532,8 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) { // a sub-register with a concatenated sub-register index. CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts, RegBank.getHwModes()); - std::pair NewSubReg = - std::pair(Concat, Cand); + std::pair NewSubReg = {Concat, + Cand}; if (!SubRegs.insert(NewSubReg).second) continue; @@ -541,7 +541,7 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) { // We inserted a new subregister. NewSubRegs.push_back(NewSubReg); SubRegQueue.push(NewSubReg); - SubReg2Idx.insert(std::pair(Cand, Concat)); + SubReg2Idx.try_emplace(Cand, Concat); } } @@ -1098,7 +1098,7 @@ CodeGenRegisterClass::getMatchingSubClassWithSubRegs( BitVector SuperRegClassesBV(RegClasses.size()); RC.getSuperRegClasses(SubIdx, SuperRegClassesBV); if (SuperRegClassesBV.any()) - SuperRegClasses.push_back(std::pair(&RC, SuperRegClassesBV)); + SuperRegClasses.emplace_back(&RC, SuperRegClassesBV); } llvm::stable_sort(SuperRegClasses, [&](const std::pair &A, @@ -1247,8 +1247,7 @@ CodeGenRegBank::CodeGenRegBank(const RecordKeeper &Records, // causes some failures in MIPS - perhaps they have duplicate register name // entries? 
(or maybe there's a reason for it - I don't know much about this // code, just drive-by refactoring) - RegistersByName.insert( - std::pair(Reg.TheDef->getValueAsString("AsmName"), &Reg)); + RegistersByName.try_emplace(Reg.TheDef->getValueAsString("AsmName"), &Reg); // Precompute all sub-register maps. // This will create Composite entries for all inferred sub-register indices. @@ -1260,10 +1259,10 @@ CodeGenRegBank::CodeGenRegBank(const RecordKeeper &Records, for (CodeGenSubRegIndex &SRI : SubRegIndices) { SRI.computeConcatTransitiveClosure(); if (!SRI.ConcatenationOf.empty()) - ConcatIdx.insert( - std::pair(SmallVector( - SRI.ConcatenationOf.begin(), SRI.ConcatenationOf.end()), - &SRI)); + ConcatIdx.try_emplace( + SmallVector(SRI.ConcatenationOf.begin(), + SRI.ConcatenationOf.end()), + &SRI); } // Infer even more sub-registers by combining leading super-registers. @@ -1353,12 +1352,12 @@ CodeGenRegister *CodeGenRegBank::getReg(const Record *Def) { void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) { if (const Record *Def = RC->getDef()) - Def2RC.insert(std::pair(Def, RC)); + Def2RC.try_emplace(Def, RC); // Duplicate classes are rejected by insert(). // That's OK, we only care about the properties handled by CGRC::Key. CodeGenRegisterClass::Key K(*RC); - Key2RC.insert(std::pair(K, RC)); + Key2RC.try_emplace(K, RC); } // Create a synthetic sub-class if it is missing. @@ -1509,7 +1508,7 @@ void CodeGenRegBank::computeComposites() { SmallSet UserDefined; for (const CodeGenSubRegIndex &Idx : SubRegIndices) for (auto P : Idx.getComposites()) - UserDefined.insert(std::pair(&Idx, P.first)); + UserDefined.insert({&Idx, P.first}); // Keep track of TopoSigs visited. We only need to visit each TopoSig once, // and many registers will share TopoSigs on regular architectures. 
diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.h b/llvm/utils/TableGen/Common/CodeGenRegisters.h index 2fa6cab2afb89..5e2d1977545c1 100644 --- a/llvm/utils/TableGen/Common/CodeGenRegisters.h +++ b/llvm/utils/TableGen/Common/CodeGenRegisters.h @@ -110,7 +110,7 @@ class CodeGenSubRegIndex { CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A, CodeGenSubRegIndex *B, const CodeGenHwModes &CGH) { assert(A && B); - std::pair Ins = Composed.insert(std::pair(A, B)); + std::pair Ins = Composed.try_emplace(A, B); // Synthetic subreg indices that aren't contiguous (for instance ARM // register tuples) don't have a bit range, so it's OK to let @@ -729,7 +729,7 @@ class CodeGenRegBank { // This function is only for use by CodeGenRegister::computeSuperRegs(). // Others should simply use Reg->getTopoSig(). unsigned getTopoSig(const TopoSigId &Id) { - return TopoSigs.insert(std::pair(Id, TopoSigs.size())).first->second; + return TopoSigs.try_emplace(Id, TopoSigs.size()).first->second; } // Create a native register unit that is associated with one or two root diff --git a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp index 1fe322c88bb0f..a5ca060533bce 100644 --- a/llvm/utils/TableGen/Common/CodeGenSchedule.cpp +++ b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp @@ -334,7 +334,7 @@ static void processSTIPredicate(STIPredicateFunction &Fn, APInt DefaultProcMask(ProcModelMap.size(), 0); APInt DefaultPredMask(NumUniquePredicates, 0); for (std::pair &MaskPair : OpcodeMasks) - MaskPair = std::pair(DefaultProcMask, DefaultPredMask); + MaskPair = {DefaultProcMask, DefaultPredMask}; // Construct a OpcodeInfo object for every unique opcode declared by an // InstructionEquivalenceClass definition. 
@@ -384,7 +384,7 @@ static void processSTIPredicate(STIPredicateFunction &Fn, auto PopulationCountAndLeftBit = [](const APInt &Other) -> std::pair { - return std::pair(Other.popcount(), -Other.countl_zero()); + return {Other.popcount(), -Other.countl_zero()}; }; auto lhsmask_first = PopulationCountAndLeftBit(LhsMasks.first); auto rhsmask_first = PopulationCountAndLeftBit(RhsMasks.first); @@ -545,7 +545,7 @@ void CodeGenSchedModels::collectProcModels() { /// ProcessorItineraries. void CodeGenSchedModels::addProcModel(const Record *ProcDef) { const Record *ModelKey = getModelOrItinDef(ProcDef); - if (!ProcModelMap.insert(std::pair(ModelKey, ProcModels.size())).second) + if (!ProcModelMap.try_emplace(ModelKey, ProcModels.size()).second) return; std::string Name = std::string(ModelKey->getName()); diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp index a81f2b53f2846..f0cd98dd2dee0 100644 --- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp +++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp @@ -840,8 +840,7 @@ Error RuleMatcher::defineComplexSubOperand(StringRef SymbolicName, return Error::success(); } - ComplexSubOperands[SymbolicName] = - std::tuple(ComplexPattern, RendererID, SubOperandID); + ComplexSubOperands[SymbolicName] = {ComplexPattern, RendererID, SubOperandID}; ComplexSubOperandsParentName[SymbolicName] = std::move(ParentName); return Error::success(); @@ -875,10 +874,8 @@ unsigned RuleMatcher::getInsnVarID(InstructionMatcher &InsnMatcher) const { } void RuleMatcher::defineOperand(StringRef SymbolicName, OperandMatcher &OM) { - if (!DefinedOperands.contains(SymbolicName)) { - DefinedOperands[SymbolicName] = &OM; + if (DefinedOperands.try_emplace(SymbolicName, &OM).second) return; - } // If the operand is already defined, then we must ensure both references in // the matcher have the exact same node. 
@@ -889,8 +886,7 @@ void RuleMatcher::defineOperand(StringRef SymbolicName, OperandMatcher &OM) { } void RuleMatcher::definePhysRegOperand(const Record *Reg, OperandMatcher &OM) { - if (!PhysRegOperands.contains(Reg)) - PhysRegOperands[Reg] = &OM; + PhysRegOperands.try_emplace(Reg, &OM); } InstructionMatcher & diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h index 8e6de80d6083c..e7914a613973b 100644 --- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h +++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h @@ -233,7 +233,7 @@ class MatchTable { unsigned allocateLabelID() { return CurrentLabelID++; } void defineLabel(unsigned LabelID) { - LabelMap.insert(std::pair(LabelID, CurrentSize)); + LabelMap.try_emplace(LabelID, CurrentSize); } unsigned getLabelIndex(unsigned LabelID) const { diff --git a/llvm/utils/TableGen/Common/InfoByHwMode.cpp b/llvm/utils/TableGen/Common/InfoByHwMode.cpp index e5e8225518b58..c2368cb31dbbf 100644 --- a/llvm/utils/TableGen/Common/InfoByHwMode.cpp +++ b/llvm/utils/TableGen/Common/InfoByHwMode.cpp @@ -71,9 +71,9 @@ MVT &ValueTypeByHwMode::getOrCreateTypeForMode(unsigned Mode, MVT Type) { // make a copy of it for Mode and return it. auto D = Map.begin(); if (D != Map.end() && D->first == DefaultMode) - return Map.insert(std::pair(Mode, D->second)).first->second; + return Map.try_emplace(Mode, D->second).first->second; // If default mode is not present either, use provided Type. 
- return Map.insert(std::pair(Mode, Type)).first->second; + return Map.try_emplace(Mode, Type).first->second; } StringRef ValueTypeByHwMode::getMVTName(MVT T) { diff --git a/llvm/utils/TableGen/Common/InfoByHwMode.h b/llvm/utils/TableGen/Common/InfoByHwMode.h index 4f11e8ecc7fcb..bff164c6a6aa7 100644 --- a/llvm/utils/TableGen/Common/InfoByHwMode.h +++ b/llvm/utils/TableGen/Common/InfoByHwMode.h @@ -144,7 +144,7 @@ template struct InfoByHwMode { assert(hasMode(Mode) || hasDefault()); InfoT I = get(Mode); Map.clear(); - Map.insert(std::pair(DefaultMode, I)); + Map.try_emplace(DefaultMode, I); } protected: @@ -212,7 +212,7 @@ struct RegSizeInfoByHwMode : public InfoByHwMode { void writeToStream(raw_ostream &OS) const; void insertRegSizeForMode(unsigned Mode, RegSizeInfo Info) { - Map.insert(std::pair(Mode, Info)); + Map.try_emplace(Mode, Info); } }; @@ -233,7 +233,7 @@ struct SubRegRangeByHwMode : public InfoByHwMode { SubRegRangeByHwMode() = default; void insertSubRegRangeForMode(unsigned Mode, SubRegRange Info) { - Map.insert(std::pair(Mode, Info)); + Map.try_emplace(Mode, Info); } }; diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp index dd05f4df0d723..e1c25075e384d 100644 --- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp @@ -252,7 +252,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode &N) { if (LeafRec->isSubClassOf("Register")) { AddMatcher(new RecordMatcher("physreg input " + LeafRec->getName().str(), NextRecordedOperandNo)); - PhysRegInputs.push_back(std::pair(LeafRec, NextRecordedOperandNo++)); + PhysRegInputs.emplace_back(LeafRec, NextRecordedOperandNo++); return; } @@ -272,7 +272,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode &N) { // Remember this ComplexPattern so that we can emit it after all the other // structural matches are done. 
unsigned InputOperand = VariableMap[N.getName()] - 1; - MatchedComplexPatterns.push_back(std::pair(&N, InputOperand)); + MatchedComplexPatterns.emplace_back(&N, InputOperand); return; } diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp index 590786bb7fced..f747944543cfd 100644 --- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp @@ -426,7 +426,7 @@ static void FactorNodes(std::unique_ptr &InputMatcherPtr) { CheckOpcodeMatcher *COM = cast(OptionsToMatch[i]); assert(Opcodes.insert(COM->getOpcode().getEnumName()).second && "Duplicate opcodes not factored?"); - Cases.push_back(std::pair(&COM->getOpcode(), COM->takeNext())); + Cases.emplace_back(&COM->getOpcode(), COM->takeNext()); delete COM; } @@ -463,7 +463,7 @@ static void FactorNodes(std::unique_ptr &InputMatcherPtr) { } Entry = Cases.size() + 1; - Cases.push_back(std::pair(CTMTy, MatcherWithoutCTM)); + Cases.emplace_back(CTMTy, MatcherWithoutCTM); } // Make sure we recursively factor any scopes we may have created. 
diff --git a/llvm/utils/TableGen/DFAEmitter.cpp b/llvm/utils/TableGen/DFAEmitter.cpp index c150620b74175..a77397dd7d260 100644 --- a/llvm/utils/TableGen/DFAEmitter.cpp +++ b/llvm/utils/TableGen/DFAEmitter.cpp @@ -349,7 +349,7 @@ void CustomDfaEmitter::printActionType(raw_ostream &OS) { OS << TypeName; } void CustomDfaEmitter::printActionValue(action_type A, raw_ostream &OS) { const ActionTuple &AT = Actions[A]; if (AT.size() > 1) - OS << "std::tuple("; + OS << "{"; ListSeparator LS; for (const auto &SingleAction : AT) { OS << LS; @@ -361,7 +361,7 @@ void CustomDfaEmitter::printActionValue(action_type A, raw_ostream &OS) { OS << std::get(SingleAction); } if (AT.size() > 1) - OS << ")"; + OS << "}"; } static TableGen::Emitter::OptClass diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp index 3f79de3139fbd..b847031fdc00a 100644 --- a/llvm/utils/TableGen/DecoderEmitter.cpp +++ b/llvm/utils/TableGen/DecoderEmitter.cpp @@ -640,11 +640,11 @@ void Filter::recurse() { // Delegates to an inferior filter chooser for further processing on this // group of instructions whose segment values are variable. - FilterChooserMap.insert(std::pair( + FilterChooserMap.try_emplace( NO_FIXED_SEGMENTS_SENTINEL, std::make_unique(Owner->AllInstructions, VariableInstructions, Owner->Operands, - BitValueArray, *Owner))); + BitValueArray, *Owner)); } // No need to recurse for a singleton filtered instruction. @@ -667,10 +667,10 @@ void Filter::recurse() { // Delegates to an inferior filter chooser for further processing on this // category of instructions. 
- FilterChooserMap.insert( - std::pair(Inst.first, std::make_unique( - Owner->AllInstructions, Inst.second, - Owner->Operands, BitValueArray, *Owner))); + FilterChooserMap.try_emplace(Inst.first, + std::make_unique( + Owner->AllInstructions, Inst.second, + Owner->Operands, BitValueArray, *Owner)); } } @@ -1943,7 +1943,7 @@ static void parseVarLenInstOperand(const Record &Def, int TiedReg = TiedTo[OpSubOpPair.first]; if (TiedReg != -1) { unsigned OpIdx = CGI.Operands.getFlattenedOperandNumber( - std::pair(TiedReg, OpSubOpPair.second)); + {TiedReg, OpSubOpPair.second}); Operands[OpIdx].addField(CurrBitPos, EncodingSegment.BitWidth, Offset); } } @@ -2039,9 +2039,9 @@ populateInstruction(const CodeGenTarget &Target, const Record &EncodingDef, const DagInit *Out = Def.getValueAsDag("OutOperandList"); const DagInit *In = Def.getValueAsDag("InOperandList"); for (const auto &[Idx, Arg] : enumerate(Out->getArgs())) - InOutOperands.push_back(std::pair(Arg, Out->getArgNameStr(Idx))); + InOutOperands.emplace_back(Arg, Out->getArgNameStr(Idx)); for (const auto &[Idx, Arg] : enumerate(In->getArgs())) - InOutOperands.push_back(std::pair(Arg, In->getArgNameStr(Idx))); + InOutOperands.emplace_back(Arg, In->getArgNameStr(Idx)); // Search for tied operands, so that we can correctly instantiate // operands that are not explicitly represented in the encoding. 
@@ -2587,7 +2587,7 @@ namespace llvm { if (!NumberedEncoding.HwModeName.empty()) DecoderNamespace += std::string("_") + NumberedEncoding.HwModeName.str(); - OpcMap[std::pair(DecoderNamespace, Size)].emplace_back( + OpcMap[{DecoderNamespace, Size}].emplace_back( NEI, Target.getInstrIntValue(Def)); } else { NumEncodingsOmitted++; diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp index f60c63c212d61..6963bb239e9d8 100644 --- a/llvm/utils/TableGen/FastISelEmitter.cpp +++ b/llvm/utils/TableGen/FastISelEmitter.cpp @@ -593,7 +593,7 @@ void FastISelMap::collectPatterns(const CodeGenDAGPatterns &CGP) { int complexity = Pattern.getPatternComplexity(CGP); auto inserted_simple_pattern = SimplePatternsCheck.insert( - std::tuple(Operands, OpcodeName, VT, RetVT, PredicateCheck)); + {Operands, OpcodeName, VT, RetVT, PredicateCheck}); if (!inserted_simple_pattern.second) { PrintFatalError(Pattern.getSrcRecord()->getLoc(), "Duplicate predicate in FastISel table!"); diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp index bc300c3461100..770494405810d 100644 --- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp +++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp @@ -2653,10 +2653,10 @@ GICombinerEmitter::buildMatchTable(MutableArrayRef Rules) { const Matcher *B) { auto *L = static_cast(A); auto *R = static_cast(B); - return std::make_tuple(OpcodeOrder[L->getOpcode()], - L->insnmatchers_front().getNumOperandMatchers()) < - std::make_tuple(OpcodeOrder[R->getOpcode()], - R->insnmatchers_front().getNumOperandMatchers()); + return std::tuple(OpcodeOrder[L->getOpcode()], + L->insnmatchers_front().getNumOperandMatchers()) < + std::tuple(OpcodeOrder[R->getOpcode()], + R->insnmatchers_front().getNumOperandMatchers()); }); for (Matcher *Rule : InputRules) diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp index 
04ebdbb0ffc90..5466d315c05a4 100644 --- a/llvm/utils/TableGen/GlobalISelEmitter.cpp +++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp @@ -765,6 +765,18 @@ Expected GlobalISelEmitter::createAndImportSelDAGMatcher( InsnMatcher.addPredicate(SrcGIOrNull); } + // Since there are no opcodes for atomic loads and stores comparing to + // SelectionDAG, we add CheckMMOIsNonAtomic predicate immediately after the + // opcode predicate to make a logical combination of them. + if (SrcGIEquivOrNull && + SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic")) + InsnMatcher.addPredicate("NotAtomic"); + else if (SrcGIEquivOrNull && + SrcGIEquivOrNull->getValueAsBit("CheckMMOIsAtomic")) { + InsnMatcher.addPredicate( + "Unordered", AtomicOrderingMMOPredicateMatcher::AO_OrStronger); + } + unsigned OpIdx = 0; for (const TypeSetByHwMode &VTy : Src.getExtTypes()) { // Results don't have a name unless they are the root node. The caller will @@ -827,15 +839,6 @@ Expected GlobalISelEmitter::createAndImportSelDAGMatcher( } } - if (SrcGIEquivOrNull && - SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic")) - InsnMatcher.addPredicate("NotAtomic"); - else if (SrcGIEquivOrNull && - SrcGIEquivOrNull->getValueAsBit("CheckMMOIsAtomic")) { - InsnMatcher.addPredicate( - "Unordered", AtomicOrderingMMOPredicateMatcher::AO_OrStronger); - } - if (Src.isLeaf()) { const Init *SrcInit = Src.getLeafValue(); if (const IntInit *SrcIntInit = dyn_cast(SrcInit)) { diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp index 8c0e27215a736..7811734d5fdac 100644 --- a/llvm/utils/TableGen/InstrInfoEmitter.cpp +++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp @@ -259,8 +259,7 @@ void InstrInfoEmitter::initOperandMapData( StrUintMapIter I = Operands.find(Info.Name); if (I == Operands.end()) { - I = Operands.insert(Operands.begin(), std::pair( - Info.Name, NumOperands++)); + I = Operands.insert(Operands.begin(), {Info.Name, NumOperands++}); } OpList[I->second] = Info.MIOperandNo; } 
diff --git a/llvm/utils/TableGen/OptionParserEmitter.cpp b/llvm/utils/TableGen/OptionParserEmitter.cpp index eca828cad5f4d..8b92d25239219 100644 --- a/llvm/utils/TableGen/OptionParserEmitter.cpp +++ b/llvm/utils/TableGen/OptionParserEmitter.cpp @@ -232,8 +232,7 @@ static void emitHelpTextsForVariants( assert(Visibilities.size() <= MaxVisibilityPerHelp && "Too many visibilities to store in an " "OptTable::HelpTextsForVariants entry"); - OS << "std::make_pair(std::array{{"; + OS << "{std::array{{"; auto VisibilityEnd = Visibilities.cend(); for (auto Visibility = Visibilities.cbegin(); Visibility != VisibilityEnd; @@ -249,7 +248,7 @@ static void emitHelpTextsForVariants( writeCstring(OS, Help); else OS << "nullptr"; - OS << ")"; + OS << "}"; if (std::next(VisibilityHelp) != VisibilityHelpEnd) OS << ", "; @@ -516,8 +515,8 @@ static void emitOptionParser(const RecordKeeper &Records, raw_ostream &OS) { for (const Init *Visibility : Visibilities) VisibilityNames.push_back(Visibility->getAsUnquotedString()); - HelpTextsForVariants.push_back(std::make_pair( - VisibilityNames, VisibilityHelp->getValueAsString("Text"))); + HelpTextsForVariants.emplace_back( + VisibilityNames, VisibilityHelp->getValueAsString("Text")); } emitHelpTextsForVariants(OS, std::move(HelpTextsForVariants)); diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp index 0c1f5d205ca0f..8247b2d8f5a40 100644 --- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp +++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp @@ -1882,9 +1882,8 @@ void RegisterInfoEmitter::debugDump(raw_ostream &OS) { OS << '\n'; OS << "\tCoveredBySubregs: " << R.CoveredBySubRegs << '\n'; OS << "\tHasDisjunctSubRegs: " << R.HasDisjunctSubRegs << '\n'; - for (std::pair P : - R.getSubRegs()) { - OS << "\tSubReg " << P.first->getName() << " = " << P.second->getName() + for (auto &[SubIdx, SubReg] : R.getSubRegs()) { + OS << "\tSubReg " << SubIdx->getName() << " = " << SubReg->getName() << '\n'; } } 
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp index 91fde0c663057..38b6f2b395137 100644 --- a/llvm/utils/TableGen/SearchableTableEmitter.cpp +++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp @@ -642,7 +642,7 @@ void SearchableTableEmitter::collectEnumEntries( Value = getInt(EntryRec, ValueField); Enum.Entries.push_back(std::make_unique(Name, Value)); - Enum.EntryMap.insert(std::pair(EntryRec, Enum.Entries.back().get())); + Enum.EntryMap.try_emplace(EntryRec, Enum.Entries.back().get()); } if (ValueField.empty()) { @@ -745,7 +745,7 @@ void SearchableTableEmitter::run(raw_ostream &OS) { collectEnumEntries(*Enum, NameField, ValueField, Records.getAllDerivedDefinitions(FilterClass)); - EnumMap.insert(std::pair(EnumRec, Enum.get())); + EnumMap.try_emplace(EnumRec, Enum.get()); Enums.emplace_back(std::move(Enum)); } @@ -814,7 +814,7 @@ void SearchableTableEmitter::run(raw_ostream &OS) { }); } - TableMap.insert(std::pair(TableRec, Table.get())); + TableMap.try_emplace(TableRec, Table.get()); Tables.emplace_back(std::move(Table)); } diff --git a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp index 7373494e8b12f..5aa573ac857dc 100644 --- a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp +++ b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp @@ -75,7 +75,7 @@ void llvm::emitWebAssemblyDisassemblerTables( } } // Set this instruction as the one to use. 
- CGIP = std::pair(I, &CGI); + CGIP = {I, &CGI}; } OS << "#include \"MCTargetDesc/WebAssemblyMCTargetDesc.h\"\n"; OS << "\n"; diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp index 294923b250eea..5e7983a101e0b 100644 --- a/llvm/utils/TableGen/X86DisassemblerTables.cpp +++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp @@ -874,7 +874,7 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, for (auto Operand : InstructionSpecifiers[Index].operands) { OperandEncoding Encoding = (OperandEncoding)Operand.encoding; OperandType Type = (OperandType)Operand.type; - OperandList.push_back(std::pair(Encoding, Type)); + OperandList.emplace_back(Encoding, Type); } unsigned &N = OperandSets[OperandList]; if (N != 0) @@ -906,7 +906,7 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, for (auto Operand : InstructionSpecifiers[index].operands) { OperandEncoding Encoding = (OperandEncoding)Operand.encoding; OperandType Type = (OperandType)Operand.type; - OperandList.push_back(std::pair(Encoding, Type)); + OperandList.emplace_back(Encoding, Type); } o.indent(i * 2) << (OperandSets[OperandList] - 1) << ",\n"; diff --git a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp index 10fab469a0803..1ee79aa27fa98 100644 --- a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp +++ b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp @@ -236,7 +236,7 @@ void X86InstrMappingEmitter::emitCompressEVEXTable( if (!NewInst) continue; - Table.push_back(std::pair(Inst, NewInst)); + Table.emplace_back(Inst, NewInst); auto Predicates = NewInst->TheDef->getValueAsListOfDefs("Predicates"); auto It = llvm::find_if(Predicates, [](const Record *R) { StringRef Name = R->getName(); @@ -293,7 +293,7 @@ void X86InstrMappingEmitter::emitNFTransformTable( report_fatal_error("EFLAGS should be clobbered by " + NewRec->getName()); #endif - Table.push_back(std::pair(&Target.getInstruction(NewRec), 
Inst)); + Table.emplace_back(&Target.getInstruction(NewRec), Inst); } } printTable(Table, "X86NFTransformTable", "GET_X86_NF_TRANSFORM_TABLE", OS); @@ -321,7 +321,7 @@ void X86InstrMappingEmitter::emitND2NonNDTable( const auto *NewRec = Records.getDef(ManualMap.at(Rec->getName())); assert(NewRec && "Instruction not found!"); auto &NewInst = Target.getInstruction(NewRec); - Table.push_back(std::pair(Inst, &NewInst)); + Table.emplace_back(Inst, &NewInst); continue; } @@ -332,7 +332,7 @@ void X86InstrMappingEmitter::emitND2NonNDTable( continue; const auto &NewInst = Target.getInstruction(NewRec); if (isRegisterOperand(NewInst.Operands[0].Rec)) - Table.push_back(std::pair(Inst, &NewInst)); + Table.emplace_back(Inst, &NewInst); } printTable(Table, "X86ND2NonNDTable", "GET_X86_ND2NONND_TABLE", OS); } @@ -355,7 +355,7 @@ void X86InstrMappingEmitter::emitSSE2AVXTable( const auto *NewRec = Records.getDef(ManualMap.at(Rec->getName())); assert(NewRec && "Instruction not found!"); const auto &NewInst = Target.getInstruction(NewRec); - Table.push_back(std::pair(Inst, &NewInst)); + Table.emplace_back(Inst, &NewInst); continue; } @@ -364,7 +364,7 @@ void X86InstrMappingEmitter::emitSSE2AVXTable( if (!AVXRec) continue; auto &AVXInst = Target.getInstruction(AVXRec); - Table.push_back(std::pair(Inst, &AVXInst)); + Table.emplace_back(Inst, &AVXInst); } printTable(Table, "X86SSE2AVXTable", "GET_X86_SSE2AVX_TABLE", OS); } diff --git a/llvm/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn b/llvm/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn index d8c4d8abdfd11..70af9760a858d 100644 --- a/llvm/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/include/clang/Basic/BUILD.gn @@ -40,6 +40,14 @@ foreach(diag_group, diag_groups) { ] td_file = "Diagnostic.td" } + + clang_tablegen("Diagnostic${diag_group}Enums") { + args = [ + "-gen-clang-diags-enums", + "-clang-component=${diag_group}", + ] + td_file = "Diagnostic.td" + } } 
group("diags_tablegen") { # DiagnosticGroups and DiagnosticIndexName are intentionally not part of this @@ -47,7 +55,10 @@ group("diags_tablegen") { # but almost nothing needs DiagnosticGroups.inc or DiagnosticIndexName.inc. public_deps = [] foreach(diag_group, diag_groups) { - public_deps += [ ":Diagnostic${diag_group}Kinds" ] + public_deps += [ + ":Diagnostic${diag_group}Kinds", + ":Diagnostic${diag_group}Enums", + ] } } diff --git a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn index fd2ac58714664..5b067cb382c14 100644 --- a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn @@ -94,6 +94,7 @@ static_library("Sema") { "SemaObjC.cpp", "SemaObjCProperty.cpp", "SemaOpenACC.cpp", + "SemaOpenACCClause.cpp", "SemaOpenCL.cpp", "SemaOpenMP.cpp", "SemaOverload.cpp", diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn index 350eaac84d668..9b8990b5a6bcf 100644 --- a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn @@ -97,7 +97,6 @@ write_cmake_config("config") { "HAVE_MALLCTL=", "HAVE_PTHREAD_GET_NAME_NP=", "HAVE_PTHREAD_SET_NAME_NP=", - "HAVE_SIGNAL_H=1", "HAVE_VALGRIND_VALGRIND_H=", "HAVE__ALLOCA=", "HAVE___ALLOCA=", @@ -207,7 +206,6 @@ write_cmake_config("config") { "HAVE_DLOPEN=", "HAVE_FUTIMES=", "HAVE_GETPAGESIZE=", - "HAVE_GETRLIMIT=", "HAVE_GETRUSAGE=", "HAVE_ISATTY=", "HAVE_LIBPTHREAD=", @@ -218,15 +216,10 @@ write_cmake_config("config") { "HAVE_PTHREAD_RWLOCK_INIT=", "HAVE_SBRK=", "HAVE_SETENV=", - "HAVE_SETRLIMIT=", "HAVE_SIGALTSTACK=", "HAVE_STRERROR_R=", "HAVE_SYSCONF=", - "HAVE_SYS_IOCTL_H=", "HAVE_SYS_MMAN_H=", - "HAVE_SYS_RESOURCE_H=", - "HAVE_SYS_TIME_H=", - "HAVE_TERMIOS_H=", "HAVE_UNISTD_H=", "HAVE__CHSIZE_S=1", "HAVE__UNWIND_BACKTRACE=", @@ -242,7 +235,6 @@ write_cmake_config("config") { "HAVE_DLOPEN=1", 
"HAVE_FUTIMES=1", "HAVE_GETPAGESIZE=1", - "HAVE_GETRLIMIT=1", "HAVE_GETRUSAGE=1", "HAVE_ISATTY=1", "HAVE_LIBPTHREAD=1", @@ -253,15 +245,10 @@ write_cmake_config("config") { "HAVE_PTHREAD_RWLOCK_INIT=1", "HAVE_SBRK=1", "HAVE_SETENV=1", - "HAVE_SETRLIMIT=1", "HAVE_SIGALTSTACK=1", "HAVE_STRERROR_R=1", "HAVE_SYSCONF=1", - "HAVE_SYS_IOCTL_H=1", "HAVE_SYS_MMAN_H=1", - "HAVE_SYS_RESOURCE_H=1", - "HAVE_SYS_TIME_H=1", - "HAVE_TERMIOS_H=1", "HAVE_UNISTD_H=1", "HAVE__CHSIZE_S=", "HAVE__UNWIND_BACKTRACE=1", diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn index ea0f9b8723082..dab25dd6ad6d2 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn @@ -14,6 +14,7 @@ static_library("Vectorize") { "LoopVectorize.cpp", "SLPVectorizer.cpp", "SandboxVectorizer/DependencyGraph.cpp", + "SandboxVectorizer/InstrMaps.cpp", "SandboxVectorizer/Interval.cpp", "SandboxVectorizer/Legality.cpp", "SandboxVectorizer/Passes/BottomUpVec.cpp", diff --git a/llvm/utils/gn/secondary/llvm/unittests/ExecutionEngine/JITLink/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/ExecutionEngine/JITLink/BUILD.gn index 78802e5cc2368..a83e9f5102668 100644 --- a/llvm/utils/gn/secondary/llvm/unittests/ExecutionEngine/JITLink/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/unittests/ExecutionEngine/JITLink/BUILD.gn @@ -23,5 +23,6 @@ unittest("JITLinkTests") { "MachOLinkGraphTests.cpp", "MemoryManagerErrorTests.cpp", "StubsTests.cpp", + "X86_64Tests.cpp", ] } diff --git a/llvm/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/BUILD.gn index 97df71c6279ef..919512919cacc 100644 --- a/llvm/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/BUILD.gn +++ 
b/llvm/utils/gn/secondary/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/BUILD.gn @@ -11,6 +11,7 @@ unittest("SandboxVectorizerTests") { ] sources = [ "DependencyGraphTest.cpp", + "InstrMapsTest.cpp", "IntervalTest.cpp", "LegalityTest.cpp", "SchedulerTest.cpp", diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt index a888ac243b044..9e786154a2b40 100644 --- a/mlir/CMakeLists.txt +++ b/mlir/CMakeLists.txt @@ -174,6 +174,8 @@ configure_file( # disable all package setup and control it themselves. #------------------------------------------------------------------------------- +set(MLIR_BINDINGS_PYTHON_NB_DOMAIN "mlir" + CACHE STRING "nanobind domain for MLIR python bindings.") set(MLIR_ENABLE_BINDINGS_PYTHON 0 CACHE BOOL "Enables building of Python bindings.") set(MLIR_DETECT_PYTHON_ENV_PRIME_SEARCH 1 CACHE BOOL diff --git a/mlir/cmake/modules/AddMLIRPython.cmake b/mlir/cmake/modules/AddMLIRPython.cmake index 0679db9cf93e1..815f65b106d94 100644 --- a/mlir/cmake/modules/AddMLIRPython.cmake +++ b/mlir/cmake/modules/AddMLIRPython.cmake @@ -667,7 +667,7 @@ function(add_mlir_python_extension libname extname) ) elseif(ARG_PYTHON_BINDINGS_LIBRARY STREQUAL "nanobind") nanobind_add_module(${libname} - NB_DOMAIN mlir + NB_DOMAIN ${MLIR_BINDINGS_PYTHON_NB_DOMAIN} FREE_THREADED ${ARG_SOURCES} ) diff --git a/mlir/include/mlir/Conversion/GPUCommon/GPUCommonPass.h b/mlir/include/mlir/Conversion/GPUCommon/GPUCommonPass.h index 094360e75ab61..cf0c96f0eba00 100644 --- a/mlir/include/mlir/Conversion/GPUCommon/GPUCommonPass.h +++ b/mlir/include/mlir/Conversion/GPUCommon/GPUCommonPass.h @@ -64,7 +64,8 @@ struct FunctionCallBuilder { /// populate converter for gpu types. void populateGpuToLLVMConversionPatterns(LLVMTypeConverter &converter, RewritePatternSet &patterns, - bool kernelBarePtrCallConv = false); + bool kernelBarePtrCallConv = false, + bool typeCheckKernelArgs = false); /// A function that maps a MemorySpace enum to a target-specific integer value. 
using MemorySpaceMapping = std::function; diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 71d8284d3d373..61f754d67dc13 100644 --- a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -517,6 +517,12 @@ def GpuToLLVMConversionPass : Pass<"gpu-to-llvm", "ModuleOp"> { /*default=*/"false", "Use bare pointers to pass memref arguments to kernels. " "The kernel must use the same setting for this option." + >, + Option<"typeCheckKernelArgs", "type-check-kernel-args", "bool", + /*default=*/"false", + "Require all kernel arguments to be memrefs of rank 1 and with a " + "32-bit element size. This is a temporary option that will be " + "removed; TODO(https://github.com/llvm/llvm-project/issues/73457)." > ]; diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td index c36b738e38f42..8aa2c55570153 100644 --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -46,26 +46,6 @@ def FloatTypeInterface : TypeInterface<"FloatType"> { ]; let extraClassDeclaration = [{ - // Convenience factories. 
- static FloatType getBF16(MLIRContext *ctx); - static FloatType getF16(MLIRContext *ctx); - static FloatType getF32(MLIRContext *ctx); - static FloatType getTF32(MLIRContext *ctx); - static FloatType getF64(MLIRContext *ctx); - static FloatType getF80(MLIRContext *ctx); - static FloatType getF128(MLIRContext *ctx); - static FloatType getFloat8E5M2(MLIRContext *ctx); - static FloatType getFloat8E4M3(MLIRContext *ctx); - static FloatType getFloat8E4M3FN(MLIRContext *ctx); - static FloatType getFloat8E5M2FNUZ(MLIRContext *ctx); - static FloatType getFloat8E4M3FNUZ(MLIRContext *ctx); - static FloatType getFloat8E4M3B11FNUZ(MLIRContext *ctx); - static FloatType getFloat8E3M4(MLIRContext *ctx); - static FloatType getFloat4E2M1FN(MLIRContext *ctx); - static FloatType getFloat6E2M3FN(MLIRContext *ctx); - static FloatType getFloat6E3M2FN(MLIRContext *ctx); - static FloatType getFloat8E8M0FNU(MLIRContext *ctx); - /// Return the bitwidth of this float type. unsigned getWidth(); diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h index 2b3c2b6d1753d..19c5361124aac 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -401,78 +401,6 @@ inline bool BaseMemRefType::isValidElementType(Type type) { llvm::isa(type); } -inline FloatType FloatType::getFloat4E2M1FN(MLIRContext *ctx) { - return Float4E2M1FNType::get(ctx); -} - -inline FloatType FloatType::getFloat6E2M3FN(MLIRContext *ctx) { - return Float6E2M3FNType::get(ctx); -} - -inline FloatType FloatType::getFloat6E3M2FN(MLIRContext *ctx) { - return Float6E3M2FNType::get(ctx); -} - -inline FloatType FloatType::getFloat8E5M2(MLIRContext *ctx) { - return Float8E5M2Type::get(ctx); -} - -inline FloatType FloatType::getFloat8E4M3(MLIRContext *ctx) { - return Float8E4M3Type::get(ctx); -} - -inline FloatType FloatType::getFloat8E4M3FN(MLIRContext *ctx) { - return Float8E4M3FNType::get(ctx); -} - -inline FloatType FloatType::getFloat8E5M2FNUZ(MLIRContext *ctx) { - 
return Float8E5M2FNUZType::get(ctx); -} - -inline FloatType FloatType::getFloat8E4M3FNUZ(MLIRContext *ctx) { - return Float8E4M3FNUZType::get(ctx); -} - -inline FloatType FloatType::getFloat8E4M3B11FNUZ(MLIRContext *ctx) { - return Float8E4M3B11FNUZType::get(ctx); -} - -inline FloatType FloatType::getFloat8E3M4(MLIRContext *ctx) { - return Float8E3M4Type::get(ctx); -} - -inline FloatType FloatType::getFloat8E8M0FNU(MLIRContext *ctx) { - return Float8E8M0FNUType::get(ctx); -} - -inline FloatType FloatType::getBF16(MLIRContext *ctx) { - return BFloat16Type::get(ctx); -} - -inline FloatType FloatType::getF16(MLIRContext *ctx) { - return Float16Type::get(ctx); -} - -inline FloatType FloatType::getTF32(MLIRContext *ctx) { - return FloatTF32Type::get(ctx); -} - -inline FloatType FloatType::getF32(MLIRContext *ctx) { - return Float32Type::get(ctx); -} - -inline FloatType FloatType::getF64(MLIRContext *ctx) { - return Float64Type::get(ctx); -} - -inline FloatType FloatType::getF80(MLIRContext *ctx) { - return Float80Type::get(ctx); -} - -inline FloatType FloatType::getF128(MLIRContext *ctx) { - return Float128Type::get(ctx); -} - inline bool TensorType::classof(Type type) { return llvm::isa(type); } diff --git a/mlir/include/mlir/IR/TypeRange.h b/mlir/include/mlir/IR/TypeRange.h index fa63435b188e9..99fabab334f92 100644 --- a/mlir/include/mlir/IR/TypeRange.h +++ b/mlir/include/mlir/IR/TypeRange.h @@ -29,12 +29,11 @@ namespace mlir { /// a SmallVector/std::vector. This class should be used in places that are not /// suitable for a more derived type (e.g. ArrayRef) or a template range /// parameter. 
-class TypeRange - : public llvm::detail::indexed_accessor_range_base< - TypeRange, - llvm::PointerUnion, - Type, Type, Type> { +class TypeRange : public llvm::detail::indexed_accessor_range_base< + TypeRange, + llvm::PointerUnion, + Type, Type, Type> { public: using RangeBaseT::RangeBaseT; TypeRange(ArrayRef types = std::nullopt); @@ -45,11 +44,8 @@ class TypeRange TypeRange(ValueTypeRange values) : TypeRange(ValueRange(ValueRangeT(values.begin().getCurrent(), values.end().getCurrent()))) {} - - TypeRange(Type type) : TypeRange(type, /*count=*/1) {} - template , Arg> && - !std::is_constructible_v>> + template , Arg>::value>> TypeRange(Arg &&arg) : TypeRange(ArrayRef(std::forward(arg))) {} TypeRange(std::initializer_list types) : TypeRange(ArrayRef(types)) {} @@ -60,9 +56,8 @@ class TypeRange /// * A pointer to the first element of an array of types. /// * A pointer to the first element of an array of operands. /// * A pointer to the first element of an array of results. - /// * A single 'Type' instance. using OwnerT = llvm::PointerUnion; + detail::OpResultImpl *>; /// See `llvm::detail::indexed_accessor_range_base` for details. static OwnerT offset_base(OwnerT object, ptrdiff_t index); diff --git a/mlir/include/mlir/IR/ValueRange.h b/mlir/include/mlir/IR/ValueRange.h index d5b067a79200d..4b421c08d8418 100644 --- a/mlir/include/mlir/IR/ValueRange.h +++ b/mlir/include/mlir/IR/ValueRange.h @@ -374,16 +374,16 @@ class ResultRange::UseIterator final /// SmallVector/std::vector. This class should be used in places that are not /// suitable for a more derived type (e.g. ArrayRef) or a template range /// parameter. -class ValueRange final : public llvm::detail::indexed_accessor_range_base< - ValueRange, - PointerUnion, - Value, Value, Value> { +class ValueRange final + : public llvm::detail::indexed_accessor_range_base< + ValueRange, + PointerUnion, + Value, Value, Value> { public: /// The type representing the owner of a ValueRange. 
This is either a list of - /// values, operands, or results or a single value. + /// values, operands, or results. using OwnerT = - PointerUnion; + PointerUnion; using RangeBaseT::RangeBaseT; @@ -392,7 +392,7 @@ class ValueRange final : public llvm::detail::indexed_accessor_range_base< std::is_constructible, Arg>::value && !std::is_convertible::value>> ValueRange(Arg &&arg) : ValueRange(ArrayRef(std::forward(arg))) {} - ValueRange(Value value) : ValueRange(value, /*count=*/1) {} + ValueRange(const Value &value) : ValueRange(&value, /*count=*/1) {} ValueRange(const std::initializer_list &values) : ValueRange(ArrayRef(values)) {} ValueRange(iterator_range values) diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp index 252ff54afe0c5..250e4a6bbf8df 100644 --- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp +++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp @@ -94,7 +94,7 @@ bool mlirTypeIsAFloat4E2M1FN(MlirType type) { } MlirType mlirFloat4E2M1FNTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat4E2M1FN(unwrap(ctx))); + return wrap(Float4E2M1FNType::get(unwrap(ctx))); } MlirTypeID mlirFloat6E2M3FNTypeGetTypeID() { @@ -106,7 +106,7 @@ bool mlirTypeIsAFloat6E2M3FN(MlirType type) { } MlirType mlirFloat6E2M3FNTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat6E2M3FN(unwrap(ctx))); + return wrap(Float6E2M3FNType::get(unwrap(ctx))); } MlirTypeID mlirFloat6E3M2FNTypeGetTypeID() { @@ -118,7 +118,7 @@ bool mlirTypeIsAFloat6E3M2FN(MlirType type) { } MlirType mlirFloat6E3M2FNTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat6E3M2FN(unwrap(ctx))); + return wrap(Float6E3M2FNType::get(unwrap(ctx))); } MlirTypeID mlirFloat8E5M2TypeGetTypeID() { @@ -130,7 +130,7 @@ bool mlirTypeIsAFloat8E5M2(MlirType type) { } MlirType mlirFloat8E5M2TypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E5M2(unwrap(ctx))); + return wrap(Float8E5M2Type::get(unwrap(ctx))); } MlirTypeID mlirFloat8E4M3TypeGetTypeID() { @@ -142,7 +142,7 @@ bool 
mlirTypeIsAFloat8E4M3(MlirType type) { } MlirType mlirFloat8E4M3TypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E4M3(unwrap(ctx))); + return wrap(Float8E4M3Type::get(unwrap(ctx))); } MlirTypeID mlirFloat8E4M3FNTypeGetTypeID() { @@ -154,7 +154,7 @@ bool mlirTypeIsAFloat8E4M3FN(MlirType type) { } MlirType mlirFloat8E4M3FNTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E4M3FN(unwrap(ctx))); + return wrap(Float8E4M3FNType::get(unwrap(ctx))); } MlirTypeID mlirFloat8E5M2FNUZTypeGetTypeID() { @@ -166,7 +166,7 @@ bool mlirTypeIsAFloat8E5M2FNUZ(MlirType type) { } MlirType mlirFloat8E5M2FNUZTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E5M2FNUZ(unwrap(ctx))); + return wrap(Float8E5M2FNUZType::get(unwrap(ctx))); } MlirTypeID mlirFloat8E4M3FNUZTypeGetTypeID() { @@ -178,7 +178,7 @@ bool mlirTypeIsAFloat8E4M3FNUZ(MlirType type) { } MlirType mlirFloat8E4M3FNUZTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E4M3FNUZ(unwrap(ctx))); + return wrap(Float8E4M3FNUZType::get(unwrap(ctx))); } MlirTypeID mlirFloat8E4M3B11FNUZTypeGetTypeID() { @@ -190,7 +190,7 @@ bool mlirTypeIsAFloat8E4M3B11FNUZ(MlirType type) { } MlirType mlirFloat8E4M3B11FNUZTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E4M3B11FNUZ(unwrap(ctx))); + return wrap(Float8E4M3B11FNUZType::get(unwrap(ctx))); } MlirTypeID mlirFloat8E3M4TypeGetTypeID() { @@ -202,7 +202,7 @@ bool mlirTypeIsAFloat8E3M4(MlirType type) { } MlirType mlirFloat8E3M4TypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E3M4(unwrap(ctx))); + return wrap(Float8E3M4Type::get(unwrap(ctx))); } MlirTypeID mlirFloat8E8M0FNUTypeGetTypeID() { @@ -214,7 +214,7 @@ bool mlirTypeIsAFloat8E8M0FNU(MlirType type) { } MlirType mlirFloat8E8M0FNUTypeGet(MlirContext ctx) { - return wrap(FloatType::getFloat8E8M0FNU(unwrap(ctx))); + return wrap(Float8E8M0FNUType::get(unwrap(ctx))); } MlirTypeID mlirBFloat16TypeGetTypeID() { @@ -224,7 +224,7 @@ MlirTypeID mlirBFloat16TypeGetTypeID() { bool 
mlirTypeIsABF16(MlirType type) { return unwrap(type).isBF16(); } MlirType mlirBF16TypeGet(MlirContext ctx) { - return wrap(FloatType::getBF16(unwrap(ctx))); + return wrap(BFloat16Type::get(unwrap(ctx))); } MlirTypeID mlirFloat16TypeGetTypeID() { return wrap(Float16Type::getTypeID()); } @@ -232,7 +232,7 @@ MlirTypeID mlirFloat16TypeGetTypeID() { return wrap(Float16Type::getTypeID()); } bool mlirTypeIsAF16(MlirType type) { return unwrap(type).isF16(); } MlirType mlirF16TypeGet(MlirContext ctx) { - return wrap(FloatType::getF16(unwrap(ctx))); + return wrap(Float16Type::get(unwrap(ctx))); } MlirTypeID mlirFloatTF32TypeGetTypeID() { @@ -242,7 +242,7 @@ MlirTypeID mlirFloatTF32TypeGetTypeID() { bool mlirTypeIsATF32(MlirType type) { return unwrap(type).isTF32(); } MlirType mlirTF32TypeGet(MlirContext ctx) { - return wrap(FloatType::getTF32(unwrap(ctx))); + return wrap(FloatTF32Type::get(unwrap(ctx))); } MlirTypeID mlirFloat32TypeGetTypeID() { return wrap(Float32Type::getTypeID()); } @@ -250,7 +250,7 @@ MlirTypeID mlirFloat32TypeGetTypeID() { return wrap(Float32Type::getTypeID()); } bool mlirTypeIsAF32(MlirType type) { return unwrap(type).isF32(); } MlirType mlirF32TypeGet(MlirContext ctx) { - return wrap(FloatType::getF32(unwrap(ctx))); + return wrap(Float32Type::get(unwrap(ctx))); } MlirTypeID mlirFloat64TypeGetTypeID() { return wrap(Float64Type::getTypeID()); } @@ -258,7 +258,7 @@ MlirTypeID mlirFloat64TypeGetTypeID() { return wrap(Float64Type::getTypeID()); } bool mlirTypeIsAF64(MlirType type) { return unwrap(type).isF64(); } MlirType mlirF64TypeGet(MlirContext ctx) { - return wrap(FloatType::getF64(unwrap(ctx))); + return wrap(Float64Type::get(unwrap(ctx))); } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp index 83208e0c42da2..ca9883a79dc16 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ 
b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -427,9 +427,11 @@ class LegalizeLaunchFuncOpPattern : public ConvertOpToGpuRuntimeCallPattern { public: LegalizeLaunchFuncOpPattern(const LLVMTypeConverter &typeConverter, - bool kernelBarePtrCallConv) + bool kernelBarePtrCallConv, + bool typeCheckKernelArgs) : ConvertOpToGpuRuntimeCallPattern(typeConverter), - kernelBarePtrCallConv(kernelBarePtrCallConv) {} + kernelBarePtrCallConv(kernelBarePtrCallConv), + typeCheckKernelArgs(typeCheckKernelArgs) {} private: LogicalResult @@ -437,6 +439,7 @@ class LegalizeLaunchFuncOpPattern ConversionPatternRewriter &rewriter) const override; bool kernelBarePtrCallConv; + bool typeCheckKernelArgs; }; /// A rewrite pattern to convert gpu.memcpy operations into a GPU runtime @@ -563,8 +566,8 @@ void GpuToLLVMConversionPass::runOnOperation() { populateFinalizeMemRefToLLVMConversionPatterns(converter, patterns); populateAsyncStructuralTypeConversionsAndLegality(converter, patterns, target); - populateGpuToLLVMConversionPatterns(converter, patterns, - kernelBarePtrCallConv); + populateGpuToLLVMConversionPatterns( + converter, patterns, kernelBarePtrCallConv, typeCheckKernelArgs); if (failed( applyPartialConversion(getOperation(), target, std::move(patterns)))) @@ -966,6 +969,28 @@ LogicalResult LegalizeLaunchFuncOpPattern::matchAndRewrite( // stream must be created to pass to subsequent operations. else if (launchOp.getAsyncToken()) stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult(); + + if (typeCheckKernelArgs) { + // The current non-bare-pointer ABI is a bad fit for `mgpuLaunchKernel`, + // which takes an untyped list of arguments. The type check here prevents + // accidentally violating the assumption made in vulkan-runtime-wrappers.cpp + // and creating an unchecked runtime ABI mismatch. + // TODO(https://github.com/llvm/llvm-project/issues/73457): Change the ABI + // here to remove the need for this type check.
+ for (Value arg : launchOp.getKernelOperands()) { + if (auto memrefTy = dyn_cast(arg.getType())) { + if (memrefTy.getRank() != 1 || + memrefTy.getElementTypeBitWidth() != 32) { + return rewriter.notifyMatchFailure( + launchOp, "Operand to launch op is not a rank-1 memref with " + "32-bit element type."); + } + } else { + return rewriter.notifyMatchFailure( + launchOp, "Operand to launch op is not a memref."); + } + } + } // Lower the kernel operands to match kernel parameters. // Note: If `useBarePtrCallConv` is set in the type converter's options, // the value of `kernelBarePtrCallConv` will be ignored. @@ -1737,7 +1762,8 @@ LogicalResult ConvertCreateBsrOpToGpuRuntimeCallPattern::matchAndRewrite( void mlir::populateGpuToLLVMConversionPatterns(LLVMTypeConverter &converter, RewritePatternSet &patterns, - bool kernelBarePtrCallConv) { + bool kernelBarePtrCallConv, + bool typeCheckKernelArgs) { addOpaquePointerConversion(converter); addOpaquePointerConversion(converter); addOpaquePointerConversion(converter); @@ -1774,7 +1800,8 @@ void mlir::populateGpuToLLVMConversionPatterns(LLVMTypeConverter &converter, ConvertSpGEMMCopyOpToGpuRuntimeCallPattern, ConvertSpMatGetSizeOpToGpuRuntimeCallPattern, ConvertSetCsrPointersOpToGpuRuntimeCallPattern>(converter); - patterns.add(converter, kernelBarePtrCallConv); + patterns.add(converter, kernelBarePtrCallConv, + typeCheckKernelArgs); } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp index 47b5fcd4014a0..671dea8bb415f 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp @@ -40,7 +40,7 @@ createSplitPart(RewriterBase &b, Location loc, TilingInterface op, sizesCopy[dimension] = size; offsetsCopy[dimension] = offset; - // Create the part as it it were a single tile. + // Create the part as if it were a single tile. 
FailureOr tilingResult = op.getTiledImplementation(b, offsetsCopy, sizesCopy); diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp index edc4709ec3c6e..8439b063f2634 100644 --- a/mlir/lib/IR/Builders.cpp +++ b/mlir/lib/IR/Builders.cpp @@ -35,62 +35,56 @@ Location Builder::getFusedLoc(ArrayRef locs, Attribute metadata) { //===----------------------------------------------------------------------===// FloatType Builder::getFloat4E2M1FNType() { - return FloatType::getFloat4E2M1FN(context); + return Float4E2M1FNType::get(context); } FloatType Builder::getFloat6E2M3FNType() { - return FloatType::getFloat6E2M3FN(context); + return Float6E2M3FNType::get(context); } FloatType Builder::getFloat6E3M2FNType() { - return FloatType::getFloat6E3M2FN(context); + return Float6E3M2FNType::get(context); } -FloatType Builder::getFloat8E5M2Type() { - return FloatType::getFloat8E5M2(context); -} +FloatType Builder::getFloat8E5M2Type() { return Float8E5M2Type::get(context); } -FloatType Builder::getFloat8E4M3Type() { - return FloatType::getFloat8E4M3(context); -} +FloatType Builder::getFloat8E4M3Type() { return Float8E4M3Type::get(context); } FloatType Builder::getFloat8E4M3FNType() { - return FloatType::getFloat8E4M3FN(context); + return Float8E4M3FNType::get(context); } FloatType Builder::getFloat8E5M2FNUZType() { - return FloatType::getFloat8E5M2FNUZ(context); + return Float8E5M2FNUZType::get(context); } FloatType Builder::getFloat8E4M3FNUZType() { - return FloatType::getFloat8E4M3FNUZ(context); + return Float8E4M3FNUZType::get(context); } FloatType Builder::getFloat8E4M3B11FNUZType() { - return FloatType::getFloat8E4M3B11FNUZ(context); + return Float8E4M3B11FNUZType::get(context); } -FloatType Builder::getFloat8E3M4Type() { - return FloatType::getFloat8E3M4(context); -} +FloatType Builder::getFloat8E3M4Type() { return Float8E3M4Type::get(context); } FloatType Builder::getFloat8E8M0FNUType() { - return FloatType::getFloat8E8M0FNU(context); + return 
Float8E8M0FNUType::get(context); } -FloatType Builder::getBF16Type() { return FloatType::getBF16(context); } +FloatType Builder::getBF16Type() { return BFloat16Type::get(context); } -FloatType Builder::getF16Type() { return FloatType::getF16(context); } +FloatType Builder::getF16Type() { return Float16Type::get(context); } -FloatType Builder::getTF32Type() { return FloatType::getTF32(context); } +FloatType Builder::getTF32Type() { return FloatTF32Type::get(context); } -FloatType Builder::getF32Type() { return FloatType::getF32(context); } +FloatType Builder::getF32Type() { return Float32Type::get(context); } -FloatType Builder::getF64Type() { return FloatType::getF64(context); } +FloatType Builder::getF64Type() { return Float64Type::get(context); } -FloatType Builder::getF80Type() { return FloatType::getF80(context); } +FloatType Builder::getF80Type() { return Float80Type::get(context); } -FloatType Builder::getF128Type() { return FloatType::getF128(context); } +FloatType Builder::getF128Type() { return Float128Type::get(context); } IndexType Builder::getIndexType() { return IndexType::get(context); } diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp index 41b794bc0aec5..bd1163bddf7ee 100644 --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -117,23 +117,23 @@ FLOAT_TYPE_SEMANTICS(Float128Type, IEEEquad) FloatType Float16Type::scaleElementBitwidth(unsigned scale) const { if (scale == 2) - return FloatType::getF32(getContext()); + return Float32Type::get(getContext()); if (scale == 4) - return FloatType::getF64(getContext()); + return Float64Type::get(getContext()); return FloatType(); } FloatType BFloat16Type::scaleElementBitwidth(unsigned scale) const { if (scale == 2) - return FloatType::getF32(getContext()); + return Float32Type::get(getContext()); if (scale == 4) - return FloatType::getF64(getContext()); + return Float64Type::get(getContext()); return FloatType(); } FloatType 
Float32Type::scaleElementBitwidth(unsigned scale) const { if (scale == 2) - return FloatType::getF64(getContext()); + return Float64Type::get(getContext()); return FloatType(); } diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp index 803fcd8d18fbd..957195202d78d 100644 --- a/mlir/lib/IR/OperationSupport.cpp +++ b/mlir/lib/IR/OperationSupport.cpp @@ -653,15 +653,6 @@ ValueRange::ValueRange(ResultRange values) /// See `llvm::detail::indexed_accessor_range_base` for details. ValueRange::OwnerT ValueRange::offset_base(const OwnerT &owner, ptrdiff_t index) { - if (llvm::isa_and_nonnull(owner)) { - // Prevent out-of-bounds indexing for single values. - // Note that we do allow an index of 1 as is required by 'slice'ing that - // returns an empty range. This also matches the usual rules of C++ of being - // allowed to index past the last element of an array. - assert(index <= 1 && "out-of-bound offset into single-value 'ValueRange'"); - // Return nullptr to quickly cause segmentation faults on misuse. - return index == 0 ? owner : nullptr; - } if (const auto *value = llvm::dyn_cast_if_present(owner)) return {value + index}; if (auto *operand = llvm::dyn_cast_if_present(owner)) @@ -670,10 +661,6 @@ ValueRange::OwnerT ValueRange::offset_base(const OwnerT &owner, } /// See `llvm::detail::indexed_accessor_range_base` for details. 
Value ValueRange::dereference_iterator(const OwnerT &owner, ptrdiff_t index) { - if (auto value = llvm::dyn_cast_if_present(owner)) { - assert(index == 0 && "cannot offset into single-value 'ValueRange'"); - return value; - } if (const auto *value = llvm::dyn_cast_if_present(owner)) return value[index]; if (auto *operand = llvm::dyn_cast_if_present(owner)) diff --git a/mlir/lib/IR/TypeRange.cpp b/mlir/lib/IR/TypeRange.cpp index 7e5f99c884512..f8878303727d4 100644 --- a/mlir/lib/IR/TypeRange.cpp +++ b/mlir/lib/IR/TypeRange.cpp @@ -31,23 +31,12 @@ TypeRange::TypeRange(ValueRange values) : TypeRange(OwnerT(), values.size()) { this->base = result; else if (auto *operand = llvm::dyn_cast_if_present(owner)) this->base = operand; - else if (auto value = llvm::dyn_cast_if_present(owner)) - this->base = value.getType(); else this->base = cast(owner); } /// See `llvm::detail::indexed_accessor_range_base` for details. TypeRange::OwnerT TypeRange::offset_base(OwnerT object, ptrdiff_t index) { - if (llvm::isa_and_nonnull(object)) { - // Prevent out-of-bounds indexing for single values. - // Note that we do allow an index of 1 as is required by 'slice'ing that - // returns an empty range. This also matches the usual rules of C++ of being - // allowed to index past the last element of an array. - assert(index <= 1 && "out-of-bound offset into single-value 'ValueRange'"); - // Return nullptr to quickly cause segmentation faults on misuse. - return index == 0 ? object : nullptr; - } if (const auto *value = llvm::dyn_cast_if_present(object)) return {value + index}; if (auto *operand = llvm::dyn_cast_if_present(object)) @@ -59,10 +48,6 @@ TypeRange::OwnerT TypeRange::offset_base(OwnerT object, ptrdiff_t index) { /// See `llvm::detail::indexed_accessor_range_base` for details. 
Type TypeRange::dereference_iterator(OwnerT object, ptrdiff_t index) { - if (auto type = llvm::dyn_cast_if_present(object)) { - assert(index == 0 && "cannot offset into single-value 'TypeRange'"); - return type; - } if (const auto *value = llvm::dyn_cast_if_present(object)) return (value + index)->getType(); if (auto *operand = llvm::dyn_cast_if_present(object)) diff --git a/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp b/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp index 35001757f214e..35fdbc0be22c3 100644 --- a/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp +++ b/mlir/lib/Target/LLVMIR/DataLayoutImporter.cpp @@ -29,15 +29,15 @@ FloatType mlir::LLVM::detail::getFloatType(MLIRContext *context, unsigned width) { switch (width) { case 16: - return FloatType::getF16(context); + return Float16Type::get(context); case 32: - return FloatType::getF32(context); + return Float32Type::get(context); case 64: - return FloatType::getF64(context); + return Float64Type::get(context); case 80: - return FloatType::getF80(context); + return Float80Type::get(context); case 128: - return FloatType::getF128(context); + return Float128Type::get(context); default: return {}; } diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp index fdc9cee5b5dca..de3c1ab1a3f5e 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -1350,11 +1350,9 @@ allocatePrivateVars(llvm::IRBuilderBase &builder, // Allocate private vars llvm::BranchInst *allocaTerminator = llvm::cast(allocaIP.getBlock()->getTerminator()); - if (allocaTerminator->getNumSuccessors() != 1) { - splitBB(llvm::OpenMPIRBuilder::InsertPointTy( - allocaIP.getBlock(), allocaTerminator->getIterator()), - true, "omp.region.after_alloca"); - } + splitBB(llvm::OpenMPIRBuilder::InsertPointTy(allocaIP.getBlock(), + allocaTerminator->getIterator()), 
+ true, "omp.region.after_alloca"); llvm::IRBuilderBase::InsertPointGuard guard(builder); // Update the allocaTerminator in case the alloca block was split above. @@ -1891,6 +1889,59 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder, llvm::OpenMPIRBuilder::InsertPointTy allocaIP = findAllocaInsertPoint(builder, moduleTranslation); + // The following loop is a workaround until we make private ops' alloca regions + // "pure". See + // https://discourse.llvm.org/t/rfc-openmp-supporting-delayed-task-execution-with-firstprivate-variables/83084/7 + // and https://discourse.llvm.org/t/delayed-privatization-for-omp-wsloop/83989 + // for more info. + for (auto [privateVar, privateDeclOp] : + llvm::zip_equal(mlirPrivateVars, privateDecls)) { + llvm::Value *llvmValue = moduleTranslation.lookupValue(privateVar); + bool isAllocArgUsed = + !privateDeclOp.getAllocRegion().args_begin()->use_empty(); + + // If the alloc region argument is not used, we can skip the workaround. + if (!isAllocArgUsed) + continue; + + llvm::Instruction *definingInst = + llvm::dyn_cast(llvmValue); + + // If the alloc region argument is not defined by an op, it has to dominate + // the current alloc IP. So we skip the workaround. + if (!definingInst) + continue; + + llvm::BasicBlock *definingBlock = definingInst->getParent(); + llvm::Function *definingFun = definingBlock->getParent(); + llvm::Function *allocaFun = allocaIP.getBlock()->getParent(); + + // If the alloc region argument is defined in a different function than the + // current one where allocs are being inserted (for example, we are building + // the outlined function of a target region), we skip the workaround. + if (definingFun != allocaFun) + continue; + + llvm::DominatorTree dt(*definingFun); + // If the defining instruction of the alloc region argument dominates the + // alloca insertion point already, we can skip the workaround.
+ if (dt.dominates(definingInst, allocaIP.getPoint())) + continue; + + // If all the above conditions are violated, then we have to move the alloca + // insertion point below the defining instruction. + + if (definingBlock->getTerminator() == nullptr) { + assert(builder.GetInsertBlock() == definingBlock); + builder.SetInsertPoint(splitBB(llvm::OpenMPIRBuilder::InsertPointTy( + definingBlock, definingBlock->end()), + true, "omp.region.after_defining_block")); + } + + allocaIP = llvm::OpenMPIRBuilder::InsertPointTy( + definingBlock, definingBlock->getTerminator()->getIterator()); + } + SmallVector privateReductionVariables( wsloopOp.getNumReductionVars()); diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index eba86f06d0905..f6826a2362bfd 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -816,7 +816,7 @@ static TypedAttr getScalarConstantAsAttr(OpBuilder &builder, llvm::Type *type = constFloat->getType(); FloatType floatType = type->isBFloatTy() - ? FloatType::getBF16(context) + ? 
BFloat16Type::get(context) : LLVM::detail::getFloatType(context, type->getScalarSizeInBits()); if (!floatType) { emitError(UnknownLoc::get(builder.getContext())) diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir index a5e64fc332754..390ecabaef21b 100644 --- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir +++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir @@ -2766,7 +2766,9 @@ llvm.func @task(%arg0 : !llvm.ptr) { // CHECK: %[[VAL_19:.*]] = load i32, ptr %[[VAL_14]], align 4 // CHECK: store i32 %[[VAL_19]], ptr %[[VAL_15]], align 4 // CHECK: br label %[[VAL_20:.*]] -// CHECK: task.body: ; preds = %omp.private.copy +// CHECK: [[VAL_20]]: +// CHECK: br label %task.body +// CHECK: task.body: ; preds = %[[VAL_20]] // CHECK: br label %omp.task.region // CHECK: omp.task.region: ; preds = %task.body // CHECK: call void @foo(ptr %[[VAL_15]]) diff --git a/mlir/test/Target/LLVMIR/openmp-parallel-reduction-multiblock.mlir b/mlir/test/Target/LLVMIR/openmp-parallel-reduction-multiblock.mlir index 75161bac2faf4..d2e394b2cf6a8 100644 --- a/mlir/test/Target/LLVMIR/openmp-parallel-reduction-multiblock.mlir +++ b/mlir/test/Target/LLVMIR/openmp-parallel-reduction-multiblock.mlir @@ -56,8 +56,10 @@ llvm.func @missordered_blocks_(%arg0: !llvm.ptr {fir.bindc_name = "x"}, %arg1: ! 
// CHECK: %[[VAL_20:.*]] = alloca ptr, align 8 // CHECK: %[[VAL_21:.*]] = alloca ptr, align 8 // CHECK: %[[VAL_22:.*]] = alloca [2 x ptr], align 8 +// CHECK: br label %[[AFTER_ALLOC:omp.region.after_alloca]] +// CHECK: [[AFTER_ALLOC]]: ; preds = %[[PAR_ENTRY]] // CHECK: br label %[[VAL_23:omp.par.region]] -// CHECK: [[VAL_23]]: ; preds = %[[PAR_ENTRY]] +// CHECK: [[VAL_23]]: ; preds = %[[AFTER_ALLOC]] // CHECK: br label %[[VAL_42:.*]] // CHECK: [[RED_INIT:omp.reduction.init]]: // CHECK: br label %[[VAL_25:omp.reduction.neutral]] diff --git a/mlir/test/Target/LLVMIR/openmp-reduction-array-sections.mlir b/mlir/test/Target/LLVMIR/openmp-reduction-array-sections.mlir index 912d5568c5f26..d6ed3086969fb 100644 --- a/mlir/test/Target/LLVMIR/openmp-reduction-array-sections.mlir +++ b/mlir/test/Target/LLVMIR/openmp-reduction-array-sections.mlir @@ -91,9 +91,12 @@ llvm.func @sectionsreduction_(%arg0: !llvm.ptr {fir.bindc_name = "x"}) attribute // CHECK: %[[VAL_14:.*]] = alloca [1 x ptr], align 8 // CHECK: br label %[[VAL_15:.*]] -// CHECK: omp.par.region: ; preds = %[[PAR_ENTRY]] +// CHECK: [[VAL_15]]: +// CHECK: br label %[[PAR_REG:omp.par.region]] + +// CHECK: [[PAR_REG]]: ; preds = %[[VAL_15]] // CHECK: br label %[[VAL_18:.*]] -// CHECK: omp.par.region1: ; preds = %[[VAL_15]] +// CHECK: omp.par.region1: ; preds = %[[PAR_REG]] // CHECK: %[[VAL_19:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8 // CHECK: br label %[[VAL_22:.*]] diff --git a/mlir/test/Target/LLVMIR/openmp-reduction-init-arg.mlir b/mlir/test/Target/LLVMIR/openmp-reduction-init-arg.mlir index 7f2424381e846..8d329bd8ff817 100644 --- a/mlir/test/Target/LLVMIR/openmp-reduction-init-arg.mlir +++ b/mlir/test/Target/LLVMIR/openmp-reduction-init-arg.mlir @@ -63,7 +63,11 @@ module { // CHECK: %[[VAL_23:.*]] = alloca ptr, align 8 // CHECK: %[[VAL_24:.*]] = alloca [2 x ptr], align 8 // CHECK: br label %[[VAL_25:.*]] -// CHECK: omp.par.region: ; preds = %[[PAR_ENTRY]] + +// CHECK: 
[[VAL_25]]: +// CHECK: br label %[[PAR_REG:omp.par.region]] + +// CHECK: [[PAR_REG]]: ; preds = %[[VAL_25]] // CHECK: br label %[[INIT_LABEL:.*]] // CHECK: [[INIT_LABEL]]: // CHECK: %[[VAL_20:.*]] = load { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[VAL_13]], align 8 diff --git a/mlir/test/Target/LLVMIR/openmp-reduction-sections.mlir b/mlir/test/Target/LLVMIR/openmp-reduction-sections.mlir index 05af32622246a..de3b997feb674 100644 --- a/mlir/test/Target/LLVMIR/openmp-reduction-sections.mlir +++ b/mlir/test/Target/LLVMIR/openmp-reduction-sections.mlir @@ -50,9 +50,13 @@ llvm.func @sections_(%arg0: !llvm.ptr {fir.bindc_name = "x"}) attributes {fir.in // CHECK: %[[VAL_20:.*]] = alloca float, align 4 // CHECK: %[[VAL_21:.*]] = alloca [1 x ptr], align 8 // CHECK: br label %[[VAL_22:.*]] -// CHECK: omp.par.region: ; preds = %[[PAR_ENTRY]] + +// CHECK: [[VAL_22]]: +// CHECK: br label %[[PAR_REG:omp.par.region]] + +// CHECK: [[PAR_REG]]: ; preds = %[[VAL_22]] // CHECK: br label %[[VAL_25:.*]] -// CHECK: omp.par.region1: ; preds = %[[VAL_22]] +// CHECK: omp.par.region1: ; preds = %[[PAR_REG]] // CHECK: br label %[[VAL_26:.*]] // CHECK: [[RED_INIT:omp.reduction.init]]: diff --git a/mlir/test/Target/LLVMIR/openmp-simd-private.mlir b/mlir/test/Target/LLVMIR/openmp-simd-private.mlir index 09d76f8edd007..61542aa1aa4d7 100644 --- a/mlir/test/Target/LLVMIR/openmp-simd-private.mlir +++ b/mlir/test/Target/LLVMIR/openmp-simd-private.mlir @@ -12,6 +12,9 @@ omp.private {type = private} @i_privatizer : !llvm.ptr alloc { // CHECK: %{{.*}} = alloca i32, i64 1, align 4 // CHECK: %[[DUMMY:.*]] = alloca float, i64 1, align 4 // CHECK: %[[PRIV_I:.*]] = alloca i32, i64 1, align 4 +// CHECK: br label %[[LATE_ALLOC:.*]] + +// CHECK: [[LATE_ALLOC]]: // CHECK: br label %[[AFTER_ALLOC:.*]] // CHECK: [[AFTER_ALLOC]]: diff --git a/mlir/test/Target/LLVMIR/openmp-target-use-device-nested.mlir b/mlir/test/Target/LLVMIR/openmp-target-use-device-nested.mlir index 3872d908e7a20..ff580e5fea634 
100644 --- a/mlir/test/Target/LLVMIR/openmp-target-use-device-nested.mlir +++ b/mlir/test/Target/LLVMIR/openmp-target-use-device-nested.mlir @@ -12,6 +12,9 @@ // CHECK-NEXT: br i1 %[[VAL_7]], label %[[VAL_8:.*]], label %[[VAL_9:.*]] // CHECK: user_code.entry: ; preds = %[[VAL_10:.*]] // CHECK-NEXT: %[[VAL_11:.*]] = load ptr, ptr %[[VAL_3]], align 8 +// CHECK-NEXT: br label %[[AFTER_ALLOC:.*]] + +// CHECK: [[AFTER_ALLOC]]: // CHECK-NEXT: br label %[[VAL_12:.*]] // CHECK: [[VAL_12]]: diff --git a/mlir/test/Target/LLVMIR/openmp-wsloop-private-late-alloca-workaround.mlir b/mlir/test/Target/LLVMIR/openmp-wsloop-private-late-alloca-workaround.mlir new file mode 100644 index 0000000000000..4d732bbb4e3b6 --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-wsloop-private-late-alloca-workaround.mlir @@ -0,0 +1,47 @@ +// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s + +// Tests the "impure" alloc region workaround until `omp.private` is updated. +// See +// https://discourse.llvm.org/t/rfc-openmp-supporting-delayed-task-execution-with-firstprivate-variables/83084/7 +// and https://discourse.llvm.org/t/delayed-privatization-for-omp-wsloop/83989 +// for more info. 
+ +omp.private {type = private} @impure_alloca_privatizer : !llvm.ptr alloc { +^bb0(%arg0: !llvm.ptr): + %0 = llvm.mlir.constant(1 : i64) : i64 + %1 = llvm.alloca %0 x i32 {bindc_name = "i", pinned} : (i64) -> !llvm.ptr + %3 = llvm.getelementptr %arg0[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr)> + omp.yield(%1 : !llvm.ptr) +} + +llvm.func @test_alloca_ip_workaround() { + omp.target { + %65 = llvm.mlir.constant(1 : i32) : i32 + %66 = llvm.alloca %65 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr + %67 = llvm.mlir.constant(0 : index) : i64 + %68 = llvm.mlir.constant(10 : i32) : i32 + %69 = llvm.mlir.constant(1 : i32) : i32 + omp.wsloop private(@impure_alloca_privatizer %66 -> %arg6 : !llvm.ptr) { + omp.loop_nest (%arg8) : i32 = (%69) to (%68) inclusive step (%69) { + omp.yield + } + } + omp.terminator + } + llvm.return +} + +// CHECK-LABEL: define {{.*}} @__omp_offloading_{{.*}}_test_alloca_ip_workaround + +// CHECK: omp.target: +// CHECK: %[[ALLOC_REG_ARG:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, align 8 +// CHECK: br label %omp.private.latealloc + +// CHECK: omp.private.latealloc: +// CHECK: %[[PRIV_ALLOC:.*]] = alloca i32, i64 1, align 4 +// The usage of `ALLOC_REG_ARG` in the inlined alloc region is the reason for +// introducing the workaround. +// CHECK: %{{.*}} = getelementptr { ptr }, ptr %[[ALLOC_REG_ARG]], i32 0 +// CHECK: br label %omp.region.after_defining_block + + diff --git a/mlir/test/Target/LLVMIR/openmp-wsloop-test-block-structure.mlir b/mlir/test/Target/LLVMIR/openmp-wsloop-test-block-structure.mlir new file mode 100644 index 0000000000000..19ae425e20403 --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-wsloop-test-block-structure.mlir @@ -0,0 +1,54 @@ +// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s + +// Tests regression uncovered by "1009/1009_0029.f90" (from the Fujitsu test +// suite). 
This test replicates a simplified version of the block structure +// produced by the Fujitsu test. + +llvm.func @test_block_structure() { + %i1 = llvm.mlir.constant(1 : index) : i1 + %i64 = llvm.mlir.constant(1 : index) : i64 + llvm.br ^bb1(%i64, %i64 : i64, i64) + +^bb1(%20: i64, %21: i64): // 2 preds: ^bb0, ^bb5 + llvm.cond_br %i1, ^bb2, ^bb6 + +^bb2: // pred: ^bb1 + llvm.br ^bb3(%i64, %i64 : i64, i64) + +^bb3(%25: i64, %26: i64): // 2 preds: ^bb2, ^bb4 + llvm.cond_br %i1, ^bb4, ^bb5 + +^bb4: // pred: ^bb3 + omp.wsloop { + omp.loop_nest (%arg0) : i64 = (%i64) to (%i64) inclusive step (%i64) { + omp.yield + } + } + llvm.br ^bb1(%i64, %i64 : i64, i64) + +^bb5: // pred: ^bb3 + llvm.br ^bb1(%i64, %i64 : i64, i64) + +^bb6: // pred: ^bb1 + llvm.return +} + +// CHECK: define void @test_block_structure +// CHECK: br label %[[AFTER_ALLOCA:.*]] + +// CHECK: [[AFTER_ALLOCA:]]: +// CHECK: br label %[[BB1:.*]] + +// CHECK: [[BB1:]]: +// CHECK: %{{.*}} = phi i64 +// CHECK: br i1 true, label %[[BB2:.*]], label %{{.*}} + +// CHECK: [[BB2]]: +// CHECK: br label %[[BB3:.*]] + +// CHECK: [[BB3]]: +// CHECK: %{{.*}} = phi i64 +// CHECK: br i1 true, label %[[BB4:.*]], label %{{.*}} + +// CHECK: [[BB4]]: +// CHECK: br label %omp_loop.preheader diff --git a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp index 598678f64cb46..c32bd24014215 100644 --- a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp +++ b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp @@ -101,8 +101,7 @@ void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) { using affine::matcher::Op; SmallVector shape(clTestVectorShapeRatio.begin(), clTestVectorShapeRatio.end()); - auto subVectorType = - VectorType::get(shape, FloatType::getF32(f.getContext())); + auto subVectorType = VectorType::get(shape, Float32Type::get(f.getContext())); // Only filter operations that operate on a strict super-vector and have one // return. 
This makes testing easier. auto filter = [&](Operation &op) { diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp index 5b7c36c9b97bf..b20e0816bd17c 100644 --- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp +++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp @@ -1286,7 +1286,7 @@ struct TestTypeConverter : public TypeConverter { // Convert I64 to F64. if (t.isSignlessInteger(64)) { - results.push_back(FloatType::getF64(t.getContext())); + results.push_back(Float64Type::get(t.getContext())); return success(); } @@ -1298,7 +1298,7 @@ struct TestTypeConverter : public TypeConverter { // Split F32 into F16,F16. if (t.isF32()) { - results.assign(2, FloatType::getF16(t.getContext())); + results.assign(2, Float16Type::get(t.getContext())); return success(); } @@ -1826,7 +1826,7 @@ struct TestTypeConversionDriver return type; // Allow converting BF16/F16/F32 to F64. if (type.isBF16() || type.isF16() || type.isF32()) - return FloatType::getF64(type.getContext()); + return Float64Type::get(type.getContext()); // Otherwise, the type is illegal. 
return nullptr; }); diff --git a/mlir/test/lib/Pass/TestVulkanRunnerPipeline.cpp b/mlir/test/lib/Pass/TestVulkanRunnerPipeline.cpp index 789c4d76cee0d..a3624eb31e26e 100644 --- a/mlir/test/lib/Pass/TestVulkanRunnerPipeline.cpp +++ b/mlir/test/lib/Pass/TestVulkanRunnerPipeline.cpp @@ -11,9 +11,13 @@ //===----------------------------------------------------------------------===// #include "mlir/Conversion/ConvertToSPIRV/ConvertToSPIRVPass.h" +#include "mlir/Conversion/GPUCommon/GPUCommonPass.h" #include "mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/GPU/IR/GPUDialect.h" #include "mlir/Dialect/GPU/Transforms/Passes.h" +#include "mlir/Dialect/LLVMIR/Transforms/RequestCWrappers.h" #include "mlir/Dialect/MemRef/Transforms/Passes.h" #include "mlir/Dialect/SPIRV/IR/SPIRVOps.h" #include "mlir/Dialect/SPIRV/Transforms/Passes.h" @@ -29,6 +33,9 @@ struct VulkanRunnerPipelineOptions Option spirvWebGPUPrepare{ *this, "spirv-webgpu-prepare", llvm::cl::desc("Run MLIR transforms used when targetting WebGPU")}; + Option toLlvm{*this, "to-llvm", + llvm::cl::desc("Run MLIR transforms to lower host code " + "to LLVM, intended for mlir-cpu-runner")}; }; void buildTestVulkanRunnerPipeline(OpPassManager &passManager, @@ -56,6 +63,19 @@ void buildTestVulkanRunnerPipeline(OpPassManager &passManager, spirvModulePM.addPass(spirv::createSPIRVWebGPUPreparePass()); passManager.addPass(createGpuModuleToBinaryPass()); + + if (options.toLlvm) { + passManager.addPass(createFinalizeMemRefToLLVMConversionPass()); + passManager.nest().addPass( + LLVM::createRequestCWrappersPass()); + // vulkan-runtime-wrappers.cpp uses the non-bare-pointer calling convention, + // and the type check is needed to prevent accidental ABI mismatches. 
+ GpuToLLVMConversionPassOptions opt; + opt.hostBarePtrCallConv = false; + opt.kernelBarePtrCallConv = false; + opt.typeCheckKernelArgs = true; + passManager.addPass(createGpuToLLVMConversionPass(opt)); + } } } // namespace @@ -65,7 +85,7 @@ void registerTestVulkanRunnerPipeline() { PassPipelineRegistration( "test-vulkan-runner-pipeline", "Runs a series of passes for lowering GPU-dialect MLIR to " - "SPIR-V-dialect MLIR intended for mlir-vulkan-runner.", + "SPIR-V-dialect MLIR intended for mlir-vulkan-runner or mlir-cpu-runner.", buildTestVulkanRunnerPipeline); } } // namespace mlir::test diff --git a/mlir/test/lib/Transforms/TestDialectConversion.cpp b/mlir/test/lib/Transforms/TestDialectConversion.cpp index a03bf0a1023d5..8278937a1014c 100644 --- a/mlir/test/lib/Transforms/TestDialectConversion.cpp +++ b/mlir/test/lib/Transforms/TestDialectConversion.cpp @@ -34,7 +34,7 @@ struct PDLLTypeConverter : public TypeConverter { static LogicalResult convertType(Type t, SmallVectorImpl &results) { // Convert I64 to F64. 
if (t.isSignlessInteger(64)) { - results.push_back(FloatType::getF64(t.getContext())); + results.push_back(Float64Type::get(t.getContext())); return success(); } diff --git a/mlir/test/mlir-vulkan-runner/addf.mlir b/mlir/test/mlir-vulkan-runner/addf.mlir index d435f75a28805..71f87a8b0d5c8 100644 --- a/mlir/test/mlir-vulkan-runner/addf.mlir +++ b/mlir/test/mlir-vulkan-runner/addf.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s // CHECK: [3.3, 3.3, 3.3, 3.3, 3.3, 3.3, 3.3, 3.3] module attributes { diff --git a/mlir/test/mlir-vulkan-runner/addf_if.mlir b/mlir/test/mlir-vulkan-runner/addf_if.mlir index 8ae995c65e7e8..6fe51a83482dc 100644 --- a/mlir/test/mlir-vulkan-runner/addf_if.mlir +++ b/mlir/test/mlir-vulkan-runner/addf_if.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s // CHECK: [3.3, 3.3, 3.3, 3.3, 0, 0, 0, 0] module attributes { diff --git a/mlir/test/mlir-vulkan-runner/addui_extended.mlir b/mlir/test/mlir-vulkan-runner/addui_extended.mlir index b8db451421459..0894bc301f2e3 100644 --- a/mlir/test/mlir-vulkan-runner/addui_extended.mlir +++ b/mlir/test/mlir-vulkan-runner/addui_extended.mlir @@ -1,13 +1,13 @@ // Make sure that addition with carry produces expected results // with and without expansion to primitive add/cmp ops for WebGPU. 
-// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s -// RUN: mlir-opt %s -test-vulkan-runner-pipeline=spirv-webgpu-prepare \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline="spirv-webgpu-prepare to-llvm" \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/test/mlir-vulkan-runner/smul_extended.mlir b/mlir/test/mlir-vulkan-runner/smul_extended.mlir index 334aec843e197..0ef86f46562e8 100644 --- a/mlir/test/mlir-vulkan-runner/smul_extended.mlir +++ b/mlir/test/mlir-vulkan-runner/smul_extended.mlir @@ -1,13 +1,13 @@ // Make sure that signed extended multiplication produces expected results // with and without expansion to primitive mul/add ops for WebGPU. 
-// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s -// RUN: mlir-opt %s -test-vulkan-runner-pipeline=spirv-webgpu-prepare \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline="spirv-webgpu-prepare to-llvm" \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/test/mlir-vulkan-runner/time.mlir b/mlir/test/mlir-vulkan-runner/time.mlir index 6a0bfef36793b..f628447874238 100644 --- a/mlir/test/mlir-vulkan-runner/time.mlir +++ b/mlir/test/mlir-vulkan-runner/time.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils --entry-point-result=void | FileCheck %s // CHECK: Compute shader execution time // CHECK: Command buffer submit time diff --git a/mlir/test/mlir-vulkan-runner/umul_extended.mlir b/mlir/test/mlir-vulkan-runner/umul_extended.mlir index 803b8c3d336d3..5936c808435c1 100644 --- a/mlir/test/mlir-vulkan-runner/umul_extended.mlir +++ b/mlir/test/mlir-vulkan-runner/umul_extended.mlir @@ -1,13 +1,13 @@ // Make sure that unsigned extended multiplication produces expected results // with and without expansion to primitive mul/add ops for WebGPU. 
-// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s -// RUN: mlir-opt %s -test-vulkan-runner-pipeline=spirv-webgpu-prepare \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline="spirv-webgpu-prepare to-llvm" \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/test/mlir-vulkan-runner/vector-deinterleave.mlir b/mlir/test/mlir-vulkan-runner/vector-deinterleave.mlir index 097f3905949d8..ebeb19cd6bcc5 100644 --- a/mlir/test/mlir-vulkan-runner/vector-deinterleave.mlir +++ b/mlir/test/mlir-vulkan-runner/vector-deinterleave.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/test/mlir-vulkan-runner/vector-interleave.mlir b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir index 5dd4abbd1fb19..9314baf9b39c7 100644 --- a/mlir/test/mlir-vulkan-runner/vector-interleave.mlir +++ b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir index be97b48b1812e..cf3e2c569426b 100644 --- 
a/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir +++ b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -test-vulkan-runner-pipeline \ -// RUN: | mlir-vulkan-runner - \ +// RUN: mlir-opt %s -test-vulkan-runner-pipeline=to-llvm \ +// RUN: | mlir-cpu-runner - \ // RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ // RUN: --entry-point-result=void | FileCheck %s diff --git a/mlir/tools/mlir-vulkan-runner/vulkan-runtime-wrappers.cpp b/mlir/tools/mlir-vulkan-runner/vulkan-runtime-wrappers.cpp index f1ed571734459..ffd1114cec6aa 100644 --- a/mlir/tools/mlir-vulkan-runner/vulkan-runtime-wrappers.cpp +++ b/mlir/tools/mlir-vulkan-runner/vulkan-runtime-wrappers.cpp @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "VulkanRuntime.h" @@ -26,6 +28,38 @@ namespace { +class VulkanModule; + +// Class to be a thing that can be returned from `mgpuModuleGetFunction`. +struct VulkanFunction { + VulkanModule *module; + std::string name; + + VulkanFunction(VulkanModule *module, const char *name) + : module(module), name(name) {} +}; + +// Class to own a copy of the SPIR-V provided to `mgpuModuleLoad` and to manage +// allocation of pointers returned from `mgpuModuleGetFunction`. 
+class VulkanModule { +public: + VulkanModule(const uint8_t *ptr, size_t sizeInBytes) + : blob(ptr, ptr + sizeInBytes) {} + ~VulkanModule() = default; + + VulkanFunction *getFunction(const char *name) { + return functions.emplace_back(std::make_unique(this, name)) + .get(); + } + + uint8_t *blobData() { return blob.data(); } + size_t blobSizeInBytes() const { return blob.size(); } + +private: + std::vector blob; + std::vector> functions; +}; + class VulkanRuntimeManager { public: VulkanRuntimeManager() = default; @@ -91,6 +125,94 @@ void bindMemRef(void *vkRuntimeManager, DescriptorSetIndex setIndex, } extern "C" { + +//===----------------------------------------------------------------------===// +// +// New wrappers, intended for mlir-cpu-runner. Calls to these are generated by +// GPUToLLVMConversionPass. +// +//===----------------------------------------------------------------------===// + +VULKAN_WRAPPER_SYMBOL_EXPORT void *mgpuStreamCreate() { + return new VulkanRuntimeManager(); +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void mgpuStreamDestroy(void *vkRuntimeManager) { + delete static_cast(vkRuntimeManager); +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void mgpuStreamSynchronize(void *) { + // Currently a no-op as the other operations are synchronous. +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void *mgpuModuleLoad(const void *data, + size_t gpuBlobSize) { + // gpuBlobSize is the size of the data in bytes. 
+ return new VulkanModule(static_cast(data), gpuBlobSize); +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void mgpuModuleUnload(void *vkModule) { + delete static_cast(vkModule); +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void *mgpuModuleGetFunction(void *vkModule, + const char *name) { + if (!vkModule) + abort(); + return static_cast(vkModule)->getFunction(name); +} + +VULKAN_WRAPPER_SYMBOL_EXPORT void +mgpuLaunchKernel(void *vkKernel, size_t gridX, size_t gridY, size_t gridZ, + size_t /*blockX*/, size_t /*blockY*/, size_t /*blockZ*/, + size_t /*smem*/, void *vkRuntimeManager, void **params, + void ** /*extra*/, size_t paramsCount) { + auto manager = static_cast(vkRuntimeManager); + + // The non-bare-pointer memref ABI interacts badly with mgpuLaunchKernel's + // signature: + // - The memref descriptor struct gets split into several elements, each + // passed as their own "param". + // - No metadata is provided as to the rank or element type/size of a memref. + // Here we assume that all MemRefs have rank 1 and an element size of + // 4 bytes. This means each descriptor struct will have five members. + // TODO(https://github.com/llvm/llvm-project/issues/73457): Refactor the + // ABI/API of mgpuLaunchKernel to use a different ABI for memrefs, so + // that other memref types can also be used. This will allow migrating + // the remaining tests and removal of mlir-vulkan-runner. + const size_t paramsPerMemRef = 5; + if (paramsCount % paramsPerMemRef != 0) { + abort(); + } + const DescriptorSetIndex setIndex = 0; + BindingIndex bindIndex = 0; + for (size_t i = 0; i < paramsCount; i += paramsPerMemRef) { + auto memref = static_cast *>(params[i]); + bindMemRef(manager, setIndex, bindIndex, memref); + ++bindIndex; + } + + manager->setNumWorkGroups(NumWorkGroups{static_cast(gridX), + static_cast(gridY), + static_cast(gridZ)}); + + auto function = static_cast(vkKernel); + // Expected size should be in bytes. 
+ manager->setShaderModule( + function->module->blobData(), + static_cast(function->module->blobSizeInBytes())); + manager->setEntryPoint(function->name.c_str()); + + manager->runOnVulkan(); +} + +//===----------------------------------------------------------------------===// +// +// Old wrappers, intended for mlir-vulkan-runner. Calls to these are generated +// by LaunchFuncToVulkanCallsPass. +// +//===----------------------------------------------------------------------===// + /// Initializes `VulkanRuntimeManager` and returns a pointer to it. VULKAN_WRAPPER_SYMBOL_EXPORT void *initVulkan() { return new VulkanRuntimeManager(); diff --git a/mlir/tools/tblgen-to-irdl/OpDefinitionsGen.cpp b/mlir/tools/tblgen-to-irdl/OpDefinitionsGen.cpp index a763105fa0fd6..4e354e535dd3a 100644 --- a/mlir/tools/tblgen-to-irdl/OpDefinitionsGen.cpp +++ b/mlir/tools/tblgen-to-irdl/OpDefinitionsGen.cpp @@ -107,15 +107,15 @@ std::optional recordToType(MLIRContext *ctx, const Record &predRec) { auto width = predRec.getValueAsInt("bitwidth"); switch (width) { case 16: - return FloatType::getF16(ctx); + return Float16Type::get(ctx); case 32: - return FloatType::getF32(ctx); + return Float32Type::get(ctx); case 64: - return FloatType::getF64(ctx); + return Float64Type::get(ctx); case 80: - return FloatType::getF80(ctx); + return Float80Type::get(ctx); case 128: - return FloatType::getF128(ctx); + return Float128Type::get(ctx); } } @@ -124,39 +124,39 @@ std::optional recordToType(MLIRContext *ctx, const Record &predRec) { } if (predRec.getName() == "BF16") { - return FloatType::getBF16(ctx); + return BFloat16Type::get(ctx); } if (predRec.getName() == "TF32") { - return FloatType::getTF32(ctx); + return FloatTF32Type::get(ctx); } if (predRec.getName() == "F8E4M3FN") { - return FloatType::getFloat8E4M3FN(ctx); + return Float8E4M3FNType::get(ctx); } if (predRec.getName() == "F8E5M2") { - return FloatType::getFloat8E5M2(ctx); + return Float8E5M2Type::get(ctx); } if (predRec.getName() == 
"F8E4M3") { - return FloatType::getFloat8E4M3(ctx); + return Float8E4M3Type::get(ctx); } if (predRec.getName() == "F8E4M3FNUZ") { - return FloatType::getFloat8E4M3FNUZ(ctx); + return Float8E4M3FNUZType::get(ctx); } if (predRec.getName() == "F8E4M3B11FNUZ") { - return FloatType::getFloat8E4M3B11FNUZ(ctx); + return Float8E4M3B11FNUZType::get(ctx); } if (predRec.getName() == "F8E5M2FNUZ") { - return FloatType::getFloat8E5M2FNUZ(ctx); + return Float8E5M2FNUZType::get(ctx); } if (predRec.getName() == "F8E3M4") { - return FloatType::getFloat8E3M4(ctx); + return Float8E3M4Type::get(ctx); } if (predRec.isSubClassOf("Complex")) { diff --git a/mlir/unittests/IR/AttributeTest.cpp b/mlir/unittests/IR/AttributeTest.cpp index b6033a38d0d4f..9203248a83baf 100644 --- a/mlir/unittests/IR/AttributeTest.cpp +++ b/mlir/unittests/IR/AttributeTest.cpp @@ -154,7 +154,7 @@ TEST(DenseSplatTest, IntAttrSplat) { TEST(DenseSplatTest, F32Splat) { MLIRContext context; - FloatType floatTy = FloatType::getF32(&context); + FloatType floatTy = Float32Type::get(&context); float value = 10.0; testSplat(floatTy, value); @@ -162,7 +162,7 @@ TEST(DenseSplatTest, F32Splat) { TEST(DenseSplatTest, F64Splat) { MLIRContext context; - FloatType floatTy = FloatType::getF64(&context); + FloatType floatTy = Float64Type::get(&context); double value = 10.0; testSplat(floatTy, APFloat(value)); @@ -170,7 +170,7 @@ TEST(DenseSplatTest, F64Splat) { TEST(DenseSplatTest, FloatAttrSplat) { MLIRContext context; - FloatType floatTy = FloatType::getF32(&context); + FloatType floatTy = Float32Type::get(&context); Attribute value = FloatAttr::get(floatTy, 10.0); testSplat(floatTy, value); @@ -178,7 +178,7 @@ TEST(DenseSplatTest, FloatAttrSplat) { TEST(DenseSplatTest, BF16Splat) { MLIRContext context; - FloatType floatTy = FloatType::getBF16(&context); + FloatType floatTy = BFloat16Type::get(&context); Attribute value = FloatAttr::get(floatTy, 10.0); testSplat(floatTy, value); @@ -204,7 +204,7 @@ TEST(DenseSplatTest, 
StringAttrSplat) { TEST(DenseComplexTest, ComplexFloatSplat) { MLIRContext context; - ComplexType complexType = ComplexType::get(FloatType::getF32(&context)); + ComplexType complexType = ComplexType::get(Float32Type::get(&context)); std::complex value(10.0, 15.0); testSplat(complexType, value); } @@ -218,7 +218,7 @@ TEST(DenseComplexTest, ComplexIntSplat) { TEST(DenseComplexTest, ComplexAPFloatSplat) { MLIRContext context; - ComplexType complexType = ComplexType::get(FloatType::getF32(&context)); + ComplexType complexType = ComplexType::get(Float32Type::get(&context)); std::complex value(APFloat(10.0f), APFloat(15.0f)); testSplat(complexType, value); } @@ -409,7 +409,7 @@ TEST(SparseElementsAttrTest, GetZero) { context.allowUnregisteredDialects(); IntegerType intTy = IntegerType::get(&context, 32); - FloatType floatTy = FloatType::getF32(&context); + FloatType floatTy = Float32Type::get(&context); Type stringTy = OpaqueType::get(StringAttr::get(&context, "test"), "string"); ShapedType tensorI32 = RankedTensorType::get({2, 2}, intTy); diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp index 2a1b8d2ef7f55..f94dc78445807 100644 --- a/mlir/unittests/IR/OperationSupportTest.cpp +++ b/mlir/unittests/IR/OperationSupportTest.cpp @@ -313,21 +313,4 @@ TEST(OperationEquivalenceTest, HashWorksWithFlags) { op2->destroy(); } -TEST(ValueRangeTest, ValueConstructable) { - MLIRContext context; - Builder builder(&context); - - Operation *useOp = - createOp(&context, /*operands=*/std::nullopt, builder.getIntegerType(16)); - // Valid construction despite a temporary 'OpResult'. 
- ValueRange operands = useOp->getResult(0); - - useOp->setOperands(operands); - EXPECT_EQ(useOp->getNumOperands(), 1u); - EXPECT_EQ(useOp->getOperand(0), useOp->getResult(0)); - - useOp->dropAllUses(); - useOp->destroy(); -} - } // namespace diff --git a/mlir/unittests/IR/ShapedTypeTest.cpp b/mlir/unittests/IR/ShapedTypeTest.cpp index 7a5b0722a03ba..c2900b5aaeeeb 100644 --- a/mlir/unittests/IR/ShapedTypeTest.cpp +++ b/mlir/unittests/IR/ShapedTypeTest.cpp @@ -24,7 +24,7 @@ TEST(ShapedTypeTest, CloneMemref) { MLIRContext context; Type i32 = IntegerType::get(&context, 32); - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); Attribute memSpace = IntegerAttr::get(IntegerType::get(&context, 64), 7); Type memrefOriginalType = i32; llvm::SmallVector memrefOriginalShape({10, 20}); @@ -71,7 +71,7 @@ TEST(ShapedTypeTest, CloneTensor) { MLIRContext context; Type i32 = IntegerType::get(&context, 32); - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); Type tensorOriginalType = i32; llvm::SmallVector tensorOriginalShape({10, 20}); @@ -111,7 +111,7 @@ TEST(ShapedTypeTest, CloneVector) { MLIRContext context; Type i32 = IntegerType::get(&context, 32); - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); Type vectorOriginalType = i32; llvm::SmallVector vectorOriginalShape({10, 20}); @@ -134,7 +134,7 @@ TEST(ShapedTypeTest, CloneVector) { TEST(ShapedTypeTest, VectorTypeBuilder) { MLIRContext context; - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); SmallVector shape{2, 4, 8, 9, 1}; SmallVector scalableDims{true, false, true, false, false}; @@ -192,7 +192,7 @@ TEST(ShapedTypeTest, VectorTypeBuilder) { TEST(ShapedTypeTest, RankedTensorTypeBuilder) { MLIRContext context; - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); SmallVector shape{2, 4, 8, 16, 32}; RankedTensorType tensorType = RankedTensorType::get(shape, f32); @@ -254,7 
+254,7 @@ class TensorWithString : public RankedTensorType { TEST(ShapedTypeTest, RankedTensorTypeView) { MLIRContext context; - Type f32 = FloatType::getF32(&context); + Type f32 = Float32Type::get(&context); Type noEncodingRankedTensorType = RankedTensorType::get({10, 20}, f32); diff --git a/offload/DeviceRTL/include/Synchronization.h b/offload/DeviceRTL/include/Synchronization.h index e1968675550d4..a4d4fc08837b2 100644 --- a/offload/DeviceRTL/include/Synchronization.h +++ b/offload/DeviceRTL/include/Synchronization.h @@ -66,7 +66,7 @@ V add(Ty *Address, V Val, atomic::OrderingTy Ordering) { template > V load(Ty *Address, atomic::OrderingTy Ordering) { - return add(Address, Ty(0), Ordering); + return __scoped_atomic_load_n(Address, Ordering, __MEMORY_SCOPE_DEVICE); } template > diff --git a/utils/bazel/WORKSPACE b/utils/bazel/WORKSPACE index 69373ed574e2a..eeb1c692ac871 100644 --- a/utils/bazel/WORKSPACE +++ b/utils/bazel/WORKSPACE @@ -5,11 +5,11 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") -SKYLIB_VERSION = "1.3.0" +SKYLIB_VERSION = "1.7.1" http_archive( name = "bazel_skylib", - sha256 = "74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506", + sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz".format(version = SKYLIB_VERSION), "https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz".format(version = SKYLIB_VERSION), @@ -18,9 +18,9 @@ http_archive( http_archive( name = "rules_cc", - urls = ["https://github.com/bazelbuild/rules_cc/releases/download/0.0.17/rules_cc-0.0.17.tar.gz"], sha256 = "abc605dd850f813bb37004b77db20106a19311a96b2da1c92b789da529d28fe1", strip_prefix = "rules_cc-0.0.17", + urls = 
["https://github.com/bazelbuild/rules_cc/releases/download/0.0.17/rules_cc-0.0.17.tar.gz"], ) new_local_repository( @@ -158,9 +158,9 @@ maybe( maybe( http_archive, name = "robin_map", - strip_prefix = "robin-map-1.3.0", - sha256 = "a8424ad3b0affd4c57ed26f0f3d8a29604f0e1f2ef2089f497f614b1c94c7236", build_file = "@llvm-raw//utils/bazel/third_party_build:robin_map.BUILD", + sha256 = "a8424ad3b0affd4c57ed26f0f3d8a29604f0e1f2ef2089f497f614b1c94c7236", + strip_prefix = "robin-map-1.3.0", url = "https://github.com/Tessil/robin-map/archive/refs/tags/v1.3.0.tar.gz", ) diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel index a86c295b04cb1..f72babb646a85 100644 --- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel @@ -712,6 +712,7 @@ cc_library( "include/clang/Basic/DiagnosticFrontendKinds.inc", "include/clang/Basic/DiagnosticGroups.inc", "include/clang/Basic/DiagnosticIndexName.inc", + "include/clang/Basic/DiagnosticInstallAPIKinds.inc", "include/clang/Basic/DiagnosticLexKinds.inc", "include/clang/Basic/DiagnosticParseKinds.inc", "include/clang/Basic/DiagnosticRefactoringKinds.inc", @@ -1459,7 +1460,6 @@ cc_library( hdrs = glob(["include/clang/Tooling/DependencyScanning/**/*.h"]), deps = [ ":basic", - ":codegen", ":driver", ":frontend", ":lex", @@ -1951,14 +1951,12 @@ cc_library( "lib/Interpreter/*.cpp", "lib/Interpreter/*.h", ], - exclude = ["lib/Interpreter/Wasm.cpp"], - ), - hdrs = glob( - [ - "include/clang/Interpreter/*.h", + exclude = [ + "lib/Interpreter/Wasm.cpp", + "lib/Interpreter/Wasm.h", ], - exclude = ["lib/Interpreter/Wasm.cpp"], ), + hdrs = glob(["include/clang/Interpreter/*.h"]), includes = ["include"], deps = [ ":analysis", @@ -2025,7 +2023,6 @@ cc_library( "//llvm:BitWriter", "//llvm:BitstreamReader", "//llvm:CodeGen", - "//llvm:CodeGenTypes", "//llvm:Core", "//llvm:Coroutines", "//llvm:Coverage", @@ -2148,7 +2145,6 @@ cc_library( 
"include/clang/Serialization/AttrPCHRead.inc", "include/clang/Serialization/AttrPCHWrite.inc", ] + glob([ - "include/clang/Frontend/*.h", "lib/Serialization/*.cpp", "lib/Serialization/*.h", ]), @@ -2160,15 +2156,11 @@ cc_library( "include/clang/Serialization/*.def", ]), deps = [ - ":apinotes", ":ast", ":basic", - ":driver", ":lex", ":sema", ":serialization_attr_gen", - ":static_analyzer_core_options", - ":support", ":type_nodes_gen", "//llvm:BitReader", "//llvm:BitWriter", @@ -2763,6 +2755,30 @@ cc_library( ], ) +proto_library( + name = "cxx-proto", + srcs = ["tools/clang-fuzzer/cxx_proto.proto"], +) + +cc_proto_library( + name = "cxx_cc_proto", + deps = [":cxx-proto"], +) + +cc_library( + name = "proto-to-cxx-lib", + srcs = ["tools/clang-fuzzer/proto-to-cxx/proto_to_cxx.cpp"], + hdrs = ["tools/clang-fuzzer/proto-to-cxx/proto_to_cxx.h"], + includes = ["tools/clang-fuzzer"], + deps = [":cxx_cc_proto"], +) + +cc_binary( + name = "clang-proto-to-cxx", + srcs = ["tools/clang-fuzzer/proto-to-cxx/proto_to_cxx_main.cpp"], + deps = [":proto-to-cxx-lib"], +) + cc_library( name = "clang-fuzzer-initialize", srcs = ["tools/clang-fuzzer/fuzzer-initialize/fuzzer_initialize.cpp"], diff --git a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h index dba7129e4c87d..7a8e14e06ddc5 100644 --- a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h +++ b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h @@ -102,9 +102,6 @@ /* Define to 1 if you have the `getpagesize' function. */ #define HAVE_GETPAGESIZE 1 -/* Define to 1 if you have the `getrlimit' function. */ -#define HAVE_GETRLIMIT 1 - /* Define to 1 if you have the `getrusage' function. */ #define HAVE_GETRUSAGE 1 @@ -168,42 +165,24 @@ /* Define to 1 if you have the `setenv' function. */ /* HAVE_SETENV defined in Bazel */ -/* Define to 1 if you have the `setrlimit' function. 
*/ -#define HAVE_SETRLIMIT 1 - /* Define to 1 if you have the `sigaltstack' function. */ #define HAVE_SIGALTSTACK 1 -/* Define to 1 if you have the header file. */ -#define HAVE_SIGNAL_H 1 - /* Define to 1 if you have the `strerror_r' function. */ /* HAVE_STRERROR_R defined in Bazel */ /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_IOCTL_H 1 - /* Define to 1 if you have the header file. */ #define HAVE_SYS_MMAN_H 1 -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_RESOURCE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - /* Define to 1 if stat struct has st_mtimespec member .*/ /* #undef HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC */ /* Define to 1 if stat struct has st_mtim member. */ /* HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC defined in Bazel */ -/* Define to 1 if you have the header file. */ -#define HAVE_TERMIOS_H 1 - /* Define to 1 if you have the header file. */ /* HAVE_UNISTD_H defined in Bazel */ diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel index 873fb2d18bfb2..f2e32b1b8c6db 100644 --- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel @@ -549,9 +549,12 @@ cc_library( "//mlir:ConvertToSPIRV", "//mlir:FuncDialect", "//mlir:GPUDialect", + "//mlir:GPUToGPURuntimeTransforms", "//mlir:GPUToSPIRV", "//mlir:GPUTransforms", "//mlir:IR", + "//mlir:LLVMIRTransforms", + "//mlir:MemRefToLLVM", "//mlir:MemRefTransforms", "//mlir:Pass", "//mlir:SPIRVDialect", diff --git a/utils/bazel/llvm_configs/config.h.cmake b/utils/bazel/llvm_configs/config.h.cmake index d51d01017d986..1d2d00a3b758b 100644 --- a/utils/bazel/llvm_configs/config.h.cmake +++ b/utils/bazel/llvm_configs/config.h.cmake @@ -90,9 +90,6 @@ /* Define to 1 if you have the `getpagesize' function. 
*/ #cmakedefine HAVE_GETPAGESIZE ${HAVE_GETPAGESIZE} -/* Define to 1 if you have the `getrlimit' function. */ -#cmakedefine HAVE_GETRLIMIT ${HAVE_GETRLIMIT} - /* Define to 1 if you have the `getrusage' function. */ #cmakedefine HAVE_GETRUSAGE ${HAVE_GETRUSAGE} @@ -165,42 +162,24 @@ /* Define to 1 if you have the `setenv' function. */ #cmakedefine HAVE_SETENV ${HAVE_SETENV} -/* Define to 1 if you have the `setrlimit' function. */ -#cmakedefine HAVE_SETRLIMIT ${HAVE_SETRLIMIT} - /* Define to 1 if you have the `sigaltstack' function. */ #cmakedefine HAVE_SIGALTSTACK ${HAVE_SIGALTSTACK} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SIGNAL_H ${HAVE_SIGNAL_H} - /* Define to 1 if you have the `strerror_r' function. */ #cmakedefine HAVE_STRERROR_R ${HAVE_STRERROR_R} /* Define to 1 if you have the `sysconf' function. */ #cmakedefine HAVE_SYSCONF ${HAVE_SYSCONF} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_IOCTL_H ${HAVE_SYS_IOCTL_H} - /* Define to 1 if you have the header file. */ #cmakedefine HAVE_SYS_MMAN_H ${HAVE_SYS_MMAN_H} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_RESOURCE_H ${HAVE_SYS_RESOURCE_H} - -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_TIME_H ${HAVE_SYS_TIME_H} - /* Define to 1 if stat struct has st_mtimespec member .*/ #cmakedefine HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC} /* Define to 1 if stat struct has st_mtim member. */ #cmakedefine HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC ${HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC} -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H} - /* Define to 1 if you have the header file. */ #cmakedefine HAVE_UNISTD_H ${HAVE_UNISTD_H}