diff --git a/.github/workflows/clang-cir-tests.yml b/.github/workflows/clang-cir-tests.yml new file mode 100644 index 000000000000..c38e952d1f02 --- /dev/null +++ b/.github/workflows/clang-cir-tests.yml @@ -0,0 +1,38 @@ +name: Clang CIR Tests + +permissions: + contents: read + +on: + workflow_dispatch: + push: + branches: + - 'main' + paths: + - 'clang/**' + - '.github/workflows/clang-cir-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + pull_request: + branches: + - 'main' + paths: + - 'clang/**' + - '.github/workflows/clang-cir-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + check_clang_cir: + name: Test clang-cir + uses: ./.github/workflows/llvm-project-tests.yml + with: + build_target: check-clang-cir + projects: clang;mlir + extra_cmake_args: -DCLANG_ENABLE_CIR=ON diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 983838858ba4..93e37b0e11ca 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -11,7 +11,7 @@ on: jobs: code_formatter: runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' + if: github.repository == 'llvm/clangir' steps: - name: Fetch LLVM sources uses: actions/checkout@v4 diff --git a/README.md b/README.md index a9b29ecbc1a3..3dd79abc4b3e 100644 --- a/README.md +++ b/README.md @@ -1,44 +1,3 @@ -# The LLVM Compiler Infrastructure +# ClangIR (CIR) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/llvm/llvm-project/badge)](https://securityscorecards.dev/viewer/?uri=github.com/llvm/llvm-project) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8273/badge)](https://www.bestpractices.dev/projects/8273) -[![libc++](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml/badge.svg?branch=main&event=schedule)](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml?query=event%3Aschedule) - -Welcome to the LLVM project! - -This repository contains the source code for LLVM, a toolkit for the -construction of highly optimized compilers, optimizers, and run-time -environments. - -The LLVM project has multiple components. The core of the project is -itself called "LLVM". This contains all of the tools, libraries, and header -files needed to process intermediate representations and convert them into -object files. Tools include an assembler, disassembler, bitcode analyzer, and -bitcode optimizer. - -C-like languages use the [Clang](https://clang.llvm.org/) frontend. This -component compiles C, C++, Objective-C, and Objective-C++ code into LLVM bitcode --- and from there into object files, using LLVM. - -Other components include: -the [libc++ C++ standard library](https://libcxx.llvm.org), -the [LLD linker](https://lld.llvm.org), and more. - -## Getting the Source Code and Building LLVM - -Consult the -[Getting Started with LLVM](https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm) -page for information on building and running LLVM. - -For information on how to contribute to the LLVM project, please take a look at -the [Contributing to LLVM](https://llvm.org/docs/Contributing.html) guide. 
- -## Getting in touch - -Join the [LLVM Discourse forums](https://discourse.llvm.org/), [Discord -chat](https://discord.gg/xS7Z362), -[LLVM Office Hours](https://llvm.org/docs/GettingInvolved.html#office-hours) or -[Regular sync-ups](https://llvm.org/docs/GettingInvolved.html#online-sync-ups). - -The LLVM project has adopted a [code of conduct](https://llvm.org/docs/CodeOfConduct.html) for -participants to all modes of communication within the project. +Check https://clangir.org for general information, build instructions, and documentation. diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index 7e1905aa897b..8dfa6d81b204 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -23,7 +23,7 @@ add_clang_library(clangTidy ClangSACheckers omp_gen ClangDriverOptions - ) +) clang_target_link_libraries(clangTidy PRIVATE @@ -77,6 +77,9 @@ add_subdirectory(performance) add_subdirectory(portability) add_subdirectory(readability) add_subdirectory(zircon) +if(CLANG_ENABLE_CIR) + add_subdirectory(cir) +endif() set(ALL_CLANG_TIDY_CHECKS clangTidyAndroidModule clangTidyAbseilModule @@ -105,6 +108,9 @@ set(ALL_CLANG_TIDY_CHECKS if(CLANG_TIDY_ENABLE_STATIC_ANALYZER) list(APPEND ALL_CLANG_TIDY_CHECKS clangTidyMPIModule) endif() +if(CLANG_ENABLE_CIR) + list(APPEND ALL_CLANG_TIDY_CHECKS clangTidyCIRModule) +endif() set(ALL_CLANG_TIDY_CHECKS ${ALL_CLANG_TIDY_CHECKS} PARENT_SCOPE) # Other subtargets. These may reference ALL_CLANG_TIDY_CHECKS diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index b877ea06dc05..0f2394628000 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -20,7 +20,10 @@ #include "ClangTidyModuleRegistry.h" #include "ClangTidyProfiling.h" #include "ExpandModularHeadersPPCallbacks.h" +#ifndef CLANG_TIDY_CONFIG_H #include "clang-tidy-config.h" +#endif +#include "utils/OptionsUtils.h" #include "clang/AST/ASTConsumer.h" #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/Format/Format.h" @@ -466,6 +469,7 @@ ClangTidyASTConsumerFactory::createASTConsumer( Consumers.push_back(std::move(AnalysisConsumer)); } #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER + return std::make_unique<ClangTidyASTConsumer>( std::move(Consumers), std::move(Profiling), std::move(Finder), std::move(Checks)); diff --git a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h index 9280eb1e1f21..4c587089add0 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h +++ b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h @@ -20,6 +20,18 @@ #include "llvm/Support/Regex.h" #include <memory> +// Workaround so that unit tests do not need to be changed to require +// "clang-tidy-config.h" to be generated: only include it when it exists. +#if __has_include("clang-tidy-config.h") +#ifndef CLANG_TIDY_CONFIG_H +#include "clang-tidy-config.h" +#endif +#endif + +#if CLANG_ENABLE_CIR +#include "clang/Basic/CodeGenOptions.h" +#endif + namespace clang { class ASTContext; @@ -137,6 +149,12 @@ class ClangTidyContext { /// Gets the language options from the AST context. const LangOptions &getLangOpts() const { return LangOpts; } +#if CLANG_ENABLE_CIR + /// Get and set the CodeGen options. + CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; } + void setCodeGenOpts(CodeGenOptions &CGO) { CodeGenOpts = CGO; } +#endif + /// Returns the name of the clang-tidy check which produced this /// diagnostic ID.
std::string getCheckName(unsigned DiagnosticID) const; @@ -242,6 +260,10 @@ class ClangTidyContext { LangOptions LangOpts; +#if CLANG_ENABLE_CIR + CodeGenOptions CodeGenOpts; +#endif + ClangTidyStats Stats; std::string CurrentBuildDirectory; diff --git a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h index adde9136ff1d..6d3ffa743460 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h +++ b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h @@ -9,7 +9,9 @@ #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CLANGTIDYFORCELINKER_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CLANGTIDYFORCELINKER_H +#ifndef CLANG_TIDY_CONFIG_H #include "clang-tidy-config.h" +#endif #include "llvm/Support/Compiler.h" namespace clang::tidy { @@ -137,6 +139,13 @@ extern volatile int ZirconModuleAnchorSource; static int LLVM_ATTRIBUTE_UNUSED ZirconModuleAnchorDestination = ZirconModuleAnchorSource; +#if CLANG_ENABLE_CIR +// This anchor is used to force the linker to link the CIRModule. +extern volatile int CIRModuleAnchorSource; +static int LLVM_ATTRIBUTE_UNUSED CIRModuleAnchorDestination = + CIRModuleAnchorSource; +#endif + } // namespace clang::tidy #endif diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt new file mode 100644 index 000000000000..f31eba82228e --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt @@ -0,0 +1,50 @@ +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +set(LLVM_LINK_COMPONENTS + AllTargetsAsmParsers + AllTargetsDescs + AllTargetsInfos + FrontendOpenMP + support + ) + +# Needed by LLVM's CMake checks because this file defines multiple targets. +set(LLVM_OPTIONAL_SOURCES CIRTidyMain.cpp CIRTidyToolMain.cpp) + +add_clang_library(CIRTidyMain + CIRTidyMain.cpp + + LINK_LIBS + CIRTidy + clangTidy + MLIRIR + ${ALL_CLANG_TIDY_CHECKS} + + DEPENDS + omp_gen + ) + +clang_target_link_libraries(CIRTidyMain + PRIVATE + clangBasic + clangTooling + clangToolingCore + ) + +add_clang_tool(cir-tidy + CIRTidyToolMain.cpp + ) +add_dependencies(cir-tidy + clang-resource-headers + ) + +target_link_libraries(cir-tidy + PRIVATE + CIRTidyMain + CIRTidy + ) + +install(TARGETS cir-tidy + DESTINATION bin + ) diff --git a/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp b/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp new file mode 100644 index 000000000000..0c54cde3d0f0 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp @@ -0,0 +1,34 @@ +//===--- CIRTidyModule.cpp - clang-tidy -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "../ClangTidy.h" +#include "../ClangTidyModule.h" +#include "../ClangTidyModuleRegistry.h" +#include "Lifetime.h" + +namespace clang::tidy { +namespace cir { + +class CIRModule : public ClangTidyModule { +public: + void addCheckFactories(ClangTidyCheckFactories &CheckFactories) override { + CheckFactories.registerCheck<Lifetime>("cir-lifetime-check"); + } +}; + +} // namespace cir + +// Register the CIRTidyModule using this statically initialized variable.
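+// Registration makes the module's checks (currently just cir-lifetime-check) +// discoverable by clang-tidy under their registered names.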
+static ClangTidyModuleRegistry::Add<cir::CIRModule> + X("cir-module", "Adds ClangIR (CIR) based clang-tidy checks."); + +// This anchor is used to force the linker to link in the generated object file +// and thus register the CIRModule. +volatile int CIRModuleAnchorSource = 0; + +} // namespace clang::tidy diff --git a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt new file mode 100644 index 000000000000..0b892f332790 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt @@ -0,0 +1,57 @@ +set(LLVM_LINK_COMPONENTS + FrontendOpenMP + Support + ) + +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangTidyCIRModule + Lifetime.cpp + CIRTidyModule.cpp + + LINK_LIBS + clangASTMatchers + clangCIR + clangFrontend + clangSerialization + clangTidy + clangTidyUtils + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRTargetLLVMIRExport + MLIRTransforms + + DEPENDS + omp_gen + ) + +clang_target_link_libraries(clangTidyCIRModule + PRIVATE + clangAnalysis + clangAST + clangASTMatchers + clangBasic + clangLex + clangTooling + clangToolingCore + ) diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp new file mode 100644 index 000000000000..e74b34825318 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp @@ -0,0 +1,197 @@ +//===--- Lifetime.cpp - clang-tidy ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Lifetime.h" +#include "../utils/OptionsUtils.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclGroup.h" +#include "clang/ASTMatchers/ASTMatchFinder.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/Format/Format.h" +#include "clang/Frontend/ASTConsumers.h" +#include "clang/Tooling/FixIt.h" +#include <memory> + +using namespace clang::ast_matchers; +using namespace clang; + +namespace clang::tidy::cir { + +Lifetime::Lifetime(StringRef Name, ClangTidyContext *Context) + : ClangTidyCheck(Name, Context), codeGenOpts(Context->getCodeGenOpts()), + cirOpts{} { + auto OV = OptionsView(Name, Context->getOptions().CheckOptions, Context); + codeGenOpts.ClangIRBuildDeferredThreshold = + OV.get("CodeGenBuildDeferredThreshold", 500U); + codeGenOpts.ClangIRSkipFunctionsFromSystemHeaders = + OV.get("CodeGenSkipFunctionsFromSystemHeaders", false); + + cirOpts.RemarksList = + utils::options::parseStringList(OV.get("RemarksList", "")); + cirOpts.HistoryList = + utils::options::parseStringList(OV.get("HistoryList", "all")); + cirOpts.HistLimit = OV.get("HistLimit", 1U); +} + +void Lifetime::registerMatchers(MatchFinder *Finder) { + Finder->addMatcher(translationUnitDecl(), this); +} + +void Lifetime::setupAndRunClangIRLifetimeChecker(ASTContext &astCtx) { + auto *TU = astCtx.getTranslationUnitDecl(); + // This is the hook used to build clangir and run the lifetime checker + // pass. Perhaps a better integration story is possible in the future. + + // Create an instance of CIRGenerator and use it to build CIR, followed by + // MLIR module verification. + std::unique_ptr<::cir::CIRGenerator> Gen = + std::make_unique<::cir::CIRGenerator>(astCtx.getDiagnostics(), nullptr, + codeGenOpts); + Gen->Initialize(astCtx); + Gen->HandleTopLevelDecl(DeclGroupRef(TU)); + Gen->HandleTranslationUnit(astCtx); + Gen->verifyModule(); + + mlir::ModuleOp mlirMod = Gen->getModule(); + std::unique_ptr<mlir::MLIRContext> mlirCtx = Gen->takeContext(); + + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + + clang::SourceManager &clangSrcMgr = astCtx.getSourceManager(); + FileID MainFileID = clangSrcMgr.getMainFileID(); + + // Do some big dance with diagnostics here: hijack clang's diagnostics with + // the MLIR one. + llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); + std::unique_ptr<llvm::MemoryBuffer> FileBuf = + llvm::MemoryBuffer::getMemBuffer(MainFileBuf); + + llvm::SourceMgr llvmSrcMgr; + llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { + ClangTidyCheck &tidyCheck; + clang::SourceManager &clangSrcMgr; + + clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { + clang::SourceLocation clangLoc; + FileManager &fileMgr = clangSrcMgr.getFileManager(); + assert(loc && "not a valid mlir::FileLineColLoc"); + // The column and line may be zero to represent unknown column + // and/or unknown line/column information.
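+ // There is no direct clang equivalent for such unknown locations, so + // for now this case is a hard failure.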
+ if (loc.getLine() == 0 || loc.getColumn() == 0) { + llvm_unreachable("How should we work around this?"); + return clangLoc; + } + if (auto FE = fileMgr.getFile(loc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), + loc.getColumn()); + } + llvm_unreachable("location doesn't map to a file?"); + } + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + // Directly maps into a clang::SourceLocation. + if (auto fileLoc = loc.dyn_cast<mlir::FileLineColLoc>()) { + return getClangFromFileLineCol(fileLoc); + } + + // FusedLoc needs to be decomposed, but the canonical one + // is the first location; we handle source ranges somewhere + // else. + if (auto fileLoc = loc.dyn_cast<mlir::FusedLoc>()) { + auto locArray = fileLoc.getLocations(); + assert(locArray.size() > 0 && "expected at least one loc"); + return getClangFromFileLineCol( + locArray[0].dyn_cast<mlir::FileLineColLoc>()); + } + + // Many loc styles are yet to be handled. + if (auto fileLoc = loc.dyn_cast<mlir::UnknownLoc>()) { + llvm_unreachable("mlir::UnknownLoc not implemented!"); + } + if (auto fileLoc = loc.dyn_cast<mlir::CallSiteLoc>()) { + llvm_unreachable("mlir::CallSiteLoc not implemented!"); + } + llvm_unreachable("Unknown location style"); + } + + clang::DiagnosticIDs::Level + translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { + switch (sev) { + case mlir::DiagnosticSeverity::Note: + return clang::DiagnosticIDs::Level::Note; + case mlir::DiagnosticSeverity::Warning: + return clang::DiagnosticIDs::Level::Warning; + case mlir::DiagnosticSeverity::Error: + return clang::DiagnosticIDs::Level::Error; + case mlir::DiagnosticSeverity::Remark: + return clang::DiagnosticIDs::Level::Remark; + } + llvm_unreachable("should not get here!"); + } + + public: + void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { + auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); + tidyCheck.diag(clangBeginLoc, diag.str(), + translateToClangDiagLevel(diag.getSeverity())); + for (const auto &note : diag.getNotes()) { + auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); + tidyCheck.diag(clangNoteBeginLoc, note.str(), + translateToClangDiagLevel(note.getSeverity())); + } + } + + CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, + ClangTidyCheck &tidyCheck, + clang::SourceManager &clangMgr, + ShouldShowLocFn &&shouldShowLocFn = {}) + : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), + std::move(shouldShowLocFn)), + tidyCheck(tidyCheck), clangSrcMgr(clangMgr) { + setHandler( + [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); + } + ~CIRTidyDiagnosticHandler() = default; + }; + + // Use a custom diagnostic handler that allows regular printing to stderr + // while also populating the clang-tidy context with diagnostics (allowing, + // for instance, diagnostics to be converted to YAML later). + CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), *this, + clangSrcMgr); + + mlir::PassManager pm(mlirCtx.get()); + + // Add pre-requisite passes to the pipeline. + pm.addPass(mlir::createMergeCleanupsPass()); + + // Insert the lifetime checker.
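+ // The remark and history options below come from the check configuration + // parsed in the Lifetime constructor.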
+ pm.addPass(mlir::createLifetimeCheckPass( + cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &astCtx)); + + if (mlir::failed(pm.run(mlirMod))) + llvm::report_fatal_error( + "The pass manager failed to run pass on the module!"); +} + +void Lifetime::check(const MatchFinder::MatchResult &Result) { + setupAndRunClangIRLifetimeChecker(*Result.Context); +} + +} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.h b/clang-tools-extra/clang-tidy/cir/Lifetime.h new file mode 100644 index 000000000000..fb65bbf5be80 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.h @@ -0,0 +1,35 @@ +//===--- Lifetime.h - clang-tidy --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H +#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H + +#include "../ClangTidyCheck.h" +#include <vector> + +namespace clang::tidy::cir { + +struct CIROpts { + std::vector<std::string> RemarksList; + std::vector<std::string> HistoryList; + unsigned HistLimit; +}; +class Lifetime : public ClangTidyCheck { +public: + Lifetime(StringRef Name, ClangTidyContext *Context); + void registerMatchers(ast_matchers::MatchFinder *Finder) override; + void check(const ast_matchers::MatchFinder::MatchResult &Result) override; + void setupAndRunClangIRLifetimeChecker(ASTContext &astCtx); + + CodeGenOptions codeGenOpts; + CIROpts cirOpts; +}; + +} // namespace clang::tidy::cir + +#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H diff --git a/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp b/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp new file mode 100644 index 000000000000..c65781190663 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp @@ -0,0 +1,39 @@ +// RUN: %check_clang_tidy %s cir-lifetime-check %t \ +// RUN: --export-fixes=%t.yaml \ +// RUN: -config='{CheckOptions: \ +// RUN: [{key: cir-lifetime-check.RemarksList, value: "all"}, \ +// RUN: {key: cir-lifetime-check.HistLimit, value: "1"}, \ +// RUN: {key: cir-lifetime-check.CodeGenBuildDeferredThreshold, value: "500"}, \ +// RUN: {key: cir-lifetime-check.CodeGenSkipFunctionsFromSystemHeaders, value: "false"}, \ +// RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ +// RUN: -- +// RUN: FileCheck -input-file=%t.yaml -check-prefix=CHECK-YAML %s + +int *p0() { + int *p =
nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // CHECK-MESSAGES: :[[@LINE]]:4: warning: use of invalid pointer 'p' + return p; +} + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { x }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { invalid }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'use of invalid pointer ''p''' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Notes: +// CHECK-YAML: - Message: 'pointee ''x'' invalidated at end of scope' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Warning \ No newline at end of file diff --git a/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg b/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg new file mode 100644 index 000000000000..e479c3e74cb6 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg @@ -0,0 +1,2 @@ +if not config.clang_enable_cir: + config.unsupported = True \ No newline at end of file diff --git a/clang-tools-extra/test/lit.site.cfg.py.in b/clang-tools-extra/test/lit.site.cfg.py.in index 4eb830a1baf1..0c3cf6d43f92 100644 --- a/clang-tools-extra/test/lit.site.cfg.py.in +++ b/clang-tools-extra/test/lit.site.cfg.py.in @@ -11,6 +11,7 @@ config.target_triple = "@LLVM_TARGET_TRIPLE@" config.host_triple = "@LLVM_HOST_TRIPLE@" config.clang_tidy_staticanalyzer = @CLANG_TIDY_ENABLE_STATIC_ANALYZER@ config.has_plugins = @CLANG_PLUGIN_SUPPORT@ & ~@LLVM_INSTALL_TOOLCHAIN_ONLY@ +config.clang_enable_cir = @CLANG_ENABLE_CIR@ # Support substitution of the tools and libs dirs with user parameters. This is # used when we can't determine the tool dir at configuration time. config.llvm_tools_dir = lit_config.substitute("@LLVM_TOOLS_DIR@") diff --git a/clang/asf b/clang/asf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def index 340b08dd7e2a..c19f80b1c8c2 100644 --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -449,6 +449,16 @@ CODEGENOPT(CtorDtorReturnThis, 1, 0) /// FIXME: Make DebugOptions its own top-level .def file. #include "DebugOptions.def" +/// ClangIR specific (internal): limits recursion depth for buildDeferred() +/// calls. This helps incremental progress while building large C++ TUs; once +/// CIRGen is mature, we should probably remove it. +VALUE_CODEGENOPT(ClangIRBuildDeferredThreshold, 32, 500) + +/// ClangIR specific (internal): Only build deferred functions not coming from +/// system headers. This helps incremental progress while building large C++ +/// TUs; once CIRGen is mature, we should probably remove it.
+CODEGENOPT(ClangIRSkipFunctionsFromSystemHeaders, 1, 0) + #undef CODEGENOPT #undef ENUM_CODEGENOPT #undef VALUE_CODEGENOPT diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index ed3fd9b1c4a5..29b4437046d6 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -349,6 +349,8 @@ def err_drv_incompatible_omp_arch : Error<"OpenMP target architecture '%0' point def err_drv_omp_host_ir_file_not_found : Error< "provided host compiler IR file '%0' is required to generate code for OpenMP " "target regions but cannot be found">; +def err_drv_cir_pass_opt_parsing : Error< + "clangir pass option '%0' not recognized">; def err_drv_omp_host_target_not_supported : Error< "target '%0' is not a supported OpenMP host target">; def err_drv_expecting_fopenmp_with_fopenmp_targets : Error< diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 8ef6700ecdc7..337734205c00 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -416,6 +416,8 @@ LANGOPT(RetainCommentsFromSystemHeaders, 1, 0, "retain documentation comments fr LANGOPT(APINotes, 1, 0, "use external API notes") LANGOPT(APINotesModules, 1, 0, "use module-based external API notes") +LANGOPT(CIRWarnings, 1, 0, "emit warnings with ClangIR") + LANGOPT(SanitizeAddressFieldPadding, 2, 0, "controls how aggressive is ASan " "field padding (0: none, 1:least " "aggressive, 2: more aggressive)") diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h new file mode 100644 index 000000000000..2dedb3b66385 --- /dev/null +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -0,0 +1,107 @@ +//===- CIRGenerator.h - CIR Generation from Clang AST ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares a simple interface to perform CIR generation from the +// Clang AST. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIRGENERATOR_H_ +#define CLANG_CIRGENERATOR_H_ + +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/CodeGenOptions.h" + +#include "llvm/ADT/IntrusiveRefCntPtr.h" +#include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/VirtualFileSystem.h" + +#include <memory> + +namespace mlir { +class MLIRContext; +class ModuleOp; +class OwningModuleRef; +} // namespace mlir + +namespace clang { +class ASTContext; +class DeclGroupRef; +class FunctionDecl; +} // namespace clang + +namespace cir { +class CIRGenModule; +class CIRGenTypes; + +class CIRGenerator : public clang::ASTConsumer { + virtual void anchor(); + clang::DiagnosticsEngine &Diags; + clang::ASTContext *astCtx; + llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> + fs; // Only used for debug info. + + const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. + + unsigned HandlingTopLevelDecls; + + /// Use this when emitting decls to block re-entrant decl emission. It will + /// emit all deferred decls on scope exit. Set EmitDeferred to false if decl + /// emission must be deferred longer, like at the end of a tag definition.
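+ /// (Mirrors the RAII helper of the same name in clang's CodeGenerator; see + /// clang/lib/CodeGen/ModuleBuilder.cpp.)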
+ struct HandlingTopLevelDeclRAII { + CIRGenerator &Self; + bool EmitDeferred; + HandlingTopLevelDeclRAII(CIRGenerator &Self, bool EmitDeferred = true) + : Self{Self}, EmitDeferred{EmitDeferred} { + ++Self.HandlingTopLevelDecls; + } + ~HandlingTopLevelDeclRAII() { + unsigned Level = --Self.HandlingTopLevelDecls; + if (Level == 0 && EmitDeferred) + Self.buildDeferredDecls(); + } + }; + +protected: + std::unique_ptr<mlir::MLIRContext> mlirCtx; + std::unique_ptr<CIRGenModule> CGM; + +private: + llvm::SmallVector<clang::FunctionDecl *> DeferredInlineMemberFuncDefs; + +public: + CIRGenerator(clang::DiagnosticsEngine &diags, + llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS, + const clang::CodeGenOptions &CGO); + ~CIRGenerator(); + void Initialize(clang::ASTContext &Context) override; + bool EmitFunction(const clang::FunctionDecl *FD); + + bool HandleTopLevelDecl(clang::DeclGroupRef D) override; + void HandleTranslationUnit(clang::ASTContext &Ctx) override; + void HandleInlineFunctionDefinition(clang::FunctionDecl *D) override; + void HandleTagDeclDefinition(clang::TagDecl *D) override; + void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; + void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *D) override; + void CompleteTentativeDefinition(clang::VarDecl *D) override; + + mlir::ModuleOp getModule(); + std::unique_ptr<mlir::MLIRContext> takeContext() { + return std::move(mlirCtx); + } + + bool verifyModule(); + + void buildDeferredDecls(); + void buildDefaultMethods(); +}; + +} // namespace cir + +#endif // CLANG_CIRGENERATOR_H_ diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h new file mode 100644 index 000000000000..ed089cd966f4 --- /dev/null +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -0,0 +1,42 @@ +//====- CIRToCIRPasses.h - CIR-to-CIR pass pipeline -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares an interface for running CIR-to-CIR transformation +// passes. +// +//===----------------------------------------------------------------------===// +#ifndef CLANG_CIR_CIRTOCIRPASSES_H +#define CLANG_CIR_CIRTOCIRPASSES_H + +#include "mlir/Pass/Pass.h" + +#include <memory> + +namespace clang { +class ASTContext; +} + +namespace mlir { +class MLIRContext; +class ModuleOp; +} // namespace mlir + +namespace cir { + +// Run the set of CIR-to-CIR cleanup/prepare/etc passes.
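+// On a pass-option parsing error, `passOptParsingFailure` is expected to be +// set to the offending option string, so callers can report it (e.g. via the +// err_drv_cir_pass_opt_parsing diagnostic added above).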
+mlir::LogicalResult +runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, + bool enableLifetime, llvm::StringRef lifetimeOpts, + bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, + std::string &passOptParsingFailure, bool flattenCIR); + +} // namespace cir + +#endif // CLANG_CIR_CIRTOCIRPASSES_H diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index f8d6f407a03d..25497fc222d1 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -1,6 +1,8 @@ +set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) include_directories(${MLIR_INCLUDE_DIR}) include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) add_subdirectory(Dialect) +add_subdirectory(Interfaces) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h new file mode 100644 index 000000000000..a3df0ef0dcdc --- /dev/null +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -0,0 +1,353 @@ +//===-- CIRBaseBuilder.h - CIRBuilder implementation -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIRBASEBUILDER_H +#define LLVM_CLANG_LIB_CIRBASEBUILDER_H + +#include "clang/AST/Decl.h" +#include "clang/AST/Type.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/FPEnv.h" + +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" +#include "mlir/IR/Types.h" +#include "llvm/ADT/APSInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/FloatingPointMode.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/Support/ErrorHandling.h" +#include <algorithm> +#include <cassert> +#include <optional> + +namespace cir { + +class CIRBaseBuilderTy : public mlir::OpBuilder { + +public: + CIRBaseBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + + mlir::Value getConstAPSInt(mlir::Location loc, const llvm::APSInt &val) { + auto ty = mlir::cir::IntType::get(getContext(), val.getBitWidth(), + val.isSigned()); + return create<mlir::cir::ConstantOp>(loc, ty, + getAttr<mlir::cir::IntAttr>(ty, val)); + } + + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create<mlir::cir::ConstantOp>(loc, typ, + getAttr<mlir::cir::IntAttr>(typ, val)); + } + + mlir::cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { + return create<mlir::cir::ConstantOp>(loc, attr.getType(), attr); + } + + mlir::cir::BoolType getBoolTy() { + return ::mlir::cir::BoolType::get(getContext()); + } + + mlir::cir::VoidType getVoidTy() { + return ::mlir::cir::VoidType::get(getContext()); + } + + mlir::cir::PointerType getPointerTo(mlir::Type ty, + unsigned addressSpace = 0) { + assert(!addressSpace && "address space is NYI"); + return mlir::cir::PointerType::get(getContext(), ty); + }
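+ + // A minimal usage sketch for the constant helpers above, assuming `b` is a + // CIRBaseBuilderTy and `loc` is a valid mlir::Location; it materializes the + // signed 32-bit constant 42: + // llvm::APSInt fortyTwo(llvm::APInt(/*numBits=*/32, 42), + // /*isUnsigned=*/false); + // mlir::Value c = b.getConstAPSInt(loc, fortyTwo);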
+ + mlir::cir::PointerType getVoidPtrTy(unsigned addressSpace = 0) { + return getPointerTo(::mlir::cir::VoidType::get(getContext()), addressSpace); + } + + mlir::Value createNot(mlir::Value value) { + return create<mlir::cir::UnaryOp>(value.getLoc(), value.getType(), + mlir::cir::UnaryOpKind::Not, value); + } + + mlir::cir::CmpOp createCompare(mlir::Location loc, mlir::cir::CmpOpKind kind, + mlir::Value lhs, mlir::Value rhs) { + return create<mlir::cir::CmpOp>(loc, getBoolTy(), kind, lhs, rhs); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create<mlir::cir::BinOp>( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create<mlir::cir::BinOp>(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create<mlir::cir::ShiftOp>( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = lhs.getType().dyn_cast<mlir::cir::IntType>().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + + mlir::Value createMul(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Mul, rhs); + } + + mlir::Value createMul(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); + } + + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, + mlir::Value dst, bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + return create<mlir::cir::StoreOp>(loc, val, dst, _volatile, order); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + mlir::IntegerAttr alignment, + mlir::Value dynAllocSize) { + return create<mlir::cir::AllocaOp>(loc, addrType, type, name, alignment, + dynAllocSize); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + clang::CharUnits alignment, + mlir::Value dynAllocSize) { + auto alignmentIntAttr = getSizeFromCharUnits(getContext(), alignment); + return createAlloca(loc, addrType, type, name, alignmentIntAttr, + dynAllocSize); + } + + mlir::Value createAlloca(mlir::Location loc,
mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + mlir::IntegerAttr alignment) { + return create<mlir::cir::AllocaOp>(loc, addrType, type, name, alignment); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + clang::CharUnits alignment) { + auto alignmentIntAttr = getSizeFromCharUnits(getContext(), alignment); + return createAlloca(loc, addrType, type, name, alignmentIntAttr); + } + + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false) { + auto op = create<mlir::cir::BinOp>(lhs.getLoc(), lhs.getType(), + mlir::cir::BinOpKind::Sub, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + return op; + } + + mlir::Value createNSWSub(mlir::Value lhs, mlir::Value rhs) { + return createSub(lhs, rhs, false, true); + } + + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + mlir::Value createCast(mlir::Location loc, mlir::cir::CastKind kind, + mlir::Value src, mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create<mlir::cir::CastOp>(loc, newTy, kind, src); + } + + mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, + mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return createCast(src.getLoc(), kind, src, newTy); + } + + mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::integral, src, newTy); + } + + mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::int_to_ptr, src, newTy); + } + + mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::ptr_to_int, src, newTy); + } + + mlir::Value createPtrToBoolCast(mlir::Value v) { + return createCast(mlir::cir::CastKind::ptr_to_bool, v, getBoolTy()); + } + + // TODO(cir): the following function was introduced to keep in sync with LLVM + // codegen. CIR does not have "zext" operations. It should eventually be + // renamed or removed. For now, we just add whatever cast is required here.
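+ // Today only the bool -> int extension is expected here; any other + // combination aborts.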
+ mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + auto srcTy = src.getType(); + + if (srcTy == newTy) + return src; + + if (srcTy.isa<mlir::cir::BoolType>() && newTy.isa<mlir::cir::IntType>()) + return createBoolToInt(src, newTy); + + llvm_unreachable("unhandled extension cast"); + } + + mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); + } + + mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bitcast, src, newTy); + } + + mlir::Value createBitcast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + return createCast(loc, mlir::cir::CastKind::bitcast, src, newTy); + } + + mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy) { + assert(src.getType().isa<mlir::cir::PointerType>() && "expected ptr src"); + return createBitcast(src, getPointerTo(newPointeeTy)); + } + + mlir::Value createPtrIsNull(mlir::Value ptr) { + return createNot(createPtrToBoolCast(ptr)); + } + + // + // Block handling helpers + // ---------------------- + // + OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { + auto lastAlloca = + std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { + return mlir::isa<mlir::cir::AllocaOp>(&op); + }); + + if (lastAlloca != block->rend()) + return OpBuilder::InsertPoint(block, + ++mlir::Block::iterator(&*lastAlloca)); + return OpBuilder::InsertPoint(block, block->begin()); + } + + mlir::IntegerAttr getSizeFromCharUnits(mlir::MLIRContext *ctx, + clang::CharUnits size) { + // Note that mlir::IntegerType is used instead of mlir::cir::IntType here + // because we don't need sign information for this to be useful, so keep + // it simple. + return mlir::IntegerAttr::get(mlir::IntegerType::get(ctx, 64), + size.getQuantity()); + } + + /// Create a do-while operation. + mlir::cir::DoWhileOp createDoWhile( + mlir::Location loc, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder) { + return create<mlir::cir::DoWhileOp>(loc, condBuilder, bodyBuilder); + } + + /// Create a while operation. + mlir::cir::WhileOp createWhile( + mlir::Location loc, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder) { + return create<mlir::cir::WhileOp>(loc, condBuilder, bodyBuilder); + } + + /// Create a for operation. + mlir::cir::ForOp createFor( + mlir::Location loc, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> condBuilder, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> bodyBuilder, + llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> stepBuilder) { + return create<mlir::cir::ForOp>(loc, condBuilder, bodyBuilder, stepBuilder); + } + + mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { + assert(t.isa<mlir::cir::PointerType>() && "expected cir.ptr"); + return mlir::cir::ConstPtrAttr::get(getContext(), t, v); + } + + // Creates constant nullptr for pointer type ty. + mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { + return create<mlir::cir::ConstantOp>(loc, ty, getConstPtrAttr(ty, 0)); + } + + /// Create a loop condition. + mlir::cir::ConditionOp createCondition(mlir::Value condition) { + return create<mlir::cir::ConditionOp>(condition.getLoc(), condition); + } + + /// Create a yield operation.
+ mlir::cir::YieldOp createYield(mlir::Location loc, + mlir::ValueRange value = {}) { + return create<mlir::cir::YieldOp>(loc, value); + } +}; + +} // namespace cir +#endif diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt index f33061b2d87c..cd837615e82f 100644 --- a/clang/include/clang/CIR/Dialect/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/CMakeLists.txt @@ -1 +1,28 @@ +add_custom_target(clang-cir-doc) + +# This replicates part of the add_mlir_doc cmake function from MLIR that cannot +# be used here. This happens because it expects to be run inside MLIR directory +# which is not the case for CIR (and also FIR, both have similar workarounds). +function(add_clang_mlir_doc doc_filename output_file output_directory command) + set(LLVM_TARGET_DEFINITIONS ${doc_filename}.td) + tablegen(MLIR ${output_file}.md ${command} ${ARGN} "-I${MLIR_MAIN_SRC_DIR}" "-I${MLIR_INCLUDE_DIR}") + set(GEN_DOC_FILE ${CLANG_BINARY_DIR}/docs/${output_directory}${output_file}.md) + add_custom_command( + OUTPUT ${GEN_DOC_FILE} + COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_BINARY_DIR}/${output_file}.md + ${GEN_DOC_FILE} + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${output_file}.md) + add_custom_target(${output_file}DocGen DEPENDS ${GEN_DOC_FILE}) + add_dependencies(clang-cir-doc ${output_file}DocGen) +endfunction() + add_subdirectory(IR) + +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name CIR) +mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix CIR) +mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix CIR) +add_public_tablegen_target(MLIRCIRPassIncGen) + +add_clang_mlir_doc(Passes CIRPasses ./ -gen-pass-doc) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrDefs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrDefs.td new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h new file mode 100644 index 000000000000..5961f77629b5 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -0,0 +1,49 @@ +//===- CIRAttrs.h - MLIR CIR Attrs ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the attributes in the CIR dialect.
+// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_IR_CIRATTRS_H_ +#define MLIR_DIALECT_CIR_IR_CIRATTRS_H_ + +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" + +#include "llvm/ADT/SmallVector.h" + +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + +//===----------------------------------------------------------------------===// +// CIR Dialect Attrs +//===----------------------------------------------------------------------===// + +namespace clang { +class FunctionDecl; +class VarDecl; +class RecordDecl; +} // namespace clang + +namespace mlir { +namespace cir { +class ArrayType; +class StructType; +class BoolType; +} // namespace cir +} // namespace mlir + +#define GET_ATTRDEF_CLASSES +#include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" + +#endif // MLIR_DIALECT_CIR_IR_CIRATTRS_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td new file mode 100644 index 000000000000..136929f84995 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -0,0 +1,806 @@ +//===- CIRAttrs.td - CIR dialect types ---------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect attributes. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_ATTRS +#define MLIR_CIR_DIALECT_CIR_ATTRS + +include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/IR/EnumAttr.td" + +include "clang/CIR/Dialect/IR/CIRDialect.td" + +include "clang/CIR/Interfaces/ASTAttrInterfaces.td" + +//===----------------------------------------------------------------------===// +// CIR Attrs +//===----------------------------------------------------------------------===// + +class CIR_Attr<string name, string attrMnemonic, list<Trait> traits = []> + : AttrDef<CIR_Dialect, name, traits> { + let mnemonic = attrMnemonic; +} + +class CIRUnitAttr<string name, string attrMnemonic, list<Trait> traits = []> + : CIR_Attr<name, attrMnemonic, traits> { + let returnType = "bool"; + let defaultValue = "false"; + let valueType = NoneType; + let isOptional = 1; +} + +//===----------------------------------------------------------------------===// +// LangAttr +//===----------------------------------------------------------------------===// + +def C : I32EnumAttrCase<"C", 1, "c">; +def CXX : I32EnumAttrCase<"CXX", 2, "cxx">; + +def SourceLanguage : I32EnumAttr<"SourceLanguage", "Source language", [ + C, CXX +]> { + let cppNamespace = "::mlir::cir"; + let genSpecializedAttr = 0; +} + +def LangAttr : CIR_Attr<"Lang", "lang"> { + let summary = "Module source language"; + let parameters = (ins SourceLanguage:$lang); + let description = [{ + Represents the source language used to generate the module. + + Example: + ``` + // Module compiled from C. + module attributes {cir.lang = #cir.lang<c>} {} + // Module compiled from C++.
+ module attributes {cir.lang = #cir.lang<cxx>} {} + ``` + }]; + let hasCustomAssemblyFormat = 1; + let extraClassDeclaration = [{ + bool isC() const { return getLang() == SourceLanguage::C; }; + bool isCXX() const { return getLang() == SourceLanguage::CXX; }; + }]; +} + +//===----------------------------------------------------------------------===// +// BoolAttr +//===----------------------------------------------------------------------===// + +def CIR_BoolAttr : CIR_Attr<"Bool", "bool", [TypedAttrInterface]> { + let summary = "Represent true/false for !cir.bool types"; + let description = [{ + The BoolAttr represents a 'true' or 'false' value. + }]; + + let parameters = (ins AttributeSelfTypeParameter< + "", "mlir::cir::BoolType">:$type, + "bool":$value); + + let assemblyFormat = [{ + `<` $value `>` + }]; +} + +//===----------------------------------------------------------------------===// +// ZeroAttr +//===----------------------------------------------------------------------===// + +def ZeroAttr : CIR_Attr<"Zero", "zero", [TypedAttrInterface]> { + let summary = "Attribute to represent zero initialization"; + let description = [{ + The ZeroAttr is used to indicate zero initialization on structs. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let assemblyFormat = [{}]; +} + +//===----------------------------------------------------------------------===// +// ConstArrayAttr +//===----------------------------------------------------------------------===// + +def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> { + let summary = "A constant array from ArrayAttr or StringRefAttr"; + let description = [{ + A CIR array attribute is an array of literals of the specified attr types. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "Attribute":$elts, + "int":$trailingZerosNum); + + // Define a custom builder for the type that removes the need to pass + // in an MLIRContext instance, as it can be inferred from the `type`. + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, + "Attribute":$elts), [{ + int zeros = 0; + auto typeSize = type.cast<mlir::cir::ArrayType>().getSize(); + if (auto str = elts.dyn_cast<StringAttr>()) + zeros = typeSize - str.size(); + else + zeros = typeSize - elts.cast<ArrayAttr>().size(); + + return $_get(type.getContext(), type, elts, zeros); + }]> + ]; + + // Printing and parsing available in CIRDialect.cpp + let hasCustomAssemblyFormat = 1; + + // Enable verifier. + let genVerifyDecl = 1; + + let extraClassDeclaration = [{ + bool hasTrailingZeros() const { return getTrailingZerosNum() != 0; }; + }]; +} + +//===----------------------------------------------------------------------===// +// ConstStructAttr +//===----------------------------------------------------------------------===// + +def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", + [TypedAttrInterface]> { + let summary = "Represents a constant struct"; + let description = [{ + Effectively supports "struct-like" constants. It must be built from + an `mlir::ArrayAttr` instance where each element is a typed attribute + (`mlir::TypedAttribute`).
+ + Example: + ``` + cir.global external @rgb2 = #cir.const_struct<{0 : i8, + 5 : i64, #cir.null : !cir.ptr<i8> + }> : !cir.struct<"", i8, i64, !cir.ptr<i8>> + ``` + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ArrayAttr":$members); + + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::StructType":$type, + "ArrayAttr":$members), [{ + return $_get(type.getContext(), type, members); + }]> + ]; + + let assemblyFormat = [{ + `<` custom<StructMembers>($members) `>` + }]; + + let genVerifyDecl = 1; +} + +//===----------------------------------------------------------------------===// +// IntegerAttr +//===----------------------------------------------------------------------===// + +def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { + let summary = "An attribute containing an integer value"; + let description = [{ + An integer attribute is a literal attribute that represents an integral + value of the specified integer type. + }]; + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "APInt":$value); + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "const APInt &":$value), [{ + return $_get(type.getContext(), type, value); + }]>, + AttrBuilderWithInferredContext<(ins "Type":$type, "int64_t":$value), [{ + IntType intType = type.cast<IntType>(); + mlir::APInt apValue(intType.getWidth(), value, intType.isSigned()); + return $_get(intType.getContext(), intType, apValue); + }]>, + ]; + let extraClassDeclaration = [{ + int64_t getSInt() const { return getValue().getSExtValue(); } + uint64_t getUInt() const { return getValue().getZExtValue(); } + bool isNullValue() const { return getValue() == 0; } + uint64_t getBitWidth() const { return getType().cast<IntType>().getWidth(); } + }]; + let genVerifyDecl = 1; + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// FPAttr +//===----------------------------------------------------------------------===// + +def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { + let summary = "An attribute containing a floating-point value"; + let description = [{ + An fp attribute is a literal attribute that represents a floating-point + value of the specified floating-point type. Only CIR floating-point types + are supported. + }]; + let parameters = (ins + AttributeSelfTypeParameter<"", "::mlir::cir::CIRFPTypeInterface">:$type, + APFloatParameter<"">:$value + ); + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "const APFloat &":$value), [{ + return $_get(type.getContext(), type.cast<CIRFPTypeInterface>(), value); + }]>, + AttrBuilder<(ins "Type":$type, + "const APFloat &":$value), [{ + return $_get($_ctxt, type.cast<CIRFPTypeInterface>(), value); + }]>, + ]; + let extraClassDeclaration = [{ + static FPAttr getZero(mlir::Type type); + }]; + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` custom<FloatLiteral>($value, ref($type)) `>` + }]; +} + +//===----------------------------------------------------------------------===// +// ConstPointerAttr +//===----------------------------------------------------------------------===// + +def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { + let summary = "Holds a constant pointer value"; + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "uint64_t":$value); + let description = [{ + A pointer attribute is a literal attribute that represents an integral + value of a pointer type.
+ }]; + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, "uint64_t":$value), [{ + return $_get(type.getContext(), type, value); + }]>, + ]; + let extraClassDeclaration = [{ + bool isNullValue() const { return getValue() == 0; } + }]; + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// CmpThreeWayInfoAttr +//===----------------------------------------------------------------------===// + +def CmpOrdering_Strong : I32EnumAttrCase<"Strong", 1, "strong">; +def CmpOrdering_Partial : I32EnumAttrCase<"Partial", 2, "partial">; + +def CmpOrdering : I32EnumAttr< + "CmpOrdering", "three-way comparison ordering kind", + [CmpOrdering_Strong, CmpOrdering_Partial] +> { + let cppNamespace = "::mlir::cir"; +} + +def CmpThreeWayInfoAttr : CIR_Attr<"CmpThreeWayInfo", "cmp3way_info"> { + let summary = "Holds information about a three-way comparison operation"; + let description = [{ + The `#cmp3way_info` attribute contains information about a three-way + comparison operation `cir.cmp3way`. + + The `ordering` parameter gives the ordering kind of the three-way comparison + operation. It may be either strong ordering or partial ordering. + + Given the two input operands of the three-way comparison operation `lhs` and + `rhs`, the `lt`, `eq`, `gt`, and `unordered` parameters give the result + value that should be produced by the three-way comparison operation when the + ordering between `lhs` and `rhs` is `lhs < rhs`, `lhs == rhs`, `lhs > rhs`, + or neither, respectively. + }]; + + let parameters = (ins "CmpOrdering":$ordering, "int64_t":$lt, "int64_t":$eq, + "int64_t":$gt, + OptionalParameter<"std::optional<int64_t>">:$unordered); + + let builders = [ + AttrBuilder<(ins "int64_t":$lt, "int64_t":$eq, "int64_t":$gt), [{ + return $_get($_ctxt, CmpOrdering::Strong, lt, eq, gt, std::nullopt); + }]>, + AttrBuilder<(ins "int64_t":$lt, "int64_t":$eq, "int64_t":$gt, + "int64_t":$unordered), [{ + return $_get($_ctxt, CmpOrdering::Partial, lt, eq, gt, unordered); + }]>, + ]; + + let extraClassDeclaration = [{ + /// Get attribute alias name for this attribute. + std::string getAlias() const; + }]; + + let assemblyFormat = [{ + `<` + $ordering `,` + `lt` `=` $lt `,` + `eq` `=` $eq `,` + `gt` `=` $gt + (`,` `unordered` `=` $unordered^)? + `>` + }]; + + let genVerifyDecl = 1; +} + +//===----------------------------------------------------------------------===// +// DataMemberAttr +//===----------------------------------------------------------------------===// + +def DataMemberAttr : CIR_Attr<"DataMember", "data_member", + [TypedAttrInterface]> { + let summary = "Holds a constant data member pointer value"; + let parameters = (ins AttributeSelfTypeParameter< + "", "mlir::cir::DataMemberType">:$type, + OptionalParameter< + "std::optional<unsigned>">:$memberIndex); + let description = [{ + A data member attribute is a literal attribute that represents a constant + pointer-to-data-member value. + + The `memberIndex` parameter represents the index of the pointed-to member + within its containing struct. It is an optional parameter; lack of this + parameter indicates a null pointer-to-data-member value. + + Example: + ``` + #ptr = #cir.data_member<1> : !cir.data_member + + #null = #cir.data_member<null> : !cir.data_member + ``` + }]; + + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` ($memberIndex^):(`null`)?
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// SignedOverflowBehaviorAttr
+//===----------------------------------------------------------------------===//
+
+def SignedOverflowBehaviorAttr : AttrDef<CIR_Dialect, "SignedOverflowBehavior"> {
+ let mnemonic = "signed_overflow_behavior";
+ let parameters = (ins
+ "sob::SignedOverflowBehavior":$behavior
+ );
+ let hasCustomAssemblyFormat = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalViewAttr
+//===----------------------------------------------------------------------===//
+
+def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> {
+ let summary = "Provides constant access to a global address";
+ let description = [{
+ Gets the constant address of global `symbol` and optionally applies
+ offsets to access existing subelements. It provides a way to access
+ globals from other globals and always produces a pointer.
+
+ The type of the input symbol can be different from the `#cir.global_view`
+ output type, since a given view of the global might require a static
+ cast for initializing other globals.
+
+ A list of indices can be optionally passed and each element subsequently
+ indexes underlying types. For `symbol` types like `!cir.array`
+ and `!cir.struct`, it leads to the constant address of sub-elements, while
+ for `!cir.ptr`, an offset is applied. The first index is relative to the
+ original symbol type, not the produced one.
+
+ Example:
+
+ ```
+ cir.global external @s = @".str2": !cir.ptr
+ cir.global external @x = #cir.global_view<@s> : !cir.ptr
+
+ cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array>
+ cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr
+ cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>>
+ ```
+ }];
+
+ let parameters = (ins AttributeSelfTypeParameter<"">:$type,
+ "FlatSymbolRefAttr":$symbol,
+ OptionalParameter<"ArrayAttr">:$indices);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "FlatSymbolRefAttr":$symbol,
+ CArg<"ArrayAttr", "{}">:$indices), [{
+ return $_get(type.getContext(), type, symbol, indices);
+ }]>
+ ];
+
+ // let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<`
+ $symbol
+ (`,` $indices^)?
+ `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// TypeInfoAttr
+//===----------------------------------------------------------------------===//
+
+def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> {
+ let summary = "Represents a typeinfo used for RTTI";
+ let description = [{
+ The typeinfo data for a given class is stored in an ArrayAttr. The
+ layout is determined by the C++ ABI used (clang only implements the
+ Itanium ABI in CIRGen).
+
+ The verifier enforces that the output type is always a `!cir.struct`,
+ and that the ArrayAttr element types match the equivalent member type
+ for the resulting struct, i.e., a GlobalViewAttr for a symbol reference
+ or an IntAttr for flags.
+
+ Example:
+
+ ```
+ cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr
+
+ cir.global external @type_info_B = #cir.typeinfo<<
+ {#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}
+ >> : !cir.struct<"", !cir.ptr>
+ ```
+ }];
+
+ let parameters = (ins AttributeSelfTypeParameter<"">:$type,
+ "mlir::ArrayAttr":$data);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "mlir::ArrayAttr":$data), [{
+ return $_get(type.getContext(), type, data);
+ }]>
+ ];
+
+ // The verifier checks that the struct element types match the equivalent
+ // array element types.
+ let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<` custom($data) `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// VTableAttr
+//===----------------------------------------------------------------------===//
+
+def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> {
+ let summary = "Represents a C++ vtable";
+ let description = [{
+ Wraps a #cir.const_struct containing vtable data.
+
+ Example:
+ ```
+ cir.global linkonce_odr @_ZTV1B = #cir.vtable<<
+ {#cir.const_array<[#cir.null : !cir.ptr,
+ #cir.global_view<@_ZTI1B> : !cir.ptr,
+ #cir.global_view<@_ZN1BD1Ev> : !cir.ptr,
+ #cir.global_view<@_ZN1BD0Ev> : !cir.ptr,
+ #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]>
+ : !cir.array x 5>}>>
+ : !cir.struct<"", !cir.array x 5>>
+ ```
+ }];
+
+ // `vtable_data` is a const struct with one element, containing an array of
+ // vtable information.
+ let parameters = (ins AttributeSelfTypeParameter<"">:$type,
+ "ArrayAttr":$vtable_data);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "ArrayAttr":$vtable_data), [{
+ return $_get(type.getContext(), type, vtable_data);
+ }]>
+ ];
+
+ let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<` custom($vtable_data) `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// StructLayoutAttr
+//===----------------------------------------------------------------------===//
+
+// Used to decouple layout information from the struct type. StructType
+// uses this attribute to cache that information.
+
+def StructLayoutAttr : CIR_Attr<"StructLayout", "struct_layout"> {
+ let summary = "ABI-specific information about a struct layout";
+ let description = [{
+ Holds layout information often queried by !cir.struct users
+ during lowering passes and optimizations.
+ }];
+
+ let parameters = (ins "unsigned":$size,
+ "unsigned":$alignment,
+ "bool":$padded,
+ "mlir::Type":$largest_member,
+ "mlir::ArrayAttr":$offsets);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "unsigned":$size,
+ "unsigned":$alignment,
+ "bool":$padded,
+ "mlir::Type":$largest_member,
+ "mlir::ArrayAttr":$offsets), [{
+ return $_get(largest_member.getContext(), size, alignment, padded,
+ largest_member, offsets);
+ }]>,
+ ];
+
+ let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<`
+ struct($size, $alignment, $padded, $largest_member, $offsets)
+ `>`
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// DynamicCastInfoAttr
+//===----------------------------------------------------------------------===//
+
+def DynamicCastInfoAttr
+ : CIR_Attr<"DynamicCastInfo", "dyn_cast_info"> {
+ let summary = "ABI-specific information about a dynamic cast";
+ let description = [{
+ Provides ABI-specific information about a dynamic cast operation.
+
+ The `srcRtti` and the `destRtti` parameters give the RTTI of the source
+ struct type and the destination struct type, respectively.
+
+ The `runtimeFunc` parameter gives the `__dynamic_cast` function which is
+ provided by the runtime. The `badCastFunc` parameter gives the
+ `__cxa_bad_cast` function which is also provided by the runtime.
+
+ The `offsetHint` parameter gives the hint value that should be passed to
+ the `__dynamic_cast` runtime function.
+ }];
+
+ let parameters = (ins GlobalViewAttr:$srcRtti,
+ GlobalViewAttr:$destRtti,
+ "FlatSymbolRefAttr":$runtimeFunc,
+ "FlatSymbolRefAttr":$badCastFunc,
+ IntAttr:$offsetHint);
+
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "GlobalViewAttr":$srcRtti,
+ "GlobalViewAttr":$destRtti,
+ "FlatSymbolRefAttr":$runtimeFunc,
+ "FlatSymbolRefAttr":$badCastFunc,
+ "IntAttr":$offsetHint), [{
+ return $_get(srcRtti.getContext(), srcRtti, destRtti, runtimeFunc,
+ badCastFunc, offsetHint);
+ }]>,
+ ];
+
+ let genVerifyDecl = 1;
+ let assemblyFormat = [{
+ `<`
+ qualified($srcRtti) `,` qualified($destRtti) `,`
+ $runtimeFunc `,` $badCastFunc `,` qualified($offsetHint)
+ `>`
+ }];
+
+ let extraClassDeclaration = [{
+ /// Get attribute alias name for this attribute.
+ std::string getAlias() const;
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// AST Wrappers
+//===----------------------------------------------------------------------===//
+
+class AST<string name, string prefix, list<Trait> traits = []>
+ : CIR_Attr<!strconcat("AST", name), !strconcat(prefix, ".ast"), traits> {
+ string clang_name = !strconcat("const clang::", name, " *");
+
+ let summary = !strconcat("Wraps a '", clang_name, "' AST node.");
+ let description = [{
+ Operations may optionally refer to this node; whether the node is
+ available can depend on the CIR lowering stage. Whether it's attached to
+ the appropriate CIR operation is delegated to the operation verifier.
+
+ This always implies a non-null AST reference (verified).
+ }];
+ let parameters = (ins clang_name:$ast);
+
+ // Printing and parsing available in CIRDialect.cpp
+ let hasCustomAssemblyFormat = 1;
+
+ // Enable verifier.
+ let genVerifyDecl = 1;
+
+ let extraClassDefinition = [{
+ ::mlir::Attribute $cppClass::parse(::mlir::AsmParser &parser,
+ ::mlir::Type type) {
+ // We cannot really parse anything AST related at this point
+ // since we have no serialization/JSON story.
+ return $cppClass::get(parser.getContext(), nullptr);
+ }
+
+ void $cppClass::print(::mlir::AsmPrinter &printer) const {
+ // Nothing to print besides the mnemonics.
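+ // Like parsing, printing the wrapped AST pointer would require a
+ // serialization story for clang AST nodes, which does not exist yet.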
+ }
+
+ LogicalResult $cppClass::verify(
+ ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+ }] # clang_name # [{ decl) {
+ return success();
+ }
+ }];
+}
+
+def ASTDeclAttr : AST<"Decl", "decl", [ASTDeclInterface]>;
+
+def ASTFunctionDeclAttr : AST<"FunctionDecl", "function.decl",
+ [ASTFunctionDeclInterface]>;
+
+def ASTCXXMethodDeclAttr : AST<"CXXMethodDecl", "cxxmethod.decl",
+ [ASTCXXMethodDeclInterface]>;
+
+def ASTCXXConstructorDeclAttr : AST<"CXXConstructorDecl",
+ "cxxconstructor.decl", [ASTCXXConstructorDeclInterface]>;
+
+def ASTCXXConversionDeclAttr : AST<"CXXConversionDecl",
+ "cxxconversion.decl", [ASTCXXConversionDeclInterface]>;
+
+def ASTCXXDestructorDeclAttr : AST<"CXXDestructorDecl",
+ "cxxdestructor.decl", [ASTCXXDestructorDeclInterface]>;
+
+def ASTVarDeclAttr : AST<"VarDecl", "var.decl",
+ [ASTVarDeclInterface]>;
+
+def ASTTypeDeclAttr: AST<"TypeDecl", "type.decl",
+ [ASTTypeDeclInterface]>;
+
+def ASTTagDeclAttr : AST<"TagDecl", "tag.decl",
+ [ASTTagDeclInterface]>;
+
+def ASTRecordDeclAttr : AST<"RecordDecl", "record.decl",
+ [ASTRecordDeclInterface]>;
+
+def ASTExprAttr : AST<"Expr", "expr",
+ [ASTExprInterface]>;
+
+def ASTCallExprAttr : AST<"CallExpr", "call.expr",
+ [ASTCallExprInterface]>;
+
+//===----------------------------------------------------------------------===//
+// ExtraFuncAttr
+//===----------------------------------------------------------------------===//
+
+def ExtraFuncAttr : CIR_Attr<"ExtraFuncAttributes", "extra"> {
+ let summary = "Represents aggregated attributes for a function";
+ let description = [{
+ This is a wrapper around a dictionary attribute that contains the extra
+ attributes of a function.
+ }];
+
+ let parameters = (ins "DictionaryAttr":$elements);
+
+ let assemblyFormat = [{ `(` $elements `)` }];
+
+ // Printing and parsing also available in CIRDialect.cpp
+}
+
+
+def NoInline : I32EnumAttrCase<"NoInline", 1, "no">;
+def AlwaysInline : I32EnumAttrCase<"AlwaysInline", 2, "always">;
+def InlineHint : I32EnumAttrCase<"InlineHint", 3, "hint">;
+
+def InlineKind : I32EnumAttr<"InlineKind", "inlineKind", [
+ NoInline, AlwaysInline, InlineHint
+]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+def InlineAttr : CIR_Attr<"Inline", "inline"> {
+ let summary = "Inline attribute";
+ let description = [{
+ Inline attributes represent user directives.
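+
+ For illustration, with the `<` $value `>` assembly format declared below,
+ the printed forms would look roughly like:
+ ```
+ #cir.inline<no>
+ #cir.inline<always>
+ #cir.inline<hint>
+ ```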
+ }];
+
+ let parameters = (ins "InlineKind":$value);
+
+ let assemblyFormat = [{
+ `<` $value `>`
+ }];
+
+ let extraClassDeclaration = [{
+ bool isNoInline() const { return getValue() == InlineKind::NoInline; };
+ bool isAlwaysInline() const { return getValue() == InlineKind::AlwaysInline; };
+ bool isInlineHint() const { return getValue() == InlineKind::InlineHint; };
+ }];
+}
+
+def OptNoneAttr : CIRUnitAttr<"OptNone", "optnone"> {
+ let storageType = [{ OptNoneAttr }];
+}
+
+def NoThrowAttr : CIRUnitAttr<"NoThrow", "nothrow"> {
+ let storageType = [{ NoThrowAttr }];
+}
+
+class CIR_GlobalCtorDtor<string name, string attrMnemonic,
+ string sum, string desc>
+ : CIR_Attr<"Global" # name, "global_" # attrMnemonic> {
+ let summary = sum;
+ let description = desc;
+
+ let parameters = (ins "StringAttr":$name, "int":$priority);
+ let assemblyFormat = [{
+ `<`
+ $name `,` $priority
+ `>`
+ }];
+ let builders = [
+ AttrBuilder<(ins "StringRef":$name,
+ CArg<"int", "65536">:$priority), [{
+ return $_get($_ctxt, StringAttr::get($_ctxt, name), priority);
+ }]>
+ ];
+ let extraClassDeclaration = [{
+ bool isDefaultPriority() const { return getPriority() == 65536; };
+ }];
+ let skipDefaultBuilders = 1;
+}
+
+def GlobalCtorAttr : CIR_GlobalCtorDtor<"Ctor", "ctor",
+ "Marks a function as a global constructor",
+ "A function with this attribute executes before main()"
+>;
+def GlobalDtorAttr : CIR_GlobalCtorDtor<"Dtor", "dtor",
+ "Marks a function as a global destructor",
+ "A function with this attribute executes before module unloading"
+>;
+
+def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> {
+ let summary = "Represents information about a bitfield";
+ let description = [{
+ Holds the following information about a bitfield: its name, storage
+ type, size, position in the storage unit, and whether it is signed.
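+
+ For illustration, with the struct-like assembly format declared below,
+ a 2-bit signed bitfield stored at bit offset 17 of a !u64i storage unit
+ would print roughly as (the field values are made up for this example):
+ ```
+ #cir.bitfield_info<name = "d", storage_type = !u64i, size = 2,
+ offset = 17, is_signed = true>
+ ```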
+ }];
+ let parameters = (ins "StringAttr":$name,
+ "Type":$storage_type,
+ "uint64_t":$size,
+ "uint64_t":$offset,
+ "bool":$is_signed);
+
+ let assemblyFormat = "`<` struct($name, $storage_type, $size, $offset, $is_signed) `>`";
+
+ let builders = [
+ AttrBuilder<(ins "StringRef":$name,
+ "Type":$storage_type,
+ "uint64_t":$size,
+ "uint64_t":$offset,
+ "bool":$is_signed
+ ), [{
+ return $_get($_ctxt, StringAttr::get($_ctxt, name), storage_type, size, offset, is_signed);
+ }]>
+ ];
+}
+
+#endif // MLIR_CIR_DIALECT_CIR_ATTRS
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
index d53e5d1663d6..e78471035cf1 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
@@ -13,4 +13,85 @@
 #ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H
 #define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H
 
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/CallInterfaces.h"
+#include "mlir/Interfaces/ControlFlowInterfaces.h"
+#include "mlir/Interfaces/FunctionInterfaces.h"
+#include "mlir/Interfaces/InferTypeOpInterface.h"
+#include "mlir/Interfaces/LoopLikeInterface.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIROpsDialect.h.inc"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIROpsStructs.h.inc"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "clang/CIR/Interfaces/CIRLoopOpInterface.h"
+#include "clang/CIR/Interfaces/CIROpInterfaces.h"
+
+namespace mlir {
+namespace OpTrait {
+
+namespace impl {
+// These functions are out-of-line implementations of the methods in the
+// corresponding trait classes. This avoids them being template
+// instantiated/duplicated.
+LogicalResult verifySameFirstOperandAndResultType(Operation *op);
+LogicalResult verifySameSecondOperandAndResultType(Operation *op);
+LogicalResult verifySameFirstSecondOperandAndResultType(Operation *op);
+} // namespace impl
+
+/// This class provides verification for ops that are known to have the same
+/// first operand and result type.
+///
+template <typename ConcreteType>
+class SameFirstOperandAndResultType
+ : public TraitBase<ConcreteType, SameFirstOperandAndResultType> {
+public:
+ static LogicalResult verifyTrait(Operation *op) {
+ return impl::verifySameFirstOperandAndResultType(op);
+ }
+};
+
+/// This class provides verification for ops that are known to have the same
+/// second operand and result type.
+///
+template <typename ConcreteType>
+class SameSecondOperandAndResultType
+ : public TraitBase<ConcreteType, SameSecondOperandAndResultType> {
+public:
+ static LogicalResult verifyTrait(Operation *op) {
+ return impl::verifySameSecondOperandAndResultType(op);
+ }
+};
+
+/// This class provides verification for ops that are known to have the same
+/// first operand, second operand, and result type.
+///
+template <typename ConcreteType>
+class SameFirstSecondOperandAndResultType
+ : public TraitBase<ConcreteType, SameFirstSecondOperandAndResultType> {
+public:
+ static LogicalResult verifyTrait(Operation *op) {
+ return impl::verifySameFirstSecondOperandAndResultType(op);
+ }
+};
+
+} // namespace OpTrait
+
+namespace cir {
+void buildTerminatedBody(OpBuilder &builder, Location loc);
+} // namespace cir
+
+} // namespace mlir
+
+#define GET_OP_CLASSES
+#include "clang/CIR/Dialect/IR/CIROps.h.inc"
+
 #endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 7311c8db783e..e65416068964 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -15,5 +15,3796 @@
 #define LLVM_CLANG_CIR_DIALECT_IR_CIROPS
 
 include "clang/CIR/Dialect/IR/CIRDialect.td"
+include "clang/CIR/Dialect/IR/CIRTypes.td"
+include "clang/CIR/Dialect/IR/CIRAttrs.td"
+
+include "clang/CIR/Interfaces/ASTAttrInterfaces.td"
+include "clang/CIR/Interfaces/CIROpInterfaces.td"
+include "clang/CIR/Interfaces/CIRLoopOpInterface.td"
+
+include "mlir/Interfaces/ControlFlowInterfaces.td"
+include "mlir/Interfaces/FunctionInterfaces.td"
+include "mlir/Interfaces/InferTypeOpInterface.td"
+include "mlir/Interfaces/LoopLikeInterface.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+include "mlir/IR/BuiltinAttributeInterfaces.td"
+include "mlir/IR/EnumAttr.td"
+include "mlir/IR/SymbolInterfaces.td"
+include "mlir/IR/CommonAttrConstraints.td"
+
+//===----------------------------------------------------------------------===//
+// CIR Ops
+//===----------------------------------------------------------------------===//
+
+class CIR_Op<string mnemonic, list<Trait> traits = []> :
+ Op<CIR_Dialect, mnemonic, traits>;
+
+//===----------------------------------------------------------------------===//
+// CIR Op Traits
+//===----------------------------------------------------------------------===//
+
+def SameFirstOperandAndResultType :
+ NativeOpTrait<"SameFirstOperandAndResultType">;
+def SameSecondOperandAndResultType :
+ NativeOpTrait<"SameSecondOperandAndResultType">;
+def SameFirstSecondOperandAndResultType :
+ NativeOpTrait<"SameFirstSecondOperandAndResultType">;
+
+//===----------------------------------------------------------------------===//
+// CastOp
+//===----------------------------------------------------------------------===//
+
+// The enumeration values aren't in sync with clang.
+def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>;
+def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>;
+def CK_IntegralCast : I32EnumAttrCase<"integral", 3>;
+def CK_BitCast : I32EnumAttrCase<"bitcast", 4>;
+def CK_FloatingCast : I32EnumAttrCase<"floating", 5>;
+def CK_PtrToBoolean : I32EnumAttrCase<"ptr_to_bool", 6>;
+def CK_FloatToIntegral : I32EnumAttrCase<"float_to_int", 7>;
+def CK_IntegralToPointer : I32EnumAttrCase<"int_to_ptr", 8>;
+def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>;
+def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>;
+def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>;
+def CK_IntegralToFloat : I32EnumAttrCase<"int_to_float", 12>;
+def CK_BooleanToFloat : I32EnumAttrCase<"bool_to_float", 13>;
+
+def CastKind : I32EnumAttr<
+ "CastKind",
+ "cast kind",
+ [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast,
+ CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral,
+ CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean,
+ CK_BooleanToIntegral, CK_IntegralToFloat, CK_BooleanToFloat]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+def CastOp : CIR_Op<"cast", [Pure]> {
+ // FIXME: not all conversions are free of side effects.
+ let summary = "Conversion between values of different types";
+ let description = [{
+ Applies the usual C/C++ conversion rules between values. Currently
+ supported kinds:
+
+ - `array_to_ptrdecay`
+ - `bitcast`
+ - `integral`
+ - `int_to_bool`
+ - `int_to_float`
+ - `floating`
+ - `float_to_int`
+ - `float_to_bool`
+ - `ptr_to_int`
+ - `ptr_to_bool`
+ - `bool_to_int`
+ - `bool_to_float`
+
+ This is effectively a subset of the rules from
+ `llvm-project/clang/include/clang/AST/OperationKinds.def`, but note that
+ some conversions aren't implemented in terms of `cir.cast`;
+ `lvalue-to-rvalue`, for instance, is modeled as a regular `cir.load`.
+
+ ```mlir
+ %4 = cir.cast (int_to_bool, %3 : i32), !cir.bool
+ ...
+ %x = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr
+ ```
+ }];
+
+ let arguments = (ins CastKind:$kind, CIR_AnyType:$src);
+ let results = (outs CIR_AnyType:$result);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $src `:` type($src) `)`
+ `,` type($result) attr-dict
+ }];
+
+ // The input and output types should match the cast kind.
+ let hasVerifier = 1;
+ let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// DynamicCastOp
+//===----------------------------------------------------------------------===//
+
+def DCK_PtrCast : I32EnumAttrCase<"ptr", 1>;
+def DCK_RefCast : I32EnumAttrCase<"ref", 2>;
+
+def DynamicCastKind : I32EnumAttr<
+ "DynamicCastKind", "dynamic cast kind", [DCK_PtrCast, DCK_RefCast]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+def DynamicCastOp : CIR_Op<"dyn_cast"> {
+ let summary = "Perform dynamic cast on struct pointers";
+ let description = [{
+ The `cir.dyn_cast` operation models part of the semantics of the
+ `dynamic_cast` operator in C++. It can be used to perform two kinds of
+ casts on struct pointers:
+
+ - Down-cast, which casts a base class pointer to a derived class pointer;
+ - Side-cast, which casts a class pointer to a sibling class pointer.
+
+ The input of the operation must be a struct pointer. The result of the
+ operation is also a struct pointer.
+
+ The parameter `kind` specifies the semantics of this operation. If its
+ value is `ptr`, then the operation models dynamic casts on pointers.
+ Otherwise, if its value is `ref`, the operation models dynamic casts on
+ references. Specifically:
+
+ - When the input pointer is a null pointer value:
+ - If `kind` is `ref`, the operation will invoke undefined behavior. A
+ sanitizer check will be emitted if the sanitizer is enabled.
+ - Otherwise, the operation will return a null pointer value as its result.
+ - When the runtime type check fails:
+ - If `kind` is `ref`, the operation will throw a `bad_cast` exception.
+ - Otherwise, the operation will return a null pointer value as its result.
+
+ The `info` argument gives detailed information about the requested dynamic
+ cast operation.
+ }];
+
+ let arguments = (ins DynamicCastKind:$kind,
+ StructPtr:$src,
+ DynamicCastInfoAttr:$info);
+ let results = (outs StructPtr:$result);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $src `:` type($src) `,` qualified($info) `)`
+ `->` type($result) attr-dict
+ }];
+
+ let extraClassDeclaration = [{
+ /// Determine whether this operation models reference casting in C++.
+ bool isRefcast() {
+ return getKind() == ::mlir::cir::DynamicCastKind::ref;
+ }
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// ObjSizeOp
+//===----------------------------------------------------------------------===//
+
+def SizeInfoTypeMin : I32EnumAttrCase<"min", 0>;
+def SizeInfoTypeMax : I32EnumAttrCase<"max", 1>;
+
+def SizeInfoType : I32EnumAttr<
+ "SizeInfoType",
+ "size info type",
+ [SizeInfoTypeMin, SizeInfoTypeMax]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+def ObjSizeOp : CIR_Op<"objsize", [Pure]> {
+ let summary = "Computes the size of the object pointed to by the operand";
+ let description = [{
+ The `cir.objsize` operation models `__builtin_object_size` and its
+ dynamic variant: given a pointer, it produces the number of bytes
+ accessible through it. The `kind` parameter selects whether a `max`
+ (upper bound) or `min` (lower bound) estimate is produced, and the
+ `dynamic` attribute requests the dynamically evaluated version of the
+ builtin.
+ }];
+
+ let arguments = (ins CIR_PointerType:$ptr, SizeInfoType:$kind,
+ UnitAttr:$dynamic);
+ let results = (outs PrimitiveInt:$result);
+
+ let assemblyFormat = [{
+ `(`
+ $ptr `:` type($ptr) `,`
+ $kind
+ (`,` `dynamic` $dynamic^)?
+ `)`
+ `->` type($result) attr-dict
+ }];
+
+ // Nothing to verify that isn't already covered by constraints.
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// PtrDiffOp
+//===----------------------------------------------------------------------===//
+
+def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> {
+
+ let summary = "Pointer subtraction arithmetic";
+ let description = [{
+ `cir.ptr_diff` performs a subtraction between two pointers of the same
+ element type and produces a `mlir::cir::IntType` result.
+
+ Note that the result is scaled by the ABI size of the pointee type: e.g.
+ subtracting two `!cir.ptr` values with an 8-byte pointee might yield 1,
+ meaning 8 bytes, whereas for `void` or function type pointees, yielding
+ 8 means 8 bytes.
+
+ ```mlir
+ %7 = "cir.ptr_diff"(%0, %1) : !cir.ptr -> !u64i
+ ```
+ }];
+
+ let results = (outs PrimitiveInt:$result);
+ let arguments = (ins CIR_PointerType:$lhs, CIR_PointerType:$rhs);
+
+ let assemblyFormat = [{
+ `(` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `->` qualified(type($result)) attr-dict
+ }];
+
+ // Already covered by the traits
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// PtrStrideOp
+//===----------------------------------------------------------------------===//
+
+def PtrStrideOp : CIR_Op<"ptr_stride",
+ [Pure, SameFirstOperandAndResultType]> {
+ let summary = "Pointer access with stride";
+ let description = [{
+ Given a base pointer as the first operand, produces a new pointer after
+ applying a stride (second operand).
+
+ ```mlir
+ %3 = cir.const(0 : i32) : i32
+ %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr
+ ```
+ }];
+
+ let arguments = (ins CIR_PointerType:$base, PrimitiveInt:$stride);
+ let results = (outs CIR_PointerType:$result);
+
+ let assemblyFormat = [{
+ `(` $base `:` qualified(type($base)) `,` $stride `:` qualified(type($stride)) `)`
+ `,` qualified(type($result)) attr-dict
+ }];
+
+ let extraClassDeclaration = [{
+ // Get the type pointed to by the base pointer.
+ mlir::Type getElementTy() {
+ return getBase().getType().cast<mlir::cir::PointerType>().getPointee();
+ }
+ }];
+
+ // SameFirstOperandAndResultType already checks all we need.
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantOp
+//===----------------------------------------------------------------------===//
+
+def ConstantOp : CIR_Op<"const",
+ [ConstantLike, Pure]> {
+ // FIXME: Use SameOperandsAndResultType or similar and prevent eye bleeding
+ // type repetition in the assembly form.
+
+ let summary = "Defines a CIR constant";
+ let description = [{
+ The `cir.const` operation turns a literal into an SSA value. The data is
+ attached to the operation as an attribute.
+
+ ```mlir
+ %0 = cir.const(42 : i32) : i32
+ %1 = cir.const(4.2 : f32) : f32
+ %2 = cir.const(nullptr : !cir.ptr) : !cir.ptr
+ ```
+ }];
+
+ // The constant operation takes an attribute as the only input.
+ let arguments = (ins TypedAttrInterface:$value);
+
+ // The constant operation returns a single value of CIR_AnyType.
+ let results = (outs CIR_AnyType:$res);
+
+ let assemblyFormat = [{
+ `(` custom($value) `)` attr-dict `:` type($res)
+ }];
+
+ let hasVerifier = 1;
+
+ let extraClassDeclaration = [{
+ bool isNullPtr() {
+ if (const auto ptrAttr = getValue().dyn_cast<ConstPtrAttr>())
+ return ptrAttr.isNullValue();
+ return false;
+ }
+ }];
+
+ let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// C/C++ memory order definitions
+//===----------------------------------------------------------------------===//
+
+def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">;
+def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">;
+def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">;
+def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">;
+def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">;
+def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">;
+
+def MemOrder : I32EnumAttr<
+ "MemOrder",
+ "Memory order according to C++11 memory model",
+ [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire,
+ MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+//===----------------------------------------------------------------------===//
+// AllocaOp
+//===----------------------------------------------------------------------===//
+
+class AllocaTypesMatchWith<string summary, string lhsArg, string rhsArg,
+ string transform, string comparator = "std::equal_to<>()">
+ : PredOpTrait<summary, CPred<
+ comparator # "(" # !subst("$_self", "$" # lhsArg # ".getType()",
+ transform) # ", $" # rhsArg # ")">> {
+ string lhs = lhsArg;
+ string rhs = rhsArg;
+ string transformer = transform;
+}
+
+def AllocaOp : CIR_Op<"alloca", [
+ AllocaTypesMatchWith<"'allocaType' matches pointee type of 'addr'",
+ "addr", "allocaType",
+ "$_self.cast<PointerType>().getPointee()">]> {
+ let summary = "Defines a scope-local variable";
+ let description = [{
+ The `cir.alloca` operation defines a scope-local variable.
+
+ The presence of the `init` attribute indicates that the local variable
+ represented by this alloca was originally initialized in C/C++ source
+ code. In such cases, the first use contains the initialization (a
+ cir.store, a cir.call to a ctor, etc.).
+
+ The `dynAllocSize` specifies the size to dynamically allocate on the stack
+ and ignores the allocation size based on the original type. This is useful
+ when handling VLAs and is omitted when declaring regular local variables.
+
+ The result type is a pointer to the input's type.
+
+ Example:
+
+ ```mlir
+ // int count = 3;
+ %0 = cir.alloca i32, !cir.ptr<i32>, ["count", init] {alignment = 4 : i64}
+
+ // int *ptr;
+ %1 = cir.alloca !cir.ptr<i32>, cir.ptr <!cir.ptr<i32>>, ["ptr"] {alignment = 8 : i64}
+ ...
+ ```
+ }];
+
+ let arguments = (ins
+ Optional<PrimitiveInt>:$dynAllocSize,
+ TypeAttr:$allocaType,
+ StrAttr:$name,
+ UnitAttr:$init,
+ ConfinedAttr<OptionalAttr<I64Attr>, [IntMinValue<0>]>:$alignment,
+ OptionalAttr:$ast
+ );
+
+ let results = (outs Res<CIR_PointerType, "",
+ [MemAlloc<AutomaticAllocationScopeResource>]>:$addr);
+
+ let skipDefaultBuilders = 1;
+ let builders = [
+ OpBuilder<(ins "Type":$addr, "Type":$allocaType,
+ "StringRef":$name,
+ "IntegerAttr":$alignment)>,
+
+ OpBuilder<(ins "Type":$addr,
+ "Type":$allocaType,
+ "StringRef":$name,
+ "IntegerAttr":$alignment,
+ "Value":$dynAllocSize),
+ [{
+ if (dynAllocSize)
+ $_state.addOperands(dynAllocSize);
+ build($_builder, $_state, addr, allocaType, name, alignment);
+ }]>
+ ];
+
+ let extraClassDeclaration = [{
+ // Whether the alloca input type is a pointer.
+ bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); }
+
+ bool isDynamic() { return (bool)getDynAllocSize(); }
+ }];
+
+ // FIXME: we should not be printing `cir.ptr` below, that should come
+ // from the pointer type directly.
+ let assemblyFormat = [{
+ $allocaType `,` `cir.ptr` type($addr) `,`
+ ($dynAllocSize^ `:` type($dynAllocSize) `,`)?
+ `[` $name
+ (`,` `init` $init^)?
+ `]`
+ (`ast` $ast^)? attr-dict
+ }];
+
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+
+def LoadOp : CIR_Op<"load", [
+ TypesMatchWith<"type of 'result' matches pointee type of 'addr'",
+ "addr", "result",
+ "$_self.cast<PointerType>().getPointee()">]> {
+
+ let summary = "Load value from memory address";
+ let description = [{
+ `cir.load` reads a value (lvalue to rvalue conversion) given an address
+ backed by a `cir.ptr` type. A unit attribute `deref` can be used to
+ mark the resulting value as used by another operation to dereference
+ a pointer. A unit attribute `volatile` can be used to indicate a volatile
+ loading. A load can be marked atomic by using `atomic(<mem_order>)`.
+
+ Example:
+
+ ```mlir
+
+ // Read from local variable, address in %0.
+ %1 = cir.load %0 : !cir.ptr<i32>, i32
+
+ // Load address from memory at address %0. %3 is used by at least one
+ // operation that dereferences a pointer.
+ %3 = cir.load deref %0 : cir.ptr <!cir.ptr<i32>>
+
+ // Perform a volatile load from address in %0.
+ %4 = cir.load volatile %0 : !cir.ptr<i32>, i32
+ ```
+ }];
+
+ let arguments = (ins Arg<CIR_PointerType, "the address to load from",
+ [MemRead]>:$addr,
+ UnitAttr:$isDeref,
+ UnitAttr:$is_volatile,
+ OptionalAttr<MemOrder>:$mem_order);
+ let results = (outs CIR_AnyType:$result);
+
+ // FIXME: we should not be printing `cir.ptr` below, that should come
+ // from the pointer type directly.
+ let assemblyFormat = [{
+ (`deref` $isDeref^)?
+ (`volatile` $is_volatile^)?
+ (`atomic` `(` $mem_order^ `)`)?
+ $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict
+ }];
+
+ // FIXME: add verifier.
+}
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+
+def StoreOp : CIR_Op<"store", [
+ TypesMatchWith<"type of 'value' matches pointee type of 'addr'",
+ "addr", "value",
+ "$_self.cast<PointerType>().getPointee()">]> {
+
+ let summary = "Store value to memory address";
+ let description = [{
+ `cir.store` stores a value (first operand) to the memory address specified
+ in the second operand. A unit attribute `volatile` can be used to indicate
+ a volatile store. Stores can be marked atomic by using
+ `atomic(<mem_order>)`.
+
+ Example:
+
+ ```mlir
+ // Store a function argument to local storage, address in %0.
+ cir.store %arg0, %0 : i32, !cir.ptr<i32>
+
+ // Perform a volatile store into memory location at the address in %0.
+ cir.store volatile %arg0, %0 : i32, !cir.ptr<i32>
+ ```
+ }];
+
+ let arguments = (ins CIR_AnyType:$value,
+ Arg<CIR_PointerType, "the address to store the value",
+ [MemWrite]>:$addr,
+ UnitAttr:$is_volatile,
+ OptionalAttr<MemOrder>:$mem_order);
+
+ // FIXME: we should not be printing `cir.ptr` below, that should come
+ // from the pointer type directly.
+ let assemblyFormat = [{
+ (`volatile` $is_volatile^)?
+ (`atomic` `(` $mem_order^ `)`)?
+ $value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr)
+ }];
+
+ // FIXME: add verifier.
+}
+
+//===----------------------------------------------------------------------===//
+// ReturnOp
+//===----------------------------------------------------------------------===//
+
+def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp",
+ "SwitchOp", "DoWhileOp",
+ "WhileOp", "ForOp"]>,
+ Terminator]> {
+ let summary = "Return from function";
+ let description = [{
+ The "return" operation represents a return operation within a function.
+ The operation takes an optional operand and produces no results.
+ The operand type must match the signature of the function that contains
+ the operation.
+
+ ```mlir
+ func @foo() -> i32 {
+ ...
+ cir.return %0 : i32
+ }
+ ```
+ }];
+
+ // The return operation takes an optional input operand to return. This
+ // value must match the return type of the enclosing function.
+ let arguments = (ins Variadic<CIR_AnyType>:$input);
+
+ // The return operation only emits the input in the format if it is present.
+ let assemblyFormat = "($input^ `:` type($input))? attr-dict ";
+
+ // Allow building a ReturnOp with no return operand.
+ let builders = [
+ OpBuilder<(ins), [{ build($_builder, $_state, std::nullopt); }]>
+ ];
+
+ // Provide extra utility definitions on the c++ operation class definition.
+ let extraClassDeclaration = [{
+ bool hasOperand() { return getNumOperands() != 0; }
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// IfOp
+//===----------------------------------------------------------------------===//
+
+def IfOp : CIR_Op<"if",
+ [DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+ RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> {
+ let summary = "The if-then-else operation";
+ let description = [{
+ The `cir.if` operation represents an if-then-else construct for
+ conditionally executing two regions of code. The operand is a `cir.bool`
+ type.
+
+ Examples:
+
+ ```mlir
+ cir.if %b {
+ ...
+ } else {
+ ...
+ }
+
+ cir.if %c {
+ ...
+ }
+
+ cir.if %c {
+ ...
+ cir.br ^a
+ ^a:
+ cir.yield
+ }
+ ```
+
+ `cir.if` defines no values and the 'else' can be omitted. `cir.yield` must
+ explicitly terminate the region if it has more than one block.
+ }];
+ let arguments = (ins CIR_BoolType:$condition);
+ let regions = (region AnyRegion:$thenRegion, AnyRegion:$elseRegion);
+
+ let hasCustomAssemblyFormat = 1;
+ let hasVerifier = 1;
+
+ let skipDefaultBuilders = 1;
+ let builders = [
+ OpBuilder<(ins "Value":$cond, "bool":$withElseRegion,
+ CArg<"function_ref<void(OpBuilder &, Location)>",
+ "buildTerminatedBody">:$thenBuilder,
+ CArg<"function_ref<void(OpBuilder &, Location)>",
+ "nullptr">:$elseBuilder)>
+ ];
+}
+
+//===----------------------------------------------------------------------===//
+// TernaryOp
+//===----------------------------------------------------------------------===//
+
+def TernaryOp : CIR_Op<"ternary",
+ [DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+ RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> {
+ let summary = "The `cond ? a : b` C/C++ ternary operation";
+ let description = [{
+ The `cir.ternary` operation represents the C/C++ ternary operator, much
+ like a `select` operation. The first argument is a `cir.bool` condition
+ to evaluate, followed by two regions to execute (true or false). This is
+ different from `cir.if` since each region is one block in size and the
+ `cir.yield` closing the block scope should have one argument.
+
+ Example:
+
+ ```mlir
+ // x = cond ? a : b;
+
+ %x = cir.ternary (%cond, true_region {
+ ...
+ cir.yield %a : i32
+ }, false_region {
+ ...
+ cir.yield %b : i32
+ }) -> i32
+ ```
+ }];
+ let arguments = (ins CIR_BoolType:$cond);
+ let regions = (region SizedRegion<1>:$trueRegion,
+ SizedRegion<1>:$falseRegion);
+ let results = (outs Optional<CIR_AnyType>:$result);
+
+ let skipDefaultBuilders = 1;
+ let builders = [
+ OpBuilder<(ins "Value":$cond,
+ "function_ref<void(OpBuilder &, Location)>":$trueBuilder,
+ "function_ref<void(OpBuilder &, Location)>":$falseBuilder)
+ >
+ ];
+
+ // All constraints already verified elsewhere.
+ let hasVerifier = 0;
+
+ let assemblyFormat = [{
+ `(` $cond `,`
+ `true` $trueRegion `,`
+ `false` $falseRegion
+ `)` `:` functional-type(operands, results) attr-dict
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// ConditionOp
+//===----------------------------------------------------------------------===//
+
+def ConditionOp : CIR_Op<"condition", [
+ Terminator,
+ DeclareOpInterfaceMethods<RegionBranchTerminatorOpInterface>
+]> {
+ let summary = "Loop continuation condition.";
+ let description = [{
+ The `cir.condition` operation terminates conditional regions. It takes a
+ single `cir.bool` operand and, depending on its value, may branch to
+ different regions:
+
+ - When in the `cond` region of a `cir.loop`, it continues the loop
+ if true, or exits it if false.
+ - When in the `ready` region of a `cir.await`, it branches to the `resume`
+ region when true, and to the `suspend` region when false.
+
+ Example:
+
+ ```mlir
+ cir.loop for(cond : {
+ cir.condition(%arg0) // Branches to `step` region or exits.
+ }, step : {
+ [...]
+ }) {
+ [...]
+ }
+
+ cir.await(user, ready : {
+ cir.condition(%arg0) // Branches to `resume` or `suspend` region.
+ }, suspend : {
+ [...]
+ }, resume : {
+ [...]
+ },)
+ ```
+ }];
+ let arguments = (ins CIR_BoolType:$condition);
+ let assemblyFormat = " `(` $condition `)` attr-dict ";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// YieldOp
+//===----------------------------------------------------------------------===//
+
+def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator,
+ ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp",
+ "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp", "TryOp",
+ "ArrayCtor", "ArrayDtor"]>]> {
+ let summary = "Represents the default branching behaviour of a region";
+ let description = [{
+ The `cir.yield` operation terminates regions on different CIR operations,
+ and it is used to represent the default branching behaviour of a region.
+ Said branching behaviour is determined by the parent operation. For
+ example, a yield in a `switch-case` region implies a fallthrough, while
+ a yield in a `cir.if` region implies a branch to the exit block, and so
+ on.
+
+ In some cases, it might yield an SSA value and the semantics of how the
+ values are yielded is defined by the parent operation. For example, a
+ `cir.ternary` operation yields a value from one of its regions.
+
+ As a general rule, `cir.yield` must be explicitly used whenever a region
+ has more than one block and no terminator, or within `cir.switch` regions
+ not terminated by `cir.return`.
+
+ Examples:
+ ```mlir
+ cir.if %4 {
+ ...
+ cir.yield
+ }
+
+ cir.switch (%5) [
+ case (equal, 3) {
+ ...
+ cir.yield
+ }, ...
+ ]
+
+ cir.scope {
+ ...
+ cir.yield
+ }
+
+ %x = cir.scope {
+ ...
+ cir.yield %val
+ }
+
+ %y = cir.ternary {
+ ...
+ cir.yield %val : i32
+ } : i32
+ ```
+ }];
+
+ let arguments = (ins Variadic<CIR_AnyType>:$args);
+ let assemblyFormat = "($args^ `:` type($args))? attr-dict";
+ let builders = [
+ OpBuilder<(ins), [{ /* nothing to do */ }]>,
+ ];
+}
+
+//===----------------------------------------------------------------------===//
+// BreakOp
+//===----------------------------------------------------------------------===//
+
+def BreakOp : CIR_Op<"break", [Terminator]> {
+ let summary = "C/C++ `break` statement equivalent";
+ let description = [{
+ The `cir.break` operation transfers control flow out of the parent
+ operation's region, exiting the enclosing construct. It is only allowed
+ within a breakable operation (loops and `switch`).
+ }];
+ let assemblyFormat = "attr-dict";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// ContinueOp
+//===----------------------------------------------------------------------===//
+
+def ContinueOp : CIR_Op<"continue", [Terminator]> {
+ let summary = "C/C++ `continue` statement equivalent";
+ let description = [{
+ The `cir.continue` operation is used to continue execution to the next
+ iteration of a loop. It is only allowed within `cir.loop` regions.
+ }];
+ let assemblyFormat = "attr-dict";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Resume
+//===----------------------------------------------------------------------===//
+
+def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator,
+ ParentOneOf<["CatchOp"]>]> {
+ let summary = "Resumes execution after not catching exceptions";
+ let description = [{
+ The `cir.resume` operation terminates a `cir.catch` region, "resuming",
+ i.e. continuing, the unwind process.
+
+ Examples:
+ ```mlir
+ cir.catch ... {
+ ...
+ fallback { cir.resume };
+ }
+ ```
+ }];
+
+ let arguments = (ins);
+ let assemblyFormat = "attr-dict";
+}
+
+//===----------------------------------------------------------------------===//
+// ScopeOp
+//===----------------------------------------------------------------------===//
+
+def ScopeOp : CIR_Op<"scope", [
+ DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+ RecursivelySpeculatable, AutomaticAllocationScope,
+ NoRegionArguments]> {
+ let summary = "Represents a C/C++ scope";
+ let description = [{
+ `cir.scope` contains one region and defines a strict "scope" for all new
+ values produced within its blocks.
+
+ The region can contain an arbitrary number of blocks but usually defaults
+ to one and can optionally return a value (useful for representing values
+ coming out of C++ full-expressions) via `cir.yield`:
+
+ ```mlir
+ %rvalue = cir.scope {
+ ...
+ cir.yield %value
+ }
+ ```
+
+ If `cir.scope` yields no value, the `cir.yield` can be left out, and
+ will be inserted implicitly.
+ }];
+
+ let results = (outs Optional<CIR_AnyType>:$results);
+ let regions = (region AnyRegion:$scopeRegion);
+
+ let hasVerifier = 1;
+ let skipDefaultBuilders = 1;
+ let assemblyFormat = [{
+ custom($scopeRegion) (`:` type($results)^)? attr-dict
+ }];
+
+ let builders = [
+ // Scopes for yielding values.
+ OpBuilder<(ins
+ "function_ref<void(OpBuilder &, Type &, Location)>":$scopeBuilder)>,
+ // Scopes without yielding values.
+ OpBuilder<(ins "function_ref<void(OpBuilder &, Location)>":$scopeBuilder)>
+ ];
+}
+
+//===----------------------------------------------------------------------===//
+// UnaryOp
+//===----------------------------------------------------------------------===//
+
+def UnaryOpKind_Inc : I32EnumAttrCase<"Inc", 1, "inc">;
+def UnaryOpKind_Dec : I32EnumAttrCase<"Dec", 2, "dec">;
+def UnaryOpKind_Plus : I32EnumAttrCase<"Plus", 3, "plus">;
+def UnaryOpKind_Minus : I32EnumAttrCase<"Minus", 4, "minus">;
+def UnaryOpKind_Not : I32EnumAttrCase<"Not", 5, "not">;
+
+def UnaryOpKind : I32EnumAttr<
+ "UnaryOpKind",
+ "unary operation kind",
+ [UnaryOpKind_Inc,
+ UnaryOpKind_Dec,
+ UnaryOpKind_Plus,
+ UnaryOpKind_Minus,
+ UnaryOpKind_Not,
+ ]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+// FIXME: Pure won't work when we add overloading.
+def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> {
+ let summary = "Unary operations";
+ let description = [{
+ `cir.unary` performs the unary operation according to
+ the specified opcode kind: [inc, dec, plus, minus, not].
+
+ It requires one input operand and has one result; both types
+ should be the same.
+
+ ```mlir
+ %7 = cir.unary(inc, %1) : i32 -> i32
+ %8 = cir.unary(dec, %2) : i32 -> i32
+ ```
+ }];
+
+ let results = (outs CIR_AnyType:$result);
+ let arguments = (ins Arg<UnaryOpKind>:$kind, Arg<CIR_AnyType>:$input);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $input `)` `:` type($input) `,` type($result) attr-dict
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// BinOp
+//===----------------------------------------------------------------------===//
+
+// FIXME: represent Commutative, Idempotent traits for appropriate binops
+def BinOpKind_Mul : I32EnumAttrCase<"Mul", 1, "mul">;
+def BinOpKind_Div : I32EnumAttrCase<"Div", 2, "div">;
+def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3, "rem">;
+def BinOpKind_Add : I32EnumAttrCase<"Add", 4, "add">;
+def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">;
+def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">;
+def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">;
+def BinOpKind_Or : I32EnumAttrCase<"Or", 10, "or">;
+
+def BinOpKind : I32EnumAttr<
+ "BinOpKind",
+ "binary operation (arith and logic) kind",
+ [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem,
+ BinOpKind_Add, BinOpKind_Sub,
+ BinOpKind_And, BinOpKind_Xor,
+ BinOpKind_Or]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+// FIXME: Pure won't work when we add overloading.
+def BinOp : CIR_Op<"binop", [Pure,
+ SameTypeOperands, SameOperandsAndResultType]> {
+
+ let summary = "Binary operations (arith and logic)";
+ let description = [{
+ `cir.binop` performs the binary operation according to
+ the specified opcode kind: [mul, div, rem, add, sub,
+ and, xor, or].
+
+ It requires two input operands and has one result; all types
+ should be the same.
+
+ ```mlir
+ %7 = cir.binop(add, %1, %2) : !s32i
+ %8 = cir.binop(mul, %1, %2) : !u8i
+ ```
+ }];
+
+ // TODO: get more accurate than CIR_AnyType
+ let results = (outs CIR_AnyType:$result);
+ let arguments = (ins Arg<BinOpKind>:$kind,
+ CIR_AnyType:$lhs, CIR_AnyType:$rhs,
+ UnitAttr:$no_unsigned_wrap,
+ UnitAttr:$no_signed_wrap);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $lhs `,` $rhs `)`
+ (`nsw` $no_signed_wrap^)?
+ (`nuw` $no_unsigned_wrap^)?
+ `:` type($lhs) attr-dict
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// ShiftOp
+//===----------------------------------------------------------------------===//
+
+def ShiftOp : CIR_Op<"shift", [Pure]> {
+ let summary = "Shift";
+ let description = [{
+ Shift `left` or `right`, according to the first keyword. The second
+ operand is the shift target and the third the amount.
+
+ ```mlir
+ %7 = cir.shift(left, %1 : !u64i, %4 : !s32i) -> !u64i
+ ```
+ }];
+
+ let results = (outs CIR_IntType:$result);
+ let arguments = (ins CIR_IntType:$value, CIR_IntType:$amount,
+ UnitAttr:$isShiftleft);
+
+ let assemblyFormat = [{
+ `(`
+ (`left` $isShiftleft^) : (`right`)?
+ `,` $value `:` type($value)
+ `,` $amount `:` type($amount)
+ `)` `->` type($result) attr-dict
+ }];
+
+ // Already covered by the traits
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// CmpOp
+//===----------------------------------------------------------------------===//
+
+def CmpOpKind_LT : I32EnumAttrCase<"lt", 1>;
+def CmpOpKind_LE : I32EnumAttrCase<"le", 2>;
+def CmpOpKind_GT : I32EnumAttrCase<"gt", 3>;
+def CmpOpKind_GE : I32EnumAttrCase<"ge", 4>;
+def CmpOpKind_EQ : I32EnumAttrCase<"eq", 5>;
+def CmpOpKind_NE : I32EnumAttrCase<"ne", 6>;
+
+def CmpOpKind : I32EnumAttr<
+ "CmpOpKind",
+ "compare operation kind",
+ [CmpOpKind_LT, CmpOpKind_LE, CmpOpKind_GT,
+ CmpOpKind_GE, CmpOpKind_EQ, CmpOpKind_NE]> {
+ let cppNamespace = "::mlir::cir";
+}
+
+// FIXME: Pure might not work when we add overloading.
+def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> {
+
+ let summary = "Compare two values and produce a boolean result";
+ let description = [{
+ `cir.cmp` compares two input operands of the same type and produces a
+ `cir.bool` result. The kinds of comparison available are:
+ [lt,le,gt,ge,eq,ne]
+
+ ```mlir
+ %7 = cir.cmp(gt, %1, %2) : i32, !cir.bool
+ ```
+ }];
+
+ // TODO: get more accurate than CIR_AnyType
+ let results = (outs CIR_AnyType:$result);
+ let arguments = (ins Arg<CmpOpKind>:$kind,
+ CIR_AnyType:$lhs, CIR_AnyType:$rhs);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` type($result) attr-dict
+ }];
+
+ // Already covered by the traits
+ let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// BitsOp
+//===----------------------------------------------------------------------===//
+
+class CIR_BitOp<string mnemonic, TypeConstraint inputTy>
+ : CIR_Op<mnemonic, [Pure]> {
+ let arguments = (ins inputTy:$input);
+ let results = (outs SInt32:$result);
+
+ let assemblyFormat = [{
+ `(` $input `:` type($input) `)` `:` type($result) attr-dict
+ }];
+}
+
+def BitClrsbOp : CIR_BitOp<"bit.clrsb", AnyTypeOf<[SInt32, SInt64]>> {
+ let summary = "Get the number of leading redundant sign bits in the input";
+ let description = [{
+ Compute the number of leading redundant sign bits in the input integer.
+
+ The input integer must be a signed integer. The most significant bit of
+ the input integer is the sign bit. The `cir.bit.clrsb` operation returns
+ the number of redundant sign bits in the input, that is, the number of
+ bits following the most significant bit that are identical to it.
+
+ The bit width of the input integer must be either 32 or 64.
+
+ Examples:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+
+ // %0 = 0xDEADBEEF, 0b1101_1110_1010_1101_1011_1110_1110_1111
+ %0 = cir.const(#cir.int<3735928559> : !s32i) : !s32i
+ // %1 will be 1 because there is 1 bit following the most significant bit
+ // that is identical to it.
+ %1 = cir.bit.clrsb(%0 : !s32i) : !s32i
+
+ // %2 = 1, 0b0000_0000_0000_0000_0000_0000_0000_0001
+ %2 = cir.const(#cir.int<1> : !s32i) : !s32i
+ // %3 will be 30
+ %3 = cir.bit.clrsb(%2 : !s32i) : !s32i
+ ```
+ }];
+}
+
+def BitClzOp : CIR_BitOp<"bit.clz", AnyTypeOf<[UInt16, UInt32, UInt64]>> {
+ let summary = "Get the number of leading 0-bits in the input";
+ let description = [{
+ Compute the number of leading 0-bits in the input.
+
+ The input integer must be an unsigned integer. The `cir.bit.clz` operation
+ returns the number of consecutive 0-bits at the most significant bit
+ position in the input.
+
+ This operation invokes undefined behavior if the input value is 0.
+
+ Example:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+ !u32i = !cir.int<u, 32>
+
+ // %0 = 0b0000_0000_0000_0000_0000_0000_0000_1000
+ %0 = cir.const(#cir.int<8> : !u32i) : !u32i
+ // %1 will be 28
+ %1 = cir.bit.clz(%0 : !u32i) : !s32i
+ ```
+ }];
+}
+
+def BitCtzOp : CIR_BitOp<"bit.ctz", AnyTypeOf<[UInt16, UInt32, UInt64]>> {
+ let summary = "Get the number of trailing 0-bits in the input";
+ let description = [{
+ Compute the number of trailing 0-bits in the input.
+
+ The input integer must be an unsigned integer. The `cir.bit.ctz` operation
+ returns the number of consecutive 0-bits at the least significant bit
+ position in the input.
+
+ This operation invokes undefined behavior if the input value is 0.
+
+ Example:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+ !u32i = !cir.int<u, 32>
+
+ // %0 = 0b1000
+ %0 = cir.const(#cir.int<8> : !u32i) : !u32i
+ // %1 will be 3
+ %1 = cir.bit.ctz(%0 : !u32i) : !s32i
+ ```
+ }];
+}
+
+def BitFfsOp : CIR_BitOp<"bit.ffs", AnyTypeOf<[SInt32, SInt64]>> {
+ let summary = "Get the position of the least significant 1-bit of input";
+ let description = [{
+ Compute the position of the least significant 1-bit of the input.
+
+ The input integer must be a signed integer. The `cir.bit.ffs` operation
+ returns one plus the index of the least significant 1-bit of the input
+ signed integer. As a special case, if the input integer is 0,
+ `cir.bit.ffs` returns 0.
+
+ Example:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+
+ // %0 = 0b0010_1000
+ %0 = cir.const(#cir.int<40> : !s32i) : !s32i
+ // %1 will be 4 since the 4th least significant bit is 1.
+ %1 = cir.bit.ffs(%0 : !s32i) : !s32i
+ ```
+ }];
+}
+
+def BitParityOp : CIR_BitOp<"bit.parity", AnyTypeOf<[UInt32, UInt64]>> {
+ let summary = "Get the parity of input";
+ let description = [{
+ Compute the parity of the input. The parity of an integer is the number
+ of 1-bits in it modulo 2.
+
+ The input must be an unsigned integer.
+
+ Example:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+ !u32i = !cir.int<u, 32>
+
+ // %0 = 0b0110_1000
+ %0 = cir.const(#cir.int<104> : !u32i) : !u32i
+ // %1 will be 1 since there are 3 1-bits in %0
+ %1 = cir.bit.parity(%0 : !u32i) : !s32i
+ ```
+ }];
+}
+
+def BitPopcountOp
+ : CIR_BitOp<"bit.popcount", AnyTypeOf<[UInt16, UInt32, UInt64]>> {
+ let summary = "Get the number of 1-bits in input";
+ let description = [{
+ Compute the number of 1-bits in the input.
+
+ The input must be an unsigned integer.
+
+ Example:
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+ !u32i = !cir.int<u, 32>
+
+ // %0 = 0b0110_1000
+ %0 = cir.const(#cir.int<104> : !u32i) : !u32i
+ // %1 will be 3 since there are 3 1-bits in %0
+ %1 = cir.bit.popcount(%0 : !u32i) : !s32i
+ ```
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// ByteswapOp
+//===----------------------------------------------------------------------===//
+
+def ByteswapOp : CIR_Op<"bswap", [Pure, SameOperandsAndResultType]> {
+ let summary = "Reverse the bytes that constitute the operand integer";
+ let description = [{
+ The `cir.bswap` operation takes an integer as operand, and returns it with
+ the order of bytes that constitute the operand reversed.
+
+ The operand integer must be an unsigned integer. Its width must be either
+ 16, 32, or 64.
+
+ Example:
+
+ ```mlir
+ !u32i = !cir.int<u, 32>
+
+ // %0 = 0x12345678
+ %0 = cir.const(#cir.int<305419896> : !u32i) : !u32i
+
+ // %1 should be 0x78563412
+ %1 = cir.bswap(%0 : !u32i) : !u32i
+ ```
+ }];
+
+ let results = (outs CIR_IntType:$result);
+ let arguments = (ins AnyTypeOf<[UInt16, UInt32, UInt64]>:$input);
+
+ let assemblyFormat = [{
+ `(` $input `:` type($input) `)` `:` type($result) attr-dict
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// CmpThreeWayOp
+//===----------------------------------------------------------------------===//
+
+def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> {
+ let summary = "Compare two values with C++ three-way comparison semantics";
+ let description = [{
+ The `cir.cmp3way` operation models the `<=>` operator in C++20. It takes
+ two operands with the same type and produces a result indicating the
+ ordering between the two input operands.
+
+ The result of the operation is a signed integer that indicates the
+ ordering between the two input operands.
+
+ There are two kinds of ordering: strong ordering and partial ordering.
+ Comparing different types of values yields different kinds of orderings.
+ The `info` parameter gives the ordering kind and other necessary
+ information about the comparison.
+
+ Example (the concrete `lt`/`eq`/`gt`/`unordered` values below are
+ illustrative):
+
+ ```mlir
+ !s32i = !cir.int<s, 32>
+
+ #cmp3way_strong = #cmp3way_info<strong, lt = -1, eq = 0, gt = 1>
+ #cmp3way_partial = #cmp3way_info<partial, lt = -1, eq = 0, gt = 1, unordered = 2>
+
+ %0 = cir.const(#cir.int<0> : !s32i) : !s32i
+ %1 = cir.const(#cir.int<1> : !s32i) : !s32i
+ %2 = cir.cmp3way(%0 : !s32i, %1, #cmp3way_strong) : !s8i
+
+ %3 = cir.const(#cir.fp<0.0> : !cir.float) : !cir.float
+ %4 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float
+ %5 = cir.cmp3way(%3 : !cir.float, %4, #cmp3way_partial) : !s8i
+ ```
+ }];
+
+ let results = (outs PrimitiveSInt:$result);
+ let arguments = (ins CIR_AnyType:$lhs, CIR_AnyType:$rhs,
+ CmpThreeWayInfoAttr:$info);
+
+ let assemblyFormat = [{
+ `(` $lhs `:` type($lhs) `,` $rhs `,` qualified($info) `)`
+ `:` type($result) attr-dict
+ }];
+
+ let hasVerifier = 0;
+
+ let extraClassDeclaration = [{
+ /// Determine whether this three-way comparison produces a strong
+ /// ordering.
+ bool isStrongOrdering() {
+ return getInfo().getOrdering() == mlir::cir::CmpOrdering::Strong;
+ }
+
+ /// Determine whether this three-way comparison compares integral operands.
+    bool isIntegralComparison() {
+      return getLhs().getType().isa<mlir::cir::IntType>();
+    }
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// SwitchOp
+//===----------------------------------------------------------------------===//
+
+def CaseOpKind_DT : I32EnumAttrCase<"Default", 1, "default">;
+def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2, "equal">;
+def CaseOpKind_AO : I32EnumAttrCase<"Anyof", 3, "anyof">;
+
+def CaseOpKind : I32EnumAttr<
+    "CaseOpKind",
+    "case kind",
+    [CaseOpKind_DT, CaseOpKind_EQ, CaseOpKind_AO]> {
+  let cppNamespace = "::mlir::cir";
+}
+
+def CaseEltValueListAttr :
+  TypedArrayAttrBase {
+  let constBuilderCall = ?;
+}
+
+def CaseAttr : AttrDef<CIR_Dialect, "Case"> {
+  // FIXME: value should probably be optional for a clearer "default"
+  // representation.
+  let parameters = (ins "ArrayAttr":$value, "CaseOpKindAttr":$kind);
+  let mnemonic = "case";
+  let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def CaseArrayAttr :
+  TypedArrayAttrBase {
+  let constBuilderCall = ?;
+}
+
+def SwitchOp : CIR_Op<"switch",
+    [SameVariadicOperandSize,
+     DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+     RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> {
+  let summary = "Switch operation";
+  let description = [{
+    The `cir.switch` operation represents C/C++ switch functionality for
+    conditionally executing multiple regions of code. The operand to a switch
+    is an integral condition value.
+
+    A variadic list of "case" attribute operands and regions track the possible
+    control flow within `cir.switch`. A `case` must be in one of the following
+    forms:
+    - `equal, <constant>`: equality of the second case operand against the
+      condition.
+    - `anyof, [constant-list]`: the condition equals any of the values in the
+      given list.
+    - `default`: any other value.
+
+    Each case region must be explicitly terminated.
+
+    Examples:
+
+    ```mlir
+    cir.switch (%b : i32) [
+      case (equal, 20) {
+        ...
+        cir.yield break
+      },
+      case (anyof, [1, 2, 3] : i32) {
+        ...
+        cir.return ...
+      }
+      case (default) {
+        ...
+        cir.yield fallthrough
+      }
+    ]
+    ```
+  }];
+
+  let arguments = (ins CIR_IntType:$condition,
+                       OptionalAttr<CaseArrayAttr>:$cases);
+
+  let regions = (region VariadicRegion<AnyRegion>:$regions);
+
+  let hasVerifier = 1;
+
+  let skipDefaultBuilders = 1;
+  let builders = [
+    OpBuilder<(ins "Value":$condition,
+               "function_ref<void(OpBuilder &, Location, OperationState &)>":$switchBuilder)>
+  ];
+
+  let assemblyFormat = [{
+    custom<SwitchOp>(
+      $regions, $cases, $condition, type($condition)
+    )
+    attr-dict
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// BrOp
+//===----------------------------------------------------------------------===//
+
+def BrOp : CIR_Op<"br",
+    [DeclareOpInterfaceMethods<BranchOpInterface>,
+     Pure, Terminator]> {
+  let summary = "Unconditional branch";
+  let description = [{
+    The `cir.br` operation branches unconditionally to a block. It is used to
+    represent C/C++ `goto` statements and general block branching.
+
+    Example:
+
+    ```mlir
+      ...
+        cir.br ^bb3
+      ^bb3:
+        cir.return
+    ```
+  }];
+
+  let builders = [
+    OpBuilder<(ins "Block *":$dest,
+                   CArg<"ValueRange", "{}">:$destOperands), [{
+      $_state.addSuccessors(dest);
+      $_state.addOperands(destOperands);
+    }]>
+  ];
+
+  let arguments = (ins Variadic<CIR_AnyType>:$destOperands);
+  let successors = (successor AnySuccessor:$dest);
+  let assemblyFormat = [{
+    $dest (`(` $destOperands^ `:` type($destOperands) `)`)?
attr-dict
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// BrCondOp
+//===----------------------------------------------------------------------===//
+
+def BrCondOp : CIR_Op<"brcond",
+    [DeclareOpInterfaceMethods<BranchOpInterface>,
+     Pure, Terminator, SameVariadicOperandSize]> {
+  let summary = "Conditional branch";
+  let description = [{
+    The `cir.brcond %cond, ^bb0, ^bb1` operation branches to the 'bb0' block
+    if %cond (which must be of `!cir.bool` type) evaluates to true, and
+    otherwise to 'bb1'.
+
+    Example:
+
+    ```mlir
+      ...
+        cir.brcond %a, ^bb3, ^bb4
+      ^bb3:
+        cir.return
+      ^bb4:
+        cir.yield
+    ```
+  }];
+
+  let builders = [
+    OpBuilder<(ins "Value":$cond, "Block *":$destTrue, "Block *":$destFalse,
+                   CArg<"ValueRange", "{}">:$destOperandsTrue,
+                   CArg<"ValueRange", "{}">:$destOperandsFalse), [{
+      $_state.addOperands(cond);
+      $_state.addSuccessors(destTrue);
+      $_state.addSuccessors(destFalse);
+      $_state.addOperands(destOperandsTrue);
+      $_state.addOperands(destOperandsFalse);
+    }]>
+  ];
+
+  let arguments = (ins CIR_BoolType:$cond,
+                       Variadic<CIR_AnyType>:$destOperandsTrue,
+                       Variadic<CIR_AnyType>:$destOperandsFalse);
+  let successors = (successor AnySuccessor:$destTrue, AnySuccessor:$destFalse);
+  let assemblyFormat = [{
+    $cond
+    $destTrue (`(` $destOperandsTrue^ `:` type($destOperandsTrue) `)`)?
+    `,`
+    $destFalse (`(` $destOperandsFalse^ `:` type($destOperandsFalse) `)`)?
+    attr-dict
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// While & DoWhileOp
+//===----------------------------------------------------------------------===//
+
+class WhileOpBase<string mnemonic> : CIR_Op<mnemonic, [LoopOpInterface, NoRegionArguments]> {
+  defvar isWhile = !eq(mnemonic, "while");
+  let summary = "C/C++ " # !if(isWhile, "while", "do-while") # " loop";
+  let builders = [
+    OpBuilder<(ins "function_ref<void(OpBuilder &, Location)>":$condBuilder,
+                   "function_ref<void(OpBuilder &, Location)>":$bodyBuilder), [{
+      OpBuilder::InsertionGuard guard($_builder);
+      $_builder.createBlock($_state.addRegion());
+    }] # !if(isWhile, [{
+      condBuilder($_builder, $_state.location);
+      $_builder.createBlock($_state.addRegion());
+      bodyBuilder($_builder, $_state.location);
+    }], [{
+      bodyBuilder($_builder, $_state.location);
+      $_builder.createBlock($_state.addRegion());
+      condBuilder($_builder, $_state.location);
+    }])>
+  ];
+}
+
+def WhileOp : WhileOpBase<"while"> {
+  let regions = (region SizedRegion<1>:$cond, MinSizedRegion<1>:$body);
+  let assemblyFormat = "$cond `do` $body attr-dict";
+
+  let description = [{
+    Represents a C/C++ while loop. It consists of two regions:
+
+    - `cond`: single block region with the loop's condition. Should be
+      terminated with a `cir.condition` operation.
+    - `body`: contains the loop body and an arbitrary number of blocks.
+
+    Example:
+
+    ```mlir
+    cir.while {
+      cir.condition %cond : cir.bool
+    } do {
+      cir.break
+    ^bb2:
+      cir.yield
+    }
+    ```
+  }];
+}
+
+def DoWhileOp : WhileOpBase<"do"> {
+  let regions = (region MinSizedRegion<1>:$body, SizedRegion<1>:$cond);
+  let assemblyFormat = " $body `while` $cond attr-dict";
+
+  let extraClassDeclaration = [{
+    Region &getEntry() { return getBody(); }
+  }];
+
+  let description = [{
+    Represents a C/C++ do-while loop. Identical to `cir.while` but the
+    condition is evaluated after the body.
+
+    Example:
+
+    ```mlir
+    cir.do {
+      cir.break
+    ^bb2:
+      cir.yield
+    } while {
+      cir.condition %cond : cir.bool
+    }
+    ```
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// ForOp
+//===----------------------------------------------------------------------===//
+
+def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> {
+  let summary = "C/C++ for loop counterpart";
+  let description = [{
+    Represents a C/C++ for loop. It consists of three regions:
+
+    - `cond`: single block region with the loop's condition. Should be
+      terminated with a `cir.condition` operation.
+    - `body`: contains the loop body and an arbitrary number of blocks.
+    - `step`: single block region with the loop's step.
+
+    Example:
+
+    ```mlir
+    cir.for cond {
+      cir.condition(%val)
+    } body {
+      cir.break
+    ^bb2:
+      cir.yield
+    } step {
+      cir.yield
+    }
+    ```
+  }];
+
+  let regions = (region SizedRegion<1>:$cond,
+                        MinSizedRegion<1>:$body,
+                        SizedRegion<1>:$step);
+  let assemblyFormat = [{
+    `:` `cond` $cond
+    `body` $body
+    `step` $step
+    attr-dict
+  }];
+
+  let builders = [
+    OpBuilder<(ins "function_ref<void(OpBuilder &, Location)>":$condBuilder,
+                   "function_ref<void(OpBuilder &, Location)>":$bodyBuilder,
+                   "function_ref<void(OpBuilder &, Location)>":$stepBuilder), [{
+      OpBuilder::InsertionGuard guard($_builder);
+
+      // Build condition region.
+      $_builder.createBlock($_state.addRegion());
+      condBuilder($_builder, $_state.location);
+
+      // Build body region.
+      $_builder.createBlock($_state.addRegion());
+      bodyBuilder($_builder, $_state.location);
+
+      // Build step region.
+      $_builder.createBlock($_state.addRegion());
+      stepBuilder($_builder, $_state.location);
+    }]>
+  ];
+
+  let extraClassDeclaration = [{
+    Region *maybeGetStep() { return &getStep(); }
+    llvm::SmallVector<Region *> getRegionsInExecutionOrder() {
+      return llvm::SmallVector<Region *>{&getCond(), &getBody(), &getStep()};
+    }
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalOp
+//===----------------------------------------------------------------------===//
+
+// Linkage types. This is currently a replay of llvm/IR/GlobalValue.h, which is
+// handy for forwarding appropriate linkage types to LLVM lowering, and is
+// especially useful for C++ support.
+
+// Externally visible function
+def Global_ExternalLinkage :
+    I32EnumAttrCase<"ExternalLinkage", 0, "external">;
+// Available for inspection, not emission.
+def Global_AvailableExternallyLinkage :
+    I32EnumAttrCase<"AvailableExternallyLinkage", 1, "available_externally">;
+// Keep one copy of function when linking (inline)
+def Global_LinkOnceAnyLinkage :
+    I32EnumAttrCase<"LinkOnceAnyLinkage", 2, "linkonce">;
+// Same, but only replaced by something equivalent.
+def Global_LinkOnceODRLinkage :
+    I32EnumAttrCase<"LinkOnceODRLinkage", 3, "linkonce_odr">;
+// Keep one copy of named function when linking (weak)
+def Global_WeakAnyLinkage :
+    I32EnumAttrCase<"WeakAnyLinkage", 4, "weak">;
+// Same, but only replaced by something equivalent.
+def Global_WeakODRLinkage :
+    I32EnumAttrCase<"WeakODRLinkage", 5, "weak_odr">;
+// TODO: should we add something like appending linkage too?
+// Special purpose, only applies to global arrays
+// def Global_AppendingLinkage :
+//     I32EnumAttrCase<"AppendingLinkage", 6, "appending">;
+// Rename collisions when linking (static functions).
+def Global_InternalLinkage :
+    I32EnumAttrCase<"InternalLinkage", 7, "internal">;
+// Like Internal, but omit from symbol table, prefix it with
+// "cir_" to prevent clash with MLIR's symbol "private".
+def Global_PrivateLinkage :
+    I32EnumAttrCase<"PrivateLinkage", 8, "cir_private">;
+// ExternalWeak linkage description.
+def Global_ExternalWeakLinkage :
+    I32EnumAttrCase<"ExternalWeakLinkage", 9, "extern_weak">;
+// Tentative definitions.
+def Global_CommonLinkage :
+    I32EnumAttrCase<"CommonLinkage", 10, "common">;
+
+/// An enumeration for the kinds of linkage for global values.
+def GlobalLinkageKind : I32EnumAttr<
+    "GlobalLinkageKind",
+    "Linkage type/kind",
+    [Global_ExternalLinkage, Global_AvailableExternallyLinkage,
+     Global_LinkOnceAnyLinkage, Global_LinkOnceODRLinkage,
+     Global_WeakAnyLinkage, Global_WeakODRLinkage,
+     Global_InternalLinkage, Global_PrivateLinkage,
+     Global_ExternalWeakLinkage, Global_CommonLinkage
+    ]> {
+  let cppNamespace = "::mlir::cir";
+}
+
+def SOB_Undefined : I32EnumAttrCase<"undefined", 1>;
+def SOB_Defined : I32EnumAttrCase<"defined", 2>;
+def SOB_Trapping : I32EnumAttrCase<"trapping", 3>;
+
+def SignedOverflowBehaviorEnum : I32EnumAttr<
+    "SignedOverflowBehavior",
+    "the behavior for signed overflow",
+    [SOB_Undefined, SOB_Defined, SOB_Trapping]> {
+  let cppNamespace = "::mlir::cir::sob";
+}
+
+/// Definition of TLS related kinds.
+def TLS_GeneralDynamic :
+    I32EnumAttrCase<"GeneralDynamic", 0, "tls_dyn">;
+def TLS_LocalDynamic :
+    I32EnumAttrCase<"LocalDynamic", 1, "tls_local_dyn">;
+def TLS_InitialExec :
+    I32EnumAttrCase<"InitialExec", 2, "tls_init_exec">;
+def TLS_LocalExec :
+    I32EnumAttrCase<"LocalExec", 3, "tls_local_exec">;
+
+def TLSModel : I32EnumAttr<
+    "TLS_Model",
+    "TLS model",
+    [TLS_GeneralDynamic, TLS_LocalDynamic, TLS_InitialExec, TLS_LocalExec]> {
+  let cppNamespace = "::mlir::cir";
+}
+
+def GlobalOp : CIR_Op<"global",
+    [Symbol, DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+     NoRegionArguments]> {
+  let summary = "Declares or defines a global variable";
+  let description = [{
+    The `cir.global` operation declares or defines a named global variable.
+
+    The backing memory for the variable is allocated statically and is
+    described by the type of the variable.
+
+    The operation is a declaration if no `initial_value` is
+    specified, else it is a definition.
+
+    The global variable can also be marked constant using the
+    `constant` unit attribute. Writing to such constant global variables is
+    undefined.
+
+    The `linkage` tracks C/C++ linkage types, currently very similar to LLVM's.
+    Symbol visibility in `sym_visibility` is defined in terms of MLIR's
+    visibility and verified to be in accordance with `linkage`.
+
+    Example:
+
+    ```mlir
+    // Public and constant variable with initial value.
+    cir.global public constant @c : i32 = 4;
+    ```
+  }];
+
+  // Note that both sym_name and sym_visibility are tied to Symbol trait.
+  // TODO: sym_visibility can possibly be represented by implementing the
+  // necessary Symbol's interface in terms of linkage instead.
+  let arguments = (ins SymbolNameAttr:$sym_name,
+                       OptionalAttr<StrAttr>:$sym_visibility,
+                       TypeAttr:$sym_type,
+                       Arg:$linkage,
+                       OptionalAttr<TLSModel>:$tls_model,
+                       // Note this can also be a FlatSymbolRefAttr
+                       OptionalAttr<AnyAttr>:$initial_value,
+                       UnitAttr:$constant,
+                       OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr:$ast,
+                       OptionalAttr<StrAttr>:$section
+  );
+  let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion);
+  let assemblyFormat = [{
+    ($sym_visibility^)?
+    (`constant` $constant^)?
+    $linkage
+    ($tls_model^)?
+    $sym_name
+    custom<GlobalOpTypeAndInitialValue>($sym_type, $initial_value, $ctorRegion, $dtorRegion)
+    attr-dict
+  }];
+
+  let extraClassDeclaration = [{
+    bool isDeclaration() {
+      return !getInitialValue() && getCtorRegion().empty() && getDtorRegion().empty();
+    }
+    bool hasInitializer() { return !isDeclaration(); }
+    bool hasAvailableExternallyLinkage() {
+      return mlir::cir::isAvailableExternallyLinkage(getLinkage());
+    }
+    bool isDeclarationForLinker() {
+      if (hasAvailableExternallyLinkage())
+        return true;
+
+      return isDeclaration();
+    }
+
+    /// Whether the definition of this global may be replaced at link time.
+    bool isWeakForLinker() { return cir::isWeakForLinker(getLinkage()); }
+  }];
+
+  let skipDefaultBuilders = 1;
+  let builders = [
+    OpBuilder<(ins
+      // MLIR's default visibility is public.
+      "StringRef":$sym_name,
+      "Type":$sym_type,
+      CArg<"bool", "false">:$isConstant,
+      // CIR defaults to external linkage.
+      CArg<"cir::GlobalLinkageKind",
+           "cir::GlobalLinkageKind::ExternalLinkage">:$linkage,
+      CArg<"function_ref<void(OpBuilder &, Location)>",
+           "nullptr">:$ctorBuilder,
+      CArg<"function_ref<void(OpBuilder &, Location)>",
+           "nullptr">:$dtorBuilder)>
+  ];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// GetGlobalOp
+//===----------------------------------------------------------------------===//
+
+def GetGlobalOp : CIR_Op<"get_global",
+    [Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>]> {
+  let summary = "Get the address of a global variable";
+  let description = [{
+    The `cir.get_global` operation retrieves the address pointing to a
+    named global variable. If the global variable is marked constant, writing
+    to the resulting address (such as through a `cir.store` operation) is
+    undefined. The resulting type must always be a `!cir.ptr<...>` type.
+
+    Addresses of thread local globals can only be retrieved if this operation
+    is marked `thread_local`, which indicates the address isn't constant.
+
+    Example:
+    ```mlir
+    %x = cir.get_global @foo : !cir.ptr
+    ...
+    %y = cir.get_global thread_local @batata : !cir.ptr
+    ```
+  }];
+
+  let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls);
+  let results = (outs Res:$addr);
+
+  // FIXME: we should not be printing `cir.ptr` below, that should come
+  // from the pointer type directly.
+  let assemblyFormat = [{
+    (`thread_local` $tls^)?
+    $name `:` `cir.ptr` type($addr) attr-dict
+  }];
+
+  // `GetGlobalOp` is fully verified by its traits.
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+def VTableAddrPointOp : CIR_Op<"vtable.address_point",
+    [Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>]> {
+  let summary = "Get the vtable (global variable) address point";
+  let description = [{
+    The `vtable.address_point` operation retrieves the "effective" address
+    (address point) of a C++ virtual table. An object's internal `__vptr`
+    gets initialized on top of the value returned by this operation.
+
+    `vtable_index` provides the appropriate vtable within the vtable group
+    (as specified by Itanium ABI), and `addr_point_index` the actual address
+    point within that vtable.
+
+    The return type is always a `!cir.ptr<!cir.ptr<() -> i32>>`.
+
+    Example:
+    ```mlir
+    cir.global linkonce_odr @_ZTV1B = ...
+    ...
+    %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr <!cir.ptr<() -> i32>>
+    ```
+  }];
+
+  let arguments = (ins OptionalAttr<FlatSymbolRefAttr>:$name,
+                       Optional:$sym_addr,
+                       I32Attr:$vtable_index,
+                       I32Attr:$address_point_index);
+  let results = (outs Res:$addr);
+
+  // FIXME: we should not be printing `cir.ptr` below, that should come
+  // from the pointer type directly.
+  let assemblyFormat = [{
+    `(`
+    ($name^)?
+    ($sym_addr^ `:` type($sym_addr))?
+    `,`
+    `vtable_index` `=` $vtable_index `,`
+    `address_point_index` `=` $address_point_index
+    `)`
+    `:` `cir.ptr` type($addr) attr-dict
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// SetBitfieldOp
+//===----------------------------------------------------------------------===//
+
+def SetBitfieldOp : CIR_Op<"set_bitfield"> {
+  let summary = "Set a bitfield";
+  let description = [{
+    The `cir.set_bitfield` operation provides a store-like access to
+    a bit field of a record.
+
+    It expects an address of the storage to store into, the type of that
+    storage, the value being stored, the name of the bit field, a pointer to
+    the storage in the base record, the size of the storage, the size of the
+    bit field, the offset of the bit field, and a sign. It returns the value
+    being stored.
+
+    A unit attribute `volatile` can be used to indicate a volatile access to
+    the bitfield.
+
+    Example:
+    Suppose we have a struct with multiple bitfields stored in
+    different storages. The `cir.set_bitfield` operation sets the value
+    of the bitfield.
+    ```C++
+    typedef struct {
+      int a : 4;
+      int b : 27;
+      int c : 17;
+      int d : 2;
+      int e : 15;
+    } S;
+
+    void store_bitfield(S& s) {
+      s.d = 3;
+    }
+    ```
+
+    ```mlir
+    // 'd' is in the storage with the index 1
+    !struct_type = !cir.struct<struct "S" {!cir.int<u, 32>, !cir.int<u, 32>, !cir.int<u, 16>} #cir.record.decl.ast>
+    #bfi_d = #cir.bitfield_info<name = "d", storage_type = !u32i, size = 2, offset = 17, is_signed = true>
+
+    %1 = cir.const(#cir.int<3> : !s32i) : !s32i
+    %2 = cir.load %0 : cir.ptr <!cir.ptr<!struct_type>>, !cir.ptr<!struct_type>
+    %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr<!struct_type> -> !cir.ptr<!u32i>
+    %4 = cir.set_bitfield(#bfi_d, %3 : !cir.ptr<!u32i>, %1 : !s32i) -> !s32i
+    ```
+  }];
+
+  let arguments = (ins
+    Arg:$addr,
+    CIR_AnyType:$src,
+    BitfieldInfoAttr:$bitfield_info,
+    UnitAttr:$is_volatile
+  );
+
+  let results = (outs CIR_IntType:$result);
+
+  let assemblyFormat = [{ `(`$bitfield_info`,` $addr`:`qualified(type($addr))`,`
+    $src`:`type($src) `)` attr-dict `->` type($result) }];
+
+  let builders = [
+    OpBuilder<(ins "Type":$type,
+                   "Value":$addr,
+                   "Type":$storage_type,
+                   "Value":$src,
+                   "StringRef":$name,
+                   "unsigned":$size,
+                   "unsigned":$offset,
+                   "bool":$is_signed,
+                   "bool":$is_volatile
+                   ),
+              [{
+                BitfieldInfoAttr info =
+                  BitfieldInfoAttr::get($_builder.getContext(),
+                                        name, storage_type,
+                                        size, offset, is_signed);
+                build($_builder, $_state, type, addr, src, info, is_volatile);
+              }]>
+  ];
+}
+
+//===----------------------------------------------------------------------===//
+// GetBitfieldOp
+//===----------------------------------------------------------------------===//
+
+def GetBitfieldOp : CIR_Op<"get_bitfield"> {
+  let summary = "Get a bitfield";
+  let description = [{
+    The `cir.get_bitfield` operation provides a load-like access to
+    a bit field of a record.
+
+    It expects the name of a bit field, a pointer to the storage in the
+    base record, the type of that storage, the size of the bit field,
+    its offset, and a sign.
+
+    A unit attribute `volatile` can be used to indicate a volatile load of the
+    bitfield.
+
+    Example:
+    Suppose we have a struct with multiple bitfields stored in
+    different storages. The `cir.get_bitfield` operation gets the value
+    of the bitfield.
+    ```C++
+    typedef struct {
+      int a : 4;
+      int b : 27;
+      int c : 17;
+      int d : 2;
+      int e : 15;
+    } S;
+
+    int load_bitfield(S& s) {
+      return s.d;
+    }
+    ```
+
+    ```mlir
+    // 'd' is in the storage with the index 1
+    !struct_type = !cir.struct<struct "S" {!cir.int<u, 32>, !cir.int<u, 32>, !cir.int<u, 16>} #cir.record.decl.ast>
+    #bfi_d = #cir.bitfield_info<name = "d", storage_type = !u32i, size = 2, offset = 17, is_signed = true>
+
+    %2 = cir.load %0 : cir.ptr <!cir.ptr<!struct_type>>, !cir.ptr<!struct_type>
+    %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr<!struct_type> -> !cir.ptr<!u32i>
+    %4 = cir.get_bitfield(#bfi_d, %3 : !cir.ptr<!u32i>) -> !s32i
+    ```
+  }];
+
+  let arguments = (ins
+    Arg:$addr,
+    BitfieldInfoAttr:$bitfield_info,
+    UnitAttr:$is_volatile
+  );
+
+  let results = (outs CIR_IntType:$result);
+
+  let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:`
+    qualified(type($addr)) `)` `->` type($result) }];
+
+  let builders = [
+    OpBuilder<(ins "Type":$type,
+                   "Value":$addr,
+                   "Type":$storage_type,
+                   "StringRef":$name,
+                   "unsigned":$size,
+                   "unsigned":$offset,
+                   "bool":$is_signed,
+                   "bool":$is_volatile
+                   ),
+              [{
+                BitfieldInfoAttr info =
+                  BitfieldInfoAttr::get($_builder.getContext(),
+                                        name, storage_type,
+                                        size, offset, is_signed);
+                build($_builder, $_state, type, addr, info, is_volatile);
+              }]>
+  ];
+}
+
+//===----------------------------------------------------------------------===//
+// GetMemberOp
+//===----------------------------------------------------------------------===//
+
+def GetMemberOp : CIR_Op<"get_member"> {
+  let summary = "Get the address of a member of a struct";
+  let description = [{
+    The `cir.get_member` operation gets the address of a particular named
+    member from the input record.
+
+    It expects a pointer to the base record as well as the name of the member
+    and its field index.
+
+    Example:
+    ```mlir
+    // Suppose we have a struct with multiple members.
+    !s32i = !cir.int<s, 32>
+    !s8i = !cir.int<s, 8>
+    !struct_ty = !cir.struct<"struct.Bar" {!s32i, !s8i}>
+
+    // Get the address of the member at index 1.
+    %1 = cir.get_member %0[1] {name = "i"} : (!cir.ptr<!struct_ty>) -> !cir.ptr<!s8i>
+    ```
+  }];
+
+  let arguments = (ins
+    Arg:$addr,
+    StrAttr:$name,
+    IndexAttr:$index_attr);
+
+  let results = (outs Res:$result);
+
+  let assemblyFormat = [{
+    $addr `[` $index_attr `]` attr-dict
+    `:` qualified(type($addr)) `->` qualified(type($result))
+  }];
+
+  let builders = [
+    OpBuilder<(ins "Type":$type,
+                   "Value":$value,
+                   "llvm::StringRef":$name,
+                   "unsigned":$index),
+              [{
+                mlir::APInt fieldIdx(64, index);
+                build($_builder, $_state, type, value, name, fieldIdx);
+              }]>
+  ];
+
+  let extraClassDeclaration = [{
+    /// Return the index of the struct member being accessed.
+    uint64_t getIndex() { return getIndexAttr().getZExtValue(); }
+
+    /// Return the record type pointed by the base pointer.
+    mlir::cir::PointerType getAddrTy() { return getAddr().getType(); }
+
+    /// Return the result type.
+    mlir::cir::PointerType getResultTy() {
+      return getResult().getType().cast<mlir::cir::PointerType>();
+    }
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// GetRuntimeMemberOp
+//===----------------------------------------------------------------------===//
+
+def GetRuntimeMemberOp : CIR_Op<"get_runtime_member"> {
+  let summary = "Get the address of a member of a struct";
+  let description = [{
+    The `cir.get_runtime_member` operation gets the address of a member from
+    the input record. The target member is given by a value of type
+    `!cir.data_member` (i.e.
a pointer-to-data-member value).
+
+    This operation differs from `cir.get_member` in when the target member can
+    be determined. For the `cir.get_member` operation, the target member is
+    specified as a constant index, so the member it returns access to is known
+    when the operation is constructed. For the `cir.get_runtime_member`
+    operation, the target member is given through a pointer-to-data-member
+    value which is unknown until the program being compiled is executed. In
+    other words, `cir.get_member` represents a normal member access through the
+    `.` operator in C/C++:
+
+    ```cpp
+    struct Foo { int x; };
+    Foo f;
+    (void)f.x; // cir.get_member
+    ```
+
+    And `cir.get_runtime_member` represents a member access through the `.*` or
+    the `->*` operator in C++:
+
+    ```cpp
+    struct Foo { int x; };
+    Foo f;
+    Foo *p;
+    int Foo::*member;
+
+    (void)f.*member;  // cir.get_runtime_member
+    (void)p->*member; // cir.get_runtime_member
+    ```
+
+    This operation expects a pointer to the base record as well as the pointer
+    to the target member.
+  }];
+
+  let arguments = (ins
+    Arg:$addr,
+    Arg:$member);
+
+  let results = (outs Res:$result);
+
+  let assemblyFormat = [{
+    $addr `[` $member `:` qualified(type($member)) `]` attr-dict
+    `:` qualified(type($addr)) `->` qualified(type($result))
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VecInsertOp
+//===----------------------------------------------------------------------===//
+
+def VecInsertOp : CIR_Op<"vec.insert", [Pure,
+    TypesMatchWith<"argument type matches vector element type", "vec", "value",
+                   "$_self.cast<mlir::cir::VectorType>().getEltType()">,
+    AllTypesMatch<["result", "vec"]>]> {
+
+  let summary = "Insert one element into a vector object";
+  let description = [{
+    The `cir.vec.insert` operation replaces the element of the given vector at
+    the given index with the given value. The new vector with the inserted
+    element is returned.
+  }];
+
+  let arguments = (ins CIR_VectorType:$vec, AnyType:$value, PrimitiveInt:$index);
+  let results = (outs CIR_VectorType:$result);
+
+  let assemblyFormat = [{
+    $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:`
+    qualified(type($vec))
+  }];
+
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// VecExtractOp
+//===----------------------------------------------------------------------===//
+
+def VecExtractOp : CIR_Op<"vec.extract", [Pure,
+    TypesMatchWith<"type of 'result' matches element type of 'vec'", "vec",
+                   "result", "$_self.cast<mlir::cir::VectorType>().getEltType()">]> {
+
+  let summary = "Extract one element from a vector object";
+  let description = [{
+    The `cir.vec.extract` operation extracts the element at the given index
+    from a vector object.
+  }];
+
+  let arguments = (ins CIR_VectorType:$vec, PrimitiveInt:$index);
+  let results = (outs CIR_AnyType:$result);
+
+  let assemblyFormat = [{
+    $vec `[` $index `:` type($index) `]` attr-dict `:` qualified(type($vec))
+  }];
+
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// VecCreate
+//===----------------------------------------------------------------------===//
+
+def VecCreateOp : CIR_Op<"vec.create", [Pure]> {
+
+  let summary = "Create a vector value";
+  let description = [{
+    The `cir.vec.create` operation creates a vector value with the given element
+    values. The number of element arguments must match the number of elements
+    in the vector type.
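+
+    Example (a minimal sketch in the style of the other examples in this
+    dialect; the `!s32i` alias and the 2-element vector type are assumed):
+
+    ```mlir
+    !s32i = !cir.int<s, 32>
+
+    // Build the vector {1, 2} from two constant elements.
+    %0 = cir.const(#cir.int<1> : !s32i) : !s32i
+    %1 = cir.const(#cir.int<2> : !s32i) : !s32i
+    %2 = cir.vec.create(%0, %1 : !s32i, !s32i) : !cir.vector<!s32i x 2>
+    ```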
+  }];
+
+  let arguments = (ins Variadic<CIR_AnyType>:$elements);
+  let results = (outs CIR_VectorType:$result);
+
+  let assemblyFormat = [{
+    `(` ($elements^ `:` type($elements))? `)` `:` qualified(type($result))
+    attr-dict
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VecSplat
+//===----------------------------------------------------------------------===//
+
+// cir.vec.splat is a separate operation from cir.vec.create because more
+// efficient LLVM IR can be generated for it, and because some optimization and
+// analysis passes can benefit from knowing that all elements of the vector
+// have the same value.
+
+def VecSplatOp : CIR_Op<"vec.splat", [Pure,
+    TypesMatchWith<"type of 'value' matches element type of 'result'", "result",
+                   "value", "$_self.cast<mlir::cir::VectorType>().getEltType()">]> {
+
+  let summary = "Convert a scalar into a vector";
+  let description = [{
+    The `cir.vec.splat` operation creates a vector value from a scalar value.
+    All elements of the vector have the same value, that of the given scalar.
+  }];
+
+  let arguments = (ins CIR_AnyType:$value);
+  let results = (outs CIR_VectorType:$result);
+
+  let assemblyFormat = [{
+    $value `:` type($value) `,` qualified(type($result)) attr-dict
+  }];
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// VecCmp
+//===----------------------------------------------------------------------===//
+
+def VecCmpOp : CIR_Op<"vec.cmp", [Pure, SameTypeOperands]> {
+
+  let summary = "Compare two vectors";
+  let description = [{
+    The `cir.vec.cmp` operation does an element-wise comparison of two vectors
+    of the same type. The result is a vector of the same size as the operands
+    whose element type is the signed integral type that is the same size as the
+    element type of the operands. The values in the result are 0 or -1.
+  }];
+
+  let arguments = (ins Arg:$kind, CIR_VectorType:$lhs,
+                       CIR_VectorType:$rhs);
+  let results = (outs CIR_VectorType:$result);
+
+  let assemblyFormat = [{
+    `(` $kind `,` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `,`
+    qualified(type($result)) attr-dict
+  }];
+
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// VecTernary
+//===----------------------------------------------------------------------===//
+
+def VecTernaryOp : CIR_Op<"vec.ternary",
+    [Pure, AllTypesMatch<["result", "vec1", "vec2"]>]> {
+  let summary = "The `cond ? a : b` ternary operator for vector types";
+  let description = [{
+    The `cir.vec.ternary` operation represents the C/C++ ternary operator,
+    `?:`, for vector types, which does a `select` on individual elements of the
+    vectors. Unlike a regular `?:` operator, there is no short circuiting. All
+    three arguments are always evaluated. Because there is no short
+    circuiting, there are no regions in this operation, unlike cir.ternary.
+
+    The first argument is a vector of integral type. The second and third
+    arguments are vectors of the same type and have the same number of elements
+    as the first argument.
+
+    The result is a vector of the same type as the second and third arguments.
+    Each element of the result is `(bool)cond[n] ? vec1[n] : vec2[n]`.
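+
+    Example (a sketch under the same assumptions as the other vector examples;
+    `%cond`, `%a`, and `%b` are assumed to be 4-element `!s32i` vectors):
+
+    ```mlir
+    // %r[n] is %a[n] where %cond[n] is non-zero, and %b[n] otherwise.
+    %r = cir.vec.ternary(%cond, %a, %b) : !cir.vector<!s32i x 4>, !cir.vector<!s32i x 4>
+    ```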
+  }];
+  let arguments = (ins IntegerVector:$cond, CIR_VectorType:$vec1,
+                       CIR_VectorType:$vec2);
+  let results = (outs CIR_VectorType:$result);
+  let assemblyFormat = [{
+    `(` $cond `,` $vec1 `,` $vec2 `)` `:` qualified(type($cond)) `,`
+    qualified(type($vec1)) attr-dict
+  }];
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VecShuffle
+//===----------------------------------------------------------------------===//
+
+// TODO: Create an interface that both VecShuffleOp and VecShuffleDynamicOp
+// implement. This could be useful for passes that don't care how the vector
+// shuffle was specified.
+
+def VecShuffleOp : CIR_Op<"vec.shuffle",
+    [Pure, AllTypesMatch<["vec1", "vec2"]>]> {
+  let summary = "Combine two vectors using indices passed as constant integers";
+  let description = [{
+    The `cir.vec.shuffle` operation implements the documented form of Clang's
+    __builtin_shufflevector, where the indices of the shuffled result are
+    integer constants.
+
+    The two input vectors, which must have the same type, are concatenated.
+    Each of the integer constant arguments is interpreted as an index into that
+    concatenated vector, with a value of -1 meaning that the result value
+    doesn't matter. The result vector, which must have the same element type as
+    the input vectors and the same number of elements as the list of integer
+    constant indices, is constructed by taking the elements at the given
+    indices from the concatenated vector. The size of the result vector does
+    not have to match the size of the individual input vectors or of the
+    concatenated vector.
+  }];
+  let arguments = (ins CIR_VectorType:$vec1, CIR_VectorType:$vec2,
+                       ArrayAttr:$indices);
+  let results = (outs CIR_VectorType:$result);
+  let assemblyFormat = [{
+    `(` $vec1 `,` $vec2 `:` qualified(type($vec1)) `)` $indices `:`
+    qualified(type($result)) attr-dict
+  }];
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VecShuffleDynamic
+//===----------------------------------------------------------------------===//
+
+def VecShuffleDynamicOp : CIR_Op<"vec.shuffle.dynamic",
+    [Pure, AllTypesMatch<["vec", "result"]>]> {
+  let summary = "Shuffle a vector using indices in another vector";
+  let description = [{
+    The `cir.vec.shuffle.dynamic` operation implements the undocumented form of
+    Clang's __builtin_shufflevector, where the indices of the shuffled result
+    can be runtime values.
+
+    There are two input vectors, which must have the same number of elements.
+    The second input vector must have an integral element type. The elements of
+    the second vector are interpreted as indices into the first vector. The
+    result vector is constructed by taking the elements of the first input
+    vector at the indices indicated by the elements of the second vector.
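+
+    Example (a sketch; `%vec` and `%indices` are assumed to be 4-element
+    `!s32i` vectors):
+
+    ```mlir
+    // %new[n] = %vec[%indices[n]]
+    %new = cir.vec.shuffle.dynamic %vec : !cir.vector<!s32i x 4>, %indices : !cir.vector<!s32i x 4>
+    ```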
+  }];
+  let arguments = (ins CIR_VectorType:$vec, IntegerVector:$indices);
+  let results = (outs CIR_VectorType:$result);
+  let assemblyFormat = [{
+    $vec `:` qualified(type($vec)) `,` $indices `:` qualified(type($indices))
+    attr-dict
+  }];
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// BaseClassAddr
+//===----------------------------------------------------------------------===//
+
+def BaseClassAddrOp : CIR_Op<"base_class_addr"> {
+  let summary = "Get the base class address for a class/struct";
+  let description = [{
+    The `cir.base_class_addr` operation gets the address of a particular
+    base class given a derived class pointer.
+
+    Example:
+    ```mlir
+    TBD
+    ```
+  }];
+
+  let arguments = (ins
+    Arg:$derived_addr);
+
+  let results = (outs Res:$base_addr);
+
+  // FIXME: we should not be printing `cir.ptr` below, that should come
+  // from the pointer type directly.
+  let assemblyFormat = [{
+    `(`
+    $derived_addr `:` `cir.ptr` type($derived_addr)
+    `)` `->` `cir.ptr` type($base_addr) attr-dict
+  }];
+
+  // FIXME: add verifier.
+  // Check whether both src/dst pointee's are compatible.
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+def FuncOp : CIR_Op<"func", [
+  AutomaticAllocationScope, CallableOpInterface, FunctionOpInterface,
+  IsolatedFromAbove, Symbol
+]> {
+  let summary = "Declare or define a function";
+  let description = [{
+
+    Similar to `mlir::FuncOp` built-in:
+    > Operations within the function cannot implicitly capture values defined
+    > outside of the function, i.e. Functions are `IsolatedFromAbove`. All
+    > external references must use function arguments or attributes that establish
+    > a symbolic connection (e.g. symbols referenced by name via a string
+    > attribute like SymbolRefAttr). An external function declaration (used when
+    > referring to a function declared in some other module) has no body. While
+    > the MLIR textual form provides a nice inline syntax for function arguments,
+    > they are internally represented as “block arguments” to the first block in
+    > the region.
+    >
+    > Only dialect attribute names may be specified in the attribute dictionaries
+    > for function arguments, results, or the function itself.
+
+    The function linkage information is specified by `linkage`, as defined by
+    the `GlobalLinkageKind` attribute.
+
+    A compiler builtin function must be marked as `builtin` for further
+    processing when lowering from CIR.
+
+    The `coroutine` keyword is used to mark a coroutine function, which
+    requires at least one `cir.await` operation to be used in its body.
+
+    The `lambda` keyword marks a C++ `operator()` that implements a lambda;
+    this allows call sites to make certain assumptions about the real nature
+    of the function when writing analyses. The verifier should, but does not
+    yet, act on this keyword.
+
+    The `no_proto` keyword is used to identify functions that were declared
+    without a prototype and, consequently, may contain calls with invalid
+    arguments and undefined behavior.
+
+    The `extra_attrs` attribute, an aggregate of function-specific attributes,
+    is mandatory and describes additional attributes that are not listed
+    above. Though mandatory, its printing can be omitted if it is empty.
+
+    The `global_ctor` attribute indicates that a function should execute before
+    the `main()` function, as specified by `__attribute__((constructor))`. An
+    execution priority can also be specified via `global_ctor(<prio>)`.
+    Similarly, for global destructors, both `global_dtor` and
+    `global_dtor(<prio>)` are available.
+
+    Example:
+
+    ```mlir
+    // External function declarations.
+    cir.func @abort()
+
+    // A function with internal linkage.
+    cir.func internal @count(%x: i64) -> (i64) {
+      cir.return %x : i64
+    }
+
+    // Linkage information
+    cir.func linkonce_odr @some_method(...)
+
+    // Builtin function
+    cir.func builtin @__builtin_coro_end(!cir.ptr, !cir.bool) -> !cir.bool
+
+    // Coroutine
+    cir.func coroutine @_Z10silly_taskv() -> !CoroTask {
+      ...
+      cir.await(...)
+      ...
+    }
+    ```
+  }];
+
+  let arguments = (ins SymbolNameAttr:$sym_name,
+                       TypeAttrOf<CIR_FuncType>:$function_type,
+                       UnitAttr:$builtin,
+                       UnitAttr:$coroutine,
+                       UnitAttr:$lambda,
+                       UnitAttr:$no_proto,
+                       DefaultValuedAttr<GlobalLinkageKind,
+                                         "GlobalLinkageKind::ExternalLinkage">:$linkage,
+                       ExtraFuncAttr:$extra_attrs,
+                       OptionalAttr<StrAttr>:$sym_visibility,
+                       OptionalAttr<DictArrayAttr>:$arg_attrs,
+                       OptionalAttr<DictArrayAttr>:$res_attrs,
+                       OptionalAttr<FlatSymbolRefAttr>:$aliasee,
+                       OptionalAttr<GlobalCtorAttr>:$global_ctor,
+                       OptionalAttr<GlobalDtorAttr>:$global_dtor,
+                       OptionalAttr:$ast);
+  let regions = (region AnyRegion:$body);
+  let skipDefaultBuilders = 1;
+
+  let builders = [OpBuilder<(ins
+    "StringRef":$name, "FuncType":$type,
+    CArg<"GlobalLinkageKind", "GlobalLinkageKind::ExternalLinkage">:$linkage,
+    CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs,
+    CArg<"ArrayRef<DictionaryAttr>", "{}">:$argAttrs)
+  >];
+
+  let extraClassDeclaration = [{
+    /// Returns the region on the current operation that is callable. This may
+    /// return null in the case of an external callable object, e.g. an external
+    /// function.
+    ::mlir::Region *getCallableRegion();
+
+    /// Returns the result types that the callable region produces when
+    /// executed.
+    ArrayRef<Type> getCallableResults() {
+      if (::llvm::isa<mlir::cir::VoidType>(getFunctionType().getReturnType()))
+        return {};
+      return getFunctionType().getReturnTypes();
+    }
+
+    /// Returns the argument attributes for all callable region arguments or
+    /// null if there are none.
+    ::mlir::ArrayAttr getCallableArgAttrs() {
+      return getArgAttrs().value_or(nullptr);
+    }
+
+    /// Returns the result attributes for all callable region results or null if
+    /// there are none.
+    ::mlir::ArrayAttr getCallableResAttrs() {
+      return getResAttrs().value_or(nullptr);
+    }
+
+    /// Returns the argument types of this function.
+    ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }
+
+    /// Returns the result types of this function.
+    ArrayRef<Type> getResultTypes() { return getFunctionType().getReturnTypes(); }
+
+    /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that
+    /// the 'type' attribute is present and checks if it holds a function type.
+    /// Ensures getType, getNumFuncArguments, and getNumFuncResults can be
+    /// called safely.
+    LogicalResult verifyType();
+
+    //===------------------------------------------------------------------===//
+    // SymbolOpInterface Methods
+    //===------------------------------------------------------------------===//
+
+    bool isDeclaration();
+
+    // FIXME: should be shared with GlobalOp extra declaration.
+    bool isDeclarationForLinker() {
+      if (mlir::cir::isAvailableExternallyLinkage(getLinkage()))
+        return true;
+
+      return isDeclaration();
+    }
+  }];
+
+  let hasCustomAssemblyFormat = 1;
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// CallOp
+//===----------------------------------------------------------------------===//
+
+class CIR_CallOp<string mnemonic, list<Trait> extra_traits = []> :
+    Op<CIR_Dialect, mnemonic,
+       !listconcat(extra_traits,
+                   [DeclareOpInterfaceMethods<CIRCallOpInterface>,
+                    DeclareOpInterfaceMethods<SymbolUserOpInterface>])> {
+  let extraClassDeclaration = [{
+    /// Get the argument operands to the called function.
+    OperandRange getArgOperands() {
+      return {arg_operand_begin(), arg_operand_end()};
+    }
+
+    MutableOperandRange getArgOperandsMutable() {
+      llvm_unreachable("NYI");
+    }
+
+    /// Return the callee of this operation
+    CallInterfaceCallable getCallableForCallee() {
+      return (*this)->getAttrOfType<SymbolRefAttr>("callee");
+    }
+
+    /// Set the callee for this operation.
+    void setCalleeFromCallable(::mlir::CallInterfaceCallable callee) {
+      if (auto calling =
+              (*this)->getAttrOfType<SymbolRefAttr>(getCalleeAttrName()))
+        (*this)->setAttr(getCalleeAttrName(), callee.get<SymbolRefAttr>());
+      setOperand(0, callee.get<mlir::Value>());
+    }
+
+    bool isIndirect() { return !getCallee(); }
+    mlir::Value getIndirectCall();
+  }];
+
+  let hasCustomAssemblyFormat = 1;
+  let skipDefaultBuilders = 1;
+  let hasVerifier = 0;
+
+  dag commonArgs = (ins
+    OptionalAttr<FlatSymbolRefAttr>:$callee,
+    Variadic<CIR_AnyType>:$arg_ops,
+    OptionalAttr:$ast
+  );
+}
+
+def CallOp : CIR_CallOp<"call"> {
+  let summary = "call operation";
+  let description = [{
+    Direct and indirect calls.
+
+    For direct calls, the `call` operation represents a direct call to a
+    function that is within the same symbol scope as the call. The operands
+    and result types of the call must match the specified function type.
+    The callee is encoded as a symbol reference attribute named "callee".
+
+    For indirect calls, the first operand is the call target.
+
+    Given the way indirect calls are encoded, avoid using `mlir::Operation`
+    methods to walk the operands for this operation; instead, use the methods
+    provided by `CIRCallOpInterface`.
+
+    Example:
+
+    ```mlir
+    // Direct call
+    %2 = cir.call @my_add(%0, %1) : (f32, f32) -> f32
+    ...
+    // Indirect call
+    %20 = cir.call %18(%17)
+    ```
+  }];
+
+  let arguments = commonArgs;
+  let results = (outs Variadic<CIR_AnyType>);
+
+  let builders = [
+    OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(operands);
+      $_state.addAttribute("callee", SymbolRefAttr::get(callee));
+      if (!callee.getFunctionType().isVoid())
+        $_state.addTypes(callee.getFunctionType().getReturnType());
+    }]>,
+    OpBuilder<(ins "Value":$ind_target,
+                   "FuncType":$fn_type,
+                   CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(ValueRange{ind_target});
+      $_state.addOperands(operands);
+      if (!fn_type.isVoid())
+        $_state.addTypes(fn_type.getReturnType());
+    }]>,
+    OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType,
+                   CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(operands);
+      $_state.addAttribute("callee", callee);
+      $_state.addTypes(resType);
+    }]>,
+    OpBuilder<(ins "SymbolRefAttr":$callee,
+                   CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(operands);
+      $_state.addAttribute("callee", callee);
+    }]>];
+}
+
+//===----------------------------------------------------------------------===//
+// TryCallOp
+//===----------------------------------------------------------------------===//
+
+def TryCallOp : CIR_CallOp<"try_call"> {
+  let summary = "try call operation";
+  let description = [{
+    Similar to `cir.call`; the direct and indirect properties are the same.
+    The difference lies in an extra exception object address operand: it is
+    encoded as the first operand (or the second, for indirect calls).
+
+    Similarly to `cir.call`, avoid using `mlir::Operation` methods to walk the
+    operands for this operation; instead, use the methods provided by
+    `CIRCallOpInterface`.
+
+    Example:
+
+    ```mlir
+    cir.try {
+      %0 = cir.alloca !cir.ptr<!cir.eh_info>, cir.ptr <!cir.ptr<!cir.eh_info>>
+      ...
+      %r = cir.try_call %exception(%0) @division(%1, %2)
+    } ...
+    ```
+  }];
+
+  let arguments = !con((ins
+    ExceptionInfoPtrPtr:$exceptionInfo
+  ), commonArgs);
+
+  let results = (outs Variadic<CIR_AnyType>);
+
+  let builders = [
+    OpBuilder<(ins "FuncOp":$callee, "mlir::Value":$exception,
+                   CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(ValueRange{exception});
+      $_state.addOperands(operands);
+      $_state.addAttribute("callee", SymbolRefAttr::get(callee));
+      if (!callee.getFunctionType().isVoid())
+        $_state.addTypes(callee.getFunctionType().getReturnType());
+    }]>,
+    OpBuilder<(ins "Value":$ind_target, "mlir::Value":$exception,
+                   "FuncType":$fn_type,
+                   CArg<"ValueRange", "{}">:$operands), [{
+      $_state.addOperands(ValueRange{exception});
+      $_state.addOperands(ValueRange{ind_target});
+      $_state.addOperands(operands);
+      if (!fn_type.isVoid())
+        $_state.addTypes(fn_type.getReturnType());
+    }]>,
+    OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Value":$exception,
+                   "mlir::Type":$resType, CArg<"ValueRange", "{}">:$operands),
+    [{
+      $_state.addOperands(ValueRange{exception});
+      $_state.addOperands(operands);
+      $_state.addAttribute("callee", callee);
+      $_state.addTypes(resType);
+    }]>];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// AwaitOp
+//===----------------------------------------------------------------------===//
+
+def AK_Initial : I32EnumAttrCase<"init", 1>;
+def AK_User : I32EnumAttrCase<"user", 2>;
+def AK_Final : I32EnumAttrCase<"final", 3>;
+
+def AwaitKind : I32EnumAttr<
+    "AwaitKind",
+    "await kind",
+    [AK_Initial, AK_User, AK_Final]> {
+  let cppNamespace = "::mlir::cir";
+}
+
+def AwaitOp : CIR_Op<"await",
+    [DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+     RecursivelySpeculatable, NoRegionArguments]> {
+  let summary = "Wraps C++ co_await implicit logic";
+  let description = [{
+    Under the hood, the effect of using C++ `co_await expr` roughly
+    translates to:
+
+    ```c++
+    // co_await expr;
+
+    auto &&x = CommonExpr();
+    if (!x.await_ready()) {
+       ...
+       x.await_suspend(...);
+       ...
+    }
+    x.await_resume();
+    ```
+
+    `cir.await` represents this logic by using 3 regions:
+    - ready: covers veto power from x.await_ready()
+    - suspend: wraps actual x.await_suspend() logic
+    - resume: handles x.await_resume()
+
+    Breaking this up into regions allows individual scrutiny of conditions,
+    which might lead to folding some of them out. Lowerings coming out
+    of CIR, e.g. LLVM, should use the `suspend` region to track more
+    lower-level codegen (e.g. intrinsic emission for coro.save/coro.suspend).
+
+    There are also 3 flavors of `cir.await` available:
+    - `init`: compiler generated initial suspend via implicit `co_await`.
+    - `user`: also known as normal, representing user written co_await's.
+    - `final`: compiler generated final suspend via implicit `co_await`.
+
+    From the C++ snippet we get:
+
+    ```mlir
+    cir.scope {
+      ... // auto &&x = CommonExpr();
+      cir.await(user, ready : {
+        ... // x.await_ready()
+      }, suspend : {
+        ... // x.await_suspend()
+      }, resume : {
+        ... // x.await_resume()
+      })
+    }
+    ```
+
+    Note that resolution of the common expression is assumed to happen
+    as part of the enclosing await scope.
+  }];
+
+  let arguments = (ins AwaitKind:$kind);
+  let regions = (region SizedRegion<1>:$ready,
+                        SizedRegion<1>:$suspend,
+                        SizedRegion<1>:$resume);
+  let assemblyFormat = [{
+    `(` $kind `,`
+    `ready` `:` $ready `,`
+    `suspend` `:` $suspend `,`
+    `resume` `:` $resume `,`
+    `)`
+    attr-dict
+  }];
+
+  let skipDefaultBuilders = 1;
+  let builders = [
+    OpBuilder<(ins
+      "mlir::cir::AwaitKind":$kind,
+      CArg<"function_ref<void(OpBuilder &, Location)>",
+           "nullptr">:$readyBuilder,
+      CArg<"function_ref<void(OpBuilder &, Location)>",
+           "nullptr">:$suspendBuilder,
+      CArg<"function_ref<void(OpBuilder &, Location)>",
+           "nullptr">:$resumeBuilder
+    )>
+  ];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// TryOp
+//===----------------------------------------------------------------------===//
+
+def TryOp : CIR_Op<"try",
+    [DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+     RecursivelySpeculatable, AutomaticAllocationScope,
+     NoRegionArguments]> {
+  let summary = "";
+  let description = [{
+    ```mlir
+    TBD
+    ```
+
+    Note that variables declared inside a `try {}` in C++ will
+    have their allocas placed in the surrounding (parent) scope.
+  }];
+
+  let regions = (region SizedRegion<1>:$body);
+  let results = (outs ExceptionInfoPtr:$result);
+
+  let assemblyFormat = [{
+    $body `:` functional-type(operands, results) attr-dict
+  }];
+
+  // Everything already covered elsewhere.
+  let hasVerifier = 0;
+  let builders = [
+    OpBuilder<(ins
+      "function_ref<void(OpBuilder &, Location)>":$tryBuilder)>,
+  ];
+}
+
+//===----------------------------------------------------------------------===//
+// CatchOp
+//===----------------------------------------------------------------------===//
+
+// Represents the unwind region where unwind continues or
+// the program std::terminate's.
+def CatchUnwind : CIRUnitAttr<"CatchUnwind", "unwind"> {
+  let storageType = [{ CatchUnwind }];
+}
+
+// Represents the catch_all region.
+def CatchAllAttr : CIRUnitAttr<"CatchAll", "all"> {
+  let storageType = [{ CatchAllAttr }];
+}
+
+def CatchOp : CIR_Op<"catch",
+    [SameVariadicOperandSize,
+     DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+     RecursivelySpeculatable, NoRegionArguments]> {
+  let summary = "Catch operation";
+  let description = [{
+  }];
+
+  let arguments = (ins CIR_AnyType:$exception_info,
+                       OptionalAttr<ArrayAttr>:$catchers);
+  let regions = (region VariadicRegion<AnyRegion>:$regions);
+
+  // Already verified elsewhere
+  let hasVerifier = 0;
+
+  let skipDefaultBuilders = 1;
+  let builders = [
+    OpBuilder<(ins
+      "Value":$exception_info,
+      "function_ref<void(OpBuilder &, Location, OperationState &)>"
+      :$catchBuilder)>
+  ];
+
+  let assemblyFormat = [{
+    `(`
+    $exception_info `:` type($exception_info) `,`
+    custom<CatchOp>($regions, $catchers)
+    `)` attr-dict
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// CatchParamOp
+//===----------------------------------------------------------------------===//
+
+def CatchParamOp : CIR_Op<"catch_param"> {
+  let summary = "Materialize the catch clause formal parameter";
+  let description = [{
+    The `cir.catch_param` operation binds to the C/C++ catch clause param and
+    allows it to be materialized. This operation grabs the param by looking
+    into an exception info `!cir.eh_info` argument.
+
+    Example:
+    ```mlir
+    // TBD
+    ```
+  }];
+
+  let arguments = (ins ExceptionInfoPtr:$exception_info);
+  let results = (outs CIR_AnyType:$param);
+  let assemblyFormat = [{
+    `(` $exception_info `)` `->` qualified(type($param)) attr-dict
+  }];
+
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// CopyOp
+//===----------------------------------------------------------------------===//
+
+def CopyOp : CIR_Op<"copy", [SameTypeOperands]> {
+  let arguments = (ins Arg:$dst,
+                       Arg:$src);
+  let summary = "Copies contents from a CIR pointer to another";
+  let description = [{
+    Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory
+    pointed by `src` to the memory pointed by `dst`.
+
+    The number of bytes copied is inferred from the pointee type. Naturally,
+    the pointee type of both `src` and `dst` must match and must implement
+    the `DataLayoutTypeInterface`.
+
+    Examples:
+
+    ```mlir
+    // Copying contents from one struct to another:
+    cir.copy %0 to %1 : !cir.ptr<!struct_ty>
+    ```
+  }];
+
+  let assemblyFormat = "$src `to` $dst attr-dict `:` qualified(type($dst))";
+  let hasVerifier = 1;
+
+  let extraClassDeclaration = [{
+    /// Returns the pointer type being copied.
+    mlir::cir::PointerType getType() { return getSrc().getType(); }
+
+    /// Returns the number of bytes to be copied.
+    unsigned getLength() {
+      return DataLayout::closest(*this).getTypeSize(getType().getPointee());
+    }
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// MemCpyOp
+//===----------------------------------------------------------------------===//
+
+def MemCpyOp : CIR_Op<"libc.memcpy"> {
+  let arguments = (ins Arg:$dst,
+                       Arg:$src,
+                       PrimitiveInt:$len);
+  let summary = "Equivalent to libc's `memcpy`";
+  let description = [{
+    Given two CIR pointers, `src` and `dst`, `cir.libc.memcpy` will copy `len`
+    bytes from the memory pointed by `src` to the memory pointed by `dst`.
+
+    While `cir.copy` is meant to be used for implicit copies in the code where
+    the length of the copy is known, `cir.libc.memcpy` copies only from and to
+    void pointers, requiring the copy length to be passed as an argument.
+
+    Examples:
+
+    ```mlir
+    // Copying 2 bytes from one array to a struct:
+    %2 = cir.const(#cir.int<2> : !u32i) : !u32i
+    cir.libc.memcpy %2 bytes from %arr to %struct : !cir.ptr<!void> -> !cir.ptr<!void>
+    ```
+  }];
+
+  let assemblyFormat = [{
+    $len `bytes` `from` $src `to` $dst attr-dict
+    `:` type($len) `` `,` qualified(type($src)) `->` qualified(type($dst))
+  }];
+  let hasVerifier = 1;
+
+  let extraClassDeclaration = [{
+    /// Returns the data source pointer type.
+    mlir::cir::PointerType getSrcTy() { return getSrc().getType(); }
+
+    /// Returns the data destination pointer type.
+    mlir::cir::PointerType getDstTy() { return getDst().getType(); }
+
+    /// Returns the byte length type.
+    mlir::cir::IntType getLenTy() { return getLen().getType(); }
+  }];
+}
+
+//===----------------------------------------------------------------------===//
+// MemChrOp
+//===----------------------------------------------------------------------===//
+
+def MemChrOp : CIR_Op<"libc.memchr"> {
+  // TODO: instead of using UInt64 for len, we could make it constrained on
+  // size_t (64 or 32) and have a builder that does the right job.
+  let arguments = (ins Arg:$src,
+                       SInt32:$pattern,
+                       UInt64:$len);
+  let summary = "libc's `memchr`";
+  let results = (outs Res:$result);
+
+  let description = [{
+    Search for `pattern` in the data range from `src` to `src` + `len`. `len`
+    provides a bound to the search in `src`. `result` is a pointer to the
+    found `pattern` or a null pointer.
+
+    Examples:
+
+    ```mlir
+    %p = cir.libc.memchr(%src, %pattern, %len) -> !cir.ptr<!void>
+    ```
+  }];
+
+  let assemblyFormat = [{
+    `(`
+      $src `,` $pattern `,` $len `)` attr-dict
+  }];
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// StdFindOp
+//===----------------------------------------------------------------------===//
+
+def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> {
+  let arguments = (ins FlatSymbolRefAttr:$original_fn,
+                       CIR_AnyType:$first,
+                       CIR_AnyType:$last,
+                       CIR_AnyType:$pattern);
+  let summary = "std::find()";
+  let results = (outs CIR_AnyType:$result);
+
+  let description = [{
+    Search for `pattern` in the data range from `first` to `last`. This
+    currently maps to only one form of `std::find`. The `original_fn`
+    attribute tracks the mangled name that can be used when lowering to a
+    `cir.call`.
+
+    Example:
+
+    ```mlir
+    ...
+    %result = cir.std.find(@original_fn,
+                           %first : !T, %last : !T, %pattern : !P) -> !T
+    ```
+  }];
+
+  let assemblyFormat = [{
+    `(`
+    $original_fn
+    `,` $first `:` type($first)
+    `,` $last `:` type($last)
+    `,` $pattern `:` type($pattern)
+    `)` `->` type($result) attr-dict
+  }];
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// IterBegin/End
+//===----------------------------------------------------------------------===//
+
+def IterBeginOp : CIR_Op<"iterator_begin"> {
+  let arguments = (ins FlatSymbolRefAttr:$original_fn, CIR_AnyType:$container);
+  let summary = "Returns an iterator to the first element of a container";
+  let results = (outs CIR_AnyType:$result);
+  let assemblyFormat = [{
+    `(`
+    $original_fn `,` $container `:` type($container)
+    `)` `->` type($result) attr-dict
+  }];
+  let hasVerifier = 0;
+}
+
+def IterEndOp : CIR_Op<"iterator_end"> {
+  let arguments = (ins FlatSymbolRefAttr:$original_fn, CIR_AnyType:$container);
+  let summary = "Returns an iterator to the element following the last element"
+                " of a container";
+  let results = (outs CIR_AnyType:$result);
+  let assemblyFormat = [{
+    `(`
+    $original_fn `,` $container `:` type($container)
+    `)` `->` type($result) attr-dict
+  }];
+  let hasVerifier = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Ops
+//===----------------------------------------------------------------------===//
+
+class UnaryFPToFPBuiltinOp<string mnemonic>
+    : CIR_Op<mnemonic, [Pure, SameOperandsAndResultType]> {
+  let arguments = (ins CIR_AnyFloat:$src);
+  let results = (outs CIR_AnyFloat:$result);
+  let summary = "libc builtin equivalent ignoring "
+                "floating point exceptions and errno";
+  let assemblyFormat = "$src `:` type($src) attr-dict";
+}
+
+def CeilOp : UnaryFPToFPBuiltinOp<"ceil">;
+def CosOp : UnaryFPToFPBuiltinOp<"cos">;
+def ExpOp : UnaryFPToFPBuiltinOp<"exp">;
+def Exp2Op : UnaryFPToFPBuiltinOp<"exp2">;
+def FloorOp : UnaryFPToFPBuiltinOp<"floor">;
+def FAbsOp : UnaryFPToFPBuiltinOp<"fabs">;
+def LogOp : UnaryFPToFPBuiltinOp<"log">;
+def Log10Op : UnaryFPToFPBuiltinOp<"log10">;
+def Log2Op : UnaryFPToFPBuiltinOp<"log2">;
+def NearbyintOp : UnaryFPToFPBuiltinOp<"nearbyint">;
+def RintOp : UnaryFPToFPBuiltinOp<"rint">;
+def RoundOp : UnaryFPToFPBuiltinOp<"round">;
+def SinOp : UnaryFPToFPBuiltinOp<"sin">;
+def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt">;
+def TruncOp : UnaryFPToFPBuiltinOp<"trunc">;
+
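+// Usage sketch for the unary FP builtins above (the `!cir.double` operand is
+// an assumption for illustration); each op prints as
+// `cir.<mnemonic> %src : type`, e.g.:
+//
+//   %1 = cir.sqrt %0 : !cir.double
+//   %2 = cir.fabs %1 : !cir.double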
+//===----------------------------------------------------------------------===// +// Branch Probability Operations +//===----------------------------------------------------------------------===// + +def ExpectOp : CIR_Op<"expect", + [Pure, AllTypesMatch<["result", "val", "expected"]>]> { + let summary = + "Compute whether an expression is likely to evaluate to a specified value"; + let description = [{ + Provides __builtin_expect functionality in Clang IR. + + If $prob is not specified, the behavior is the same as __builtin_expect. + If it is specified, the behavior is the same as + __builtin_expect_with_probability, with probability = $prob. + }]; + + let arguments = (ins PrimitiveInt:$val, + PrimitiveInt:$expected, + OptionalAttr:$prob); + let results = (outs PrimitiveInt:$result); + let assemblyFormat = [{ + `(` $val`,` $expected (`,` $prob^)? `)` `:` type($val) attr-dict + }]; +} + +//===----------------------------------------------------------------------===// +// Variadic Operations +//===----------------------------------------------------------------------===// + +def VAStartOp : CIR_Op<"va.start">, Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Starts a variable argument list"; + let assemblyFormat = "$arg_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VAEndOp : CIR_Op<"va.end">, Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Ends a variable argument list"; + let assemblyFormat = "$arg_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VACopyOp : CIR_Op<"va.copy">, + Arguments<(ins CIR_PointerType:$dst_list, + CIR_PointerType:$src_list)> { + let summary = "Copies a variable argument list"; + let assemblyFormat = "$src_list `to` $dst_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VAArgOp : CIR_Op<"va.arg">, + Results<(outs CIR_AnyType:$result)>, + Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Fetches next variadic element as a given type"; + let assemblyFormat = "$arg_list attr-dict `:` functional-type(operands, $result)"; + let hasVerifier = 0; +} + +//===----------------------------------------------------------------------===// +// AllocException +//===----------------------------------------------------------------------===// + +def AllocException : CIR_Op<"alloc_exception", [ + AllocaTypesMatchWith<"'allocType' matches pointee type of 'addr'", + "addr", "allocType", + "$_self.cast().getPointee()">]> { + let summary = "Allocates memory for an exception object"; + let description = [{ + Implements a slightly higher level __cxa_allocate_exception: + + `void *__cxa_allocate_exception(size_t thrown_size);` + + If the operation fails, the program terminates rather than throwing. + + Example: + + ```mlir + // if (b == 0) { + // ... + // throw "..."; + cir.if %10 { + %11 = cir.alloc_exception(!cir.ptr) -> > + ... // store exception content into %11 + cir.throw(%11 : !cir.ptr>, ... + ``` + }]; + + let arguments = (ins TypeAttr:$allocType); + let results = (outs Res]>:$addr); + + let assemblyFormat = [{ + `(` $allocType `)` `->` type($addr) attr-dict + }]; + + // Constraints verified elsewhere.
+ let hasVerifier = 0; +} + +//===----------------------------------------------------------------------===// +// ThrowOp +//===----------------------------------------------------------------------===// + +def ThrowOp : CIR_Op<"throw", [ + ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", + "DoWhileOp", "WhileOp", "ForOp"]>, + Terminator]> { + let summary = "(Re)Throws an exception"; + let description = [{ + Very similar to __cxa_throw: + + ``` + void __cxa_throw(void *thrown_exception, std::type_info *tinfo, + void (*dest) (void *)); + ``` + + The absence of arguments for `cir.throw` means it rethrows. + + For the non-rethrow version, it must have at least two operands: a pointer + to the exception object (likely allocated via `cir.alloc_exception`), the + RTTI information, and finally an optional dtor, which might run as part of + this operation. + + ```mlir + // if (b == 0) + // throw "Division by zero condition!"; + cir.if %10 { + %11 = cir.alloc_exception(!cir.ptr) -> > + ... + cir.store %13, %11 : // Store string addr for "Division by zero condition!" + cir.throw(%11 : !cir.ptr>, @"typeinfo for char const*") + ``` + }]; + + let arguments = (ins Optional:$exception_ptr, + OptionalAttr:$type_info, + OptionalAttr:$dtor); + + let assemblyFormat = [{ + `(` + ($exception_ptr^ `:` type($exception_ptr))? + (`,` $type_info^)? + (`,` $dtor^)? + `)` attr-dict + }]; + + let extraClassDeclaration = [{ + bool rethrows() { return getNumOperands() == 0; } + }]; + + let hasVerifier = 1; +} + +def StackSaveOp : CIR_Op<"stack_save"> { + let summary = "remembers the current state of the function stack"; + let description = [{ + Remembers the current state of the function stack. Returns a pointer + that can later be passed to cir.stack_restore. + Useful for implementing language features like variable length arrays. + + ```mlir + %0 = cir.stack_save : + ``` + + }]; + + let results = (outs CIR_PointerType:$result); + let assemblyFormat = "attr-dict `:` qualified(type($result))"; +} + +def StackRestoreOp : CIR_Op<"stack_restore"> { + let summary = "restores the state of the function stack"; + let description = [{ + Restores the state of the function stack to the state it was + in when the corresponding cir.stack_save executed. + Useful for implementing language features like variable length arrays. + + ```mlir + %0 = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} + %1 = cir.stack_save : + cir.store %1, %0 : !cir.ptr, cir.ptr > + %2 = cir.load %0 : cir.ptr >, !cir.ptr + cir.stack_restore %2 : !cir.ptr + ``` + }]; + + let arguments = (ins CIR_PointerType:$ptr); + let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))"; +} + +def AsmATT : I32EnumAttrCase<"x86_att", 0>; +def AsmIntel : I32EnumAttrCase<"x86_intel", 1>; + +def AsmFlavor : I32EnumAttr< + "AsmFlavor", + "ATT or Intel", + [AsmATT, AsmIntel]> { + let cppNamespace = "::mlir::cir"; +} + +def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { + let description = [{ + The `cir.asm` operation represents C/C++ inline assembly. + + CIR constraint strings mostly follow the rules established for C-level + assembler constraints, with several differences caused by clang::AsmStmt + processing. + + Thus, numbers that appear in the constraint string may also refer to: + - the output variable index referenced by the input operands. + - the index of an early-clobber operand + + Operand attributes are a storage where each element corresponds to the + operand with the same index.
The first index relates to the operation result (if any). + Note that the operands themselves are stored as a VariadicOfVariadic in the + following order: output, input, and then in/out operands. + + Note that when several output operands are present, the result type may be + represented as an anonymous struct type. + + Example: + ```C++ + __asm__("foo" : : : ); + __asm__("bar $42 %[val]" : [val] "=r" (x), "+&r"(x)); + __asm__("baz $42 %[val]" : [val] "=r" (x), "+&r"(x) : "[val]"(y)); + ``` + + ```mlir + !ty_22anon2E022 = !cir.struct, !cir.int}> + !ty_22anon2E122 = !cir.struct, !cir.int}> + ... + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + %1 = cir.alloca !s32i, cir.ptr , ["y", init] + ... + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %1 : cir.ptr , !s32i + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"foo" "~{dirflag},~{fpsr},~{flags}"}) side_effects + + cir.asm(x86_att, + out = [], + in = [], + in_out = [%2 : !s32i], + {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) -> !ty_22anon2E022 + + cir.asm(x86_att, + out = [], + in = [%3 : !s32i], + in_out = [%2 : !s32i], + {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"}) -> !ty_22anon2E122 + ``` + }]; + + let results = (outs Optional:$res); + + let arguments = ( + ins VariadicOfVariadic:$operands, + StrAttr:$asm_string, + StrAttr:$constraints, + UnitAttr:$side_effects, + AsmFlavor:$asm_flavor, + ArrayAttr:$operand_attrs, + DenseI32ArrayAttr:$operands_segments + ); + + let builders = [OpBuilder<(ins + "ArrayRef":$operands, + "StringRef":$asm_string, + "StringRef":$constraints, + "bool":$side_effects, + "AsmFlavor":$asm_flavor, + "ArrayRef":$operand_attrs + )> + ]; + + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// UnreachableOp +//===----------------------------------------------------------------------===// + +def UnreachableOp : CIR_Op<"unreachable", [Terminator]> { + let summary = "invoke immediate undefined behavior"; + let description = [{ + If the program control flow reaches a `cir.unreachable` operation, the + program exhibits undefined behavior immediately. This operation is useful + in cases where the unreachability of a program point needs to be explicitly + marked. + }]; + + let assemblyFormat = "attr-dict"; +} + +//===----------------------------------------------------------------------===// +// TrapOp +//===----------------------------------------------------------------------===// + +def TrapOp : CIR_Op<"trap", [Terminator]> { + let summary = "Exit the program abnormally"; + let description = [{ + The cir.trap operation causes the program to exit abnormally. + Implementations may use different mechanisms for this operation: for + example, one implementation may lower it to a call to abort, while another + may lower it to an illegal instruction. + }]; + + let assemblyFormat = "attr-dict"; +} + +//===----------------------------------------------------------------------===// +// PrefetchOp +//===----------------------------------------------------------------------===// + +def PrefetchOp : CIR_Op<"prefetch"> { + let summary = "prefetch operation"; + let description = [{ + The `cir.prefetch` op prefetches data from the memory address. + + ```mlir + cir.prefetch(%0 : !cir.ptr) locality(1) write + ``` + + This operation has two attributes: + 1. The $locality is a temporal locality specifier, + ranging from (0) - no locality - to (3) - extremely local, keep in cache. + 2.
The $isWrite attribute determines whether the prefetch is prepared + for a 'read' or a 'write'. + If $isWrite is not specified, the prefetch is prepared for a 'read'. + }]; + + let arguments = ( + ins VoidPtr:$addr, + ConfinedAttr, + IntMaxValue<3>]>:$locality, + UnitAttr:$isWrite); + + let assemblyFormat = [{ + `(` $addr `:` qualified(type($addr)) `)` + `locality``(` $locality `)` + (`write` $isWrite^) : (`read`)? + attr-dict + }]; +} + +//===----------------------------------------------------------------------===// +// ArrayCtor & ArrayDtor +//===----------------------------------------------------------------------===// + +class CIR_ArrayInitDestroy : CIR_Op { + let arguments = (ins Arg:$addr); + let regions = (region SizedRegion<1>:$body); + let assemblyFormat = [{ + `(` $addr `:` qualified(type($addr)) `)` $body attr-dict + }]; + + let builders = [ + OpBuilder<(ins "mlir::Value":$addr, + "function_ref":$regionBuilder), [{ + assert(regionBuilder && "builder callback expected"); + OpBuilder::InsertionGuard guard($_builder); + Region *r = $_state.addRegion(); + $_state.addOperands(ValueRange{addr}); + $_builder.createBlock(r); + regionBuilder($_builder, $_state.location); + }]> + ]; +} + +def ArrayCtor : CIR_ArrayInitDestroy<"array.ctor"> { + let summary = "Initialize array elements with C++ constructors"; + let description = [{ + Initialize each array element using the same C++ constructor. This + operation has one region with a single block. The block has an + incoming argument for the current array index to initialize. + }]; +} + +def ArrayDtor : CIR_ArrayInitDestroy<"array.dtor"> { + let summary = "Destroy array elements with C++ dtors"; + let description = [{ + Destroy each array element using the same C++ destructor. This + operation has one region with a single block. The block has an + incoming argument for the current array index to destroy. + }]; +} + +//===----------------------------------------------------------------------===// +// IsConstantOp +//===----------------------------------------------------------------------===// + +def IsConstantOp : CIR_Op<"is_constant", [Pure]> { + let description = [{ + Returns `true` if the argument is known to be a compile-time constant, + and `false` otherwise. + }]; + let arguments = (ins CIR_AnyType:$val); + let results = (outs CIR_BoolType:$result); + + let assemblyFormat = [{ + `(` $val `:` type($val) `)` `:` type($result) attr-dict + }]; +} + + +def SwitchFlatOp : CIR_Op<"switch.flat", [AttrSizedOperandSegments, Terminator]> { + + let description = [{ + The `cir.switch.flat` operation is a region-less, simplified version of + `cir.switch`. Its representation is closer to the LLVM IR dialect than to + the C/C++ language feature. + }]; + + let arguments = (ins + CIR_IntType:$condition, + Variadic:$defaultOperands, + VariadicOfVariadic:$caseOperands, + ArrayAttr:$case_values, + DenseI32ArrayAttr:$case_operand_segments + ); + + let successors = (successor + AnySuccessor:$defaultDestination, + VariadicSuccessor:$caseDestinations + ); + + let assemblyFormat = [{ + $condition `:` type($condition) `,` + $defaultDestination (`(` $defaultOperands^ `:` type($defaultOperands) `)`)?
+ custom(ref(type($condition)), $case_values, $caseDestinations, + $caseOperands, type($caseOperands)) + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "Value":$condition, + "Block *":$defaultDestination, + "ValueRange":$defaultOperands, + CArg<"ArrayRef", "{}">:$caseValues, + CArg<"BlockRange", "{}">:$caseDestinations, + CArg<"ArrayRef", "{}">:$caseOperands)> + ]; +} + +//===----------------------------------------------------------------------===// +// Atomic operations +//===----------------------------------------------------------------------===// + +// Binary opcodes for atomic fetch. +def Atomic_Add : I32EnumAttrCase<"Add", 0, "add">; +def Atomic_Sub : I32EnumAttrCase<"Sub", 1, "sub">; +def Atomic_And : I32EnumAttrCase<"And", 2, "and">; +def Atomic_Xor : I32EnumAttrCase<"Xor", 3, "xor">; +def Atomic_Or : I32EnumAttrCase<"Or", 4, "or">; +def Atomic_Nand : I32EnumAttrCase<"Nand", 5, "nand">; +def Atomic_Max : I32EnumAttrCase<"Max", 6, "max">; +def Atomic_Min : I32EnumAttrCase<"Min", 7, "min">; + +def AtomicFetchKind : I32EnumAttr< + "AtomicFetchKind", + "Binary opcode for atomic fetch operations", + [Atomic_Add, Atomic_Sub, Atomic_And, Atomic_Xor, Atomic_Or, Atomic_Nand, + Atomic_Max, Atomic_Min]> { + let cppNamespace = "::mlir::cir"; +} + +def AtomicFetch : CIR_Op<"atomic.fetch", + [AllTypesMatch<["result", "val"]>]> { + let summary = "Atomic fetch with unary and binary operations"; + let description = [{ + Represents the `__atomic_<binop>_fetch` and `__atomic_fetch_<binop>` + builtins, where `binop` is one of the binary opcodes: `add`, `sub`, `and`, + `xor`, `or`, `nand`, `max` and `min`. + + `ptr` is an integer or fp pointer, followed by `val`, which must be + an integer or fp value (fp is only supported for `add` and `sub`). The + operation can also be marked `volatile`. + + If `fetch_first` is present, the operation works like + `__atomic_fetch_<binop>` and returns the value that had + previously been in *ptr, otherwise it returns the final result + of the computation (`__atomic_<binop>_fetch`). + + Example: + %res = cir.atomic.fetch(add, %ptr : !cir.ptr, + %val : !s32i, seq_cst) : !s32i + }]; + let results = (outs CIR_AnyIntOrFloat:$result); + let arguments = (ins Arg:$ptr, + CIR_AnyIntOrFloat:$val, + AtomicFetchKind:$binop, + Arg:$mem_order, + UnitAttr:$is_volatile, + UnitAttr:$fetch_first); + + let assemblyFormat = [{ + `(` + $binop `,` + $ptr `:` type($ptr) `,` + $val `:` type($val) `,` + $mem_order `)` + (`volatile` $is_volatile^)? + (`fetch_first` $fetch_first^)? + `:` type($result) attr-dict + }]; + + let hasVerifier = 1; +} + +def AtomicXchg : CIR_Op<"atomic.xchg", [AllTypesMatch<["result", "val"]>]> { + let summary = "Atomic exchange"; + let description = [{ + Atomic exchange operations. Implements C/C++ builtins such as + `__atomic_exchange` and `__atomic_exchange_n`. + + Example: + %res = cir.atomic.xchg(%ptr : !cir.ptr, + %val : !u64i, seq_cst) : !u64i + }]; + let results = (outs CIR_AnyType:$result); + let arguments = (ins Arg:$ptr, + CIR_AnyType:$val, + Arg:$mem_order, + UnitAttr:$is_volatile); + + let assemblyFormat = [{ + `(` + $ptr `:` qualified(type($ptr)) `,` + $val `:` type($val) `,` + $mem_order `)` + (`volatile` $is_volatile^)? + `:` type($result) attr-dict + }]; + + let hasVerifier = 0; +} + +def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", + [AllTypesMatch<["old", "expected", "desired"]>]> { + let summary = "Atomic compare exchange"; + let description = [{ + C/C++ atomic compare and exchange operation.
Implements builtins like + `__atomic_compare_exchange_n` and `__atomic_compare_exchange`. + + Example: + %old, %cmp = cir.atomic.cmp_xchg(%ptr : !cir.ptr, + %expected : !u64i, + %desired : !u64i, + success = seq_cst, + failure = seq_cst) weak + : (!u64i, !cir.bool) + + }]; + let results = (outs CIR_AnyType:$old, CIR_BoolType:$cmp); + let arguments = (ins Arg:$ptr, + CIR_AnyType:$expected, + CIR_AnyType:$desired, + Arg:$succ_order, + Arg:$fail_order, + UnitAttr:$weak, + UnitAttr:$is_volatile); + + let assemblyFormat = [{ + `(` + $ptr `:` qualified(type($ptr)) `,` + $expected `:` type($expected) `,` + $desired `:` type($desired) `,` + `success` `=` $succ_order `,` + `failure` `=` $fail_order + `)` + (`weak` $weak^)? + (`volatile` $is_volatile^)? + `:` `(` type($old) `,` type($cmp) `)` attr-dict + }]; + + let hasVerifier = 0; +} + +//===----------------------------------------------------------------------===// +// Operations Lowered Directly to LLVM IR +// +// These operations are hacks to get around missing features in LLVM's dialect. +// Use them sparingly and remove them once the features are added. +//===----------------------------------------------------------------------===// + +def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, + Results<(outs AnyType:$result)> { + let summary = "Zero-initializes a constant value of a given type"; + let description = [{ + This operation circumvents the lack of a zeroinitializer operation in the + LLVM dialect. It can zero-initialize any LLVM type. + }]; + let assemblyFormat = "attr-dict `:` type($result)"; + let hasVerifier = 0; +} #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h new file mode 100644 index 000000000000..06851947f24c --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h @@ -0,0 +1,133 @@ +//===- CIROpsEnums.h - Enums for the CIR dialect -----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the enums and enum helpers for the CIR dialect in MLIR.
+// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_CIROPSENUMS_H_ +#define MLIR_DIALECT_CIR_CIROPSENUMS_H_ + +#include "mlir/IR/BuiltinAttributes.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h.inc" + +namespace mlir { +namespace cir { + +static bool isExternalLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::ExternalLinkage; +} +static bool isAvailableExternallyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::AvailableExternallyLinkage; +} +static bool isLinkOnceAnyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::LinkOnceAnyLinkage; +} +static bool isLinkOnceODRLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::LinkOnceODRLinkage; +} +static bool isLinkOnceLinkage(GlobalLinkageKind Linkage) { + return isLinkOnceAnyLinkage(Linkage) || isLinkOnceODRLinkage(Linkage); +} +static bool isWeakAnyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakAnyLinkage; +} +static bool isWeakODRLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakODRLinkage; +} +static bool isWeakLinkage(GlobalLinkageKind Linkage) { + return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage); +} +static bool isInternalLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::InternalLinkage; +} +static bool isPrivateLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::PrivateLinkage; +} +static bool isLocalLinkage(GlobalLinkageKind Linkage) { + return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage); +} +static bool isExternalWeakLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::ExternalWeakLinkage; +} +LLVM_ATTRIBUTE_UNUSED static bool isCommonLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::CommonLinkage; +} +LLVM_ATTRIBUTE_UNUSED static bool +isValidDeclarationLinkage(GlobalLinkageKind Linkage) { + return isExternalWeakLinkage(Linkage) || isExternalLinkage(Linkage); +} + +/// Whether the definition of this global may be replaced by something +/// non-equivalent at link time. For example, if a function has weak linkage +/// then the code defining it may be replaced by different code. +LLVM_ATTRIBUTE_UNUSED static bool +isInterposableLinkage(GlobalLinkageKind Linkage) { + switch (Linkage) { + case GlobalLinkageKind::WeakAnyLinkage: + case GlobalLinkageKind::LinkOnceAnyLinkage: + case GlobalLinkageKind::CommonLinkage: + case GlobalLinkageKind::ExternalWeakLinkage: + return true; + + case GlobalLinkageKind::AvailableExternallyLinkage: + case GlobalLinkageKind::LinkOnceODRLinkage: + case GlobalLinkageKind::WeakODRLinkage: + // The above three cannot be overridden but can be de-refined. + + case GlobalLinkageKind::ExternalLinkage: + case GlobalLinkageKind::InternalLinkage: + case GlobalLinkageKind::PrivateLinkage: + return false; + } + llvm_unreachable("Fully covered switch above!"); +} + +/// Whether the definition of this global may be discarded if it is not used +/// in its compilation unit. +LLVM_ATTRIBUTE_UNUSED static bool +isDiscardableIfUnused(GlobalLinkageKind Linkage) { + return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) || + isAvailableExternallyLinkage(Linkage); +} + +/// Whether the definition of this global may be replaced at link time. 
NB: +/// Using this method outside of the code generators is almost always a +/// mistake: when working at the IR level use isInterposable instead as it +/// knows about ODR semantics. +LLVM_ATTRIBUTE_UNUSED static bool isWeakForLinker(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakAnyLinkage || + Linkage == GlobalLinkageKind::WeakODRLinkage || + Linkage == GlobalLinkageKind::LinkOnceAnyLinkage || + Linkage == GlobalLinkageKind::LinkOnceODRLinkage || + Linkage == GlobalLinkageKind::CommonLinkage || + Linkage == GlobalLinkageKind::ExternalWeakLinkage; +} + +LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind L) { + return isExternalLinkage(L) || isLocalLinkage(L) || isWeakLinkage(L) || + isLinkOnceLinkage(L); +} + +bool operator<(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator>(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator<=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator>=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; + +// Check that an integral value, which isn't known to fit within the enum's +// range, is a valid AtomicOrderingCABI. +template <typename Int> inline bool isValidCIRAtomicOrderingCABI(Int I) { + return (Int)mlir::cir::MemOrder::Relaxed <= I && + I <= (Int)mlir::cir::MemOrder::SequentiallyConsistent; +} + +} // namespace cir +} // namespace mlir + +#endif // MLIR_DIALECT_CIR_CIROPSENUMS_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h new file mode 100644 index 000000000000..0096e79f72e2 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -0,0 +1,189 @@ +//===- CIRTypes.h - MLIR CIR Types ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the types in the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_IR_CIRTYPES_H_ +#define MLIR_DIALECT_CIR_IR_CIRTYPES_H_ + +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Types.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" + +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + +//===----------------------------------------------------------------------===// +// CIR StructType +// +// The base type for all RecordDecls. +//===----------------------------------------------------------------------===// + +namespace mlir { +namespace cir { + +namespace detail { +struct StructTypeStorage; +} // namespace detail + +/// Each unique clang::RecordDecl is mapped to a `cir.struct` and any object in +/// C/C++ that has a struct type will have a `cir.struct` in CIR. +/// +/// There are three possible formats for this type: +/// +/// - Identified and complete structs: unique name and a known body. +/// - Identified and incomplete structs: unique name and unknown body. +/// - Anonymous structs: no name and a known body. +/// +/// Identified structs are uniqued by their name, and anonymous structs are +/// uniqued by their body. This means that two anonymous structs with the same +/// body will be the same type, and two identified structs with the same name +/// will be the same type.
Attempting to build a struct with an existing name, +/// but a different body will result in an error. +/// +/// A few examples: +/// +/// ```mlir +/// !complete = !cir.struct}> +/// !incomplete = !cir.struct +/// !anonymous = !cir.struct}> +/// ``` +/// +/// Incomplete structs are mutable, meaning they can later be completed with a +/// body, automatically updating in place every type in the code that uses the +/// incomplete struct. Mutability allows recursive types to be represented, +/// meaning the struct can have members that refer to itself. This is useful for +/// representing recursive records and is implemented through a special syntax. +/// In the example below, the `Node` struct has a member that is a pointer to a +/// `Node` struct: +/// +/// ```mlir +/// !struct = !cir.struct>}> +/// ``` +class StructType + : public Type::TypeBase { + // FIXME(cir): migrate this type to Tablegen once mutable types are supported. +public: + using Base::Base; + using Base::getChecked; + using Base::verify; + + static constexpr StringLiteral name = "cir.struct"; + + enum RecordKind : uint32_t { Class, Union, Struct }; + + /// Create an identified and complete struct type. + static StructType get(MLIRContext *context, ArrayRef members, + StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + static StructType getChecked(function_ref emitError, + MLIRContext *context, ArrayRef members, + StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + + /// Create an identified and incomplete struct type. + static StructType get(MLIRContext *context, StringAttr name, RecordKind kind); + static StructType getChecked(function_ref emitError, + MLIRContext *context, StringAttr name, + RecordKind kind); + + /// Create an anonymous struct type (always complete). + static StructType get(MLIRContext *context, ArrayRef members, + bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + static StructType getChecked(function_ref emitError, + MLIRContext *context, ArrayRef members, + bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + + /// Validate the struct about to be constructed. + static LogicalResult verify(function_ref emitError, + ArrayRef members, StringAttr name, + bool incomplete, bool packed, + StructType::RecordKind kind, + ASTRecordDeclInterface ast); + + // Parse/print methods. + static constexpr StringLiteral getMnemonic() { return {"struct"}; } + static Type parse(AsmParser &odsParser); + void print(AsmPrinter &odsPrinter) const; + + // Accessors + ASTRecordDeclInterface getAst() const; + ArrayRef getMembers() const; + StringAttr getName() const; + StructType::RecordKind getKind() const; + bool getIncomplete() const; + bool getPacked() const; + void dropAst(); + + // Predicates + bool isClass() const { return getKind() == RecordKind::Class; }; + bool isStruct() const { return getKind() == RecordKind::Struct; }; + bool isUnion() const { return getKind() == RecordKind::Union; }; + bool isComplete() const { return !isIncomplete(); }; + bool isIncomplete() const; + + // Utilities + Type getLargestMember(const DataLayout &dataLayout) const; + size_t getNumElements() const { return getMembers().size(); }; + std::string getKindAsStr() { + switch (getKind()) { + case RecordKind::Class: + return "class"; + case RecordKind::Union: + return "union"; + case RecordKind::Struct: + return "struct"; + } + } + std::string getPrefixedName() { + return getKindAsStr() + "."
+ getName().getValue().str(); + } + + /// Complete the struct type by mutating its members and attributes. + void complete(ArrayRef members, bool packed, + ASTRecordDeclInterface ast = {}); + + /// DataLayoutTypeInterface methods. + llvm::TypeSize getTypeSizeInBits(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + uint64_t getABIAlignment(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + uint64_t getPreferredAlignment(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + uint64_t getElementOffset(const DataLayout &dataLayout, unsigned idx) const; + + bool isLayoutIdentical(const StructType &other); + + // Utilities for lazily computing and caching data layout info. +private: + // FIXME: currently opaque because there's a cycle if CIRTypes.types includes + // from CIRAttrs.h. The implementation operates in terms of StructLayoutAttr + // instead. + mutable mlir::Attribute layoutInfo; + bool isPadded(const DataLayout &dataLayout) const; + void computeSizeAndAlignment(const DataLayout &dataLayout) const; +}; + +} // namespace cir +} // namespace mlir + +//===----------------------------------------------------------------------===// +// CIR Dialect Tablegen'd Types +//===----------------------------------------------------------------------===// + +#define GET_TYPEDEF_CLASSES +#include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" + +#endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td new file mode 100644 index 000000000000..82a12963b425 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -0,0 +1,475 @@ +//===- CIRTypes.td - CIR dialect types ---------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect types. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_TYPES +#define MLIR_CIR_DIALECT_CIR_TYPES + +include "clang/CIR/Dialect/IR/CIRDialect.td" +include "clang/CIR/Interfaces/ASTAttrInterfaces.td" +include "clang/CIR/Interfaces/CIRFPTypeInterface.td" +include "mlir/Interfaces/DataLayoutInterfaces.td" +include "mlir/IR/AttrTypeBase.td" +include "mlir/IR/EnumAttr.td" + +//===----------------------------------------------------------------------===// +// CIR Types +//===----------------------------------------------------------------------===// + +class CIR_Type traits = [], + string baseCppClass = "::mlir::Type"> + : TypeDef { + let mnemonic = typeMnemonic; +} + +//===----------------------------------------------------------------------===// +// IntType +//===----------------------------------------------------------------------===// + +def CIR_IntType : CIR_Type<"Int", "int", + [DeclareTypeInterfaceMethods]> { + let summary = "Integer type with arbitrary precision up to a fixed limit"; + let description = [{ + CIR type that represents C/C++ primitive integer types. + Said types are: `char`, `short`, `int`, `long`, `long long`, and their + unsigned variations.
+ }]; + let parameters = (ins "unsigned":$width, "bool":$isSigned); + let hasCustomAssemblyFormat = 1; + let extraClassDeclaration = [{ + /// Return true if this is a signed integer type. + bool isSigned() const { return getIsSigned(); } + /// Return true if this is an unsigned integer type. + bool isUnsigned() const { return !getIsSigned(); } + /// Return the type alias. + std::string getAlias() const { + return (isSigned() ? 's' : 'u') + std::to_string(getWidth()) + 'i'; + }; + /// Return true if this is a primitive integer type (i.e. signed or unsigned + /// integer types whose bit width is 8, 16, 32, or 64). + bool isPrimitive() const { + return isValidPrimitiveIntBitwidth(getWidth()); + } + + /// Returns the minimum bitwidth of cir::IntType + static unsigned minBitwidth() { return 1; } + /// Returns the maximum bitwidth of cir::IntType + static unsigned maxBitwidth() { return 64; } + + /// Returns true if a cir::IntType representing a primitive integer type + /// can be constructed from the provided bitwidth. + static bool isValidPrimitiveIntBitwidth(unsigned width) { + return width == 8 || width == 16 || width == 32 || width == 64; + } + }]; + let genVerifyDecl = 1; +} + +// Constraints + +// Unsigned integer type of a specific width. +class UInt + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isUnsigned()">, + CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + ]>, width # "-bit unsigned integer", "::mlir::cir::IntType">, + BuildableType< + "mlir::cir::IntType::get($_builder.getContext(), " + # width # ", /*isSigned=*/false)"> { + int bitwidth = width; +} + +def UInt1 : UInt<1>; +def UInt8 : UInt<8>; +def UInt16 : UInt<16>; +def UInt32 : UInt<32>; +def UInt64 : UInt<64>; + +// Signed integer type of a specific width. +class SInt + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isSigned()">, + CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + ]>, width # "-bit signed integer", "::mlir::cir::IntType">, + BuildableType< + "mlir::cir::IntType::get($_builder.getContext(), " + # width # ", /*isSigned=*/true)"> { + int bitwidth = width; +} + +def SInt1 : SInt<1>; +def SInt8 : SInt<8>; +def SInt16 : SInt<16>; +def SInt32 : SInt<32>; +def SInt64 : SInt<64>; + +def PrimitiveUInt + : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64], "primitive unsigned int", + "::mlir::cir::IntType">; +def PrimitiveSInt + : AnyTypeOf<[SInt8, SInt16, SInt32, SInt64], "primitive signed int", + "::mlir::cir::IntType">; +def PrimitiveInt + : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64], + "primitive int", "::mlir::cir::IntType">; + +//===----------------------------------------------------------------------===// +// FloatType +//===----------------------------------------------------------------------===// + +class CIR_FloatType + : CIR_Type, + DeclareTypeInterfaceMethods, + ]> {} + +def CIR_Single : CIR_FloatType<"Single", "float"> { + let summary = "CIR single-precision float type"; + let description = [{ + Floating-point type that represents the `float` type in C/C++. Its + underlying floating-point format is the IEEE-754 binary32 format. + }]; +} + +def CIR_Double : CIR_FloatType<"Double", "double"> { + let summary = "CIR double-precision float type"; + let description = [{ + Floating-point type that represents the `double` type in C/C++. Its + underlying floating-point format is the IEEE-754 binary64 format.
+ }]; +} + +def CIR_FP80 : CIR_FloatType<"FP80", "f80"> { + let summary = "CIR type that represents x87 80-bit floating-point format"; + let description = [{ + Floating-point type that represents the x87 80-bit floating-point format. + }]; +} + +def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { + let summary = "CIR extended-precision float type"; + let description = [{ + Floating-point type that represents the `long double` type in C/C++. + + The underlying floating-point format of a long double value depends on the + implementation. The `underlying` parameter specifies the CIR floating-point + type that corresponds to this format. For now, it can only be either + `!cir.double` or `!cir.fp80`. + }]; + + let parameters = (ins "mlir::Type":$underlying); + + let assemblyFormat = [{ + `<` $underlying `>` + }]; + + let genVerifyDecl = 1; +} + +// Constraints + +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_LongDouble]>; +def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; + +//===----------------------------------------------------------------------===// +// PointerType +//===----------------------------------------------------------------------===// + +def CIR_PointerType : CIR_Type<"Pointer", "ptr", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR pointer type"; + let description = [{ + `!cir.ptr` is the type returned by any operation that generates a pointer + in C++. + }]; + + let parameters = (ins "mlir::Type":$pointee); + + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// DataMemberType +//===----------------------------------------------------------------------===// + +def CIR_DataMemberType : CIR_Type<"DataMember", "data_member", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR type that represents pointer-to-data-member type in C++"; + let description = [{ + `!cir.data_member` models the pointer-to-data-member type in C++. Values + of this type are essentially offsets of the pointed-to member within its + containing struct. + }]; + + let parameters = (ins "mlir::Type":$memberTy, + "mlir::cir::StructType":$clsTy); + + let assemblyFormat = [{ + `<` $memberTy `in` $clsTy `>` + }]; +} + +//===----------------------------------------------------------------------===// +// BoolType +// +// An alternative here is to represent bool as mlir::i1, but let's be more +// generic. +// +//===----------------------------------------------------------------------===// + +def CIR_BoolType : + CIR_Type<"Bool", "bool", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR bool type"; + let description = [{ + `!cir.bool` represents the C/C++ `bool` type. + }]; + + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// ArrayType +//===----------------------------------------------------------------------===// + +def CIR_ArrayType : CIR_Type<"Array", "array", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR array type"; + let description = [{ + `!cir.array` represents C/C++ constant arrays.
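+ + For example (illustrative, using the `!s32i` alias for the 32-bit signed + `!cir.int`), the C declaration `int a[4]` would be represented as + `!cir.array<!s32i x 4>`.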
+ }]; + + let parameters = (ins "mlir::Type":$eltType, "uint64_t":$size); + + let assemblyFormat = [{ + `<` $eltType `x` $size `>` + }]; +} + +//===----------------------------------------------------------------------===// +// VectorType (fixed size) +//===----------------------------------------------------------------------===// + +def CIR_VectorType : CIR_Type<"Vector", "vector", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR vector type"; + let description = [{ + `!cir.vector` represents fixed-size vector types. The parameters are the + element type and the number of elements. + }]; + + let parameters = (ins "mlir::Type":$eltType, "uint64_t":$size); + + let assemblyFormat = [{ + `<` $eltType `x` $size `>` + }]; +} + +//===----------------------------------------------------------------------===// +// FuncType +//===----------------------------------------------------------------------===// + +def CIR_FuncType : CIR_Type<"Func", "func"> { + let summary = "CIR function type"; + let description = [{ + `!cir.func` is a function type. It consists of a single return type, a + list of parameter types, and can optionally be variadic. + + Example: + + ```mlir + !cir.func + !cir.func + !cir.func + ``` + }]; + + let parameters = (ins ArrayRefParameter<"Type">:$inputs, "Type":$returnType, + "bool":$varArg); + let assemblyFormat = [{ + `<` $returnType ` ` `(` custom($inputs, $varArg) `>` + }]; + + let builders = [ + TypeBuilderWithInferredContext<(ins + "ArrayRef":$inputs, "Type":$returnType, + CArg<"bool", "false">:$isVarArg), [{ + return $_get(returnType.getContext(), inputs, returnType, isVarArg); + }]> + ]; + + let extraClassDeclaration = [{ + /// Returns whether the function is variadic. + bool isVarArg() const { return getVarArg(); } + + /// Returns the `i`th input operand type. Asserts if out of bounds. + Type getInput(unsigned i) const { return getInputs()[i]; } + + /// Returns the number of arguments to the function. + unsigned getNumInputs() const { return getInputs().size(); } + + /// Returns the result type of the function as an ArrayRef, enabling better + /// integration with generic MLIR utilities. + ArrayRef getReturnTypes() const; + + /// Returns whether the function returns void. + bool isVoid() const; + + /// Returns a clone of this function type with the given argument + /// and result types. + FuncType clone(TypeRange inputs, TypeRange results) const; + }]; +} + +//===----------------------------------------------------------------------===// +// Exception info type +// +// By introducing an exception info type, exception related operations can be +// more descriptive. +// +// This basically wraps a uint8_t* and a uint32_t +// +//===----------------------------------------------------------------------===// + +def CIR_ExceptionInfo : CIR_Type<"ExceptionInfo", "eh.info"> { + let summary = "CIR exception info"; + let description = [{ + Represents the content necessary for a `cir.call` to pass back an exception + object pointer + some extra selector information. This type is required for + some exception related operations, like `cir.catch`, `cir.eh.selector_slot` + and `cir.eh.slot`. + }]; +} + +//===----------------------------------------------------------------------===// +// Void type +//===----------------------------------------------------------------------===// + +def CIR_VoidType : CIR_Type<"Void", "void"> { + let summary = "CIR void type"; + let description = [{ + The `!cir.void` type represents the C/C++ `void` type.
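+ + For instance, C's `void *` is modeled as `!cir.ptr<!cir.void>`; the + `VoidPtr` constraint below builds exactly that pointer type.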
+ }]; + let extraClassDeclaration = [{ + /// Return the type alias name. + std::string getAlias() const { return "void"; }; + }]; +} + +// Constraints + +// Pointer to void +def VoidPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::VoidType>()">, + ]>, "void*">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::VoidType::get($_builder.getContext()))"> { +} + +// Pointer to a primitive int, float or double +def PrimitiveIntOrFPPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::IntType," + "::mlir::cir::SingleType, ::mlir::cir::DoubleType>()">, + ]>, "{int,float,double}*"> { +} + +// Pointer to struct +def StructPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::StructType>()">, + ]>, "!cir.struct*"> { +} + +// Pointers to exception info +def ExceptionInfoPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + ]>, "!cir.eh_info*">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { +} + +// Pointer to pointers to exception info +def ExceptionInfoPtrPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + And<[ + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + ]> + ]>, "!cir.eh_info**">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::ExceptionInfoType::get($_builder.getContext())))"> { +} + +// Vector of integral type +def IntegerVector : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::VectorType>()">, + CPred<"$_self.cast<::mlir::cir::VectorType>()" + ".getEltType().isa<::mlir::cir::IntType>()">, + CPred<"$_self.cast<::mlir::cir::VectorType>()" + ".getEltType().cast<::mlir::cir::IntType>()" + ".isPrimitive()"> + ]>, "!cir.vector of !cir.int"> { +} + +// Pointer to arrays +def ArrayPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ArrayType>()">, + ]>, "!cir.ptr"> { +} + +//===----------------------------------------------------------------------===// +// StructType (defined in cpp files) +//===----------------------------------------------------------------------===// + +def CIR_StructType : Type()">, + "CIR struct type">; + +//===----------------------------------------------------------------------===// +// Global type constraints +//===----------------------------------------------------------------------===// + +def CIR_AnyType : AnyTypeOf<[ + CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, + CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, + CIR_AnyFloat, +]>; + +#endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h new file mode 100644
index 000000000000..5eba4ac460a7 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -0,0 +1,115 @@ +//===- CIRTypesDetails.h - Details of CIR dialect types ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains implementation details, such as storage structures, of +// CIR dialect types. +// +//===----------------------------------------------------------------------===// +#ifndef CIR_DIALECT_IR_CIRTYPESDETAILS_H +#define CIR_DIALECT_IR_CIRTYPESDETAILS_H + +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/Support/LogicalResult.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/Hashing.h" + +namespace mlir { +namespace cir { +namespace detail { + +//===----------------------------------------------------------------------===// +// CIR StructTypeStorage +//===----------------------------------------------------------------------===// + +/// Type storage for CIR record types. +struct StructTypeStorage : public TypeStorage { + struct KeyTy { + ArrayRef members; + StringAttr name; + bool incomplete; + bool packed; + StructType::RecordKind kind; + ASTRecordDeclInterface ast; + + KeyTy(ArrayRef members, StringAttr name, bool incomplete, bool packed, + StructType::RecordKind kind, ASTRecordDeclInterface ast) + : members(members), name(name), incomplete(incomplete), packed(packed), + kind(kind), ast(ast) {} + }; + + ArrayRef members; + StringAttr name; + bool incomplete; + bool packed; + StructType::RecordKind kind; + ASTRecordDeclInterface ast; + + StructTypeStorage(ArrayRef members, StringAttr name, bool incomplete, + bool packed, StructType::RecordKind kind, + ASTRecordDeclInterface ast) + : members(members), name(name), incomplete(incomplete), packed(packed), + kind(kind), ast(ast) {} + + KeyTy getAsKey() const { + return KeyTy(members, name, incomplete, packed, kind, ast); + } + + bool operator==(const KeyTy &key) const { + if (name) + return (name == key.name) && (kind == key.kind); + return (members == key.members) && (name == key.name) && + (incomplete == key.incomplete) && (packed == key.packed) && + (kind == key.kind) && (ast == key.ast); + } + + static llvm::hash_code hashKey(const KeyTy &key) { + if (key.name) + return llvm::hash_combine(key.name, key.kind); + return llvm::hash_combine(key.members, key.incomplete, key.packed, key.kind, + key.ast); + } + + static StructTypeStorage *construct(TypeStorageAllocator &allocator, + const KeyTy &key) { + return new (allocator.allocate()) + StructTypeStorage(allocator.copyInto(key.members), key.name, + key.incomplete, key.packed, key.kind, key.ast); + } + + /// Mutates the members and attributes of an identified struct. + /// + /// Once a record is mutated, it is marked as complete, preventing further + /// mutations. Anonymous structs are always complete and cannot be mutated. + /// This method does not fail if a mutation of a complete struct does not + /// change the struct. + LogicalResult mutate(TypeStorageAllocator &allocator, ArrayRef members, + bool packed, ASTRecordDeclInterface ast) { + // Anonymous structs cannot mutate. + if (!name) + return failure(); + + // Mutations of complete structs are allowed only if they change nothing.
+ if (!incomplete) + return mlir::success((this->members == members) && + (this->packed == packed) && (this->ast == ast)); + + // Mutate incomplete struct. + this->members = allocator.copyInto(members); + this->packed = packed; + this->ast = ast; + + incomplete = false; + return success(); + } +}; + +} // namespace detail +} // namespace cir +} // namespace mlir + +#endif // CIR_DIALECT_IR_CIRTYPESDETAILS_H diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index 28ae30dab8df..c502525d30e8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -14,3 +14,16 @@ mlir_tablegen(CIROpsDialect.cpp.inc -gen-dialect-defs) add_public_tablegen_target(MLIRCIROpsIncGen) add_dependencies(mlir-headers MLIRCIROpsIncGen) +# Equivalent to add_mlir_doc +add_clang_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) +add_clang_mlir_doc(CIRAttrs CIRAttrs Dialects/ -gen-attrdef-doc) +add_clang_mlir_doc(CIRTypes CIRTypes Dialects/ -gen-typedef-doc) + +# Generate extra headers for custom enum and attrs. +mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) +mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) +mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) +mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) +add_public_tablegen_target(MLIRCIREnumsGen) diff --git a/clang/include/clang/CIR/Dialect/IR/FPEnv.h b/clang/include/clang/CIR/Dialect/IR/FPEnv.h new file mode 100644 index 000000000000..aceba9ee57d0 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/FPEnv.h @@ -0,0 +1,50 @@ +//===- FPEnv.h ---- FP Environment ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file contains the declarations of entities that describe floating +/// point environment and related functions. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIR_DIALECT_IR_FPENV_H +#define CLANG_CIR_DIALECT_IR_FPENV_H + +#include "llvm/ADT/FloatingPointMode.h" + +#include + +namespace cir { + +namespace fp { + +/// Exception behavior used for floating point operations. +/// +/// Each of these values corresponds to some LLVMIR metadata argument value of a +/// constrained floating point intrinsic. See the LLVM Language Reference Manual +/// for details. +enum ExceptionBehavior : uint8_t { + ebIgnore, ///< This corresponds to "fpexcept.ignore". + ebMayTrap, ///< This corresponds to "fpexcept.maytrap". + ebStrict, ///< This corresponds to "fpexcept.strict". +}; + +} // namespace fp + +/// For any RoundingMode enumerator, returns a string valid as input in +/// constrained intrinsic rounding mode metadata. +std::optional convertRoundingModeToStr(llvm::RoundingMode); + +/// For any ExceptionBehavior enumerator, returns a string valid as input in +/// constrained intrinsic exception behavior metadata. 
+std::optional + convertExceptionBehaviorToStr(fp::ExceptionBehavior); + +} // namespace cir + +#endif diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h new file mode 100644 index 000000000000..2f713240944f --- /dev/null +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -0,0 +1,51 @@ +//===- Passes.h - CIR pass entry points -------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes that expose pass constructors. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_PASSES_H_ +#define MLIR_DIALECT_CIR_PASSES_H_ + +#include "mlir/Pass/Pass.h" + +namespace clang { +class ASTContext; +} +namespace mlir { + +std::unique_ptr createLifetimeCheckPass(); +std::unique_ptr createLifetimeCheckPass(clang::ASTContext *astCtx); +std::unique_ptr createLifetimeCheckPass(ArrayRef remark, + ArrayRef hist, + unsigned hist_limit, + clang::ASTContext *astCtx); +std::unique_ptr createMergeCleanupsPass(); +std::unique_ptr createDropASTPass(); +std::unique_ptr createLoweringPreparePass(); +std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); +std::unique_ptr createIdiomRecognizerPass(); +std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); +std::unique_ptr createLibOptPass(); +std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); +std::unique_ptr createFlattenCFGPass(); + +void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); + +//===----------------------------------------------------------------------===// +// Registration +//===----------------------------------------------------------------------===// + +/// Generate the code for registering passes. +#define GEN_PASS_REGISTRATION +#include "clang/CIR/Dialect/Passes.h.inc" + +} // namespace mlir + +#endif // MLIR_DIALECT_CIR_PASSES_H_ diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td new file mode 100644 index 000000000000..e63b97469980 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -0,0 +1,131 @@ +//===-- Passes.td - CIR pass definition file ---------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_PASSES +#define MLIR_DIALECT_CIR_PASSES + +include "mlir/Pass/PassBase.td" + +def MergeCleanups : Pass<"cir-merge-cleanups"> { + let summary = "Remove unnecessary branches to cleanup blocks"; + let description = [{ + Canonicalize pass is too aggressive for CIR when the pipeline is + used for C/C++ analysis. This pass runs some rewrites for scopes, + merging some blocks and eliminating unnecessary control-flow. 
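+ + As an illustrative sketch (assuming the usual `cir.scope` and `cir.yield` + forms, and not tied to one specific rewrite), a trivially empty scope such + as + + ```mlir + cir.scope { + cir.yield + } + ``` + + can simply be folded away, and a block that merely branches to another + block can be merged with its target.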
+ }]; + let constructor = "mlir::createMergeCleanupsPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + +def LifetimeCheck : Pass<"cir-lifetime-check"> { + let summary = "Check lifetime safety and generate diagnostics"; + let description = [{ + This pass relies on a lifetime analysis pass and uses the diagnostics + mechanism to report to the user. It does not change any code. + + A default ctor is specified solely to make tablegen happy; since this + pass requires the presence of an ASTContext, set one up using + `mlir::createLifetimeCheckPass(clang::ASTContext &)` instead. + }]; + let constructor = "mlir::createLifetimeCheckPass()"; + let dependentDialects = ["cir::CIRDialect"]; + + let options = [ + ListOption<"historyList", "history", "std::string", + "List of history styles to emit as part of diagnostics." + " Supported styles: {all|null|invalid}", "llvm::cl::ZeroOrMore">, + ListOption<"remarksList", "remarks", "std::string", + "List of remark styles to enable as part of diagnostics." + " Supported styles: {all|pset}", "llvm::cl::ZeroOrMore">, + Option<"historyLimit", "history_limit", "unsigned", /*default=*/"1", + "Max amount of diagnostics to emit on pointer history"> + ]; +} + +def DropAST : Pass<"cir-drop-ast"> { + let summary = "Remove clang AST nodes attached to CIR operations"; + let description = [{ + Some CIR operations have references back to the Clang AST; these are + necessary to perform many useful checks without having to duplicate all + of the rich AST information in CIR. As we move down the pipeline (e.g. + generating LLVM or other MLIR dialects), the need for such nodes + diminishes and the AST information can be dropped. + + Right now this is enabled by default in Clang prior to dialect codegen + from CIR, but not before the lifetime check, where the AST is required + to be present. + }]; + let constructor = "mlir::createDropASTPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + +def LoweringPrepare : Pass<"cir-lowering-prepare"> { + let summary = "Preparation work before lowering to LLVM dialect"; + let description = [{ + This pass does preparation work for LLVM lowering. For example, it may + expand global variable initialization into a more ABI-friendly form. + }]; + let constructor = "mlir::createLoweringPreparePass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + +def FlattenCFG : Pass<"cir-flatten-cfg"> { + let summary = "Produces a flattened CFG"; + let description = [{ + This pass transforms CIR by inlining all nested regions. Thus, the + following postconditions are met after the pass is applied: + - there are no nested regions in a function body + - all blocks in a function belong to the parent region + In other words, this pass removes CIR operations such as IfOp, LoopOp, + and ScopeOp, and produces a flat CIR. + }]; + let constructor = "mlir::createFlattenCFGPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + +def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { + let summary = "Raise calls to C/C++ libraries to CIR operations"; + let description = [{ + This pass recognizes idiomatic C++ usage and incorporates C++ standard + containers, library function calls, and types into CIR operations, + attributes and types. + + Detections made by this pass can be inspected by users through remarks. + Currently supported are `all` and `found-calls`.
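+ + For example (illustrative), a call to a matching `std::find` overload may + be raised into the `cir.std.find` operation defined earlier in this + dialect: + + ```mlir + %result = cir.std.find(@original_fn, + %first : !T, %last : !T, %pattern : !P) -> !T + ```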
+  }];
+  let constructor = "mlir::createIdiomRecognizerPass()";
+  let dependentDialects = ["cir::CIRDialect"];
+
+  let options = [
+    ListOption<"remarksList", "remarks", "std::string",
+               "Diagnostic remarks to enable."
+               " Supported styles: {all|found-calls}", "llvm::cl::ZeroOrMore">,
+  ];
+}
+
+def LibOpt : Pass<"cir-lib-opt"> {
+  let summary = "Optimize C/C++ library calls";
+  let description = [{
+    Using the higher level information from `cir-idiom-recognizer`, this
+    pass applies transformations to CIR based on specific C/C++ library
+    semantics.
+
+    Transformations done by this pass can be inspected by users through
+    remarks. Currently supported are `all` and `transforms`.
+  }];
+  let constructor = "mlir::createLibOptPass()";
+  let dependentDialects = ["cir::CIRDialect"];
+
+  let options = [
+    ListOption<"remarksList", "remarks", "std::string",
+               "Diagnostic remarks to enable."
+               " Supported styles: {all|transforms}", "llvm::cl::ZeroOrMore">,
+  ];
+}
+
+#endif // MLIR_DIALECT_CIR_PASSES
diff --git a/clang/include/clang/CIR/Dialect/Transforms/CMakeLists.txt b/clang/include/clang/CIR/Dialect/Transforms/CMakeLists.txt
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h
new file mode 100644
index 000000000000..e2f1e16eb511
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h
@@ -0,0 +1,45 @@
+//===- ASTAttrInterfaces.h - CIR AST Interfaces -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_INTERFACES_CIR_AST_ATTR_INTERFACES_H_
+#define MLIR_INTERFACES_CIR_AST_ATTR_INTERFACES_H_
+
+#include "mlir/IR/Attributes.h"
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
+
+namespace mlir {
+namespace cir {
+
+mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl,
+                                 mlir::MLIRContext *ctx);
+
+} // namespace cir
+} // namespace mlir
+
+/// Include the generated interface declarations.
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h.inc"
+
+namespace mlir {
+namespace cir {
+
+template <typename T> bool hasAttr(ASTDeclInterface decl) {
+  if constexpr (std::is_same_v<T, clang::OwnerAttr>)
+    return decl.hasOwnerAttr();
+  if constexpr (std::is_same_v<T, clang::PointerAttr>)
+    return decl.hasPointerAttr();
+  if constexpr (std::is_same_v<T, clang::InitPriorityAttr>)
+    return decl.hasInitPriorityAttr();
+}
+
+} // namespace cir
+} // namespace mlir
+
+#endif // MLIR_INTERFACES_CIR_AST_ATTR_INTERFACES_H_
diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td
new file mode 100644
index 000000000000..fc162c11f42c
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td
@@ -0,0 +1,282 @@
+//===- ASTAttrInterfaces.td - CIR AST Interface Definitions -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES
+#define MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES
+
+include "mlir/IR/OpBase.td"
+
+let cppNamespace = "::mlir::cir" in {
+  def ASTDeclInterface : AttrInterface<"ASTDeclInterface"> {
+    let methods = [
+      InterfaceMethod<"", "bool", "hasOwnerAttr", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->template hasAttr<clang::OwnerAttr>();
+        }]
+      >,
+      InterfaceMethod<"", "bool", "hasPointerAttr", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->template hasAttr<clang::PointerAttr>();
+        }]
+      >,
+      InterfaceMethod<"", "bool", "hasInitPriorityAttr", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->template hasAttr<clang::InitPriorityAttr>();
+        }]
+      >
+    ];
+  }
+
+  def ASTNamedDeclInterface : AttrInterface<"ASTNamedDeclInterface",
+      [ASTDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "clang::DeclarationName", "getDeclName", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->getDeclName();
+        }]
+      >,
+      InterfaceMethod<"", "llvm::StringRef", "getName", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->getName();
+        }]
+      >
+    ];
+  }
+
+  def ASTValueDeclInterface : AttrInterface<"ASTValueDeclInterface",
+      [ASTNamedDeclInterface]>;
+
+  def ASTDeclaratorDeclInterface : AttrInterface<"ASTDeclaratorDeclInterface",
+      [ASTValueDeclInterface]>;
+
+  def ASTVarDeclInterface : AttrInterface<"ASTVarDeclInterface",
+      [ASTDeclaratorDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "void", "mangleDynamicInitializer", (ins "llvm::raw_ostream&":$Out), [{}],
+        /*defaultImplementation=*/ [{
+          std::unique_ptr<clang::MangleContext> MangleCtx(
+            $_attr.getAst()->getASTContext().createMangleContext());
+          MangleCtx->mangleDynamicInitializer($_attr.getAst(), Out);
+        }]
+      >,
+      InterfaceMethod<"", "clang::VarDecl::TLSKind", "getTLSKind", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->getTLSKind();
+        }]
+      >
+    ];
+  }
+
+  def ASTFunctionDeclInterface : AttrInterface<"ASTFunctionDeclInterface",
+      [ASTDeclaratorDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "bool", "isOverloadedOperator", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->isOverloadedOperator();
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isStatic", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->isStatic();
+        }]
+      >
+    ];
+  }
+
+  def ASTCXXMethodDeclInterface : AttrInterface<"ASTCXXMethodDeclInterface",
+      [ASTFunctionDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "bool", "isCopyAssignmentOperator", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          if (auto decl = dyn_cast<clang::CXXMethodDecl>($_attr.getAst()))
+            return decl->isCopyAssignmentOperator();
+          return false;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isMoveAssignmentOperator", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          if (auto decl = dyn_cast<clang::CXXMethodDecl>($_attr.getAst()))
+            return decl->isMoveAssignmentOperator();
+          return false;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isConst", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->isConst();
+        }]
+      >
+    ];
+  }
+
+  def ASTCXXConstructorDeclInterface : AttrInterface<"ASTCXXConstructorDeclInterface",
+      [ASTCXXMethodDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "bool", "isDefaultConstructor", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->isDefaultConstructor();
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isCopyConstructor", (ins),
+        [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->isCopyConstructor();
+        }]
+      >
+    ];
+  }
+
+  def ASTCXXConversionDeclInterface : AttrInterface<"ASTCXXConversionDeclInterface",
+      [ASTCXXMethodDeclInterface]>;
+
+  def ASTCXXDestructorDeclInterface : AttrInterface<"ASTCXXDestructorDeclInterface",
+      [ASTCXXMethodDeclInterface]>;
+
+  def ASTTypeDeclInterface : AttrInterface<"ASTTypeDeclInterface",
+      [ASTNamedDeclInterface]>;
+
+  def ASTTagDeclInterface : AttrInterface<"ASTTagDeclInterface",
+      [ASTTypeDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "clang::TagTypeKind", "getTagKind", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->getTagKind();
+        }]
+      >
+    ];
+  }
+
+  def ASTRecordDeclInterface : AttrInterface<"ASTRecordDeclInterface",
+      [ASTTagDeclInterface]> {
+    let methods = [
+      InterfaceMethod<"", "bool", "isLambda", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          if (auto ast = clang::dyn_cast<clang::CXXRecordDecl>($_attr.getAst()))
+            return ast->isLambda();
+          return false;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "hasPromiseType", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          if (!clang::isa<clang::ClassTemplateSpecializationDecl>($_attr.getAst()))
+            return false;
+          for (const auto *sub : $_attr.getAst()->decls()) {
+            if (auto subRec = clang::dyn_cast<clang::CXXRecordDecl>(sub)) {
+              if (subRec->getDeclName().isIdentifier() &&
+                  subRec->getName() == "promise_type") {
+                return true;
+              }
+            }
+          }
+          return false;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isInStdNamespace", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst()->getDeclContext()->isStdNamespace();
+        }]
+      >,
+      // Note: `getRawDecl` is useful for debugging because it allows dumping
+      // the RecordDecl - it should not be used in regular code.
+      InterfaceMethod<"", "const clang::RecordDecl *", "getRawDecl", (ins), [{}],
+        /*defaultImplementation=*/ [{
+          return $_attr.getAst();
+        }]
+      >
+    ];
+  }
+
+  def AnyASTFunctionDeclAttr : Attr<
+      CPred<"::mlir::isa<::mlir::cir::ASTFunctionDeclInterface>($_self)">,
+      "AST Function attribute"> {
+    let storageType = "::mlir::Attribute";
+    let returnType = "::mlir::Attribute";
+    let convertFromStorage = "$_self";
+    let constBuilderCall = "$0";
+  }
+
+  def ASTExprInterface : AttrInterface<"ASTExprInterface"> {}
+
+  def ASTCallExprInterface : AttrInterface<"ASTCallExprInterface",
+      [ASTExprInterface]> {
+    let methods = [
+      InterfaceMethod<"", "bool", "isCalleeInStdNamespace",
+        (ins), [{}], /*defaultImplementation=*/ [{
+          // Check that the entity being called is in the standard
+          // "std" namespace.
+          auto callee = $_attr.getAst()->getCallee();
+          if (!callee)
+            return false;
+          auto *ice = dyn_cast<clang::ImplicitCastExpr>(callee);
+          if (!ice)
+            return false;
+
+          auto *dre = dyn_cast_or_null<clang::DeclRefExpr>(ice->getSubExpr());
+          if (!dre)
+            return false;
+          auto qual = dre->getQualifier();
+          if (!qual)
+            return false;
+
+          // FIXME: should we check NamespaceAlias as well?
+          auto nqual = qual->getAsNamespace();
+          if (!nqual || !nqual->getIdentifier() ||
+              nqual->getName().compare("std") != 0)
+            return false;
+
+          return true;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isStdFunctionCall",
+        (ins "llvm::StringRef":$fn),
+        [{}], /*defaultImplementation=*/ [{
+          if (!isCalleeInStdNamespace())
+            return false;
+          auto fnDecl = $_attr.getAst()->getDirectCallee();
+          if (!fnDecl)
+            return false;
+          // We're looking for `std::<fn>`.
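+          // (Illustration: isStdFunctionCall("move") matches a call to
+          // `std::move` but not `mystd::move` or a member named `move`.)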
+          if (!fnDecl->getIdentifier() ||
+              fnDecl->getName().compare(fn) != 0)
+            return false;
+          return true;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isMemberCallTo",
+        (ins "llvm::StringRef":$fn),
+        [{}], /*defaultImplementation=*/ [{
+          auto memberCall = dyn_cast<clang::CXXMemberCallExpr>($_attr.getAst());
+          if (!memberCall)
+            return false;
+          auto methodDecl = memberCall->getMethodDecl();
+          if (!methodDecl)
+            return false;
+          if (!methodDecl->getIdentifier() ||
+              methodDecl->getName().compare(fn) != 0)
+            return false;
+          return true;
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isIteratorBeginCall",
+        (ins),
+        [{}], /*defaultImplementation=*/ [{
+          return isMemberCallTo("begin");
+        }]
+      >,
+      InterfaceMethod<"", "bool", "isIteratorEndCall",
+        (ins),
+        [{}], /*defaultImplementation=*/ [{
+          return isMemberCallTo("end");
+        }]
+      >
+    ];
+  }
+
+} // namespace mlir::cir
+
+#endif // MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES
diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h
new file mode 100644
index 000000000000..b2d75d40496f
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h
@@ -0,0 +1,22 @@
+//===- CIRFPTypeInterface.h - Interface for CIR FP types -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// Defines the interface to generically handle CIR floating-point types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H
+#define CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H
+
+#include "mlir/IR/Types.h"
+#include "llvm/ADT/APFloat.h"
+
+/// Include the tablegen'd interface declarations.
+#include "clang/CIR/Interfaces/CIRFPTypeInterface.h.inc"
+
+#endif // CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H
diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td
new file mode 100644
index 000000000000..7438c8be52d9
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td
@@ -0,0 +1,52 @@
+//===- CIRFPTypeInterface.td - CIR FP Interface Definitions -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE
+#define MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE
+
+include "mlir/IR/OpBase.td"
+
+def CIRFPTypeInterface : TypeInterface<"CIRFPTypeInterface"> {
+  let description = [{
+    Contains helper functions to query properties about a floating-point type.
+  }];
+  let cppNamespace = "::mlir::cir";
+
+  let methods = [
+    InterfaceMethod<[{
+        Returns the bit width of this floating-point type.
+      }],
+      /*retTy=*/"unsigned",
+      /*methodName=*/"getWidth",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        return llvm::APFloat::semanticsSizeInBits($_type.getFloatSemantics());
+      }]
+    >,
+    InterfaceMethod<[{
+        Return the mantissa width.
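+        For example, an IEEE single-precision type reports 24: the 23
+        explicitly stored fraction bits plus the implicit leading bit.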
+      }],
+      /*retTy=*/"unsigned",
+      /*methodName=*/"getFPMantissaWidth",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        return llvm::APFloat::semanticsPrecision($_type.getFloatSemantics());
+      }]
+    >,
+    InterfaceMethod<[{
+        Return the float semantics of this floating-point type.
+      }],
+      /*retTy=*/"const llvm::fltSemantics &",
+      /*methodName=*/"getFloatSemantics"
+    >,
+  ];
+}
+
+#endif // MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE
diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h
new file mode 100644
index 000000000000..2e8a0c8e8a94
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h
@@ -0,0 +1,36 @@
+//===- CIRLoopOpInterface.h - Interface for CIR loop-like ops --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// Defines the interface to generically handle CIR loop operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_
+#define CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_
+
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/Interfaces/ControlFlowInterfaces.h"
+#include "mlir/Interfaces/LoopLikeInterface.h"
+
+namespace mlir {
+namespace cir {
+namespace detail {
+
+/// Verify invariants of the LoopOpInterface.
+::mlir::LogicalResult verifyLoopOpInterface(::mlir::Operation *op);
+
+} // namespace detail
+} // namespace cir
+} // namespace mlir
+
+/// Include the tablegen'd interface declarations.
+#include "clang/CIR/Interfaces/CIRLoopOpInterface.h.inc"
+
+#endif // CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_
diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td
new file mode 100644
index 000000000000..c2b871785ffd
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td
@@ -0,0 +1,100 @@
+//===- CIRLoopOpInterface.td - Interface for CIR loop-like ops -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE
+#define CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE
+
+include "mlir/IR/OpBase.td"
+include "mlir/Interfaces/ControlFlowInterfaces.td"
+include "mlir/Interfaces/LoopLikeInterface.td"
+
+def LoopOpInterface : OpInterface<"LoopOpInterface", [
+  DeclareOpInterfaceMethods<RegionBranchOpInterface>,
+  DeclareOpInterfaceMethods<LoopLikeOpInterface>
+]> {
+  let description = [{
+    Contains helper functions to query properties and perform transformations
+    on a loop.
+  }];
+  let cppNamespace = "::mlir::cir";
+
+  let methods = [
+    InterfaceMethod<[{
+        Returns the loop's conditional region.
+      }],
+      /*retTy=*/"mlir::Region &",
+      /*methodName=*/"getCond"
+    >,
+    InterfaceMethod<[{
+        Returns the loop's body region.
+      }],
+      /*retTy=*/"mlir::Region &",
+      /*methodName=*/"getBody"
+    >,
+    InterfaceMethod<[{
+        Returns a pointer to the loop's step region or nullptr.
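+        For instance, a C-style `for` loop carries a step region, while
+        `while`-style loops do not (hence the nullptr default below).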
+      }],
+      /*retTy=*/"mlir::Region *",
+      /*methodName=*/"maybeGetStep",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/"return nullptr;"
+    >,
+    InterfaceMethod<[{
+        Returns the first region to be executed in the loop.
+      }],
+      /*retTy=*/"mlir::Region &",
+      /*methodName=*/"getEntry",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/"return $_op.getCond();"
+    >,
+    InterfaceMethod<[{
+        Returns a list of regions in order of execution.
+      }],
+      /*retTy=*/"llvm::SmallVector<mlir::Region *>",
+      /*methodName=*/"getRegionsInExecutionOrder",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        return llvm::SmallVector<mlir::Region *>{&$_op.getRegion(0), &$_op.getRegion(1)};
+      }]
+    >,
+    InterfaceMethod<[{
+        Recursively walks the body of the loop in pre-order while skipping
+        nested loops and executing a callback on every other operation.
+      }],
+      /*retTy=*/"mlir::WalkResult",
+      /*methodName=*/"walkBodySkippingNestedLoops",
+      /*args=*/(ins "::llvm::function_ref<void(mlir::Operation *)>":$callback),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        return $_op.getBody().template walk<mlir::WalkOrder::PreOrder>(
+            [&](Operation *op) {
+          if (isa<LoopOpInterface>(op))
+            return mlir::WalkResult::skip();
+          callback(op);
+          return mlir::WalkResult::advance();
+        });
+      }]
+    >
+  ];
+
+  let extraClassDeclaration = [{
+    /// Generic method to retrieve the successors of a LoopOpInterface operation.
+    static void getLoopOpSuccessorRegions(
+        ::mlir::cir::LoopOpInterface op, ::mlir::RegionBranchPoint point,
+        ::mlir::SmallVectorImpl<::mlir::RegionSuccessor> &regions);
+  }];
+
+  let verify = [{
+    /// Verify invariants of the LoopOpInterface.
+    return detail::verifyLoopOpInterface($_op);
+  }];
+}
+
+#endif // CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE
diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h
new file mode 100644
index 000000000000..fcef7a33eb20
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h
@@ -0,0 +1,32 @@
+//===- CIROpInterfaces.h - CIR Op Interfaces --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_INTERFACES_CIR_OP_H_
+#define MLIR_INTERFACES_CIR_OP_H_
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/IR/Value.h"
+#include "mlir/Interfaces/CallInterfaces.h"
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
+
+namespace mlir {
+namespace cir {} // namespace cir
+} // namespace mlir
+
+/// Include the generated interface declarations.
+#include "clang/CIR/Interfaces/CIROpInterfaces.h.inc"
+
+namespace mlir {
+namespace cir {} // namespace cir
+} // namespace mlir
+
+#endif // MLIR_INTERFACES_CIR_OP_H_
diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td
new file mode 100644
index 000000000000..b08e07a63d67
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td
@@ -0,0 +1,36 @@
+//===- CIROpInterfaces.td - CIR Op Interface Definitions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_CIR_OP_INTERFACES
+#define MLIR_CIR_OP_INTERFACES
+
+include "mlir/IR/OpBase.td"
+include "mlir/Interfaces/CallInterfaces.td"
+
+let cppNamespace = "::mlir::cir" in {
+  // The CIRCallOpInterface must be used instead of CallOpInterface when
+  // looking at arguments and other bits of CallOp. This creates a level of
+  // abstraction that's useful for handling indirect calls and other details.
+  def CIRCallOpInterface : OpInterface<"CIRCallOpInterface", [CallOpInterface]> {
+    let methods = [
+      InterfaceMethod<"", "mlir::Operation::operand_iterator",
+                      "arg_operand_begin", (ins)>,
+      InterfaceMethod<"", "mlir::Operation::operand_iterator",
+                      "arg_operand_end", (ins)>,
+      InterfaceMethod<
+          "Return the operand at index 'i', accounts for indirect call or "
+          "exception info", "mlir::Value", "getArgOperand", (ins "unsigned":$i)>,
+      InterfaceMethod<
+          "Return the number of operands, accounts for indirect call or "
+          "exception info", "unsigned", "getNumArgOperands", (ins)>,
+    ];
+  }
+
+} // namespace mlir::cir
+
+#endif // MLIR_CIR_OP_INTERFACES
diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt
new file mode 100644
index 000000000000..86fffa3f9307
--- /dev/null
+++ b/clang/include/clang/CIR/Interfaces/CMakeLists.txt
@@ -0,0 +1,34 @@
+# This replicates part of the add_mlir_interface cmake function from MLIR that
+# cannot be used here. This happens because it expects to be run inside the
+# MLIR directory, which is not the case for CIR (nor for FIR; both have similar
+# workarounds).
+
+# Declare an interface in the include directory
+function(add_clang_mlir_attr_interface interface)
+  set(LLVM_TARGET_DEFINITIONS ${interface}.td)
+  mlir_tablegen(${interface}.h.inc -gen-attr-interface-decls)
+  mlir_tablegen(${interface}.cpp.inc -gen-attr-interface-defs)
+  add_public_tablegen_target(MLIRCIR${interface}IncGen)
+  add_dependencies(mlir-generic-headers MLIRCIR${interface}IncGen)
+endfunction()
+
+function(add_clang_mlir_op_interface interface)
+  set(LLVM_TARGET_DEFINITIONS ${interface}.td)
+  mlir_tablegen(${interface}.h.inc -gen-op-interface-decls)
+  mlir_tablegen(${interface}.cpp.inc -gen-op-interface-defs)
+  add_public_tablegen_target(MLIR${interface}IncGen)
+  add_dependencies(mlir-generic-headers MLIR${interface}IncGen)
+endfunction()
+
+function(add_clang_mlir_type_interface interface)
+  set(LLVM_TARGET_DEFINITIONS ${interface}.td)
+  mlir_tablegen(${interface}.h.inc -gen-type-interface-decls)
+  mlir_tablegen(${interface}.cpp.inc -gen-type-interface-defs)
+  add_public_tablegen_target(MLIR${interface}IncGen)
+  add_dependencies(mlir-generic-headers MLIR${interface}IncGen)
+endfunction()
+
+add_clang_mlir_attr_interface(ASTAttrInterfaces)
+add_clang_mlir_op_interface(CIROpInterfaces)
+add_clang_mlir_op_interface(CIRLoopOpInterface)
+add_clang_mlir_type_interface(CIRFPTypeInterface)
diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h
new file mode 100644
index 000000000000..88713bf6e07f
--- /dev/null
+++ b/clang/include/clang/CIR/LowerToLLVM.h
@@ -0,0 +1,48 @@
+//====- LowerToLLVM.h - Lowering from CIR to LLVM -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares an interface for converting CIR modules to LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+#ifndef CLANG_CIR_LOWERTOLLVM_H
+#define CLANG_CIR_LOWERTOLLVM_H
+
+#include "mlir/Pass/Pass.h"
+
+#include <memory>
+
+namespace llvm {
+class LLVMContext;
+class Module;
+} // namespace llvm
+
+namespace mlir {
+class MLIRContext;
+class ModuleOp;
+} // namespace mlir
+
+namespace cir {
+
+// Lower directly from pristine CIR to LLVM IR.
+namespace direct {
+std::unique_ptr<llvm::Module>
+lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule,
+                             llvm::LLVMContext &llvmCtx,
+                             bool disableVerifier = false);
+}
+
+// Lower from CIR to LLVM IR by going through the MLIR in-tree dialects.
+std::unique_ptr<llvm::Module>
+lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule,
+                           std::unique_ptr<mlir::MLIRContext> mlirCtx,
+                           llvm::LLVMContext &llvmCtx);
+
+mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule,
+                                  mlir::MLIRContext *mlirCtx);
+} // namespace cir
+
+#endif // CLANG_CIR_LOWERTOLLVM_H
diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h
new file mode 100644
index 000000000000..6b1d2fdc75c4
--- /dev/null
+++ b/clang/include/clang/CIR/Passes.h
@@ -0,0 +1,38 @@
+//===- Passes.h - CIR Passes Definition -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes the entry points to create compiler passes for ClangIR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CIR_PASSES_H
+#define CLANG_CIR_PASSES_H
+
+#include "mlir/Pass/Pass.h"
+
+#include <memory>
+
+namespace cir {
+/// Create a pass for lowering from MLIR builtin dialects, such as `Affine`
+/// and `Std`, to the LLVM dialect for codegen.
+std::unique_ptr<mlir::Pass> createConvertMLIRToLLVMPass();
+
+/// Create a pass that fully lowers CIR to the MLIR in-tree dialects.
+std::unique_ptr<mlir::Pass> createConvertCIRToMLIRPass();
+
+namespace direct {
+/// Create a pass that fully lowers CIR to the LLVMIR dialect.
+std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass();
+
+/// Adds passes that fully lower CIR to the LLVMIR dialect.
+void populateCIRToLLVMPasses(mlir::OpPassManager &pm);
+
+} // namespace direct
+} // end namespace cir
+
+#endif // CLANG_CIR_PASSES_H
diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h
new file mode 100644
index 000000000000..74d5e5e32611
--- /dev/null
+++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h
@@ -0,0 +1,125 @@
+//===---- CIRGenAction.h - CIR Code Generation Frontend Action -*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CIR_CIRGENACTION_H
+#define LLVM_CLANG_CIR_CIRGENACTION_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include <memory>
+
+namespace llvm {
+class LLVMIRContext;
+class Module;
+} // namespace llvm
+
+namespace mlir {
+class MLIRContext;
+class ModuleOp;
+template <typename OpTy> class OwningOpRef;
+} // namespace mlir
+
+namespace cir {
+class CIRGenConsumer;
+class CIRGenerator;
+
+class CIRGenAction : public clang::ASTFrontendAction {
+public:
+  enum class OutputType {
+    EmitAssembly,
+    EmitCIR,
+    EmitCIRFlat,
+    EmitLLVM,
+    EmitMLIR,
+    EmitObj,
+    None
+  };
+
+private:
+  friend class CIRGenConsumer;
+
+  // TODO: this is redundant, but using only the OwningModuleRef requires
+  // linking more of clang against MLIR. Hide this somewhere else.
+  std::unique_ptr<mlir::OwningOpRef<mlir::ModuleOp>> mlirModule;
+  std::unique_ptr<llvm::Module> llvmModule;
+
+  mlir::MLIRContext *mlirContext;
+
+  mlir::OwningOpRef<mlir::ModuleOp> loadModule(llvm::MemoryBufferRef mbRef);
+
+protected:
+  CIRGenAction(OutputType action, mlir::MLIRContext *_MLIRContext = nullptr);
+
+  std::unique_ptr<clang::ASTConsumer>
+  CreateASTConsumer(clang::CompilerInstance &CI,
+                    llvm::StringRef InFile) override;
+
+  void ExecuteAction() override;
+
+  void EndSourceFileAction() override;
+
+public:
+  ~CIRGenAction() override;
+
+  bool hasCIRSupport() const override { return true; }
+
+  CIRGenConsumer *cgConsumer;
+  OutputType action;
+};
+
+class EmitCIRAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitCIRAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitCIRFlatAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitCIRFlatAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitCIROnlyAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitCIROnlyAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitMLIRAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitMLIRAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitLLVMAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitLLVMAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitAssemblyAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitAssemblyAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+class EmitObjAction : public CIRGenAction {
+  virtual void anchor();
+
+public:
+  EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr);
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 086aedefc118..13d421cafc7e 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -2886,14 +2886,86 @@ def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>,
   Group<f_Group>, HelpText<"Force linking the clang builtins runtime library">;
+
 def flto_EQ : Joined<["-"], "flto=">,
   Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>,
   Group<f_Group>, HelpText<"Set LTO mode">, Values<"thin,full">;
-def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>,
-  Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
 def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>,
   Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
+def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>,
+  Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; + +/// ClangIR-specific options - BEGIN +defm clangir : BoolFOption<"clangir", + FrontendOpts<"UseClangIRPipeline">, DefaultFalse, + PosFlag, + NegFlag LLVM pipeline to compile">, + BothFlags<[], [ClangOption, CC1Option], "">>; +def fclangir_disable_deferred_EQ : Joined<["-"], "fclangir-build-deferred-threshold=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): Control the recursion level for calls to buildDeferred (defaults to 500)">, + MarshallingInfoInt, "500u">; +def fclangir_skip_system_headers : Joined<["-"], "fclangir-skip-system-headers">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): buildDeferred skip functions defined in system headers">, + MarshallingInfoFlag>; +def fclangir_lifetime_check_EQ : Joined<["-"], "fclangir-lifetime-check=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Run lifetime checker">, + MarshallingInfoString>; +def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, AliasArgs<["history=invalid,null"]>, + HelpText<"Run lifetime checker">; +def fclangir_idiom_recognizer_EQ : Joined<["-"], "fclangir-idiom-recognizer=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable C/C++ idiom recognizer">, + MarshallingInfoString>; +def fclangir_idiom_recognizer : Flag<["-"], "fclangir-idiom-recognizer">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, + HelpText<"Enable C/C++ idiom recognizer">; +def fclangir_lib_opt_EQ : Joined<["-"], "fclangir-lib-opt=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable C/C++ library based optimizations (with options)">, + MarshallingInfoString>; +def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, + HelpText<"Enable C/C++ library based optimizations">; + +def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"Disable CIR transformations pipeline">, + MarshallingInfoFlag>; +def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"ClangIR: Disable MLIR module verifier">, + MarshallingInfoFlag>; +def clangir_disable_emit_cxx_default : Flag<["-"], "clangir-disable-emit-cxx-default">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"ClangIR: Disable emission of c++ default (compiler implemented) methods.">, + MarshallingInfoFlag>; +def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, + MarshallingInfoFlag>; +defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", + FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, + PosFlag, + NegFlag>; + +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, + Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; +def emit_cir_only : Flag<["-"], "emit-cir-only">, + HelpText<"Build ASTs and convert to CIR, discarding output">; +def emit_cir_flat : Flag<["-"], "emit-cir-flat">, Visibility<[ClangOption, CC1Option]>, + Group, HelpText<"Similar to -emit-cir but also lowers structured CFG into basic blocks.">; +def emit_mlir : Flag<["-"], "emit-mlir">, Visibility<[CC1Option]>, Group, + HelpText<"Build ASTs and then lower through ClangIR to MLIR, emit the .milr file">; +/// 
+
 def flto : Flag<["-"], "flto">,
   Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>,
   Group<f_Group>,
@@ -4605,9 +4677,9 @@ def mllvm : Separate<["-"], "mllvm">,
 def : Joined<["-"], "mllvm=">,
   Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, Alias<mllvm>,
   HelpText<"Alias for -mllvm">, MetaVarName<"<arg>">;
-def mmlir : Separate<["-"], "mmlir">,
-  Visibility<[ClangOption, CLOption, FC1Option, FlangOption]>,
-  HelpText<"Additional arguments to forward to MLIR's option processing">;
+def mmlir : Separate<["-"], "mmlir">, Visibility<[ClangOption,CC1Option,FC1Option,FlangOption]>,
+  HelpText<"Additional arguments to forward to MLIR's option processing">,
+  MarshallingInfoStringVector<FrontendOpts<"MLIRArgs">>;
 def ffuchsia_api_level_EQ : Joined<["-"], "ffuchsia-api-level=">,
   Group<m_Group>, Visibility<[ClangOption, CC1Option]>,
   HelpText<"Set Fuchsia API level">,
@@ -6651,7 +6723,7 @@ defm analyzed_objects_for_unparse : OptOutFC1FFlag<"analyzed-objects-for-unparse
 def emit_fir : Flag<["-"], "emit-fir">, Group<Action_Group>,
   HelpText<"Build the parse tree, then lower it to FIR">;
-def emit_mlir : Flag<["-"], "emit-mlir">, Alias<emit_fir>;
+// def emit_mlir : Flag<["-"], "emit-mlir">, Alias<emit_fir>;
 def emit_hlfir : Flag<["-"], "emit-hlfir">, Group<Action_Group>,
   HelpText<"Build the parse tree, then lower it to HLFIR">;
diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def
index 0e0cae5fb706..9d76e949d4ac 100644
--- a/clang/include/clang/Driver/Types.def
+++ b/clang/include/clang/Driver/Types.def
@@ -91,6 +91,9 @@ TYPE("lto-ir", LTO_IR, INVALID, "s", phases
 TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
 TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("cir-flat", CIR_FLAT, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("mlir", MLIR, INVALID, "mlir", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+
 // Misc.
 TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
 TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge)
diff --git a/clang/include/clang/Frontend/FrontendAction.h b/clang/include/clang/Frontend/FrontendAction.h
index 039f6f247b6d..effc505d9a3e 100644
--- a/clang/include/clang/Frontend/FrontendAction.h
+++ b/clang/include/clang/Frontend/FrontendAction.h
@@ -196,6 +196,9 @@ class FrontendAction {
   /// Does this action support use with IR files?
   virtual bool hasIRSupport() const { return false; }
 
+  /// Does this action support use with CIR files?
+  virtual bool hasCIRSupport() const { return false; }
+
   /// Does this action support use with code completion?
   virtual bool hasCodeCompletionSupport() const { return false; }
diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h
index a738c1f37576..45c5cd1f9ec3 100644
--- a/clang/include/clang/Frontend/FrontendOptions.h
+++ b/clang/include/clang/Frontend/FrontendOptions.h
@@ -65,6 +65,18 @@ enum ActionKind {
   /// Translate input source into HTML.
   EmitHTML,
 
+  /// Emit a .cir file
+  EmitCIR,
+
+  /// Emit a .cir file with flat ClangIR
+  EmitCIRFlat,
+
+  /// Generate CIR, but don't emit anything.
+  EmitCIROnly,
+
+  /// Emit a .mlir file
+  EmitMLIR,
+
   /// Emit a .ll file.
   EmitLLVM,
 
@@ -154,11 +166,7 @@ enum ActionKind {
 class InputKind {
 public:
   /// The input file format.
-  enum Format {
-    Source,
-    ModuleMap,
-    Precompiled
-  };
+  enum Format { Source, ModuleMap, Precompiled };
 
   // If we are building a header unit, what kind it is; this affects whether
   // we look for the file in the user or system include search paths before
@@ -408,6 +416,34 @@ class FrontendOptions {
   LLVM_PREFERRED_TYPE(bool)
   unsigned GenReducedBMI : 1;
 
+  /// Use the ClangIR pipeline to emit code
+  LLVM_PREFERRED_TYPE(bool)
+  unsigned UseClangIRPipeline : 1;
+
+  /// Lower directly from ClangIR to LLVM
+  unsigned ClangIRDirectLowering : 1;
+
+  /// Disable ClangIR (CIR) specific passes
+  unsigned ClangIRDisablePasses : 1;
+
+  /// Disable the ClangIR (CIR) verifier
+  unsigned ClangIRDisableCIRVerifier : 1;
+
+  /// Disable ClangIR emission for CXX default (compiler generated) methods.
+  unsigned ClangIRDisableEmitCXXDefault : 1;
+
+  /// Enable diagnostic verification for CIR
+  unsigned ClangIRVerifyDiags : 1;
+
+  // Enable ClangIR-based lifetime check
+  unsigned ClangIRLifetimeCheck : 1;
+
+  // Enable ClangIR idiom recognizer
+  unsigned ClangIRIdiomRecognizer : 1;
+
+  // Enable ClangIR library optimizations
+  unsigned ClangIRLibOpt : 1;
+
   CodeCompleteOptions CodeCompleteOpts;
 
   /// Specifies the output format of the AST.
@@ -465,11 +501,11 @@ class FrontendOptions {
     /// Enable converting setter/getter expressions to property-dot syntx.
     ObjCMT_PropertyDotSyntax = 0x1000,
 
-    ObjCMT_MigrateDecls = (ObjCMT_ReadonlyProperty | ObjCMT_ReadwriteProperty |
-                           ObjCMT_Annotation | ObjCMT_Instancetype |
-                           ObjCMT_NsMacros | ObjCMT_ProtocolConformance |
-                           ObjCMT_NsAtomicIOSOnlyProperty |
-                           ObjCMT_DesignatedInitializer),
+    ObjCMT_MigrateDecls =
+        (ObjCMT_ReadonlyProperty | ObjCMT_ReadwriteProperty |
+         ObjCMT_Annotation | ObjCMT_Instancetype | ObjCMT_NsMacros |
+         ObjCMT_ProtocolConformance | ObjCMT_NsAtomicIOSOnlyProperty |
+         ObjCMT_DesignatedInitializer),
     ObjCMT_MigrateAll = (ObjCMT_Literals | ObjCMT_Subscripting |
                          ObjCMT_MigrateDecls | ObjCMT_PropertyDotSyntax)
   };
@@ -479,6 +515,10 @@ class FrontendOptions {
   std::string MTMigrateDir;
   std::string ARCMTMigrateReportOut;
 
+  std::string ClangIRLifetimeCheckOpts;
+  std::string ClangIRIdiomRecognizerOpts;
+  std::string ClangIRLibOptOpts;
+
   /// The input kind, either specified via -x argument or deduced from the input
   /// file name.
   InputKind DashX;
@@ -550,6 +590,10 @@ class FrontendOptions {
   /// should only be used for debugging and experimental features.
   std::vector<std::string> LLVMArgs;
 
+  /// A list of arguments to forward to MLIR's option processing; this
+  /// should only be used for debugging and experimental features.
+  std::vector<std::string> MLIRArgs;
+
   /// File name of the file that will provide record layouts
   /// (in the format produced by -fdump-record-layouts).
   std::string OverrideRecordLayoutsFile;
@@ -590,6 +634,10 @@ class FrontendOptions {
         EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false),
         EmitSymbolGraphSymbolLabelsForTesting(false),
         EmitPrettySymbolGraphs(false), GenReducedBMI(false),
+        UseClangIRPipeline(false), ClangIRDirectLowering(false),
+        ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false),
+        ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false),
+        ClangIRIdiomRecognizer(false), ClangIRLibOpt(false),
         TimeTraceGranularity(500) {}
 
   /// getInputKindForExtension - Return the appropriate input kind for a file
diff --git a/clang/include/clang/Sema/AnalysisBasedWarnings.h b/clang/include/clang/Sema/AnalysisBasedWarnings.h
index aafe227b8408..6aac70021ec7 100644
--- a/clang/include/clang/Sema/AnalysisBasedWarnings.h
+++ b/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -23,6 +23,7 @@ class Decl;
 class FunctionDecl;
 class QualType;
 class Sema;
+
 namespace sema {
 class FunctionScopeInfo;
 }
diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt
index d2ff200e0da5..093420b4fee3 100644
--- a/clang/lib/CIR/CMakeLists.txt
+++ b/clang/lib/CIR/CMakeLists.txt
@@ -2,3 +2,7 @@ include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include)
 include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include)
 
 add_subdirectory(Dialect)
+add_subdirectory(CodeGen)
+add_subdirectory(FrontendAction)
+add_subdirectory(Lowering)
+add_subdirectory(Interfaces)
diff --git a/clang/lib/CIR/CodeGen/ABIInfo.h b/clang/lib/CIR/CodeGen/ABIInfo.h
new file mode 100644
index 000000000000..5a2e3ff56ca4
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/ABIInfo.h
@@ -0,0 +1,45 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_ABIINFO_H
+#define LLVM_CLANG_LIB_CIR_ABIINFO_H
+
+#include "clang/AST/Type.h"
+
+namespace cir {
+
+class ABIArgInfo;
+class CIRGenCXXABI;
+class CIRGenFunctionInfo;
+class CIRGenTypes;
+
+/// ABIInfo - Target specific hooks for defining how a type should be passed or
+/// returned from functions.
+class ABIInfo {
+  ABIInfo() = delete;
+
+public:
+  CIRGenTypes &CGT;
+
+  ABIInfo(CIRGenTypes &cgt) : CGT{cgt} {}
+
+  virtual ~ABIInfo();
+
+  CIRGenCXXABI &getCXXABI() const;
+  clang::ASTContext &getContext() const;
+
+  virtual void computeInfo(CIRGenFunctionInfo &FI) const = 0;
+
+  // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
+  // only difference is that this considers bit-precise integer types as well.
+  bool isPromotableIntegerTypeForABI(clang::QualType Ty) const;
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
new file mode 100644
index 000000000000..ac3afd779919
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -0,0 +1,138 @@
+//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a simple wrapper for a pair of a pointer and an
+// alignment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_ADDRESS_H
+#define LLVM_CLANG_LIB_CIR_ADDRESS_H
+
+#include "clang/AST/CharUnits.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/IR/Constants.h"
+
+#include "mlir/IR/Value.h"
+
+namespace cir {
+
+// Indicates whether a pointer is known not to be null.
+enum KnownNonNull_t { NotKnownNonNull, KnownNonNull };
+
+class Address {
+  llvm::PointerIntPair<mlir::Value, 1, bool> PointerAndKnownNonNull;
+  mlir::Type ElementType;
+  clang::CharUnits Alignment;
+
+protected:
+  Address(std::nullptr_t) : ElementType(nullptr) {}
+
+public:
+  Address(mlir::Value pointer, mlir::Type elementType,
+          clang::CharUnits alignment,
+          KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+      : PointerAndKnownNonNull(pointer, IsKnownNonNull),
+        ElementType(elementType), Alignment(alignment) {
+    assert(pointer && "Pointer cannot be null");
+    assert(elementType && "Element type cannot be null");
+    assert(pointer.getType().isa<mlir::cir::PointerType>() &&
+           "Expected cir.ptr type");
+    assert(!alignment.isZero() && "Alignment cannot be zero");
+  }
+  Address(mlir::Value pointer, clang::CharUnits alignment)
+      : Address(pointer,
+                pointer.getType().cast<mlir::cir::PointerType>().getPointee(),
+                alignment) {
+    assert((!alignment.isZero() || pointer == nullptr) &&
+           "creating valid address with invalid alignment");
+  }
+
+  static Address invalid() { return Address(nullptr); }
+  bool isValid() const {
+    return PointerAndKnownNonNull.getPointer() != nullptr;
+  }
+
+  /// Return address with different pointer, but same element type and
+  /// alignment.
+  Address withPointer(mlir::Value NewPointer,
+                      KnownNonNull_t IsKnownNonNull = NotKnownNonNull) const {
+    return Address(NewPointer, getElementType(), getAlignment(),
+                   IsKnownNonNull);
+  }
+
+  /// Return address with different alignment, but same pointer and element
+  /// type.
+  Address withAlignment(clang::CharUnits NewAlignment) const {
+    return Address(getPointer(), getElementType(), NewAlignment,
+                   isKnownNonNull());
+  }
+
+  /// Return address with different element type, but same pointer and
+  /// alignment.
+  Address withElementType(mlir::Type ElemTy) const {
+    // TODO(cir): hasOffset() check
+    return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
+  }
+
+  mlir::Value getPointer() const {
+    assert(isValid());
+    return PointerAndKnownNonNull.getPointer();
+  }
+
+  /// Return the alignment of this pointer.
+  clang::CharUnits getAlignment() const {
+    // assert(isValid());
+    return Alignment;
+  }
+
+  /// Return the pointer contained in this class after authenticating it and
+  /// adding offset to it if necessary.
+  mlir::Value emitRawPointer() const {
+    // TODO(cir): update this class with the latest traditional LLVM codegen
+    // bits and then replace the call below with getBasePointer().
+    return getPointer();
+  }
+
+  /// Return the type of the pointer value.
+  mlir::cir::PointerType getType() const {
+    return getPointer().getType().cast<mlir::cir::PointerType>();
+  }
+
+  mlir::Type getElementType() const {
+    assert(isValid());
+    return ElementType;
+  }
+
+  /// Whether the pointer is known not to be null.
+  KnownNonNull_t isKnownNonNull() const {
+    assert(isValid());
+    return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
+  }
+
+  /// Set the non-null bit.
+  Address setKnownNonNull() {
+    assert(isValid());
+    PointerAndKnownNonNull.setInt(true);
+    return *this;
+  }
+
+  /// Get the operation which defines this address.
+  mlir::Operation *getDefiningOp() const {
+    if (!isValid())
+      return nullptr;
+    return getPointer().getDefiningOp();
+  }
+};
+
+} // namespace cir
+
+#endif // LLVM_CLANG_LIB_CIR_ADDRESS_H
diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp
new file mode 100644
index 000000000000..27193f718ece
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp
@@ -0,0 +1,700 @@
+#include "clang/Basic/DiagnosticSema.h"
+#include "llvm/ADT/StringExtras.h"
+
+#include "CIRGenFunction.h"
+#include "TargetInfo.h"
+#include "UnimplementedFeatureGuarding.h"
+
+using namespace cir;
+using namespace clang;
+using namespace mlir::cir;
+
+static bool isAggregateType(mlir::Type typ) {
+  return isa<mlir::cir::StructType, mlir::cir::ArrayType>(typ);
+}
+
+static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &S) {
+  AsmFlavor GnuAsmFlavor =
+      cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
+          ? AsmFlavor::x86_att
+          : AsmFlavor::x86_intel;
+
+  return isa<MSAsmStmt>(&S) ? AsmFlavor::x86_intel : GnuAsmFlavor;
+}
+
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+static std::string SimplifyConstraint(
+    const char *Constraint, const TargetInfo &Target,
+    SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
+  std::string Result;
+
+  while (*Constraint) {
+    switch (*Constraint) {
+    default:
+      Result += Target.convertConstraint(Constraint);
+      break;
+    // Ignore these
+    case '*':
+    case '?':
+    case '!':
+    case '=': // Will see this and the following in mult-alt constraints.
+    case '+':
+      break;
+    case '#': // Ignore the rest of the constraint alternative.
+      while (Constraint[1] && Constraint[1] != ',')
+        Constraint++;
+      break;
+    case '&':
+    case '%':
+      Result += *Constraint;
+      while (Constraint[1] && Constraint[1] == *Constraint)
+        Constraint++;
+      break;
+    case ',':
+      Result += "|";
+      break;
+    case 'g':
+      Result += "imr";
+      break;
+    case '[': {
+      assert(OutCons &&
+             "Must pass output names to constraints with a symbolic name");
+      unsigned Index;
+      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
+      assert(result && "Could not resolve symbolic name");
+      (void)result;
+      Result += llvm::utostr(Index);
+      break;
+    }
+    }
+
+    Constraint++;
+  }
+
+  return Result;
+}
+
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+                       const TargetInfo &Target, CIRGenModule &CGM,
+                       const AsmStmt &Stmt, const bool EarlyClobber,
+                       std::string *GCCReg = nullptr) {
+  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+  if (!AsmDeclRef)
+    return Constraint;
+  const ValueDecl &Value = *AsmDeclRef->getDecl();
+  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+  if (!Variable)
+    return Constraint;
+  if (Variable->getStorageClass() != SC_Register)
+    return Constraint;
+  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+  if (!Attr)
+    return Constraint;
+  StringRef Register = Attr->getLabel();
+  assert(Target.isValidGCCRegisterName(Register));
+  // We're using validateOutputConstraint here because we only care if
+  // this is a register constraint.
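+  // (Illustration: for `register int x asm("eax");` used as an output, an
+  // "r" constraint is rewritten to "{eax}", or "&{eax}" when early-clobber.)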
+  TargetInfo::ConstraintInfo Info(Constraint, "");
+  if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) {
+    CGM.ErrorUnsupported(&Stmt, "__asm__");
+    return Constraint;
+  }
+  // Canonicalize the register here before returning it.
+  Register = Target.getNormalizedGCCRegisterName(Register);
+  if (GCCReg != nullptr)
+    *GCCReg = Register.str();
+  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
+}
+
+static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &S,
+                            std::string &constraints, bool &hasUnwindClobber,
+                            bool &readOnly, bool readNone) {
+
+  hasUnwindClobber = false;
+  auto &cgm = cgf.getCIRGenModule();
+
+  // Clobbers
+  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+    StringRef clobber = S.getClobber(i);
+    if (clobber == "memory")
+      readOnly = readNone = false;
+    else if (clobber == "unwind") {
+      hasUnwindClobber = true;
+      continue;
+    } else if (clobber != "cc") {
+      clobber = cgf.getTarget().getNormalizedGCCRegisterName(clobber);
+      if (cgm.getCodeGenOpts().StackClashProtector &&
+          cgf.getTarget().isSPRegName(clobber)) {
+        cgm.getDiags().Report(S.getAsmLoc(),
+                              diag::warn_stack_clash_protection_inline_asm);
+      }
+    }
+
+    if (isa<MSAsmStmt>(&S)) {
+      if (clobber == "eax" || clobber == "edx") {
+        if (constraints.find("=&A") != std::string::npos)
+          continue;
+        std::string::size_type position1 =
+            constraints.find("={" + clobber.str() + "}");
+        if (position1 != std::string::npos) {
+          constraints.insert(position1 + 1, "&");
+          continue;
+        }
+        std::string::size_type position2 = constraints.find("=A");
+        if (position2 != std::string::npos) {
+          constraints.insert(position2 + 1, "&");
+          continue;
+        }
+      }
+    }
+    if (!constraints.empty())
+      constraints += ',';
+
+    constraints += "~{";
+    constraints += clobber;
+    constraints += '}';
+  }
+
+  // Add machine specific clobbers
+  std::string_view machineClobbers = cgf.getTarget().getClobbers();
+  if (!machineClobbers.empty()) {
+    if (!constraints.empty())
+      constraints += ',';
+    constraints += machineClobbers;
+  }
+}
+
+using constraintInfos = SmallVector<TargetInfo::ConstraintInfo, 4>;
+
+static void collectInOutConstrainsInfos(const CIRGenFunction &cgf,
+                                        const AsmStmt &S, constraintInfos &out,
+                                        constraintInfos &in) {
+
+  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+    StringRef Name;
+    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+      Name = GAS->getOutputName(i);
+    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
+    bool IsValid = cgf.getTarget().validateOutputConstraint(Info);
+    (void)IsValid;
+    assert(IsValid && "Failed to parse output constraint");
+    out.push_back(Info);
+  }
+
+  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+    StringRef Name;
+    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+      Name = GAS->getInputName(i);
+    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
+    bool IsValid = cgf.getTarget().validateInputConstraint(out, Info);
+    assert(IsValid && "Failed to parse input constraint");
+    (void)IsValid;
+    in.push_back(Info);
+  }
+}
+
+std::pair<mlir::Value, mlir::Type> CIRGenFunction::buildAsmInputLValue(
+    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
+
+  if (Info.allowsRegister() || !Info.allowsMemory()) {
+    if (hasScalarEvaluationKind(InputType))
+      return {buildLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()};
+
+    mlir::Type Ty = convertType(InputType);
+    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
+    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
+        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
+      Ty = mlir::cir::IntType::get(builder.getContext(), Size, false);
+
+      return {builder.createLoad(getLoc(Loc),
+                                 InputValue.getAddress().withElementType(Ty)),
+              mlir::Type()};
+    }
+  }
+
+  Address Addr = InputValue.getAddress();
+  ConstraintStr += '*';
+  return {Addr.getPointer(), Addr.getElementType()};
+}
+
+std::pair<mlir::Value, mlir::Type>
+CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info,
+                              const Expr *InputExpr,
+                              std::string &ConstraintStr) {
+  auto loc = getLoc(InputExpr->getExprLoc());
+
+  // If this can't be a register or memory, i.e., has to be a constant
+  // (immediate or symbolic), try to emit it as such.
+  if (!Info.allowsRegister() && !Info.allowsMemory()) {
+    if (Info.requiresImmediateConstant()) {
+      Expr::EvalResult EVResult;
+      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
+
+      llvm::APSInt IntResult;
+      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+                                          getContext()))
+        return {builder.getConstAPSInt(loc, IntResult), mlir::Type()};
+    }
+
+    Expr::EvalResult Result;
+    if (InputExpr->EvaluateAsInt(Result, getContext()))
+      return {builder.getConstAPSInt(loc, Result.Val.getInt()), mlir::Type()};
+  }
+
+  if (Info.allowsRegister() || !Info.allowsMemory())
+    if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
+      return {buildScalarExpr(InputExpr), mlir::Type()};
+  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
+    return {buildScalarExpr(InputExpr), mlir::Type()};
+  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+  LValue Dest = buildLValue(InputExpr);
+  return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
+                             InputExpr->getExprLoc());
+}
+
+static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
+                           const llvm::ArrayRef<mlir::Value> RegResults,
+                           const llvm::ArrayRef<mlir::Type> ResultRegTypes,
+                           const llvm::ArrayRef<mlir::Type> ResultTruncRegTypes,
+                           const llvm::ArrayRef<LValue> ResultRegDests,
+                           const llvm::ArrayRef<QualType> ResultRegQualTys,
+                           const llvm::BitVector &ResultTypeRequiresCast,
+                           const llvm::BitVector &ResultRegIsFlagReg) {
+  CIRGenBuilderTy &Builder = CGF.getBuilder();
+  CIRGenModule &CGM = CGF.CGM;
+  auto CTX = Builder.getContext();
+
+  assert(RegResults.size() == ResultRegTypes.size());
+  assert(RegResults.size() == ResultTruncRegTypes.size());
+  assert(RegResults.size() == ResultRegDests.size());
+  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
+  // in which case its size may grow.
+  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
+  assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+
+  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+    mlir::Value Tmp = RegResults[i];
+    mlir::Type TruncTy = ResultTruncRegTypes[i];
+
+    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+      assert(!UnimplementedFeature::asm_llvm_assume());
+    }
+
+    // If the result type of the LLVM IR asm doesn't match the result type of
+    // the expression, do the conversion.
+    if (ResultRegTypes[i] != TruncTy) {
+
+      // Truncate the integer result to the right size; note that TruncTy can
+      // be a pointer.
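+      // (e.g. an "=r" output of pointer type comes back as an integer and is
+      // converted back through the int-to-ptr cast below.)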
+      if (TruncTy.isa<mlir::cir::CIRFPTypeInterface>())
+        Tmp = Builder.createFloatingCast(Tmp, TruncTy);
+      else if (isa<mlir::cir::PointerType>(TruncTy) &&
+               isa<mlir::cir::IntType>(Tmp.getType())) {
+        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+        Tmp = Builder.createIntCast(
+            Tmp, mlir::cir::IntType::get(CTX, (unsigned)ResSize, false));
+        Tmp = Builder.createIntToPtr(Tmp, TruncTy);
+      } else if (isa<mlir::cir::PointerType>(Tmp.getType()) &&
+                 isa<mlir::cir::IntType>(TruncTy)) {
+        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp.getType());
+        Tmp = Builder.createPtrToInt(
+            Tmp, mlir::cir::IntType::get(CTX, (unsigned)TmpSize, false));
+        Tmp = Builder.createIntCast(Tmp, TruncTy);
+      } else if (isa<mlir::cir::IntType>(TruncTy)) {
+        Tmp = Builder.createIntCast(Tmp, TruncTy);
+      } else if (false /*TruncTy->isVectorTy()*/) {
+        assert(!UnimplementedFeature::asm_vector_type());
+      }
+    }
+
+    LValue Dest = ResultRegDests[i];
+    // ResultTypeRequiresCast elements correspond to the first
+    // ResultTypeRequiresCast.size() elements of RegResults.
+    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
+      unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
+      Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
+      if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
+        Builder.createStore(CGF.getLoc(S.getAsmLoc()), Tmp, A);
+        continue;
+      }
+
+      QualType Ty =
+          CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
+      if (Ty.isNull()) {
+        const Expr *OutExpr = S.getOutputExpr(i);
+        CGM.getDiags().Report(OutExpr->getExprLoc(),
+                              diag::err_store_value_to_reg);
+        return;
+      }
+      Dest = CGF.makeAddrLValue(A, Ty);
+    }
+
+    CGF.buildStoreThroughLValue(RValue::get(Tmp), Dest);
+  }
+}
+
+mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
+  // Assemble the final asm string.
+  std::string AsmString = S.generateAsmString(getContext());
+
+  // Get all the output and input constraints together.
+  constraintInfos OutputConstraintInfos;
+  constraintInfos InputConstraintInfos;
+  collectInOutConstrainsInfos(*this, S, OutputConstraintInfos,
+                              InputConstraintInfos);
+
+  std::string Constraints;
+  std::vector<LValue> ResultRegDests;
+  std::vector<QualType> ResultRegQualTys;
+  std::vector<mlir::Type> ResultRegTypes;
+  std::vector<mlir::Type> ResultTruncRegTypes;
+  std::vector<mlir::Type> ArgTypes;
+  std::vector<mlir::Type> ArgElemTypes;
+  std::vector<mlir::Value> OutArgs;
+  std::vector<mlir::Value> InArgs;
+  std::vector<mlir::Value> InOutArgs;
+  std::vector<mlir::Value> Args;
+  llvm::BitVector ResultTypeRequiresCast;
+  llvm::BitVector ResultRegIsFlagReg;
+
+  // Keep track of input constraints.
+  std::string InOutConstraints;
+  std::vector<mlir::Type> InOutArgTypes;
+  std::vector<mlir::Type> InOutArgElemTypes;
+
+  // Keep track of out constraints for tied input operand.
+  std::vector<std::string> OutputConstraints;
+
+  // Keep track of defined physregs.
+  llvm::SmallSet<std::string, 8> PhysRegOutputs;
+
+  // An inline asm can be marked readonly if it meets the following conditions:
+  //  - it doesn't have any sideeffects
+  //  - it doesn't clobber memory
+  //  - it doesn't return a value by-reference
+  // It can be marked readnone if it doesn't have any input memory constraints
+  // in addition to meeting the conditions listed above.
+  bool ReadOnly = true, ReadNone = true;
+
+  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+    // Simplify the output constraint.
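+    // This roughly mirrors clang CodeGen's SimplifyConstraint: e.g. a generic
+    // "g" constraint expands to "imr", and a ',' separating constraint
+    // alternatives becomes '|' in the target constraint string.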
+ std::string OutputConstraint(S.getOutputConstraint(i)); + OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, + getTarget(), &OutputConstraintInfos); + + const Expr *OutExpr = S.getOutputExpr(i); + OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); + + std::string GCCReg; + OutputConstraint = + AddVariableConstraints(OutputConstraint, *OutExpr, getTarget(), CGM, S, + Info.earlyClobber(), &GCCReg); + + // Give an error on multiple outputs to same physreg. + if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second) + CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); + + OutputConstraints.push_back(OutputConstraint); + LValue Dest = buildLValue(OutExpr); + + if (!Constraints.empty()) + Constraints += ','; + + // If this is a register output, then make the inline a sm return it + // by-value. If this is a memory result, return the value by-reference. + QualType QTy = OutExpr->getType(); + const bool IsScalarOrAggregate = + hasScalarEvaluationKind(QTy) || hasAggregateEvaluationKind(QTy); + if (!Info.allowsMemory() && IsScalarOrAggregate) { + Constraints += "=" + OutputConstraint; + ResultRegQualTys.push_back(QTy); + ResultRegDests.push_back(Dest); + + bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc"); + ResultRegIsFlagReg.push_back(IsFlagReg); + + mlir::Type Ty = convertTypeForMem(QTy); + const bool RequiresCast = + Info.allowsRegister() && + (getTargetHooks().isScalarizableAsmOperand(*this, Ty) || + isAggregateType(Ty)); + + ResultTruncRegTypes.push_back(Ty); + ResultTypeRequiresCast.push_back(RequiresCast); + + if (RequiresCast) { + unsigned Size = getContext().getTypeSize(QTy); + Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); + } + ResultRegTypes.push_back(Ty); + // If this output is tied to an input, and if the input is larger, then + // we need to set the actual result type of the inline asm node to be the + // same as the input type. + if (Info.hasMatchingInput()) { + unsigned InputNo; + for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { + TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; + if (Input.hasTiedOperand() && Input.getTiedOperand() == i) + break; + } + assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); + + QualType InputTy = S.getInputExpr(InputNo)->getType(); + QualType OutputType = OutExpr->getType(); + + uint64_t InputSize = getContext().getTypeSize(InputTy); + if (getContext().getTypeSize(OutputType) < InputSize) { + // Form the asm to return the value as a larger integer or fp type. + ResultRegTypes.back() = ConvertType(InputTy); + } + } + if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( + *this, OutputConstraint, ResultRegTypes.back())) + ResultRegTypes.back() = AdjTy; + else { + CGM.getDiags().Report(S.getAsmLoc(), + diag::err_asm_invalid_type_in_input) + << OutExpr->getType() << OutputConstraint; + } + + // Update largest vector width for any vector types. + assert(!UnimplementedFeature::asm_vector_type()); + } else { + Address DestAddr = Dest.getAddress(); + + // Matrix types in memory are represented by arrays, but accessed through + // vector pointers, with the alignment specified on the access operation. + // For inline assembly, update pointer arguments to use vector pointers. + // Otherwise there will be a mis-match if the matrix is also an + // input-argument which is represented as vector. 
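+      // e.g. a `float __attribute__((matrix_type(4, 4)))` output lives in
+      // memory as a 16-element array, but asm operands access it as a vector,
+      // hence the element type rewrite below.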
+ if (isa(OutExpr->getType().getCanonicalType())) + DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType())); + + ArgTypes.push_back(DestAddr.getType()); + ArgElemTypes.push_back(DestAddr.getElementType()); + OutArgs.push_back(DestAddr.getPointer()); + Args.push_back(DestAddr.getPointer()); + Constraints += "=*"; + Constraints += OutputConstraint; + ReadOnly = ReadNone = false; + } + + if (Info.isReadWrite()) { + InOutConstraints += ','; + const Expr *InputExpr = S.getOutputExpr(i); + + mlir::Value Arg; + mlir::Type ArgElemType; + std::tie(Arg, ArgElemType) = + buildAsmInputLValue(Info, Dest, InputExpr->getType(), + InOutConstraints, InputExpr->getExprLoc()); + + if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( + *this, OutputConstraint, Arg.getType())) + Arg = builder.createBitcast(Arg, AdjTy); + + // Update largest vector width for any vector types. + assert(!UnimplementedFeature::asm_vector_type()); + + // Only tie earlyclobber physregs. + if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) + InOutConstraints += llvm::utostr(i); + else + InOutConstraints += OutputConstraint; + + InOutArgTypes.push_back(Arg.getType()); + InOutArgElemTypes.push_back(ArgElemType); + InOutArgs.push_back(Arg); + } + } // iterate over output operands + + // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) + // to the return value slot. Only do this when returning in registers. + if (isa(&S)) { + const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); + if (RetAI.isDirect() || RetAI.isExtend()) { + // Make a fake lvalue for the return value slot. + LValue ReturnSlot = makeAddrLValue(ReturnValue, FnRetTy); + CGM.getTargetCIRGenInfo().addReturnRegisterOutputs( + *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, + ResultRegDests, AsmString, S.getNumOutputs()); + SawAsmBlock = true; + } + } + + for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { + const Expr *InputExpr = S.getInputExpr(i); + + TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; + + if (Info.allowsMemory()) + ReadNone = false; + + if (!Constraints.empty()) + Constraints += ','; + + // Simplify the input constraint. + std::string InputConstraint(S.getInputConstraint(i)); + InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), + &OutputConstraintInfos); + + InputConstraint = AddVariableConstraints( + InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), + getTarget(), CGM, S, false /* No EarlyClobber */); + + std::string ReplaceConstraint(InputConstraint); + mlir::Value Arg; + mlir::Type ArgElemType; + std::tie(Arg, ArgElemType) = buildAsmInput(Info, InputExpr, Constraints); + + // If this input argument is tied to a larger output result, extend the + // input to be the same size as the output. The LLVM backend wants to see + // the input and output of a matching constraint be the same size. Note + // that GCC does not define what the top bits are here. We use zext because + // that is usually cheaper, but LLVM IR should really get an anyext someday. + if (Info.hasTiedOperand()) { + unsigned Output = Info.getTiedOperand(); + QualType OutputType = S.getOutputExpr(Output)->getType(); + QualType InputTy = InputExpr->getType(); + + if (getContext().getTypeSize(OutputType) > + getContext().getTypeSize(InputTy)) { + // Use ptrtoint as appropriate so that we can do our extension. 
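+      // e.g. `long long out; int in; asm("" : "=r"(out) : "0"(in));` - the
+      // 32-bit input is extended to the 64-bit output type below (zext, since
+      // GCC leaves the top bits undefined).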
+      if (isa<mlir::cir::PointerType>(Arg.getType()))
+        Arg = builder.createPtrToInt(Arg, UIntPtrTy);
+      mlir::Type OutputTy = convertType(OutputType);
+      if (isa<mlir::cir::IntType>(OutputTy))
+        Arg = builder.createIntCast(Arg, OutputTy);
+      else if (isa<mlir::cir::PointerType>(OutputTy))
+        Arg = builder.createIntCast(Arg, UIntPtrTy);
+      else if (isa<mlir::cir::CIRFPTypeInterface>(OutputTy))
+        Arg = builder.createFloatingCast(Arg, OutputTy);
+    }
+
+    // Deal with the tied operands' constraint code in adjustInlineAsmType.
+    ReplaceConstraint = OutputConstraints[Output];
+  }
+
+  if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType(
+          *this, ReplaceConstraint, Arg.getType()))
+    Arg = builder.createBitcast(Arg, AdjTy);
+  else
+    CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
+        << InputExpr->getType() << InputConstraint;
+
+  // Update largest vector width for any vector types.
+  assert(!UnimplementedFeature::asm_vector_type());
+
+  ArgTypes.push_back(Arg.getType());
+  ArgElemTypes.push_back(ArgElemType);
+  InArgs.push_back(Arg);
+  Args.push_back(Arg);
+  Constraints += InputConstraint;
+} // iterate over input operands
+
+// Append the "input" part of inout constraints.
+for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+  Args.push_back(InOutArgs[i]);
+  ArgTypes.push_back(InOutArgTypes[i]);
+  ArgElemTypes.push_back(InOutArgElemTypes[i]);
+}
+Constraints += InOutConstraints;
+
+bool HasUnwindClobber = false;
+collectClobbers(*this, S, Constraints, HasUnwindClobber, ReadOnly, ReadNone);
+
+mlir::Type ResultType;
+
+if (ResultRegTypes.size() == 1)
+  ResultType = ResultRegTypes[0];
+else if (ResultRegTypes.size() > 1) {
+  auto sname = builder.getUniqueAnonRecordName();
+  ResultType =
+      builder.getCompleteStructTy(ResultRegTypes, sname, false, nullptr);
+}
+
+bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
+std::vector<mlir::Value> RegResults;
+
+llvm::SmallVector<mlir::ValueRange> operands;
+operands.push_back(OutArgs);
+operands.push_back(InArgs);
+operands.push_back(InOutArgs);
+
+auto IA = builder.create<mlir::cir::InlineAsmOp>(
+    getLoc(S.getAsmLoc()), ResultType, operands, AsmString, Constraints,
+    HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr());
+
+if (false /*IsGCCAsmGoto*/) {
+  assert(!UnimplementedFeature::asm_goto());
+} else if (HasUnwindClobber) {
+  assert(!UnimplementedFeature::asm_unwind_clobber());
+} else {
+  assert(!UnimplementedFeature::asm_memory_effects());
+
+  mlir::Value result;
+  if (IA.getNumResults())
+    result = IA.getResult(0);
+
+  llvm::SmallVector<mlir::Attribute> operandAttrs;
+
+  int i = 0;
+  for (auto typ : ArgElemTypes) {
+    if (typ) {
+      auto op = Args[i++];
+      assert(op.getType().isa<mlir::cir::PointerType>() &&
+             "pointer type expected");
+      assert(cast<mlir::cir::PointerType>(op.getType()).getPointee() == typ &&
+             "element type differs from pointee type!");
+
+      operandAttrs.push_back(mlir::UnitAttr::get(builder.getContext()));
+    } else {
+      // We need to add an attribute for every arg since later, during
+      // the lowering to LLVM IR the attributes will be assigned to the
+      // CallInsn argument by index, i.e. we can't skip null type here.
+      operandAttrs.push_back(mlir::Attribute());
+      // Keep the operand index in sync with operandAttrs even when there is
+      // no element type for this argument.
+      i++;
+    }
+  }
+
+  assert(Args.size() == operandAttrs.size() &&
+         "The number of attributes is not even with the number of operands");
+
+  IA.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+
+  if (ResultRegTypes.size() == 1) {
+    RegResults.push_back(result);
+  } else if (ResultRegTypes.size() > 1) {
+    auto alignment = CharUnits::One();
+    auto sname = cast<mlir::cir::StructType>(ResultType).getName();
+    auto dest = buildAlloca(sname, ResultType, getLoc(S.getAsmLoc()),
+                            alignment, false);
+    auto addr = Address(dest, alignment);
+    builder.createStore(getLoc(S.getAsmLoc()), result, addr);
+
+    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+      auto typ = builder.getPointerTo(ResultRegTypes[i]);
+      auto ptr =
+          builder.createGetMember(getLoc(S.getAsmLoc()), typ, dest, "", i);
+      auto tmp =
+          builder.createLoad(getLoc(S.getAsmLoc()), Address(ptr, alignment));
+      RegResults.push_back(tmp);
+    }
+  }
+}
+
+buildAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+               ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+               ResultRegIsFlagReg);
+
+return mlir::success();
+}
diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h
new file mode 100644
index 000000000000..ef9f737a5620
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h
@@ -0,0 +1,95 @@
+//===--- CIRDataLayout.h - CIR Data Layout Information ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Provides an LLVM-like API wrapper to DLTI and MLIR layout queries. This
+// makes it easier to port some of LLVM codegen layout logic to CIR.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H
+#define LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H
+
+#include "UnimplementedFeatureGuarding.h"
+#include "mlir/Dialect/DLTI/DLTI.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+namespace cir {
+
+class CIRDataLayout {
+  bool bigEndian = false;
+
+public:
+  mlir::DataLayout layout;
+
+  CIRDataLayout(mlir::ModuleOp modOp);
+  bool isBigEndian() const { return bigEndian; }
+
+  // `useABI` is `true` if not using preferred alignment.
+  unsigned getAlignment(mlir::Type ty, bool useABI) const {
+    if (llvm::isa<mlir::cir::StructType>(ty)) {
+      auto sTy = ty.cast<mlir::cir::StructType>();
+      if (sTy.getPacked() && useABI)
+        return 1;
+    } else if (llvm::isa<mlir::cir::ArrayType>(ty)) {
+      return getAlignment(ty.cast<mlir::cir::ArrayType>().getEltType(),
+                          useABI);
+    }
+
+    return useABI ? layout.getTypeABIAlignment(ty)
+                  : layout.getTypePreferredAlignment(ty);
+  }
+
+  unsigned getABITypeAlign(mlir::Type ty) const {
+    return getAlignment(ty, true);
+  }
+
+  /// Returns the maximum number of bytes that may be overwritten by
+  /// storing the specified type.
+  ///
+  /// If Ty is a scalable vector type, the scalable property will be set and
+  /// the runtime size will be a positive integer multiple of the base size.
+  ///
+  /// For example, returns 5 for i36 and 10 for x86_fp80.
+  unsigned getTypeStoreSize(mlir::Type Ty) const {
+    // FIXME: this is a bit inaccurate, see DataLayout::getTypeStoreSize for
+    // more information.
+    return llvm::divideCeil(layout.getTypeSizeInBits(Ty), 8);
+  }
+
+  /// Returns the offset in bytes between successive objects of the
+  /// specified type, including alignment padding.
+  ///
+  /// If Ty is a scalable vector type, the scalable property will be set and
+  /// the runtime size will be a positive integer multiple of the base size.
+  ///
+  /// This is the amount that alloca reserves for this type. For example,
+  /// returns 12 or 16 for x86_fp80, depending on alignment.
+  unsigned getTypeAllocSize(mlir::Type Ty) const {
+    // Round up to the next alignment boundary.
+    return llvm::alignTo(getTypeStoreSize(Ty), getABITypeAlign(Ty));
+  }
+
+  unsigned getPointerTypeSizeInBits(mlir::Type Ty) const {
+    assert(Ty.isa<mlir::cir::PointerType>() &&
+           "This should only be called with a pointer type");
+    return layout.getTypeSizeInBits(Ty);
+  }
+
+  unsigned getTypeSizeInBits(mlir::Type Ty) const {
+    return layout.getTypeSizeInBits(Ty);
+  }
+
+  mlir::Type getIntPtrType(mlir::Type Ty) const {
+    assert(Ty.isa<mlir::cir::PointerType>() && "Expected pointer type");
+    auto IntTy = mlir::cir::IntType::get(Ty.getContext(),
+                                         getPointerTypeSizeInBits(Ty), false);
+    return IntTy;
+  }
+};
+
+} // namespace cir
+
+#endif
\ No newline at end of file
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
new file mode 100644
index 000000000000..aa88b4bedf17
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -0,0 +1,1101 @@
+//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code for emitting atomic operations.
+// +//===----------------------------------------------------------------------===// + +#include "Address.h" +#include "CIRDataLayout.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" +#include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CodeGen/CGFunctionInfo.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" + +using namespace cir; +using namespace clang; + +namespace { +class AtomicInfo { + CIRGenFunction &CGF; + QualType AtomicTy; + QualType ValueTy; + uint64_t AtomicSizeInBits; + uint64_t ValueSizeInBits; + CharUnits AtomicAlign; + CharUnits ValueAlign; + TypeEvaluationKind EvaluationKind; + bool UseLibcall; + LValue LVal; + CIRGenBitFieldInfo BFI; + mlir::Location loc; + +public: + AtomicInfo(CIRGenFunction &CGF, LValue &lvalue, mlir::Location l) + : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), + EvaluationKind(TEK_Scalar), UseLibcall(true), loc(l) { + assert(!lvalue.isGlobalReg()); + ASTContext &C = CGF.getContext(); + if (lvalue.isSimple()) { + AtomicTy = lvalue.getType(); + if (auto *ATy = AtomicTy->getAs()) + ValueTy = ATy->getValueType(); + else + ValueTy = AtomicTy; + EvaluationKind = CGF.getEvaluationKind(ValueTy); + + uint64_t ValueAlignInBits; + uint64_t AtomicAlignInBits; + TypeInfo ValueTI = C.getTypeInfo(ValueTy); + ValueSizeInBits = ValueTI.Width; + ValueAlignInBits = ValueTI.Align; + + TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); + AtomicSizeInBits = AtomicTI.Width; + AtomicAlignInBits = AtomicTI.Align; + + assert(ValueSizeInBits <= AtomicSizeInBits); + assert(ValueAlignInBits <= AtomicAlignInBits); + + AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); + ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); + if (lvalue.getAlignment().isZero()) + lvalue.setAlignment(AtomicAlign); + + LVal = lvalue; + } else if (lvalue.isBitField()) { + llvm_unreachable("NYI"); + } else if (lvalue.isVectorElt()) { + ValueTy = lvalue.getType()->castAs()->getElementType(); + ValueSizeInBits = C.getTypeSize(ValueTy); + AtomicTy = lvalue.getType(); + AtomicSizeInBits = C.getTypeSize(AtomicTy); + AtomicAlign = ValueAlign = lvalue.getAlignment(); + LVal = lvalue; + } else { + llvm_unreachable("NYI"); + } + UseLibcall = !C.getTargetInfo().hasBuiltinAtomic( + AtomicSizeInBits, C.toBits(lvalue.getAlignment())); + } + + QualType getAtomicType() const { return AtomicTy; } + QualType getValueType() const { return ValueTy; } + CharUnits getAtomicAlignment() const { return AtomicAlign; } + uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; } + uint64_t getValueSizeInBits() const { return ValueSizeInBits; } + TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } + bool shouldUseLibcall() const { return UseLibcall; } + const LValue &getAtomicLValue() const { return LVal; } + mlir::Value getAtomicPointer() const { + if (LVal.isSimple()) + return LVal.getPointer(); + else if (LVal.isBitField()) + return LVal.getBitFieldPointer(); + else if (LVal.isVectorElt()) + return LVal.getVectorPointer(); + assert(LVal.isExtVectorElt()); + // TODO(cir): return LVal.getExtVectorPointer(); + llvm_unreachable("NYI"); + } + Address 
getAtomicAddress() const { + mlir::Type ElTy; + if (LVal.isSimple()) + ElTy = LVal.getAddress().getElementType(); + else if (LVal.isBitField()) + ElTy = LVal.getBitFieldAddress().getElementType(); + else if (LVal.isVectorElt()) + ElTy = LVal.getVectorAddress().getElementType(); + else // TODO(cir): ElTy = LVal.getExtVectorAddress().getElementType(); + llvm_unreachable("NYI"); + return Address(getAtomicPointer(), ElTy, getAtomicAlignment()); + } + + Address getAtomicAddressAsAtomicIntPointer() const { + return castToAtomicIntPointer(getAtomicAddress()); + } + + /// Is the atomic size larger than the underlying value type? + /// + /// Note that the absence of padding does not mean that atomic + /// objects are completely interchangeable with non-atomic + /// objects: we might have promoted the alignment of a type + /// without making it bigger. + bool hasPadding() const { return (ValueSizeInBits != AtomicSizeInBits); } + + bool emitMemSetZeroIfNecessary() const; + + mlir::Value getAtomicSizeValue() const { llvm_unreachable("NYI"); } + + /// Cast the given pointer to an integer pointer suitable for atomic + /// operations if the source. + Address castToAtomicIntPointer(Address Addr) const; + + /// If Addr is compatible with the iN that will be used for an atomic + /// operation, bitcast it. Otherwise, create a temporary that is suitable + /// and copy the value across. + Address convertToAtomicIntPointer(Address Addr) const; + + /// Turn an atomic-layout object into an r-value. + RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot, + SourceLocation loc, bool AsValue) const; + + /// Converts a rvalue to integer value. + mlir::Value convertRValueToInt(RValue RVal) const; + + RValue ConvertIntToValueOrAtomic(mlir::Value IntVal, AggValueSlot ResultSlot, + SourceLocation Loc, bool AsValue) const; + + /// Copy an atomic r-value into atomic-layout memory. + void emitCopyIntoMemory(RValue rvalue) const; + + /// Project an l-value down to the value field. + LValue projectValue() const { + assert(LVal.isSimple()); + Address addr = getAtomicAddress(); + if (hasPadding()) + llvm_unreachable("NYI"); + + return LValue::makeAddr(addr, getValueType(), CGF.getContext(), + LVal.getBaseInfo()); + } + + /// Emits atomic load. + /// \returns Loaded value. + RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, + bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile); + + /// Emits atomic compare-and-exchange sequence. + /// \param Expected Expected value. + /// \param Desired Desired value. + /// \param Success Atomic ordering for success operation. + /// \param Failure Atomic ordering for failed operation. + /// \param IsWeak true if atomic operation is weak, false otherwise. + /// \returns Pair of values: previous value from storage (value type) and + /// boolean flag (i1 type) with true if success and false otherwise. + std::pair + EmitAtomicCompareExchange(RValue Expected, RValue Desired, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent, + bool IsWeak = false); + + /// Emits atomic update. + /// \param AO Atomic ordering. + /// \param UpdateOp Update operation for the current lvalue. + void EmitAtomicUpdate(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emits atomic update. + /// \param AO Atomic ordering. 
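+  /// \param UpdateRVal Value the atomic l-value gets updated with.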
+ void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + + /// Materialize an atomic r-value in atomic-layout memory. + Address materializeRValue(RValue rvalue) const; + + /// Creates temp alloca for intermediate operations on atomic value. + Address CreateTempAlloca() const; + +private: + bool requiresMemSetZero(llvm::Type *type) const; + + /// Emits atomic load as a libcall. + void EmitAtomicLoadLibcall(mlir::Value AddForLoaded, llvm::AtomicOrdering AO, + bool IsVolatile); + /// Emits atomic load as LLVM instruction. + mlir::Value EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile); + /// Emits atomic compare-and-exchange op as a libcall. + mlir::Value EmitAtomicCompareExchangeLibcall( + mlir::Value ExpectedAddr, mlir::Value DesiredAddr, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent); + /// Emits atomic compare-and-exchange op as LLVM instruction. + std::pair + EmitAtomicCompareExchangeOp(mlir::Value ExpectedVal, mlir::Value DesiredVal, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent, + bool IsWeak = false); + /// Emit atomic update as libcalls. + void + EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emit atomic update as LLVM instructions. + void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emit atomic update as libcalls. + void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + /// Emit atomic update as LLVM instructions. + void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal, + bool IsVolatile); +}; +} // namespace + +// This function emits any expression (scalar, complex, or aggregate) +// into a temporary alloca. +static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) { + Address DeclPtr = CGF.CreateMemTemp( + E->getType(), CGF.getLoc(E->getSourceRange()), ".atomictmp"); + CGF.buildAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); + return DeclPtr; +} + +Address AtomicInfo::castToAtomicIntPointer(Address addr) const { + auto intTy = addr.getElementType().dyn_cast(); + // Don't bother with int casts if the integer size is the same. + if (intTy && intTy.getWidth() == AtomicSizeInBits) + return addr; + auto ty = CGF.getBuilder().getUIntNTy(AtomicSizeInBits); + return addr.withElementType(ty); +} + +Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const { + auto Ty = Addr.getElementType(); + uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty); + if (SourceSizeInBits != AtomicSizeInBits) { + llvm_unreachable("NYI"); + } + + return castToAtomicIntPointer(Addr); +} + +Address AtomicInfo::CreateTempAlloca() const { + Address TempAlloca = CGF.CreateMemTemp( + (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy + : AtomicTy, + getAtomicAlignment(), loc, "atomic-temp"); + // Cast to pointer to value type for bitfields. + if (LVal.isBitField()) { + llvm_unreachable("NYI"); + } + return TempAlloca; +} + +// If the value comes from a ConstOp + IntAttr, retrieve and skip a series +// of casts if necessary. 
+//
+// FIXME(cir): figure out warning issue and move this to CIRBaseBuilder.h
+static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) {
+  mlir::Operation *op = v.getDefiningOp();
+  mlir::cir::IntAttr constVal;
+  while (auto c = dyn_cast<mlir::cir::CastOp>(op))
+    op = c.getOperand().getDefiningOp();
+  if (auto c = dyn_cast<mlir::cir::ConstantOp>(op)) {
+    if (c.getType().isa<mlir::cir::IntType>())
+      constVal = c.getValue().cast<mlir::cir::IntAttr>();
+  }
+  return constVal;
+}
+
+static bool isCstWeak(mlir::Value weakVal, uint64_t &val) {
+  auto intAttr = getConstOpIntAttr(weakVal);
+  if (!intAttr)
+    return false;
+  val = intAttr.getUInt();
+  return true;
+}
+
+static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
+                               Address Dest, Address Ptr, Address Val1,
+                               Address Val2, uint64_t Size,
+                               mlir::cir::MemOrder SuccessOrder,
+                               mlir::cir::MemOrder FailureOrder,
+                               llvm::SyncScope::ID Scope) {
+  auto &builder = CGF.getBuilder();
+  auto loc = CGF.getLoc(E->getSourceRange());
+  auto Expected = builder.createLoad(loc, Val1);
+  auto Desired = builder.createLoad(loc, Val2);
+  auto boolTy = builder.getBoolTy();
+  auto cmpxchg = builder.create<mlir::cir::AtomicCmpXchg>(
+      loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected, Desired,
+      SuccessOrder, FailureOrder);
+  cmpxchg.setIsVolatile(E->isVolatile());
+  cmpxchg.setWeak(IsWeak);
+
+  auto cmp = builder.createNot(cmpxchg.getCmp());
+  builder.create<mlir::cir::IfOp>(
+      loc, cmp, false, [&](mlir::OpBuilder &, mlir::Location) {
+        auto ptrTy = Val1.getPointer().getType().cast<mlir::cir::PointerType>();
+        if (Val1.getElementType() != ptrTy.getPointee()) {
+          Val1 = Val1.withPointer(builder.createPtrBitcast(
+              Val1.getPointer(), Val1.getElementType()));
+        }
+        builder.createStore(loc, cmpxchg.getOld(), Val1);
+        builder.createYield(loc);
+      });
+
+  // Update the memory at Dest with Cmp's value.
+  CGF.buildStoreOfScalar(cmpxchg.getCmp(),
+                         CGF.makeAddrLValue(Dest, E->getType()));
+}
+
+/// Given an ordering required on success, emit all possible cmpxchg
+/// instructions to cope with the provided (but possibly only dynamically
+/// known) FailureOrder.
+static void buildAtomicCmpXchgFailureSet(
+    CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+    Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size,
+    mlir::cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) {
+
+  mlir::cir::MemOrder FailureOrder;
+  if (auto ordAttr = getConstOpIntAttr(FailureOrderVal)) {
+    // We should not ever get to a case where the ordering isn't a valid CABI
+    // value, but it's hard to enforce that in general.
+    auto ord = ordAttr.getUInt();
+    if (!mlir::cir::isValidCIRAtomicOrderingCABI(ord)) {
+      FailureOrder = mlir::cir::MemOrder::Relaxed;
+    } else {
+      switch ((mlir::cir::MemOrder)ord) {
+      case mlir::cir::MemOrder::Relaxed:
+        // 31.7.2.18: "The failure argument shall not be memory_order_release
+        // nor memory_order_acq_rel". Fall back to monotonic.
+      case mlir::cir::MemOrder::Release:
+      case mlir::cir::MemOrder::AcquireRelease:
+        FailureOrder = mlir::cir::MemOrder::Relaxed;
+        break;
+      case mlir::cir::MemOrder::Consume:
+      case mlir::cir::MemOrder::Acquire:
+        FailureOrder = mlir::cir::MemOrder::Acquire;
+        break;
+      case mlir::cir::MemOrder::SequentiallyConsistent:
+        FailureOrder = mlir::cir::MemOrder::SequentiallyConsistent;
+        break;
+      }
+    }
+    // Prior to c++17, "the failure argument shall be no stronger than the
+    // success argument". This condition has been lifted and the only
+    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+    // language version checks.
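+    // e.g. a seq_cst success order paired with a (now invalid) release
+    // failure order is emitted as seq_cst/relaxed rather than rejected here.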
+ buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, FailureOrder, Scope); + return; + } + + llvm_unreachable("NYI"); +} + +static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, mlir::cir::MemOrder Order, + uint8_t Scope) { + assert(!UnimplementedFeature::syncScopeID()); + StringRef Op; + + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); + auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); + mlir::cir::AtomicFetchKindAttr fetchAttr; + bool fetchFirst = true; + + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled!"); + + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + buildAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); + return; + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + llvm_unreachable("NYI"); + return; + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: { + uint64_t weakVal; + if (isCstWeak(IsWeak, weakVal)) { + buildAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); + } else { + llvm_unreachable("NYI"); + } + return; + } + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__scoped_atomic_load_n: + case AtomicExpr::AO__scoped_atomic_load: { + auto *load = builder.createLoad(loc, Ptr).getDefiningOp(); + // FIXME(cir): add scope information. + assert(!UnimplementedFeature::syncScopeID()); + load->setAttr("mem_order", orderAttr); + if (E->isVolatile()) + load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + + // TODO(cir): this logic should be part of createStore, but doing so + // currently breaks CodeGen/union.cpp and CodeGen/union.cpp. + auto ptrTy = Dest.getPointer().getType().cast(); + if (Dest.getElementType() != ptrTy.getPointee()) { + Dest = Dest.withPointer( + builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); + } + builder.createStore(loc, load->getResult(0), Dest); + return; + } + + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__scoped_atomic_store: + case AtomicExpr::AO__scoped_atomic_store_n: { + auto loadVal1 = builder.createLoad(loc, Val1); + // FIXME(cir): add scope information. 
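+    // An atomic store is thus a regular cir.store carrying an extra
+    // mem_order attribute (plus is_volatile when needed), roughly:
+    //   cir.store atomic(seq_cst) %val, %ptr : !s32i, !cir.ptr<!s32i>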
+ assert(!UnimplementedFeature::syncScopeID()); + builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), orderAttr); + return; + } + + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange_n: + case AtomicExpr::AO__scoped_atomic_exchange: + Op = mlir::cir::AtomicXchg::getOperationName(); + break; + + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_add_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_add: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Add); + break; + + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Sub); + break; + + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_min: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Min); + break; + + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_max: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Max); + break; + + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_and_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_and: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::And); + break; + + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_or: + Op = mlir::cir::AtomicFetch::getOperationName(); + 
fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Or); + break; + + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Xor); + break; + + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + fetchFirst = false; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Nand); + break; + } + + assert(Op.size() && "expected operation name to build"); + auto LoadVal1 = builder.createLoad(loc, Val1); + + SmallVector atomicOperands = {Ptr.getPointer(), LoadVal1}; + SmallVector atomicResTys = {LoadVal1.getType()}; + auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands, + atomicResTys, {}); + + if (fetchAttr) + RMWI->setAttr("binop", fetchAttr); + RMWI->setAttr("mem_order", orderAttr); + if (E->isVolatile()) + RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + if (fetchFirst && Op == mlir::cir::AtomicFetch::getOperationName()) + RMWI->setAttr("fetch_first", mlir::UnitAttr::get(builder.getContext())); + + auto Result = RMWI->getResult(0); + + // TODO(cir): this logic should be part of createStore, but doing so currently + // breaks CodeGen/union.cpp and CodeGen/union.cpp. + auto ptrTy = Dest.getPointer().getType().cast(); + if (Dest.getElementType() != ptrTy.getPointee()) { + Dest = Dest.withPointer( + builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); + } + builder.createStore(loc, Result, Dest); +} + +static RValue buildAtomicLibcall(CIRGenFunction &CGF, StringRef fnName, + QualType resultType, CallArgList &args) { + [[maybe_unused]] const CIRGenFunctionInfo &fnInfo = + CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args); + [[maybe_unused]] auto fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo); + llvm_unreachable("NYI"); +} + +static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, mlir::cir::MemOrder Order, + mlir::Value Scope) { + auto ScopeModel = Expr->getScopeModel(); + + // LLVM atomic instructions always have synch scope. If clang atomic + // expression has no scope operand, use default LLVM synch scope. + if (!ScopeModel) { + assert(!UnimplementedFeature::syncScopeID()); + buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, + Order, /*FIXME(cir): LLVM default scope*/ 1); + return; + } + + // Handle constant scope. + if (getConstOpIntAttr(Scope)) { + assert(!UnimplementedFeature::syncScopeID()); + llvm_unreachable("NYI"); + return; + } + + // Handle non-constant scope. 
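+  // e.g. __opencl_atomic_load with a scope argument only known at runtime
+  // would need a switch over all possible sync scopes here.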
+ llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { + QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); + QualType MemTy = AtomicTy; + if (const AtomicType *AT = AtomicTy->getAs()) + MemTy = AT->getValueType(); + mlir::Value IsWeak = nullptr, OrderFail = nullptr; + + Address Val1 = Address::invalid(); + Address Val2 = Address::invalid(); + Address Dest = Address::invalid(); + Address Ptr = buildPointerWithAlignment(E->getPtr()); + + if (E->getOp() == AtomicExpr::AO__c11_atomic_init || + E->getOp() == AtomicExpr::AO__opencl_atomic_init) { + llvm_unreachable("NYI"); + } + + auto TInfo = getContext().getTypeInfoInChars(AtomicTy); + uint64_t Size = TInfo.Width.getQuantity(); + unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth(); + + CharUnits MaxInlineWidth = + getContext().toCharUnitsFromBits(MaxInlineWidthInBits); + DiagnosticsEngine &Diags = CGM.getDiags(); + bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; + bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; + if (Misaligned) { + Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) + << (int)TInfo.Width.getQuantity() + << (int)Ptr.getAlignment().getQuantity(); + } + if (Oversized) { + Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized) + << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity(); + } + + auto Order = buildScalarExpr(E->getOrder()); + auto Scope = E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; + bool ShouldCastToIntPtrTy = true; + + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled above with EmitAtomicInit!"); + + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__scoped_atomic_load_n: + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + break; + + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__scoped_atomic_load: + Dest = buildPointerWithAlignment(E->getVal1()); + break; + + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__scoped_atomic_store: + Val1 = buildPointerWithAlignment(E->getVal1()); + break; + + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange: + Val1 = buildPointerWithAlignment(E->getVal1()); + Dest = buildPointerWithAlignment(E->getVal2()); + break; + + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: + Val1 = buildPointerWithAlignment(E->getVal1()); + if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange || + E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) + Val2 = buildPointerWithAlignment(E->getVal2()); + else + Val2 = buildValToTemp(*this, E->getVal2()); + OrderFail = buildScalarExpr(E->getOrderFail()); + if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || + E->getOp() == AtomicExpr::AO__atomic_compare_exchange || + E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n || + E->getOp() == 
AtomicExpr::AO__scoped_atomic_compare_exchange) { + IsWeak = buildScalarExpr(E->getWeak()); + } + break; + + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + ShouldCastToIntPtrTy = !MemTy->isFloatingType(); + [[fallthrough]]; + + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_store_n: + case AtomicExpr::AO__scoped_atomic_exchange_n: + Val1 = buildValToTemp(*this, E->getVal1()); + break; + } + + QualType RValTy = E->getType().getUnqualifiedType(); + + // The inlined atomics only function on iN types, where N is a power of 2. We + // need to make sure (via temporaries if necessary) that all incoming values + // are compatible. 
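+  // e.g. an _Atomic(float) is accessed through a !cir.ptr<!cir.int<u, 32>>,
+  // so the atomic operation itself is emitted on a 32-bit integer.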
+ LValue AtomicVal = makeAddrLValue(Ptr, AtomicTy); + AtomicInfo Atomics(*this, AtomicVal, getLoc(E->getSourceRange())); + + if (ShouldCastToIntPtrTy) { + Ptr = Atomics.castToAtomicIntPointer(Ptr); + if (Val1.isValid()) + Val1 = Atomics.convertToAtomicIntPointer(Val1); + if (Val2.isValid()) + Val2 = Atomics.convertToAtomicIntPointer(Val2); + } + if (Dest.isValid()) { + if (ShouldCastToIntPtrTy) + Dest = Atomics.castToAtomicIntPointer(Dest); + } else if (E->isCmpXChg()) + Dest = CreateMemTemp(RValTy, getLoc(E->getSourceRange()), "cmpxchg.bool"); + else if (!RValTy->isVoidType()) { + Dest = Atomics.CreateTempAlloca(); + if (ShouldCastToIntPtrTy) + Dest = Atomics.castToAtomicIntPointer(Dest); + } + + bool PowerOf2Size = (Size & (Size - 1)) == 0; + bool UseLibcall = !PowerOf2Size || (Size > 16); + + // For atomics larger than 16 bytes, emit a libcall from the frontend. This + // avoids the overhead of dealing with excessively-large value types in IR. + // Non-power-of-2 values also lower to libcall here, as they are not currently + // permitted in IR instructions (although that constraint could be relaxed in + // the future). For other cases where a libcall is required on a given + // platform, we let the backend handle it (this includes handling for all of + // the size-optimized libcall variants, which are only valid up to 16 bytes.) + // + // See: https://llvm.org/docs/Atomics.html#libcalls-atomic + if (UseLibcall) { + CallArgList Args; + // For non-optimized library calls, the size is the first parameter. + Args.add(RValue::get(builder.getConstInt(getLoc(E->getSourceRange()), + SizeTy, Size)), + getContext().getSizeType()); + + // The atomic address is the second parameter. + // The OpenCL atomic library functions only accept pointer arguments to + // generic address space. + auto CastToGenericAddrSpace = [&](mlir::Value V, QualType PT) { + if (!E->isOpenCL()) + return V; + llvm_unreachable("NYI"); + }; + + Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(), + E->getPtr()->getType())), + getContext().VoidPtrTy); + + // The next 1-3 parameters are op-dependent. + std::string LibCallName; + QualType RetTy; + bool HaveRetTy = false; + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled!"); + + // There is only one libcall for compare an exchange, because there is no + // optimisation benefit possible from a libcall version of a weak compare + // and exchange. 
+ // bool __atomic_compare_exchange(size_t size, void *mem, void *expected, + // void *desired, int success, int failure) + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: + LibCallName = "__atomic_compare_exchange"; + llvm_unreachable("NYI"); + break; + // void __atomic_exchange(size_t size, void *mem, void *val, void *return, + // int order) + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange_n: + LibCallName = "__atomic_exchange"; + llvm_unreachable("NYI"); + break; + // void __atomic_store(size_t size, void *mem, void *val, int order) + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__scoped_atomic_store: + case AtomicExpr::AO__scoped_atomic_store_n: + LibCallName = "__atomic_store"; + llvm_unreachable("NYI"); + break; + // void __atomic_load(size_t size, void *mem, void *return, int order) + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__scoped_atomic_load: + case AtomicExpr::AO__scoped_atomic_load_n: + LibCallName = "__atomic_load"; + break; + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_add_fetch: + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_add: + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_and_fetch: + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_and: + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_or: + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__c11_atomic_fetch_xor: + case 
AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_min_fetch: + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_max_fetch: + llvm_unreachable("Integral atomic operations always become atomicrmw!"); + } + + if (E->isOpenCL()) { + LibCallName = + std::string("__opencl") + StringRef(LibCallName).drop_front(1).str(); + } + // By default, assume we return a value of the atomic type. + if (!HaveRetTy) { + llvm_unreachable("NYI"); + } + // Order is always the last parameter. + Args.add(RValue::get(Order), getContext().IntTy); + if (E->isOpenCL()) { + llvm_unreachable("NYI"); + } + + [[maybe_unused]] RValue Res = + buildAtomicLibcall(*this, LibCallName, RetTy, Args); + // The value is returned directly from the libcall. + if (E->isCmpXChg()) { + llvm_unreachable("NYI"); + } + + if (RValTy->isVoidType()) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + } + + [[maybe_unused]] bool IsStore = + E->getOp() == AtomicExpr::AO__c11_atomic_store || + E->getOp() == AtomicExpr::AO__opencl_atomic_store || + E->getOp() == AtomicExpr::AO__hip_atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_store || + E->getOp() == AtomicExpr::AO__scoped_atomic_store_n; + [[maybe_unused]] bool IsLoad = + E->getOp() == AtomicExpr::AO__c11_atomic_load || + E->getOp() == AtomicExpr::AO__opencl_atomic_load || + E->getOp() == AtomicExpr::AO__hip_atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_load || + E->getOp() == AtomicExpr::AO__scoped_atomic_load_n; + + if (auto ordAttr = getConstOpIntAttr(Order)) { + // We should not ever get to a case where the ordering isn't a valid CABI + // value, but it's hard to enforce that in general. 
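+    // e.g. __atomic_load_n(p, 42) reaches this point with an out-of-range
+    // ordering; such values fall through the checks below and no atomic
+    // operation is emitted for them.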
+ auto ord = ordAttr.getUInt(); + if (mlir::cir::isValidCIRAtomicOrderingCABI(ord)) { + switch ((mlir::cir::MemOrder)ord) { + case mlir::cir::MemOrder::Relaxed: + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::Relaxed, Scope); + break; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + if (IsStore) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::Acquire, Scope); + break; + case mlir::cir::MemOrder::Release: + if (IsLoad) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::Release, Scope); + break; + case mlir::cir::MemOrder::AcquireRelease: + if (IsLoad || IsStore) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::AcquireRelease, Scope); + break; + case mlir::cir::MemOrder::SequentiallyConsistent: + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::SequentiallyConsistent, Scope); + break; + } + } + if (RValTy->isVoidType()) + return RValue::get(nullptr); + + return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), + RValTy, E->getExprLoc()); + } + + // Long case, when Order isn't obviously constant. + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h new file mode 100644 index 000000000000..40f57796f59a --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -0,0 +1,879 @@ +//===-- CIRGenBuilder.h - CIRBuilder implementation ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H +#define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H + +#include "Address.h" +#include "CIRDataLayout.h" +#include "CIRGenRecordLayout.h" +#include "CIRGenTypeCache.h" +#include "UnimplementedFeatureGuarding.h" + +#include "clang/AST/Decl.h" +#include "clang/AST/Type.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/FPEnv.h" + +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" +#include "mlir/IR/Types.h" +#include "llvm/ADT/APSInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/FloatingPointMode.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include +#include + +namespace cir { + +class CIRGenFunction; + +class CIRGenBuilderTy : public CIRBaseBuilderTy { + const CIRGenTypeCache &typeCache; + bool IsFPConstrained = false; + fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; + llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; + + llvm::StringMap GlobalsVersioning; + llvm::StringSet<> anonRecordNames; + +public: + CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) + : CIRBaseBuilderTy(C), typeCache(tc) {} + + std::string getUniqueAnonRecordName() { + std::string name = "anon." + std::to_string(anonRecordNames.size()); + anonRecordNames.insert(name); + return name; + } + + // + // Floating point specific helpers + // ------------------------------- + // + + /// Enable/Disable use of constrained floating point math. When enabled the + /// CreateF() calls instead create constrained floating point intrinsic + /// calls. Fast math flags are unaffected by this setting. 
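+  /// (Illustrative: in classic LLVM codegen this is what turns e.g. an fadd
+  /// into the llvm.experimental.constrained.fadd intrinsic; CIR does not
+  /// model the constrained forms yet, hence the NYI guards below.)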
+ void setIsFPConstrained(bool IsCon) { + if (IsCon) + llvm_unreachable("Constrained FP NYI"); + IsFPConstrained = IsCon; + } + + /// Query for the use of constrained floating point math + bool getIsFPConstrained() { + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + return IsFPConstrained; + } + + /// Set the exception handling to be used with constrained floating point + void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { +#ifndef NDEBUG + std::optional ExceptStr = + convertExceptionBehaviorToStr(NewExcept); + assert(ExceptStr && "Garbage strict exception behavior!"); +#endif + DefaultConstrainedExcept = NewExcept; + } + + /// Set the rounding mode handling to be used with constrained floating point + void setDefaultConstrainedRounding(llvm::RoundingMode NewRounding) { +#ifndef NDEBUG + std::optional RoundingStr = + convertRoundingModeToStr(NewRounding); + assert(RoundingStr && "Garbage strict rounding mode!"); +#endif + DefaultConstrainedRounding = NewRounding; + } + + /// Get the exception handling used with constrained floating point + fp::ExceptionBehavior getDefaultConstrainedExcept() { + return DefaultConstrainedExcept; + } + + /// Get the rounding mode handling used with constrained floating point + llvm::RoundingMode getDefaultConstrainedRounding() { + return DefaultConstrainedRounding; + } + + // + // Attribute helpers + // ----------------- + // + + /// Get constant address of a global variable as an MLIR attribute. + /// This wrapper infers the attribute type through the global op. + mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { + auto type = getPointerTo(globalOp.getSymType()); + return getGlobalViewAttr(type, globalOp, indices); + } + + /// Get constant address of a global variable as an MLIR attribute. + mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::PointerType type, + mlir::cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { + auto symbol = mlir::FlatSymbolRefAttr::get(globalOp.getSymNameAttr()); + return mlir::cir::GlobalViewAttr::get(type, symbol, indices); + } + + mlir::TypedAttr getZeroAttr(mlir::Type t) { + return mlir::cir::ZeroAttr::get(getContext(), t); + } + + mlir::cir::BoolAttr getCIRBoolAttr(bool state) { + return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); + } + + mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { + assert(t.isa() && "expected cir.ptr"); + return mlir::cir::ConstPtrAttr::get(getContext(), t, 0); + } + + mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, + unsigned size = 0) { + unsigned finalSize = size ? size : str.size(); + + // If the string is full of null bytes, emit a #cir.zero rather than + // a #cir.const_array. + if (str.count('\0') == str.size()) { + auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + return getZeroAttr(arrayTy); + } + + auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + return getConstArray(mlir::StringAttr::get(str, arrayTy), arrayTy); + } + + mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, + mlir::cir::ArrayType arrayTy) { + return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); + } + + mlir::Attribute getConstStructOrZeroAttr(mlir::ArrayAttr arrayAttr, + bool packed = false, + mlir::Type type = {}) { + llvm::SmallVector members; + auto structTy = type.dyn_cast(); + assert(structTy && "expected cir.struct"); + + // Collect members and check if they are all zero. 
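+    // (If every member turns out to be zero/null, the initializer folds to a
+    // single #cir.zero -- the CIR analogue of LLVM's zeroinitializer.)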
+ bool isZero = true; + for (auto &attr : arrayAttr) { + const auto typedAttr = attr.dyn_cast(); + members.push_back(typedAttr.getType()); + isZero &= isNullValue(typedAttr); + } + + // Struct type not specified: create anon struct type from members. + if (!structTy) + structTy = getType(members, packed, + mlir::cir::StructType::Struct, + /*ast=*/nullptr); + + // Return zero or anonymous constant struct. + if (isZero) + return mlir::cir::ZeroAttr::get(getContext(), structTy); + return mlir::cir::ConstStructAttr::get(structTy, arrayAttr); + } + + mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, + bool packed = false, + mlir::Type ty = {}) { + llvm::SmallVector members; + for (auto &f : arrayAttr) { + auto ta = f.dyn_cast(); + assert(ta && "expected typed attribute member"); + members.push_back(ta.getType()); + } + + if (!ty) + ty = getAnonStructTy(members, packed); + + auto sTy = ty.dyn_cast(); + assert(sTy && "expected struct type"); + return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); + } + + mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + auto anonStruct = getAnonConstStruct(fieldsAttr); + return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); + } + + mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoStrongOrdering( + const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >) { + return mlir::cir::CmpThreeWayInfoAttr::get( + getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue()); + } + + mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoPartialOrdering( + const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >, + const llvm::APSInt &unordered) { + return mlir::cir::CmpThreeWayInfoAttr::get( + getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue(), + unordered.getSExtValue()); + } + + mlir::cir::DataMemberAttr getDataMemberAttr(mlir::cir::DataMemberType ty, + size_t memberIndex) { + return mlir::cir::DataMemberAttr::get(getContext(), ty, memberIndex); + } + + mlir::cir::DataMemberAttr + getNullDataMemberAttr(mlir::cir::DataMemberType ty) { + return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); + } + + mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { + if (ty.isa()) + return mlir::cir::IntAttr::get(ty, 0); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); + if (auto arrTy = ty.dyn_cast()) + return getZeroAttr(arrTy); + if (auto ptrTy = ty.dyn_cast()) + return getConstPtrAttr(ptrTy, 0); + if (auto structTy = ty.dyn_cast()) + return getZeroAttr(structTy); + if (ty.isa()) { + return getCIRBoolAttr(false); + } + llvm_unreachable("Zero initializer for given type is NYI"); + } + + // TODO(cir): Once we have CIR float types, replace this by something like a + // NullableValueInterface to allow for type-independent queries. + bool isNullValue(mlir::Attribute attr) const { + if (attr.isa()) + return true; + if (const auto ptrVal = attr.dyn_cast()) + return ptrVal.isNullValue(); + + if (attr.isa()) + return false; + + // TODO(cir): introduce char type in CIR and check for that instead. 
+ if (const auto intVal = attr.dyn_cast()) + return intVal.isNullValue(); + + if (const auto boolVal = attr.dyn_cast()) + return !boolVal.getValue(); + + if (auto fpAttr = attr.dyn_cast()) { + auto fpVal = fpAttr.getValue(); + bool ignored; + llvm::APFloat FV(+0.0); + FV.convert(fpVal.getSemantics(), llvm::APFloat::rmNearestTiesToEven, + &ignored); + return FV.bitwiseIsEqual(fpVal); + } + + if (const auto structVal = attr.dyn_cast()) { + for (const auto elt : structVal.getMembers()) { + // FIXME(cir): the struct's ID should not be considered a member. + if (elt.isa()) + continue; + if (!isNullValue(elt)) + return false; + } + return true; + } + + if (const auto arrayVal = attr.dyn_cast()) { + if (arrayVal.getElts().isa()) + return false; + for (const auto elt : arrayVal.getElts().cast()) { + if (!isNullValue(elt)) + return false; + } + return true; + } + + llvm_unreachable("NYI"); + } + + // + // Type helpers + // ------------ + // + mlir::cir::IntType getUIntNTy(int N) { + switch (N) { + case 8: + return getUInt8Ty(); + case 16: + return getUInt16Ty(); + case 32: + return getUInt32Ty(); + case 64: + return getUInt64Ty(); + default: + return mlir::cir::IntType::get(getContext(), N, false); + } + } + + mlir::cir::IntType getSIntNTy(int N) { + switch (N) { + case 8: + return getSInt8Ty(); + case 16: + return getSInt16Ty(); + case 32: + return getSInt32Ty(); + case 64: + return getSInt64Ty(); + default: + return mlir::cir::IntType::get(getContext(), N, true); + } + } + + mlir::cir::VoidType getVoidTy() { return typeCache.VoidTy; } + + mlir::cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } + mlir::cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } + mlir::cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } + mlir::cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } + + mlir::cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } + mlir::cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } + mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } + mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } + + bool isInt8Ty(mlir::Type i) { + return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; + } + bool isInt16Ty(mlir::Type i) { + return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty; + } + bool isInt32Ty(mlir::Type i) { + return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty; + } + bool isInt64Ty(mlir::Type i) { + return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; + } + bool isInt(mlir::Type i) { return i.isa(); } + + mlir::cir::LongDoubleType + getLongDoubleTy(const llvm::fltSemantics &format) const { + if (&format == &llvm::APFloat::IEEEdouble()) + return mlir::cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); + if (&format == &llvm::APFloat::x87DoubleExtended()) + return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); + if (&format == &llvm::APFloat::IEEEquad()) + llvm_unreachable("NYI"); + if (&format == &llvm::APFloat::PPCDoubleDouble()) + llvm_unreachable("NYI"); + + llvm_unreachable("unsupported long double format"); + } + + mlir::Type getVirtualFnPtrType(bool isVarArg = false) { + // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special + // type so it's a bit more clear and C++ idiomatic. 
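+    // (The u32 result type below is just a placeholder; the point is the
+    // shape -- a pointer to a pointer to a function, i.e. a slot in the
+    // vtable's array of function pointers.)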
+    auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg);
+    assert(!UnimplementedFeature::isVarArg());
+    return getPointerTo(getPointerTo(fnTy));
+  }
+
+  mlir::cir::FuncType getFuncType(llvm::ArrayRef<mlir::Type> params,
+                                  mlir::Type retTy, bool isVarArg = false) {
+    return mlir::cir::FuncType::get(params, retTy, isVarArg);
+  }
+
+  // Fetch the type representing a pointer to unsigned int values.
+  mlir::cir::PointerType getUInt8PtrTy(unsigned AddrSpace = 0) {
+    return typeCache.UInt8PtrTy;
+  }
+  mlir::cir::PointerType getUInt32PtrTy(unsigned AddrSpace = 0) {
+    return mlir::cir::PointerType::get(getContext(), typeCache.UInt32Ty);
+  }
+
+  /// Get a CIR anonymous struct type.
+  mlir::cir::StructType
+  getAnonStructTy(llvm::ArrayRef<mlir::Type> members, bool packed = false,
+                  const clang::RecordDecl *ast = nullptr) {
+    mlir::cir::ASTRecordDeclAttr astAttr = nullptr;
+    auto kind = mlir::cir::StructType::RecordKind::Struct;
+    if (ast) {
+      astAttr = getAttr<mlir::cir::ASTRecordDeclAttr>(ast);
+      kind = getRecordKind(ast->getTagKind());
+    }
+    return getType<mlir::cir::StructType>(members, packed, kind, astAttr);
+  }
+
+  /// Get a CIR record kind from an AST declaration tag.
+  mlir::cir::StructType::RecordKind
+  getRecordKind(const clang::TagTypeKind kind) {
+    switch (kind) {
+    case clang::TagTypeKind::Struct:
+      return mlir::cir::StructType::Struct;
+    case clang::TagTypeKind::Union:
+      return mlir::cir::StructType::Union;
+    case clang::TagTypeKind::Class:
+      return mlir::cir::StructType::Class;
+    case clang::TagTypeKind::Interface:
+      llvm_unreachable("interface records are NYI");
+    case clang::TagTypeKind::Enum:
+      llvm_unreachable("enum records are NYI");
+    }
+  }
+
+  /// Get an incomplete CIR struct type.
+  mlir::cir::StructType getIncompleteStructTy(llvm::StringRef name,
+                                              const clang::RecordDecl *ast) {
+    const auto nameAttr = getStringAttr(name);
+    auto kind = mlir::cir::StructType::RecordKind::Struct;
+    if (ast)
+      kind = getRecordKind(ast->getTagKind());
+    return getType<mlir::cir::StructType>(nameAttr, kind);
+  }
+
+  /// Get a CIR named struct type.
+  ///
+  /// If a struct already exists and is complete, but the client tries to fetch
+  /// it with a different set of attributes, this method will crash.
+  mlir::cir::StructType getCompleteStructTy(llvm::ArrayRef<mlir::Type> members,
+                                            llvm::StringRef name, bool packed,
+                                            const clang::RecordDecl *ast) {
+    const auto nameAttr = getStringAttr(name);
+    mlir::cir::ASTRecordDeclAttr astAttr = nullptr;
+    auto kind = mlir::cir::StructType::RecordKind::Struct;
+    if (ast) {
+      astAttr = getAttr<mlir::cir::ASTRecordDeclAttr>(ast);
+      kind = getRecordKind(ast->getTagKind());
+    }
+
+    // Create or get the struct.
+    auto type = getType<mlir::cir::StructType>(members, nameAttr, packed, kind,
+                                               astAttr);
+
+    // Complete an incomplete struct or ensure the existing complete struct
+    // matches the requested attributes.
+ type.complete(members, packed, astAttr); + + return type; + } + + mlir::cir::StructType + getCompleteStructType(mlir::ArrayAttr fields, bool packed = false, + llvm::StringRef name = "", + const clang::RecordDecl *ast = nullptr) { + llvm::SmallVector members; + for (auto &attr : fields) { + const auto typedAttr = attr.dyn_cast(); + members.push_back(typedAttr.getType()); + } + + if (name.empty()) + return getAnonStructTy(members, packed, ast); + else + return getCompleteStructTy(members, name, packed, ast); + } + + mlir::cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { + return mlir::cir::ArrayType::get(getContext(), eltType, size); + } + + bool isSized(mlir::Type ty) { + if (ty.isa()) + return true; + assert(0 && "Unimplemented size for type"); + return false; + } + + // + // Constant creation helpers + // ------------------------- + // + mlir::cir::ConstantOp getSInt32(uint32_t c, mlir::Location loc) { + auto sInt32Ty = getSInt32Ty(); + return create(loc, sInt32Ty, + mlir::cir::IntAttr::get(sInt32Ty, c)); + } + mlir::cir::ConstantOp getUInt32(uint32_t C, mlir::Location loc) { + auto uInt32Ty = getUInt32Ty(); + return create(loc, uInt32Ty, + mlir::cir::IntAttr::get(uInt32Ty, C)); + } + mlir::cir::ConstantOp getSInt64(uint64_t C, mlir::Location loc) { + auto sInt64Ty = getSInt64Ty(); + return create(loc, sInt64Ty, + mlir::cir::IntAttr::get(sInt64Ty, C)); + } + mlir::cir::ConstantOp getUInt64(uint64_t C, mlir::Location loc) { + auto uInt64Ty = getUInt64Ty(); + return create(loc, uInt64Ty, + mlir::cir::IntAttr::get(uInt64Ty, C)); + } + + mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal) { + bool isSigned = intVal.isSigned(); + auto width = intVal.getBitWidth(); + mlir::cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); + return getConstInt( + loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); + } + + mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, + uint64_t C) { + auto intTy = t.dyn_cast(); + assert(intTy && "expected mlir::cir::IntType"); + return create(loc, intTy, + mlir::cir::IntAttr::get(t, C)); + } + + mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { + return create(loc, getBoolTy(), + getCIRBoolAttr(state)); + } + mlir::cir::ConstantOp getFalse(mlir::Location loc) { + return getBool(false, loc); + } + mlir::cir::ConstantOp getTrue(mlir::Location loc) { + return getBool(true, loc); + } + + /// Create constant nullptr for pointer-to-data-member type ty. + mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, + mlir::Location loc) { + return create(loc, ty, getNullDataMemberAttr(ty)); + } + + // Creates constant null value for integral type ty. + mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { + return create(loc, ty, getZeroInitAttr(ty)); + } + + mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { + // TODO: dispatch creation for primitive types. + assert( + (ty.isa() || ty.isa()) && + "NYI for other types"); + return create(loc, ty, getZeroAttr(ty)); + } + + // + // Operation creation helpers + // -------------------------- + // + + /// Create a copy with inferred length. + mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src) { + return create(dst.getLoc(), dst, src); + } + + /// Create a break operation. + mlir::cir::BreakOp createBreak(mlir::Location loc) { + return create(loc); + } + + /// Create a continue operation. 
+  mlir::cir::ContinueOp createContinue(mlir::Location loc) {
+    return create<mlir::cir::ContinueOp>(loc);
+  }
+
+  mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst,
+                                   mlir::Value src, mlir::Value len) {
+    return create<mlir::cir::MemCpyOp>(loc, dst, src, len);
+  }
+
+  mlir::Value createNeg(mlir::Value value) {
+
+    if (auto intTy = value.getType().dyn_cast<mlir::cir::IntType>()) {
+      // Source is an unsigned integer: first cast it to signed.
+      if (intTy.isUnsigned())
+        value = createIntCast(value, getSIntNTy(intTy.getWidth()));
+      return create<mlir::cir::UnaryOp>(value.getLoc(), value.getType(),
+                                        mlir::cir::UnaryOpKind::Minus, value);
+    }
+
+    llvm_unreachable("negation for the given type is NYI");
+  }
+
+  // TODO: split this to createFPExt/createFPTrunc when we have dedicated cast
+  // operations.
+  mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType) {
+    if (getIsFPConstrained())
+      llvm_unreachable("constrainedfp NYI");
+
+    return create<mlir::cir::CastOp>(v.getLoc(), destType,
+                                     mlir::cir::CastKind::floating, v);
+  }
+
+  mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) {
+    assert(!UnimplementedFeature::metaDataNode());
+    if (IsFPConstrained)
+      llvm_unreachable("Constrained FP NYI");
+
+    assert(!UnimplementedFeature::foldBinOpFMF());
+    return create<mlir::cir::BinOp>(lhs.getLoc(), mlir::cir::BinOpKind::Sub,
+                                    lhs, rhs);
+  }
+
+  mlir::Value createDynCast(mlir::Location loc, mlir::Value src,
+                            mlir::cir::PointerType destType, bool isRefCast,
+                            mlir::cir::DynamicCastInfoAttr info) {
+    auto castKind = isRefCast ? mlir::cir::DynamicCastKind::ref
+                              : mlir::cir::DynamicCastKind::ptr;
+    return create<mlir::cir::DynamicCastOp>(loc, destType, castKind, src, info);
+  }
+
+  cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr,
+                                   mlir::Type destType) {
+    if (destType == addr.getElementType())
+      return addr;
+
+    auto ptrTy = getPointerTo(destType);
+    auto baseAddr =
+        create<mlir::cir::BaseClassAddrOp>(loc, ptrTy, addr.getPointer());
+
+    return Address(baseAddr, ptrTy, addr.getAlignment());
+  }
+
+  // FIXME(cir): CIRGenBuilder class should have an attribute with a reference
+  // to the module so that we don't have to search for it or pass it around.
+  // FIXME(cir): Track a list of globals, or at least the last one inserted, so
+  // that we can insert globals in the same order they are defined by CIRGen.
+
+  /// Creates a versioned global variable. If the symbol is already taken, an
+  /// ID will be appended to the symbol. The returned global must always be
+  /// queried for its name so it can be referenced correctly.
+  [[nodiscard]] mlir::cir::GlobalOp
+  createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc,
+                        mlir::StringRef name, mlir::Type type, bool isConst,
+                        mlir::cir::GlobalLinkageKind linkage) {
+    mlir::OpBuilder::InsertionGuard guard(*this);
+    setInsertionPointToStart(module.getBody());
+
+    // Create a unique name if the given name is already taken.
+    std::string uniqueName;
+    if (unsigned version = GlobalsVersioning[name.str()]++)
+      uniqueName = name.str() + "." + std::to_string(version);
+    else
+      uniqueName = name.str();
+
+    return create<mlir::cir::GlobalOp>(loc, uniqueName, type, isConst, linkage);
+  }
+
+  mlir::Value createGetGlobal(mlir::cir::GlobalOp global,
+                              bool threadLocal = false) {
+    return create<mlir::cir::GetGlobalOp>(global.getLoc(),
+                                          getPointerTo(global.getSymType()),
+                                          global.getName(), threadLocal);
+  }
+
+  mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
+                                mlir::Value addr, mlir::Type storageType,
+                                const CIRGenBitFieldInfo &info,
+                                bool isLvalueVolatile, bool useVolatile) {
+    auto offset = useVolatile ?
info.VolatileOffset : info.Offset; + return create(loc, resultType, addr, storageType, + info.Name, info.Size, offset, + info.IsSigned, isLvalueVolatile); + } + + mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, + mlir::Value dstAddr, mlir::Type storageType, + mlir::Value src, const CIRGenBitFieldInfo &info, + bool isLvalueVolatile, bool useVolatile) { + auto offset = useVolatile ? info.VolatileOffset : info.Offset; + return create( + loc, resultType, dstAddr, storageType, src, info.Name, info.Size, + offset, info.IsSigned, isLvalueVolatile); + } + + /// Create a pointer to a record member. + mlir::Value createGetMember(mlir::Location loc, mlir::Type result, + mlir::Value base, llvm::StringRef name, + unsigned index) { + return create(loc, result, base, name, index); + } + + /// Cast the element type of the given address to a different type, + /// preserving information like the alignment. + cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, + mlir::Type destType) { + if (destType == addr.getElementType()) + return addr; + + auto ptrTy = getPointerTo(destType); + return Address(createBitcast(loc, addr.getPointer(), ptrTy), destType, + addr.getAlignment()); + } + + mlir::Value createLoad(mlir::Location loc, Address addr) { + auto ptrTy = addr.getPointer().getType().dyn_cast(); + if (addr.getElementType() != ptrTy.getPointee()) + addr = addr.withPointer( + createPtrBitcast(addr.getPointer(), addr.getElementType())); + + return create(loc, addr.getElementType(), + addr.getPointer()); + } + + mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + mlir::Value ptr, + [[maybe_unused]] llvm::MaybeAlign align, + [[maybe_unused]] bool isVolatile) { + assert(!UnimplementedFeature::volatileLoadOrStore()); + assert(!UnimplementedFeature::alignedLoad()); + return create(loc, ty, ptr); + } + + mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + mlir::Value ptr, llvm::MaybeAlign align) { + return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); + } + + mlir::Value + createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value addr, + clang::CharUnits align = clang::CharUnits::One()) { + return createAlignedLoad(loc, ty, addr, align.getAsAlign()); + } + + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, + Address dst, bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), _volatile, + order); + } + + mlir::cir::StoreOp createFlagStore(mlir::Location loc, bool val, + mlir::Value dst) { + auto flag = getBool(val, loc); + return CIRBaseBuilderTy::createStore(loc, flag, dst); + } + + // Convert byte offset to sequence of high-level indices suitable for + // GlobalViewAttr. Ideally we shouldn't deal with low-level offsets at all + // but currently some parts of Clang AST, which we don't want to touch just + // yet, return them. 
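+  // (Worked example, assuming 4-byte ints and no padding: for
+  // `struct { int a; int b[4]; }`, a flat offset of 12 bytes yields indices
+  // {1, 2} -- member `b` starts at byte 4, and (12 - 4) / 4 selects b[2].)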
+ void computeGlobalViewIndicesFromFlatOffset( + int64_t Offset, mlir::Type Ty, CIRDataLayout Layout, + llvm::SmallVectorImpl &Indices) { + if (!Offset) + return; + + mlir::Type SubType; + + if (auto ArrayTy = Ty.dyn_cast()) { + auto EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); + Indices.push_back(Offset / EltSize); + SubType = ArrayTy.getEltType(); + Offset %= EltSize; + } else if (auto PtrTy = Ty.dyn_cast()) { + auto EltSize = Layout.getTypeAllocSize(PtrTy.getPointee()); + Indices.push_back(Offset / EltSize); + SubType = PtrTy.getPointee(); + Offset %= EltSize; + } else if (auto StructTy = Ty.dyn_cast()) { + auto Elts = StructTy.getMembers(); + unsigned Pos = 0; + for (size_t I = 0; I < Elts.size(); ++I) { + auto EltSize = Layout.getTypeAllocSize(Elts[I]); + unsigned AlignMask = Layout.getABITypeAlign(Elts[I]) - 1; + Pos = (Pos + AlignMask) & ~AlignMask; + if (Offset < Pos + EltSize) { + Indices.push_back(I); + SubType = Elts[I]; + Offset -= Pos; + break; + } + Pos += EltSize; + } + } else { + llvm_unreachable("unexpected type"); + } + + assert(SubType); + computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); + } + + mlir::cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) { + return create(loc, ty); + } + + mlir::cir::StackRestoreOp createStackRestore(mlir::Location loc, + mlir::Value v) { + return create(loc, v); + } + + // TODO(cir): Change this to hoist alloca to the parent *scope* instead. + /// Move alloca operation to the parent region. + void hoistAllocaToParentRegion(mlir::cir::AllocaOp alloca) { + auto &block = alloca->getParentOp()->getParentRegion()->front(); + const auto allocas = block.getOps(); + if (allocas.empty()) { + alloca->moveBefore(&block, block.begin()); + } else { + alloca->moveAfter(*std::prev(allocas.end())); + } + } + + mlir::cir::CmpThreeWayOp + createThreeWayCmpStrong(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, + const llvm::APSInt <Res, const llvm::APSInt &eqRes, + const llvm::APSInt >Res) { + assert(ltRes.getBitWidth() == eqRes.getBitWidth() && + ltRes.getBitWidth() == gtRes.getBitWidth() && + "the three comparison results must have the same bit width"); + auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); + auto infoAttr = getCmpThreeWayInfoStrongOrdering(ltRes, eqRes, gtRes); + return create(loc, cmpResultTy, lhs, rhs, + infoAttr); + } + + mlir::cir::CmpThreeWayOp + createThreeWayCmpPartial(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, + const llvm::APSInt <Res, const llvm::APSInt &eqRes, + const llvm::APSInt >Res, + const llvm::APSInt &unorderedRes) { + assert(ltRes.getBitWidth() == eqRes.getBitWidth() && + ltRes.getBitWidth() == gtRes.getBitWidth() && + ltRes.getBitWidth() == unorderedRes.getBitWidth() && + "the four comparison results must have the same bit width"); + auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); + auto infoAttr = + getCmpThreeWayInfoPartialOrdering(ltRes, eqRes, gtRes, unorderedRes); + return create(loc, cmpResultTy, lhs, rhs, + infoAttr); + } + + mlir::cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, + mlir::Value objectPtr, + mlir::Value memberPtr) { + auto memberPtrTy = memberPtr.getType().cast(); + + // TODO(cir): consider address space. 
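+    // (This models the C++ `objectPtr->*memberPtr` access: the member offset
+    // is only known at runtime, so the result is computed as a pointer into
+    // *objectPtr rather than via a constant GetMemberOp index.)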
+ assert(!UnimplementedFeature::addressSpace()); + auto resultTy = getPointerTo(memberPtrTy.getMemberTy()); + + return create(loc, resultTy, objectPtr, + memberPtr); + } +}; + +} // namespace cir +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp new file mode 100644 index 000000000000..4061e54811a6 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -0,0 +1,958 @@ +//===---- CIRGenBuiltin.cpp - Emit CIR for builtins -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Builtin calls as CIR or a function call to be +// later resolved. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" + +// TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for +// convenience. +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/IR/Intrinsics.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Builtins.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; +using namespace llvm; + +static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, + const CallExpr *E, + mlir::Operation *calleeValue) { + auto callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(FD)); + return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); +} + +template +static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg = CGF.buildScalarExpr(E.getArg(0)); + + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); + if (CGF.getBuilder().getIsFPConstrained()) + llvm_unreachable("constraint FP operations are NYI"); + + auto Call = + CGF.getBuilder().create(Arg.getLoc(), Arg.getType(), Arg); + return RValue::get(Call->getResult(0)); +} + +template +static RValue +buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, + std::optional CK) { + mlir::Value arg; + if (CK.has_value()) + arg = CGF.buildCheckedArgForBuiltin(E->getArg(0), *CK); + else + arg = CGF.buildScalarExpr(E->getArg(0)); + + auto resultTy = CGF.ConvertType(E->getType()); + auto op = + CGF.getBuilder().create(CGF.getLoc(E->getExprLoc()), resultTy, arg); + return RValue::get(op); +} + +// Initialize the alloca with the given size and alignment according to the lang +// opts. Supporting only the trivial non-initialization for now. +static void initializeAlloca(CIRGenFunction &CGF, + [[maybe_unused]] mlir::Value AllocaAddr, + [[maybe_unused]] mlir::Value Size, + [[maybe_unused]] CharUnits AlignmentInBytes) { + + switch (CGF.getLangOpts().getTrivialAutoVarInit()) { + case LangOptions::TrivialAutoVarInitKind::Uninitialized: + // Nothing to initialize. 
+    return;
+  case LangOptions::TrivialAutoVarInitKind::Zero:
+  case LangOptions::TrivialAutoVarInitKind::Pattern:
+    assert(false && "unexpected trivial auto var init kind NYI");
+    return;
+  }
+}
+
+RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+                                        const CallExpr *E,
+                                        ReturnValueSlot ReturnValue) {
+  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
+
+  // See if we can constant fold this builtin. If so, don't emit it at all.
+  // TODO: Extend this handling to all builtin calls that we can constant-fold.
+  Expr::EvalResult Result;
+  if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getASTContext()) &&
+      !Result.hasSideEffects()) {
+    if (Result.Val.isInt()) {
+      return RValue::get(builder.getConstInt(getLoc(E->getSourceRange()),
+                                             Result.Val.getInt()));
+    }
+    if (Result.Val.isFloat())
+      llvm_unreachable("NYI");
+  }
+
+  // If current long-double semantics is IEEE 128-bit, replace math builtins
+  // of long-double with f128 equivalent.
+  // TODO: This mutation should also be applied to targets other than PPC,
+  // once the backend supports IEEE 128-bit style libcalls.
+  if (getTarget().getTriple().isPPC64() &&
+      &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
+    llvm_unreachable("NYI");
+
+  // If the builtin has been declared explicitly with an assembler label,
+  // disable the specialized emitting below. Ideally we should communicate the
+  // rename in IR, or at least avoid generating the intrinsic calls that are
+  // likely to get lowered to the renamed library functions.
+  const unsigned BuiltinIDIfNoAsmLabel =
+      FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
+
+  // There are LLVM math intrinsics/instructions corresponding to math library
+  // functions, except that the LLVM op will never set errno while the math
+  // library might. Also, math builtins have the same semantics as their math
+  // library twins. Thus, we can transform math library and builtin calls to
+  // their LLVM counterparts if the call is marked 'const' (known to never set
+  // errno). In case FP exceptions are enabled, the experimental versions of
+  // the intrinsics model those.
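+  // (Illustrative: under -fno-math-errno, a call like `sqrt(x)` is const
+  // without errno and may be emitted as the corresponding CIR math op below;
+  // with math errno enabled it must remain a real libcall so that errno
+  // stays observable.)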
+ bool ConstWithoutErrnoAndExceptions = + getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID); + bool ConstWithoutExceptions = + getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID); + if (FD->hasAttr() || + ((ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) && + (!ConstWithoutErrnoAndExceptions || (!getLangOpts().MathErrno)))) { + switch (BuiltinIDIfNoAsmLabel) { + case Builtin::BIceil: + case Builtin::BIceilf: + case Builtin::BIceill: + case Builtin::BI__builtin_ceil: + case Builtin::BI__builtin_ceilf: + case Builtin::BI__builtin_ceilf16: + case Builtin::BI__builtin_ceill: + case Builtin::BI__builtin_ceilf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIcopysign: + case Builtin::BIcopysignf: + case Builtin::BIcopysignl: + case Builtin::BI__builtin_copysign: + case Builtin::BI__builtin_copysignf: + case Builtin::BI__builtin_copysignf16: + case Builtin::BI__builtin_copysignl: + case Builtin::BI__builtin_copysignf128: + llvm_unreachable("NYI"); + + case Builtin::BIcos: + case Builtin::BIcosf: + case Builtin::BIcosl: + case Builtin::BI__builtin_cos: + case Builtin::BI__builtin_cosf: + case Builtin::BI__builtin_cosf16: + case Builtin::BI__builtin_cosl: + case Builtin::BI__builtin_cosf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIexp: + case Builtin::BIexpf: + case Builtin::BIexpl: + case Builtin::BI__builtin_exp: + case Builtin::BI__builtin_expf: + case Builtin::BI__builtin_expf16: + case Builtin::BI__builtin_expl: + case Builtin::BI__builtin_expf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIexp2: + case Builtin::BIexp2f: + case Builtin::BIexp2l: + case Builtin::BI__builtin_exp2: + case Builtin::BI__builtin_exp2f: + case Builtin::BI__builtin_exp2f16: + case Builtin::BI__builtin_exp2l: + case Builtin::BI__builtin_exp2f128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIfabs: + case Builtin::BIfabsf: + case Builtin::BIfabsl: + case Builtin::BI__builtin_fabs: + case Builtin::BI__builtin_fabsf: + case Builtin::BI__builtin_fabsf16: + case Builtin::BI__builtin_fabsl: + case Builtin::BI__builtin_fabsf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIfloor: + case Builtin::BIfloorf: + case Builtin::BIfloorl: + case Builtin::BI__builtin_floor: + case Builtin::BI__builtin_floorf: + case Builtin::BI__builtin_floorf16: + case Builtin::BI__builtin_floorl: + case Builtin::BI__builtin_floorf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIfma: + case Builtin::BIfmaf: + case Builtin::BIfmal: + case Builtin::BI__builtin_fma: + case Builtin::BI__builtin_fmaf: + case Builtin::BI__builtin_fmaf16: + case Builtin::BI__builtin_fmal: + case Builtin::BI__builtin_fmaf128: + llvm_unreachable("NYI"); + + case Builtin::BIfmax: + case Builtin::BIfmaxf: + case Builtin::BIfmaxl: + case Builtin::BI__builtin_fmax: + case Builtin::BI__builtin_fmaxf: + case Builtin::BI__builtin_fmaxf16: + case Builtin::BI__builtin_fmaxl: + case Builtin::BI__builtin_fmaxf128: + llvm_unreachable("NYI"); + + case Builtin::BIfmin: + case Builtin::BIfminf: + case Builtin::BIfminl: + case Builtin::BI__builtin_fmin: + case Builtin::BI__builtin_fminf: + case Builtin::BI__builtin_fminf16: + case Builtin::BI__builtin_fminl: + case Builtin::BI__builtin_fminf128: + llvm_unreachable("NYI"); + + // fmod() is a special-case. It maps to the frem instruction rather than an + // LLVM intrinsic. 
+ case Builtin::BIfmod: + case Builtin::BIfmodf: + case Builtin::BIfmodl: + case Builtin::BI__builtin_fmod: + case Builtin::BI__builtin_fmodf: + case Builtin::BI__builtin_fmodf16: + case Builtin::BI__builtin_fmodl: + case Builtin::BI__builtin_fmodf128: { + llvm_unreachable("NYI"); + } + + case Builtin::BIlog: + case Builtin::BIlogf: + case Builtin::BIlogl: + case Builtin::BI__builtin_log: + case Builtin::BI__builtin_logf: + case Builtin::BI__builtin_logf16: + case Builtin::BI__builtin_logl: + case Builtin::BI__builtin_logf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIlog10: + case Builtin::BIlog10f: + case Builtin::BIlog10l: + case Builtin::BI__builtin_log10: + case Builtin::BI__builtin_log10f: + case Builtin::BI__builtin_log10f16: + case Builtin::BI__builtin_log10l: + case Builtin::BI__builtin_log10f128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIlog2: + case Builtin::BIlog2f: + case Builtin::BIlog2l: + case Builtin::BI__builtin_log2: + case Builtin::BI__builtin_log2f: + case Builtin::BI__builtin_log2f16: + case Builtin::BI__builtin_log2l: + case Builtin::BI__builtin_log2f128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BInearbyint: + case Builtin::BInearbyintf: + case Builtin::BInearbyintl: + case Builtin::BI__builtin_nearbyint: + case Builtin::BI__builtin_nearbyintf: + case Builtin::BI__builtin_nearbyintl: + case Builtin::BI__builtin_nearbyintf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIpow: + case Builtin::BIpowf: + case Builtin::BIpowl: + case Builtin::BI__builtin_pow: + case Builtin::BI__builtin_powf: + case Builtin::BI__builtin_powf16: + case Builtin::BI__builtin_powl: + case Builtin::BI__builtin_powf128: + llvm_unreachable("NYI"); + + case Builtin::BIrint: + case Builtin::BIrintf: + case Builtin::BIrintl: + case Builtin::BI__builtin_rint: + case Builtin::BI__builtin_rintf: + case Builtin::BI__builtin_rintf16: + case Builtin::BI__builtin_rintl: + case Builtin::BI__builtin_rintf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIround: + case Builtin::BIroundf: + case Builtin::BIroundl: + case Builtin::BI__builtin_round: + case Builtin::BI__builtin_roundf: + case Builtin::BI__builtin_roundf16: + case Builtin::BI__builtin_roundl: + case Builtin::BI__builtin_roundf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIsin: + case Builtin::BIsinf: + case Builtin::BIsinl: + case Builtin::BI__builtin_sin: + case Builtin::BI__builtin_sinf: + case Builtin::BI__builtin_sinf16: + case Builtin::BI__builtin_sinl: + case Builtin::BI__builtin_sinf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIsqrt: + case Builtin::BIsqrtf: + case Builtin::BIsqrtl: + case Builtin::BI__builtin_sqrt: + case Builtin::BI__builtin_sqrtf: + case Builtin::BI__builtin_sqrtf16: + case Builtin::BI__builtin_sqrtl: + case Builtin::BI__builtin_sqrtf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BItrunc: + case Builtin::BItruncf: + case Builtin::BItruncl: + case Builtin::BI__builtin_trunc: + case Builtin::BI__builtin_truncf: + case Builtin::BI__builtin_truncf16: + case Builtin::BI__builtin_truncl: + case Builtin::BI__builtin_truncf128: + return buildUnaryFPBuiltin(*this, *E); + + case Builtin::BIlround: + case Builtin::BIlroundf: + case Builtin::BIlroundl: + case Builtin::BI__builtin_lround: + case Builtin::BI__builtin_lroundf: + case Builtin::BI__builtin_lroundl: + case Builtin::BI__builtin_lroundf128: + llvm_unreachable("NYI"); + + case Builtin::BIllround: + case Builtin::BIllroundf: + case 
Builtin::BIllroundl: + case Builtin::BI__builtin_llround: + case Builtin::BI__builtin_llroundf: + case Builtin::BI__builtin_llroundl: + case Builtin::BI__builtin_llroundf128: + llvm_unreachable("NYI"); + + case Builtin::BIlrint: + case Builtin::BIlrintf: + case Builtin::BIlrintl: + case Builtin::BI__builtin_lrint: + case Builtin::BI__builtin_lrintf: + case Builtin::BI__builtin_lrintl: + case Builtin::BI__builtin_lrintf128: + llvm_unreachable("NYI"); + + case Builtin::BIllrint: + case Builtin::BIllrintf: + case Builtin::BIllrintl: + case Builtin::BI__builtin_llrint: + case Builtin::BI__builtin_llrintf: + case Builtin::BI__builtin_llrintl: + case Builtin::BI__builtin_llrintf128: + llvm_unreachable("NYI"); + + default: + break; + } + } + + switch (BuiltinIDIfNoAsmLabel) { + default: + break; + + case Builtin::BIprintf: + if (getTarget().getTriple().isNVPTX() || + getTarget().getTriple().isAMDGCN()) { + llvm_unreachable("NYI"); + } + break; + + // C stdarg builtins. + case Builtin::BI__builtin_stdarg_start: + case Builtin::BI__builtin_va_start: + case Builtin::BI__va_start: + case Builtin::BI__builtin_va_end: { + buildVAStartEnd(BuiltinID == Builtin::BI__va_start + ? buildScalarExpr(E->getArg(0)) + : buildVAListRef(E->getArg(0)).getPointer(), + BuiltinID != Builtin::BI__builtin_va_end); + return {}; + } + case Builtin::BI__builtin_va_copy: { + auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); + auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); + builder.create(dstPtr.getLoc(), dstPtr, srcPtr); + return {}; + } + + case Builtin::BI__builtin_expect: + case Builtin::BI__builtin_expect_with_probability: { + auto ArgValue = buildScalarExpr(E->getArg(0)); + auto ExpectedValue = buildScalarExpr(E->getArg(1)); + + // Don't generate cir.expect on -O0 as the backend won't use it for + // anything. Note, we still IRGen ExpectedValue because it could have + // side-effects. 
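+    // (Illustrative: at -O1 and above, `if (__builtin_expect(x, 0))` produces
+    // a cir.expect of %x against the constant 0, which can later lower to
+    // llvm.expect; at -O0 only the argument value itself is returned below.)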
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) + return RValue::get(ArgValue); + + mlir::FloatAttr ProbAttr = {}; + if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) { + llvm::APFloat Probability(0.0); + const Expr *ProbArg = E->getArg(2); + bool EvalSucceed = + ProbArg->EvaluateAsFloat(Probability, CGM.getASTContext()); + assert(EvalSucceed && "probability should be able to evaluate as float"); + (void)EvalSucceed; + bool LoseInfo = false; + Probability.convert(llvm::APFloat::IEEEdouble(), + llvm::RoundingMode::Dynamic, &LoseInfo); + ProbAttr = mlir::FloatAttr::get( + mlir::FloatType::getF64(builder.getContext()), Probability); + } + + auto result = builder.create( + getLoc(E->getSourceRange()), ArgValue.getType(), ArgValue, + ExpectedValue, ProbAttr); + + return RValue::get(result); + } + case Builtin::BI__builtin_unpredictable: { + if (CGM.getCodeGenOpts().OptimizationLevel != 0) + assert(!UnimplementedFeature::insertBuiltinUnpredictable()); + return RValue::get(buildScalarExpr(E->getArg(0))); + } + + case Builtin::BI__builtin_prefetch: { + auto evaluateOperandAsInt = [&](const Expr *Arg) { + Expr::EvalResult Res; + [[maybe_unused]] bool EvalSucceed = + Arg->EvaluateAsInt(Res, CGM.getASTContext()); + assert(EvalSucceed && "expression should be able to evaluate as int"); + return Res.Val.getInt().getZExtValue(); + }; + + bool IsWrite = false; + if (E->getNumArgs() > 1) + IsWrite = evaluateOperandAsInt(E->getArg(1)); + + int Locality = 0; + if (E->getNumArgs() > 2) + Locality = evaluateOperandAsInt(E->getArg(2)); + + mlir::Value Address = buildScalarExpr(E->getArg(0)); + builder.create(getLoc(E->getSourceRange()), Address, + Locality, IsWrite); + return RValue::get(nullptr); + } + + // C++ std:: builtins. + case Builtin::BImove: + case Builtin::BImove_if_noexcept: + case Builtin::BIforward: + case Builtin::BIas_const: + return RValue::get(buildLValue(E->getArg(0)).getPointer()); + case Builtin::BI__GetExceptionInfo: { + llvm_unreachable("NYI"); + } + + case Builtin::BI__fastfail: + llvm_unreachable("NYI"); + + case Builtin::BI__builtin_coro_id: + case Builtin::BI__builtin_coro_promise: + case Builtin::BI__builtin_coro_resume: + case Builtin::BI__builtin_coro_noop: + case Builtin::BI__builtin_coro_destroy: + case Builtin::BI__builtin_coro_done: + case Builtin::BI__builtin_coro_alloc: + case Builtin::BI__builtin_coro_begin: + case Builtin::BI__builtin_coro_end: + case Builtin::BI__builtin_coro_suspend: + case Builtin::BI__builtin_coro_align: + llvm_unreachable("NYI"); + + case Builtin::BI__builtin_coro_frame: { + return buildCoroutineFrame(); + } + case Builtin::BI__builtin_coro_free: + case Builtin::BI__builtin_coro_size: { + GlobalDecl gd{FD}; + mlir::Type ty = CGM.getTypes().GetFunctionType( + CGM.getTypes().arrangeGlobalDeclaration(GD)); + const auto *ND = cast(GD.getDecl()); + auto fnOp = + CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, + /*DontDefer=*/false); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), + E, ReturnValue); + } + case Builtin::BI__builtin_dynamic_object_size: { + // Fallthrough below, assert until we have a testcase. 
+ llvm_unreachable("NYI"); + } + case Builtin::BI__builtin_object_size: { + unsigned Type = + E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); + auto ResType = ConvertType(E->getType()).dyn_cast(); + assert(ResType && "not sure what to do?"); + + // We pass this builtin onto the optimizer so that it can figure out the + // object size in more complex cases. + bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; + return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, + /*EmittedE=*/nullptr, IsDynamic)); + } + case Builtin::BI__builtin_unreachable: { + buildUnreachable(E->getExprLoc()); + + // We do need to preserve an insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } + case Builtin::BI__builtin_trap: { + builder.create(getLoc(E->getExprLoc())); + + // Note that cir.trap is a terminator so we need to start a new block to + // preserve the insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } + case Builtin::BImemcpy: + case Builtin::BI__builtin_memcpy: + case Builtin::BImempcpy: + case Builtin::BI__builtin_mempcpy: { + Address Dest = buildPointerWithAlignment(E->getArg(0)); + Address Src = buildPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); + buildNonNullArgCheck(RValue::get(Dest.getPointer()), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), + FD, 0); + buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); + builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(), + Src.getPointer(), SizeVal); + if (BuiltinID == Builtin::BImempcpy || + BuiltinID == Builtin::BI__builtin_mempcpy) + llvm_unreachable("mempcpy is NYI"); + else + return RValue::get(Dest.getPointer()); + } + + case Builtin::BI__builtin_clrsb: + case Builtin::BI__builtin_clrsbl: + case Builtin::BI__builtin_clrsbll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_ctzs: + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: + case Builtin::BI__builtin_ctzg: + return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); + + case Builtin::BI__builtin_clzs: + case Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: + case Builtin::BI__builtin_clzg: + return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); + + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__popcnt16: + case Builtin::BI__popcnt: + case Builtin::BI__popcnt64: + case Builtin::BI__builtin_popcount: + case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: + case Builtin::BI__builtin_popcountg: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_bswap16: + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: + case Builtin::BI_byteswap_ushort: + case Builtin::BI_byteswap_ulong: + case Builtin::BI_byteswap_uint64: { + auto arg = buildScalarExpr(E->getArg(0)); + return RValue::get(builder.create( + getLoc(E->getSourceRange()), arg)); + } + + case Builtin::BI__builtin_constant_p: { + mlir::Type ResultType = 
ConvertType(E->getType()); + + const Expr *Arg = E->getArg(0); + QualType ArgType = Arg->getType(); + // FIXME: The allowance for Obj-C pointers and block pointers is historical + // and likely a mistake. + if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && + !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) + // Per the GCC documentation, only numeric constants are recognized after + // inlining. + return RValue::get( + builder.getConstInt(getLoc(E->getSourceRange()), + ResultType.cast(), 0)); + + if (Arg->HasSideEffects(getContext())) + // The argument is unevaluated, so be conservative if it might have + // side-effects. + return RValue::get( + builder.getConstInt(getLoc(E->getSourceRange()), + ResultType.cast(), 0)); + + mlir::Value ArgValue = buildScalarExpr(Arg); + if (ArgType->isObjCObjectPointerType()) + // Convert Objective-C objects to id because we cannot distinguish between + // LLVM types for Obj-C classes as they are opaque. + ArgType = CGM.getASTContext().getObjCIdType(); + ArgValue = builder.createBitcast(ArgValue, ConvertType(ArgType)); + + mlir::Value Result = builder.create( + getLoc(E->getSourceRange()), ArgValue); + if (Result.getType() != ResultType) + Result = builder.createBoolToInt(Result, ResultType); + return RValue::get(Result); + } + + case Builtin::BIalloca: + case Builtin::BI_alloca: + case Builtin::BI__builtin_alloca_uninitialized: + case Builtin::BI__builtin_alloca: { + // Get alloca size input + mlir::Value Size = buildScalarExpr(E->getArg(0)); + + // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. + const TargetInfo &TI = getContext().getTargetInfo(); + const CharUnits SuitableAlignmentInBytes = + getContext().toCharUnitsFromBits(TI.getSuitableAlign()); + + // Emit the alloca op with type `u8 *` to match the semantics of + // `llvm.alloca`. We later bitcast the type to `void *` to match the + // semantics of C/C++ + // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a + // pointer of type `void *`. This will require a change to the allocaOp + // verifier. + auto AllocaAddr = builder.createAlloca( + getLoc(E->getSourceRange()), builder.getUInt8PtrTy(), + builder.getUInt8Ty(), "bi_alloca", SuitableAlignmentInBytes, Size); + + // Initialize the allocated buffer if required. + if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized) + initializeAlloca(*this, AllocaAddr, Size, SuitableAlignmentInBytes); + + // An alloca will always return a pointer to the alloca (stack) address + // space. This address space need not be the same as the AST / Language + // default (e.g. in C / C++ auto vars are in the generic address space). At + // the AST level this is handled within CreateTempAlloca et al., but for the + // builtin / dynamic alloca we have to handle it here. + assert(!UnimplementedFeature::addressSpace()); + LangAS AAS = getASTAllocaAddressSpace(); + LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); + if (EAS != AAS) { + assert(false && "Non-default address space for alloca NYI"); + } + + // Bitcast the alloca to the expected type. + return RValue::get( + builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); + } + } + + // If this is an alias for a lib function (e.g. __builtin_sin), emit + // the call using the normal call path, but using the unmangled + // version of the function name. 
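+  // (e.g. a `__builtin_sin` call that was not folded above becomes a direct
+  // call to plain `sin`; getBuiltinLibFunction drops the `__builtin_` prefix
+  // when it builds the callee.)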
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) + return buildLibraryCall(*this, FD, E, + CGM.getBuiltinLibFunction(FD, BuiltinID)); + + // If this is a predefined lib function (e.g. malloc), emit the call + // using exactly the normal call path. + if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) + return buildLibraryCall(*this, FD, E, + buildScalarExpr(E->getCallee()).getDefiningOp()); + + // Check that a call to a target specific builtin has the correct target + // features. + // This is down here to avoid non-target specific builtins, however, if + // generic builtins start to require generic target features then we + // can move this up to the beginning of the function. + // checkTargetFeatures(E, FD); + + if (unsigned VectorWidth = + getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) + llvm_unreachable("NYI"); + + // See if we have a target specific intrinsic. + auto Name = getContext().BuiltinInfo.getName(BuiltinID).str(); + Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; + StringRef Prefix = + llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); + if (!Prefix.empty()) { + IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name); + // NOTE we don't need to perform a compatibility flag check here since the + // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the + // MS builtins via ALL_MS_LANGUAGES and are filtered earlier. + if (IntrinsicID == Intrinsic::not_intrinsic) + IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); + } + + if (IntrinsicID != Intrinsic::not_intrinsic) { + llvm_unreachable("NYI"); + } + + // Some target-specific builtins can have aggregate return values, e.g. + // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force + // ReturnValue to be non-null, so that the target-specific emission code can + // always just emit into it. + TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); + if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { + llvm_unreachable("NYI"); + } + + // Now see if we can emit a target-specific builtin. + if (auto v = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + // ErrorUnsupported(E, "builtin function"); + + // Unknown builtin, for now just dump it out and return undef. 
+ return GetUndefRValue(E->getType()); +} + +mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, + BuiltinCheckKind Kind) { + assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) && + "Unsupported builtin check kind"); + + auto value = buildScalarExpr(E); + if (!SanOpts.has(SanitizerKind::Builtin)) + return value; + + assert(!UnimplementedFeature::sanitizerBuiltin()); + llvm_unreachable("NYI"); +} + +static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, + unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { + llvm_unreachable("NYI"); + return {}; +} + +mlir::Value +CIRGenFunction::buildTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue) { + if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { + assert(getContext().getAuxTargetInfo() && "Missing aux target info"); + return buildTargetArchBuiltinExpr( + this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, + ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); + } + + return buildTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, + getTarget().getTriple().getArch()); +} + +void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { + // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this + // early, defer to LLVM lowering. + if (IsStart) + builder.create(ArgValue.getLoc(), ArgValue); + else + builder.create(ArgValue.getLoc(), ArgValue); +} + +/// Checks if using the result of __builtin_object_size(p, @p From) in place of +/// __builtin_object_size(p, @p To) is correct +static bool areBOSTypesCompatible(int From, int To) { + // Note: Our __builtin_object_size implementation currently treats Type=0 and + // Type=2 identically. Encoding this implementation detail here may make + // improving __builtin_object_size difficult in the future, so it's omitted. + return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); +} + +/// Returns a Value corresponding to the size of the given expression. +/// This Value may be either of the following: +/// +/// - Reference an argument if `pass_object_size` is used. +/// - A call to a `cir.objsize`. +/// +/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null +/// and we wouldn't otherwise try to reference a pass_object_size parameter, +/// we'll call `cir.objsize` on EmittedE, rather than emitting E. +mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, + mlir::cir::IntType ResType, + mlir::Value EmittedE, + bool IsDynamic) { + // We need to reference an argument if the pointer is a parameter with the + // pass_object_size attribute. + if (auto *D = dyn_cast(E->IgnoreParenImpCasts())) { + auto *Param = dyn_cast(D->getDecl()); + auto *PS = D->getDecl()->getAttr(); + if (Param != nullptr && PS != nullptr && + areBOSTypesCompatible(PS->getType(), Type)) { + auto Iter = SizeArguments.find(Param); + assert(Iter != SizeArguments.end()); + + const ImplicitParamDecl *D = Iter->second; + auto DIter = LocalDeclMap.find(D); + assert(DIter != LocalDeclMap.end()); + + return buildLoadOfScalar(DIter->second, /*Volatile=*/false, + getContext().getSizeType(), E->getBeginLoc()); + } + } + + // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't + // evaluate E for side-effects. In either case, just like original LLVM + // lowering, we shouldn't lower to `cir.objsize`. 
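+  // (GCC's encoding for the Type argument: bit 0 asks about the closest
+  // surrounding subobject instead of the whole object, bit 1 asks for a
+  // minimum instead of a maximum estimate -- hence the `Type & 2` test below.)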
+  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
+    llvm_unreachable("NYI");
+
+  auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E);
+  assert(Ptr.getType().isa<mlir::cir::PointerType>() &&
+         "Non-pointer passed to __builtin_object_size?");
+
+  // The LLVM intrinsic (which CIR lowers to at some point) only supports
+  // modes 0 and 2; account for that right now.
+  mlir::cir::SizeInfoType sizeInfoTy = ((Type & 2) != 0)
+                                           ? mlir::cir::SizeInfoType::min
+                                           : mlir::cir::SizeInfoType::max;
+  // TODO(cir): Heads up for LLVM lowering: for GCC compatibility,
+  // __builtin_object_size treats NULL as unknown size.
+  return builder.create<mlir::cir::ObjSizeOp>(
+      getLoc(E->getSourceRange()), ResType, Ptr, sizeInfoTy, IsDynamic);
+}
+
+mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize(
+    const Expr *E, unsigned Type, mlir::cir::IntType ResType,
+    mlir::Value EmittedE, bool IsDynamic) {
+  uint64_t ObjectSize;
+  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
+    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
+  return builder.getConstInt(getLoc(E->getSourceRange()), ResType, ObjectSize);
+}
+
+/// Given a builtin id for a function like "__builtin_fabsf", return the
+/// mlir::cir::FuncOp for "fabsf".
+mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+                                                      unsigned BuiltinID) {
+  assert(astCtx.BuiltinInfo.isLibFunction(BuiltinID));
+
+  // Get the name, skip over the __builtin_ prefix (if necessary).
+  StringRef Name;
+  GlobalDecl D(FD);
+
+  // TODO: This list should be expanded or refactored after all GCC-compatible
+  // std libcall builtins are implemented.
+  static SmallDenseMap<unsigned, StringRef> F128Builtins{
+      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
+      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
+      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
+      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
+      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
+      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
+      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
+      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
+      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
+      {Builtin::BI__builtin_printf, "__printfieee128"},
+      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
+      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
+      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
+      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
+      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
+      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
+      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
+      {Builtin::BI__builtin_scanf, "__scanfieee128"},
+      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
+      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
+      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
+      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
+      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
+  };
+
+  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
+  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
+  // if it is 64-bit 'long double' mode.
+  static SmallDenseMap<unsigned, StringRef> AIXLongDouble64Builtins{
+      {Builtin::BI__builtin_frexpl, "frexp"},
+      {Builtin::BI__builtin_ldexpl, "ldexp"},
+      {Builtin::BI__builtin_modfl, "modf"},
+  };
+
+  // If the builtin has been declared explicitly with an assembler label,
+  // use the mangled name. This differs from the plain label on platforms
+  // that prefix labels.
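+  // For example (assuming no assembler label is attached), __builtin_fabsf
+  // resolves to the library entry point "fabsf": the "__builtin_" prefix is
+  // ten characters long, hence the substr(10) below.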
+  if (FD->hasAttr<AsmLabelAttr>())
+    Name = getMangledName(D);
+  else {
+    // TODO: This mutation should also be applied to targets other than PPC,
+    // once the backend supports IEEE 128-bit style libcalls.
+    if (getTriple().isPPC64() &&
+        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
+        F128Builtins.find(BuiltinID) != F128Builtins.end())
+      Name = F128Builtins[BuiltinID];
+    else if (getTriple().isOSAIX() &&
+             &getTarget().getLongDoubleFormat() ==
+                 &llvm::APFloat::IEEEdouble() &&
+             AIXLongDouble64Builtins.find(BuiltinID) !=
+                 AIXLongDouble64Builtins.end())
+      Name = AIXLongDouble64Builtins[BuiltinID];
+    else
+      Name = astCtx.BuiltinInfo.getName(BuiltinID).substr(10);
+  }
+
+  auto Ty = getTypes().ConvertType(FD->getType());
+  return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
new file mode 100644
index 000000000000..31dba6be75eb
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -0,0 +1,291 @@
+//===--- CIRGenCXX.cpp - Emit CIR Code for declarations -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+
+#include "clang/AST/GlobalDecl.h"
+#include "llvm/Support/ErrorHandling.h"
+#include 
+
+using namespace clang;
+using namespace cir;
+
+/// Try to emit a base destructor as an alias to its primary
+/// base-class destructor.
+bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+  if (!getCodeGenOpts().CXXCtorDtorAliases)
+    return true;
+
+  // Producing an alias to a base class ctor/dtor can degrade debug quality
+  // as the debugger cannot tell them apart.
+  if (getCodeGenOpts().OptimizationLevel == 0)
+    return true;
+
+  // If sanitizing memory to check for use-after-dtor, do not emit as
+  // an alias, unless this class owns no members.
+  if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+      !D->getParent()->field_empty())
+    assert(!UnimplementedFeature::sanitizeDtor());
+
+  // If the destructor doesn't have a trivial body, we have to emit it
+  // separately.
+  if (!D->hasTrivialBody())
+    return true;
+
+  const CXXRecordDecl *Class = D->getParent();
+
+  // We are going to instrument this destructor, so give up even if it is
+  // currently empty.
+  if (Class->mayInsertExtraPadding())
+    return true;
+
+  // If we need to manipulate a VTT parameter, give up.
+  if (Class->getNumVBases()) {
+    // Extra Credit: passing extra parameters is perfectly safe
+    // in many calling conventions, so only bail out if the ctor's
+    // calling convention is nonstandard.
+    return true;
+  }
+
+  // If any field has a non-trivial destructor, we have to emit the
+  // destructor separately.
+  for (const auto *I : Class->fields())
+    if (I->getType().isDestructedType())
+      return true;
+
+  // Try to find a unique base class with a non-trivial destructor.
+  const CXXRecordDecl *UniqueBase = nullptr;
+  for (const auto &I : Class->bases()) {
+
+    // We're in the base destructor, so skip virtual bases.
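+    // (Virtual bases are destroyed by the complete-object destructor, not
+    //  by the base-object destructor being emitted here, so they cannot
+    //  disqualify the alias. Illustrative example: with
+    //  `struct B { ~B(); };` and `struct D : B {};`, D's base destructor
+    //  can simply alias B's.)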
+    if (I.isVirtual())
+      continue;
+
+    // Skip base classes with trivial destructors.
+    const auto *Base =
+        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+    if (Base->hasTrivialDestructor())
+      continue;
+
+    // If we've already found a base class with a non-trivial
+    // destructor, give up.
+    if (UniqueBase)
+      return true;
+    UniqueBase = Base;
+  }
+
+  // If we didn't find any bases with a non-trivial destructor, then
+  // the base destructor is actually effectively trivial, which can
+  // happen if it was needlessly user-defined or if there are virtual
+  // bases with non-trivial destructors.
+  if (!UniqueBase)
+    return true;
+
+  // If the base is at a non-zero offset, give up.
+  const ASTRecordLayout &ClassLayout = astCtx.getASTRecordLayout(Class);
+  if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
+    return true;
+
+  // Give up if the calling conventions don't match. We could update the call,
+  // but it is probably not worth it.
+  const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+  if (BaseD->getType()->castAs<FunctionType>()->getCallConv() !=
+      D->getType()->castAs<FunctionType>()->getCallConv())
+    return true;
+
+  GlobalDecl AliasDecl(D, Dtor_Base);
+  GlobalDecl TargetDecl(BaseD, Dtor_Base);
+
+  // The alias will use the linkage of the referent. If we can't
+  // support aliases with that linkage, fail.
+  auto Linkage = getFunctionLinkage(AliasDecl);
+
+  // We can't use an alias if the linkage is not valid for one.
+  if (!mlir::cir::isValidLinkage(Linkage))
+    return true;
+
+  auto TargetLinkage = getFunctionLinkage(TargetDecl);
+
+  // Check if we have it already.
+  StringRef MangledName = getMangledName(AliasDecl);
+  auto Entry = getGlobalValue(MangledName);
+  auto fnOp = dyn_cast_or_null<mlir::cir::FuncOp>(Entry);
+  if (Entry && fnOp && !fnOp.isDeclaration())
+    return false;
+  if (Replacements.count(MangledName))
+    return false;
+
+  assert(fnOp && "only knows how to handle FuncOp");
+  [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl);
+
+  // Find the referent.
+  auto Aliasee = cast<mlir::cir::FuncOp>(GetAddrOfGlobal(TargetDecl));
+
+  // Instead of creating an alias to a linkonce_odr, replace all of the uses
+  // of the aliasee.
+  if (mlir::cir::isDiscardableIfUnused(Linkage) &&
+      !(TargetLinkage ==
+            mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage &&
+        TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
+    // FIXME: An extern template instantiation will create functions with
+    // linkage "AvailableExternally". In libc++, some classes also define
+    // members with attribute "AlwaysInline" and expect no reference to
+    // be generated. It is desirable to reenable this optimization after
+    // corresponding LLVM changes.
+    llvm_unreachable("NYI");
+  }
+
+  // If we have a weak, non-discardable alias (weak, weak_odr), like an
+  // extern template instantiation or a dllexported class, avoid forming it on
+  // COFF. A COFF weak external alias cannot satisfy a normal undefined
+  // symbol reference from another TU. The other TU must also mark the
+  // referenced symbol as weak, which we cannot rely on.
+  if (mlir::cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) {
+    llvm_unreachable("NYI");
+  }
+
+  // If we don't have a definition for the destructor yet or the definition
+  // is available_externally, don't emit an alias. We can't emit aliases to
+  // declarations; that's just not how aliases work.
+  if (Aliasee.isDeclarationForLinker())
+    return true;
+
+  // Don't create an alias to a linker weak symbol. This avoids producing
+  // different COMDATs in different TUs.
Another option would be to + // output the alias both for weak_odr and linkonce_odr, but that + // requires explicit comdat support in the IL. + if (mlir::cir::isWeakForLinker(TargetLinkage)) + llvm_unreachable("NYI"); + + // Create the alias with no name. + buildAliasForGlobal("", Entry, AliasDecl, Aliasee, Linkage); + return false; +} + +static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { + assert((D->hasGlobalStorage() || + (D->hasLocalStorage() && + CGF.getContext().getLangOpts().OpenCLCPlusPlus)) && + "VarDecl must have global or local (in the case of OpenCL) storage!"); + assert(!D->getType()->isReferenceType() && + "Should not call buildDeclInit on a reference!"); + + QualType type = D->getType(); + LValue lv = CGF.makeAddrLValue(DeclPtr, type); + + const Expr *Init = D->getInit(); + switch (CIRGenFunction::getEvaluationKind(type)) { + case TEK_Aggregate: + CGF.buildAggExpr( + Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); + return; + case TEK_Scalar: + CGF.buildScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); + return; + case TEK_Complex: + llvm_unreachable("complext evaluation NYI"); + } +} + +static void buildDeclDestory(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { + // Honor __attribute__((no_destroy)) and bail instead of attempting + // to emit a reference to a possibly nonexistent destructor, which + // in turn can cause a crash. This will result in a global constructor + // that isn't balanced out by a destructor call as intended by the + // attribute. This also checks for -fno-c++-static-destructors and + // bails even if the attribute is not present. + assert(D->needsDestruction(CGF.getContext()) == QualType::DK_cxx_destructor); + + auto &CGM = CGF.CGM; + + // If __cxa_atexit is disabled via a flag, a different helper function is + // generated elsewhere which uses atexit instead, and it takes the destructor + // directly. 
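+  // Illustrative shape of the two registration schemes for a global `g` of
+  // class type T (a sketch; helper names are hypothetical and the actual
+  // helpers are generated elsewhere):
+  //
+  //   __cxa_atexit(&T::~T, &g, &__dso_handle); // default, -fuse-cxa-atexit
+  //   atexit(&__dtor_helper_for_g);            // -fno-use-cxa-atexit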
+ auto UsingExternalHelper = CGM.getCodeGenOpts().CXAAtExit; + QualType type = D->getType(); + const CXXRecordDecl *Record = type->getAsCXXRecordDecl(); + bool CanRegisterDestructor = + Record && (!CGM.getCXXABI().HasThisReturn( + GlobalDecl(Record->getDestructor(), Dtor_Complete)) || + CGM.getCXXABI().canCallMismatchedFunctionType()); + if (Record && (CanRegisterDestructor || UsingExternalHelper)) { + assert(!D->getTLSKind() && "TLS NYI"); + CXXDestructorDecl *Dtor = Record->getDestructor(); + CGM.getCXXABI().buildDestructorCall(CGF, Dtor, Dtor_Complete, + /*ForVirtualBase=*/false, + /*Delegating=*/false, DeclPtr, type); + } else { + llvm_unreachable("array destructors not yet supported!"); + } +} + +mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { + const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); + auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, + /*DontDefer=*/true, ForDefinition); + + setFunctionLinkage(GD, Fn); + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + { + mlir::OpBuilder::InsertionGuard guard(builder); + CGF.generateCode(GD, Fn, FnInfo); + } + CurCGF = nullptr; + + // TODO: setNonAliasAttributes + // TODO: SetLLVMFunctionAttributesForDefinition + return Fn; +} + +void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, + mlir::cir::GlobalOp Addr, + bool NeedsCtor, + bool NeedsDtor) { + assert(D && " Expected a global declaration!"); + CIRGenFunction CGF{*this, builder, true}; + CurCGF = &CGF; + CurCGF->CurFn = Addr; + Addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(builder.getContext(), D)); + + if (NeedsCtor) { + mlir::OpBuilder::InsertionGuard guard(builder); + auto block = builder.createBlock(&Addr.getCtorRegion()); + builder.setInsertionPointToStart(block); + Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); + buildDeclInit(CGF, D, DeclAddr); + builder.setInsertionPointToEnd(block); + builder.create(Addr->getLoc()); + } + + if (NeedsDtor) { + mlir::OpBuilder::InsertionGuard guard(builder); + auto block = builder.createBlock(&Addr.getDtorRegion()); + builder.setInsertionPointToStart(block); + Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); + buildDeclDestory(CGF, D, DeclAddr); + builder.setInsertionPointToEnd(block); + builder.create(Addr->getLoc()); + } + + CurCGF = nullptr; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp new file mode 100644 index 000000000000..b17206772c3f --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -0,0 +1,78 @@ +//===----- CirGenCXXABI.cpp - Interface to C++ ABIs -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for C++ code generation. Concrete subclasses +// of this implement code generation for specific C++ ABIs. 
+// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" + +#include "clang/AST/Decl.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/RecordLayout.h" + +using namespace cir; +using namespace clang; + +CIRGenCXXABI::~CIRGenCXXABI() {} + +CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs( + CIRGenFunction &CGF, const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating, + CallArgList &Args) { + auto AddedArgs = + getImplicitConstructorArgs(CGF, D, Type, ForVirtualBase, Delegating); + for (size_t i = 0; i < AddedArgs.Prefix.size(); ++i) + Args.insert(Args.begin() + 1 + i, + CallArg(RValue::get(AddedArgs.Prefix[i].Value), + AddedArgs.Prefix[i].Type)); + for (const auto &arg : AddedArgs.Suffix) + Args.add(RValue::get(arg.Value), arg.Type); + return AddedStructorArgCounts(AddedArgs.Prefix.size(), + AddedArgs.Suffix.size()); +} + +CatchTypeInfo CIRGenCXXABI::getCatchAllTypeInfo() { + return CatchTypeInfo{nullptr, 0}; +} + +bool CIRGenCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } + +void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, + FunctionArgList ¶ms) { + const auto *MD = cast(CGF.CurGD.getDecl()); + + // FIXME: I'm not entirely sure I like using a fake decl just for code + // generation. Maybe we can come up with a better way? + auto *ThisDecl = + ImplicitParamDecl::Create(CGM.getASTContext(), nullptr, MD->getLocation(), + &CGM.getASTContext().Idents.get("this"), + MD->getThisType(), ImplicitParamKind::CXXThis); + params.push_back(ThisDecl); + CGF.CXXABIThisDecl = ThisDecl; + + // Compute the presumed alignment of 'this', which basically comes down to + // whether we know it's a complete object or not. + auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent()); + if (MD->getParent()->getNumVBases() == 0 || + MD->getParent()->isEffectivelyFinal() || + isThisCompleteObject(CGF.CurGD)) { + CGF.CXXABIThisAlignment = Layout.getAlignment(); + } else { + llvm_unreachable("NYI"); + } +} + +mlir::cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( + GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const { + // Delegate back to CGM by default. + return CGM.getCIRLinkageForDeclarator(Dtor, Linkage, + /*IsConstantVariable=*/false); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h new file mode 100644 index 000000000000..0e1ffd53c79d --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -0,0 +1,322 @@ +//===----- CIRGenCXXABI.h - Interface to C++ ABIs ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for C++ code generation. Concrete subclasses +// of this implement code generation for specific C++ ABIs. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
+#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
+
+#include "CIRGenCall.h"
+#include "CIRGenCleanup.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+
+#include "mlir/IR/Attributes.h"
+#include "clang/AST/Mangle.h"
+
+namespace cir {
+
+class CIRGenFunction;
+class CIRGenFunctionInfo;
+
+/// Implements C++ ABI-specific code generation functions.
+class CIRGenCXXABI {
+protected:
+  cir::CIRGenModule &CGM;
+  std::unique_ptr<clang::MangleContext> MangleCtx;
+
+  CIRGenCXXABI(CIRGenModule &CGM)
+      : CGM{CGM}, MangleCtx(CGM.getASTContext().createMangleContext()) {}
+
+  clang::ASTContext &getContext() const { return CGM.getASTContext(); }
+
+public:
+  /// Similar to AddedStructorArgs, but only notes the number of additional
+  /// arguments.
+  struct AddedStructorArgCounts {
+    unsigned Prefix = 0;
+    unsigned Suffix = 0;
+    AddedStructorArgCounts() = default;
+    AddedStructorArgCounts(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
+    static AddedStructorArgCounts prefix(unsigned N) { return {N, 0}; }
+    static AddedStructorArgCounts suffix(unsigned N) { return {0, N}; }
+  };
+
+  /// Additional implicit arguments to add to the beginning (Prefix) and end
+  /// (Suffix) of a constructor / destructor arg list.
+  ///
+  /// Note that Prefix should actually be inserted *after* the first existing
+  /// arg; `this` arguments always come first.
+  struct AddedStructorArgs {
+    struct Arg {
+      mlir::Value Value;
+      clang::QualType Type;
+    };
+    llvm::SmallVector<Arg, 1> Prefix;
+    llvm::SmallVector<Arg, 1> Suffix;
+    AddedStructorArgs() = default;
+    AddedStructorArgs(llvm::SmallVector<Arg, 1> P, llvm::SmallVector<Arg, 1> S)
+        : Prefix(std::move(P)), Suffix(std::move(S)) {}
+    static AddedStructorArgs prefix(llvm::SmallVector<Arg, 1> Args) {
+      return {std::move(Args), {}};
+    }
+    static AddedStructorArgs suffix(llvm::SmallVector<Arg, 1> Args) {
+      return {{}, std::move(Args)};
+    }
+  };
+
+  /// Build the signature of the given constructor or destructor variant by
+  /// adding any required parameters. For convenience, ArgTys has been
+  /// initialized with the type of 'this'.
+  virtual AddedStructorArgCounts
+  buildStructorSignature(clang::GlobalDecl GD,
+                         llvm::SmallVectorImpl<clang::CanQualType> &ArgTys) = 0;
+
+  AddedStructorArgCounts
+  addImplicitConstructorArgs(CIRGenFunction &CGF,
+                             const clang::CXXConstructorDecl *D,
+                             clang::CXXCtorType Type, bool ForVirtualBase,
+                             bool Delegating, CallArgList &Args);
+
+  clang::ImplicitParamDecl *getThisDecl(CIRGenFunction &CGF) {
+    return CGF.CXXABIThisDecl;
+  }
+
+  virtual AddedStructorArgs getImplicitConstructorArgs(
+      CIRGenFunction &CGF, const clang::CXXConstructorDecl *D,
+      clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0;
+
+  /// Emit the ABI-specific prolog for the function.
+  virtual void buildInstanceFunctionProlog(CIRGenFunction &CGF) = 0;
+
+  /// Get the type of the implicit "this" parameter used by a method. May
+  /// return null if no specific type is applicable, e.g. if the ABI expects
+  /// the "this" parameter to point to some artificial offset in a complete
+  /// object due to vbases being reordered.
+  virtual const clang::CXXRecordDecl *
+  getThisArgumentTypeForMethod(const clang::CXXMethodDecl *MD) {
+    return MD->getParent();
+  }
+
+  /// Return whether the given global decl needs a VTT parameter.
+  virtual bool NeedsVTTParameter(clang::GlobalDecl GD);
+
+  /// If the C++ ABI requires the given type be returned in a particular way,
+  /// this method sets RetAI and returns true.
+  virtual bool classifyReturnType(CIRGenFunctionInfo &FI) const = 0;
+
+  /// Gets the mangle context.
+  clang::MangleContext &getMangleContext() { return *MangleCtx; }
+
+  clang::ImplicitParamDecl *&getStructorImplicitParamDecl(CIRGenFunction &CGF) {
+    return CGF.CXXStructorImplicitParamDecl;
+  }
+
+  /// Perform ABI-specific "this" argument adjustment required prior to
+  /// a call of a virtual function.
+  /// The "VirtualCall" argument is true iff the call itself is virtual.
+  virtual Address adjustThisArgumentForVirtualFunctionCall(CIRGenFunction &CGF,
+                                                           GlobalDecl GD,
+                                                           Address This,
+                                                           bool VirtualCall) {
+    return This;
+  }
+
+  /// Build a parameter variable suitable for 'this'.
+  void buildThisParam(CIRGenFunction &CGF, FunctionArgList &Params);
+
+  /// Loads the incoming C++ this pointer as it was passed by the caller.
+  mlir::Value loadIncomingCXXThis(CIRGenFunction &CGF);
+
+  virtual CatchTypeInfo getCatchAllTypeInfo();
+
+  /// Determine whether there's something special about the rules of the ABI
+  /// that tells us that 'this' is a complete object within the given function.
+  /// Obvious common logic like being defined on a final class will have been
+  /// taken care of by the caller.
+  virtual bool isThisCompleteObject(clang::GlobalDecl GD) const = 0;
+
+  /// Get the implicit (second) parameter that comes after the "this" pointer,
+  /// or nullptr if there isn't one.
+  virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &CGF,
+                                                    const CXXDestructorDecl *DD,
+                                                    CXXDtorType Type,
+                                                    bool ForVirtualBase,
+                                                    bool Delegating) = 0;
+
+  /// Emit constructor variants required by this ABI.
+  virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0;
+  /// Emit dtor variants required by this ABI.
+  virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0;
+
+  /// Emit the destructor call.
+  virtual void buildDestructorCall(CIRGenFunction &CGF,
+                                   const CXXDestructorDecl *DD,
+                                   CXXDtorType Type, bool ForVirtualBase,
+                                   bool Delegating, Address This,
+                                   QualType ThisTy) = 0;
+
+  virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+                                      FunctionArgList &Args) const = 0;
+
+  virtual void emitBeginCatch(CIRGenFunction &CGF, const CXXCatchStmt *C) = 0;
+
+  /// Get the address of the vtable for the given record decl which should be
+  /// used for the vptr at the given offset in RD.
+  virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD,
+                                              CharUnits VPtrOffset) = 0;
+
+  /// Build a virtual function pointer in the ABI-specific way.
+  virtual CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF,
+                                                 GlobalDecl GD, Address This,
+                                                 mlir::Type Ty,
+                                                 SourceLocation Loc) = 0;
+
+  /// Checks if ABI requires extra virtual offset for vtable field.
+  virtual bool
+  isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF,
+                                      CIRGenFunction::VPtr Vptr) = 0;
+
+  /// Determine whether it's possible to emit a vtable for \p RD, even
+  /// though we do not know that the vtable has been marked as used by semantic
+  /// analysis.
+  virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0;
+
+  /// Emits the VTable definitions required for the given record type.
+  virtual void emitVTableDefinitions(CIRGenVTables &CGVT,
+                                     const CXXRecordDecl *RD) = 0;
+
+  /// Emit any tables needed to implement virtual inheritance. For Itanium,
+  /// this emits virtual table tables.
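+  /// (A "virtual table table", or VTT, is the Itanium ABI's array of vtable
+  /// pointers consulted while constructing and destructing classes with
+  /// virtual bases.)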
+ virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0; + + virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty) = 0; + virtual CatchTypeInfo + getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, + QualType CatchHandlerType) = 0; + + /// Returns true if the given destructor type should be emitted as a linkonce + /// delegating thunk, regardless of whether the dtor is defined in this TU or + /// not. + virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, + CXXDtorType DT) const = 0; + + virtual mlir::cir::GlobalLinkageKind + getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor, + CXXDtorType DT) const; + + /// Get the address point of the vtable for the given base subobject. + virtual mlir::Value + getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) = 0; + + /// Get the address point of the vtable for the given base subobject while + /// building a constructor or a destructor. + virtual mlir::Value + getVTableAddressPointInStructor(CIRGenFunction &CGF, const CXXRecordDecl *RD, + BaseSubobject Base, + const CXXRecordDecl *NearestVBase) = 0; + + /// Specify how one should pass an argument of a record type. + enum class RecordArgABI { + /// Pass it using the normal C aggregate rules for the ABI, potentially + /// introducing extra copies and passing some or all of it in registers. + Default = 0, + + /// Pass it on the stack using its defined layout. The argument must be + /// evaluated directly into the correct stack position in the arguments + /// area, and the call machinery must not move it or introduce extra copies. + DirectInMemory, + + /// Pass it as a pointer to temporary memory. + Indirect + }; + + /// Returns how an argument of the given record type should be passed. + virtual RecordArgABI + getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + + /// Insert any ABI-specific implicit parameters into the parameter list for a + /// function. This generally involves extra data for constructors and + /// destructors. + /// + /// ABIs may also choose to override the return type, which has been + /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or + /// the formal return type of the function otherwise. + virtual void addImplicitStructorParams(CIRGenFunction &CGF, + clang::QualType &ResTy, + FunctionArgList &Params) = 0; + + /// Checks if ABI requires to initialize vptrs for given dynamic class. + virtual bool + doStructorsInitializeVPtrs(const clang::CXXRecordDecl *VTableClass) = 0; + + /// Returns true if the given constructor or destructor is one of the kinds + /// that the ABI says returns 'this' (only applies when called non-virtually + /// for destructors). + /// + /// There currently is no way to indicate if a destructor returns 'this' when + /// called virtually, and CIR generation does not support this case. + virtual bool HasThisReturn(clang::GlobalDecl GD) const { return false; } + + virtual bool hasMostDerivedReturn(clang::GlobalDecl GD) const { + return false; + } + + /// Returns true if the target allows calling a function through a pointer + /// with a different signature than the actual function (or equivalently, + /// bitcasting a function or function pointer to a different function type). + /// In principle in the most general case this could depend on the target, the + /// calling convention, and the actual types of the arguments and return + /// value. 
Here it just means whether the signature mismatch could *ever* be
+  /// allowed; in other words, does the target do strict checking of signatures
+  /// for all calls.
+  virtual bool canCallMismatchedFunctionType() const { return true; }
+
+  virtual ~CIRGenCXXABI();
+
+  void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Value ThisPtr);
+
+  // Determine if references to thread_local global variables can be made
+  // directly or require access through a thread wrapper function.
+  virtual bool usesThreadWrapperFunction(const VarDecl *VD) const = 0;
+
+  /// Emit the code to initialize hidden members required to handle virtual
+  /// inheritance, if needed by the ABI.
+  virtual void
+  initializeHiddenVirtualInheritanceMembers(CIRGenFunction &CGF,
+                                            const CXXRecordDecl *RD) {}
+
+  /// Emit a single constructor/destructor with the given type from a C++
+  /// constructor Decl.
+  virtual void buildCXXStructor(clang::GlobalDecl GD) = 0;
+
+  virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0;
+  virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0;
+
+  virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0;
+
+  virtual mlir::cir::DynamicCastInfoAttr
+  buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc,
+                       QualType SrcRecordTy, QualType DestRecordTy) = 0;
+
+  virtual mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF,
+                                             mlir::Location Loc, Address Value,
+                                             QualType SrcRecordTy) = 0;
+};
+
+/// Creates an Itanium-family ABI.
+CIRGenCXXABI *CreateCIRGenItaniumCXXABI(CIRGenModule &CGM);
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
new file mode 100644
index 000000000000..00aa6511169e
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -0,0 +1,1493 @@
+//===--- CIRGenCall.cpp - Encapsulate calling convention details ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+// +//===----------------------------------------------------------------------===// + +#include "CIRGenBuilder.h" +#include "CIRGenCXXABI.h" +#include "CIRGenFunction.h" +#include "CIRGenFunctionInfo.h" +#include "CIRGenTypes.h" +#include "TargetInfo.h" + +#include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "UnimplementedFeatureGuarding.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/IR/Types.h" + +using namespace cir; +using namespace clang; + +CIRGenFunctionInfo *CIRGenFunctionInfo::create( + unsigned cirCC, bool instanceMethod, bool chainCall, + const FunctionType::ExtInfo &info, + llvm::ArrayRef paramInfos, CanQualType resultType, + llvm::ArrayRef argTypes, RequiredArgs required) { + assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); + assert(!required.allowsOptionalArgs() || + required.getNumRequiredArgs() <= argTypes.size()); + + void *buffer = operator new(totalSizeToAlloc( + argTypes.size() + 1, paramInfos.size())); + + CIRGenFunctionInfo *FI = new (buffer) CIRGenFunctionInfo(); + FI->CallingConvention = cirCC; + FI->EffectiveCallingConvention = cirCC; + FI->ASTCallingConvention = info.getCC(); + FI->InstanceMethod = instanceMethod; + FI->ChainCall = chainCall; + FI->CmseNSCall = info.getCmseNSCall(); + FI->NoReturn = info.getNoReturn(); + FI->ReturnsRetained = info.getProducesResult(); + FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); + FI->NoCfCheck = info.getNoCfCheck(); + FI->Required = required; + FI->HasRegParm = info.getHasRegParm(); + FI->RegParm = info.getRegParm(); + FI->ArgStruct = nullptr; + FI->ArgStructAlign = 0; + FI->NumArgs = argTypes.size(); + FI->HasExtParameterInfos = !paramInfos.empty(); + FI->getArgsBuffer()[0].type = resultType; + for (unsigned i = 0; i < argTypes.size(); ++i) + FI->getArgsBuffer()[i + 1].type = argTypes[i]; + for (unsigned i = 0; i < paramInfos.size(); ++i) + FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; + + return FI; +} + +namespace { + +/// Encapsulates information about the way function arguments from +/// CIRGenFunctionInfo should be passed to actual CIR function. +class ClangToCIRArgMapping { + static const unsigned InvalidIndex = ~0U; + unsigned InallocaArgNo; + unsigned SRetArgNo; + unsigned TotalCIRArgs; + + /// Arguments of CIR function corresponding to single Clang argument. + struct CIRArgs { + unsigned PaddingArgIndex = 0; + // Argument is expanded to CIR arguments at positions + // [FirstArgIndex, FirstArgIndex + NumberOfArgs). + unsigned FirstArgIndex = 0; + unsigned NumberOfArgs = 0; + + CIRArgs() + : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), + NumberOfArgs(0) {} + }; + + SmallVector ArgInfo; + +public: + ClangToCIRArgMapping(const ASTContext &Context, const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs = false) + : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalCIRArgs(0), + ArgInfo(OnlyRequiredArgs ? 
FI.getNumRequiredArgs() : FI.arg_size()) { + construct(Context, FI, OnlyRequiredArgs); + } + + bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } + + bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } + + unsigned totalCIRArgs() const { return TotalCIRArgs; } + + bool hasPaddingArg(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; + } + + /// Returns index of first CIR argument corresponding to ArgNo, and their + /// quantity. + std::pair getCIRArgs(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, + ArgInfo[ArgNo].NumberOfArgs); + } + +private: + void construct(const ASTContext &Context, const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs); +}; + +void ClangToCIRArgMapping::construct(const ASTContext &Context, + const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs) { + unsigned CIRArgNo = 0; + bool SwapThisWithSRet = false; + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + assert(RetAI.getKind() != ABIArgInfo::Indirect && "NYI"); + + unsigned ArgNo = 0; + unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); + for (CIRGenFunctionInfo::const_arg_iterator I = FI.arg_begin(); + ArgNo < NumArgs; ++I, ++ArgNo) { + assert(I != FI.arg_end()); + const ABIArgInfo &AI = I->info; + // Collect data about CIR arguments corresponding to Clang argument ArgNo. + auto &CIRArgs = ArgInfo[ArgNo]; + + assert(!AI.getPaddingType() && "NYI"); + + switch (AI.getKind()) { + default: + llvm_unreachable("NYI"); + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + // Postpone splitting structs into elements since this makes it way + // more complicated for analysis to obtain information on the original + // arguments. + // + // TODO(cir): a LLVM lowering prepare pass should break this down into + // the appropriated pieces. + assert(!UnimplementedFeature::constructABIArgDirectExtend()); + CIRArgs.NumberOfArgs = 1; + break; + } + } + + if (CIRArgs.NumberOfArgs > 0) { + CIRArgs.FirstArgIndex = CIRArgNo; + CIRArgNo += CIRArgs.NumberOfArgs; + } + + assert(!SwapThisWithSRet && "NYI"); + } + assert(ArgNo == ArgInfo.size()); + + assert(!FI.usesInAlloca() && "NYI"); + + TotalCIRArgs = CIRArgNo; +} + +} // namespace + +static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, + ArrayRef ArgTypes) { + assert(ExplicitCC != CC_Swift && ExplicitCC != CC_SwiftAsync && "Swift NYI"); + assert(!CGM.getTarget().getCXXABI().isMicrosoft() && "MSABI NYI"); + + return false; +} + +mlir::cir::FuncType CIRGenTypes::GetFunctionType(GlobalDecl GD) { + const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); + return GetFunctionType(FI); +} + +mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { + bool Inserted = FunctionsBeingProcessed.insert(&FI).second; + (void)Inserted; + assert(Inserted && "Recursively being processed?"); + + mlir::Type resultType = nullptr; + const ABIArgInfo &retAI = FI.getReturnInfo(); + switch (retAI.getKind()) { + case ABIArgInfo::Ignore: + // TODO(CIR): This should probably be the None type from the builtin + // dialect. 
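+    // E.g. for a C function declared `void f(int)`, the return info is
+    // Ignore, so resultType stays null here and the void result is supplied
+    // below via Builder.getVoidTy() when the final FuncType is built
+    // (illustrative; see the GetFunctionType return at the bottom of this
+    // function).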
+ resultType = nullptr; + break; + + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: + resultType = retAI.getCoerceToType(); + break; + + default: + assert(false && "NYI"); + } + + ClangToCIRArgMapping CIRFunctionArgs(getContext(), FI, true); + SmallVector ArgTypes(CIRFunctionArgs.totalCIRArgs()); + + assert(!CIRFunctionArgs.hasSRetArg() && "NYI"); + assert(!CIRFunctionArgs.hasInallocaArg() && "NYI"); + + // Add in all of the required arguments. + unsigned ArgNo = 0; + CIRGenFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = it + FI.getNumRequiredArgs(); + + for (; it != ie; ++it, ++ArgNo) { + const auto &ArgInfo = it->info; + + assert(!CIRFunctionArgs.hasPaddingArg(ArgNo) && "NYI"); + + unsigned FirstCIRArg, NumCIRArgs; + std::tie(FirstCIRArg, NumCIRArgs) = CIRFunctionArgs.getCIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + default: + llvm_unreachable("NYI"); + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + mlir::Type argType = ArgInfo.getCoerceToType(); + // TODO: handle the test against llvm::StructType from codegen + assert(NumCIRArgs == 1); + ArgTypes[FirstCIRArg] = argType; + break; + } + } + } + + bool Erased = FunctionsBeingProcessed.erase(&FI); + (void)Erased; + assert(Erased && "Not in set?"); + + return mlir::cir::FuncType::get( + ArgTypes, (resultType ? resultType : Builder.getVoidTy()), + FI.isVariadic()); +} + +mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { + const CXXMethodDecl *MD = cast(GD.getDecl()); + const FunctionProtoType *FPT = MD->getType()->getAs(); + + if (!isFuncTypeConvertible(FPT)) { + llvm_unreachable("NYI"); + // return llvm::StructType::get(getLLVMContext()); + } + + return GetFunctionType(GD); +} + +CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { + if (isVirtual()) { + const CallExpr *CE = getVirtualCallExpr(); + return CGF.CGM.getCXXABI().getVirtualFunctionPointer( + CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), + CE ? CE->getBeginLoc() : SourceLocation()); + } + return *this; +} + +void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, + bool DestIsVolatile) { + // In LLVM codegen: + // Function to store a first-class aggregate into memory. We prefer to + // store the elements rather than the aggregate to be more friendly to + // fast-isel. + // In CIR codegen: + // Emit the most simple cir.store possible (e.g. a store for a whole + // struct), which can later be broken down in other CIR levels (or prior + // to dialect codegen). + (void)DestIsVolatile; + builder.createStore(*currSrcLoc, Val, Dest); +} + +static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, + const ABIArgInfo &info) { + if (unsigned offset = info.getDirectOffset()) { + llvm_unreachable("NYI"); + } + return addr; +} + +static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, + ASTContext &Ctx, + mlir::NamedAttrList &FuncAttrs, + const FunctionProtoType *FPT) { + if (!FPT) + return; + + if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && + FPT->isNothrow()) { + auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); + FuncAttrs.set(nu.getMnemonic(), nu); + } +} + +/// Construct the CIR attribute list of a function or call. +/// +/// When adding an attribute, please consider where it should be handled: +/// +/// - getDefaultFunctionAttributes is for attributes that are essentially +/// part of the global target configuration (but perhaps can be +/// overridden on a per-function basis). 
Adding attributes there
+///   will cause them to also be set in frontends that build on Clang's
+///   target-configuration logic, as well as for code defined in library
+///   modules such as CUDA's libdevice.
+///
+/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
+///   and adds declaration-specific, convention-specific, and
+///   frontend-specific logic. The last is of particular importance:
+///   attributes that restrict how the frontend generates code must be
+///   added here rather than getDefaultFunctionAttributes.
+///
+void CIRGenModule::ConstructAttributeList(StringRef Name,
+                                          const CIRGenFunctionInfo &FI,
+                                          CIRGenCalleeInfo CalleeInfo,
+                                          mlir::DictionaryAttr &Attrs,
+                                          bool AttrOnCallSite, bool IsThunk) {
+  // Implementation Disclaimer
+  //
+  // UnimplementedFeature and asserts are used throughout the code to track
+  // unsupported and not-yet-implemented things. However, most of this
+  // function's content is attribute detection, which is too large to track
+  // with those existing mechanisms.
+  //
+  // That said, for the most part, the approach here is very specific compared
+  // to the rest of CIRGen, and attributes and other handling should be done
+  // upon demand.
+  mlir::NamedAttrList FuncAttrs;
+
+  // Collect function CIR attributes from the CC lowering.
+  // TODO: NoReturn, cmse_nonsecure_call
+
+  // Collect function CIR attributes from the callee prototype if we have one.
+  AddAttributesFromFunctionProtoType(getBuilder(), astCtx, FuncAttrs,
+                                     CalleeInfo.getCalleeFunctionProtoType());
+
+  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
+
+  // TODO(cir): Attach assumption attributes to the declaration. If this is a
+  // call site, attach assumptions from the caller to the call as well.
+
+  bool HasOptnone = false;
+  (void)HasOptnone;
+  // The NoBuiltinAttr attached to the target FunctionDecl.
+  mlir::Attribute *NBA = nullptr;
+
+  if (TargetDecl) {
+
+    if (TargetDecl->hasAttr<NoThrowAttr>()) {
+      auto nu = mlir::cir::NoThrowAttr::get(builder.getContext());
+      FuncAttrs.set(nu.getMnemonic(), nu);
+    }
+
+    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+      AddAttributesFromFunctionProtoType(
+          getBuilder(), astCtx, FuncAttrs,
+          Fn->getType()->getAs<FunctionProtoType>());
+      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
+        // A sane operator new returns a non-aliasing pointer.
+        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
+        if (getCodeGenOpts().AssumeSaneOperatorNew &&
+            (Kind == OO_New || Kind == OO_Array_New))
+          ; // llvm::Attribute::NoAlias
+      }
+      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
+      const bool IsVirtualCall = MD && MD->isVirtual();
+      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
+      // virtual function. These attributes are not inherited by overloads.
+      if (!(AttrOnCallSite && IsVirtualCall)) {
+        if (Fn->isNoReturn())
+          ; // NoReturn
+        // NBA = Fn->getAttr<NoBuiltinAttr>();
+        (void)NBA;
+      }
+    }
+
+    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
+      // Only place nomerge attribute on call sites, never functions. This
+      // allows it to work on indirect virtual function calls.
+      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
+        ;
+    }
+
+    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
+    if (TargetDecl->hasAttr<ConstAttr>()) {
+      // gcc specifies that 'const' functions have greater restrictions than
+      // 'pure' functions, so they also cannot have infinite loops.
+    } else if (TargetDecl->hasAttr<PureAttr>()) {
+      // gcc specifies that 'pure' functions cannot have infinite loops.
+ } else if (TargetDecl->hasAttr()) { + } + + HasOptnone = TargetDecl->hasAttr(); + if (auto *AllocSize = TargetDecl->getAttr()) { + std::optional NumElemsParam; + if (AllocSize->getNumElemsParam().isValid()) + NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); + // TODO(cir): add alloc size attr. + } + + if (TargetDecl->hasAttr()) { + assert(!UnimplementedFeature::openCL()); + } + + if (TargetDecl->hasAttr() && + getLangOpts().OffloadUniformBlock) + assert(!UnimplementedFeature::CUDA()); + + if (TargetDecl->hasAttr()) + ; + } + + Attrs = mlir::DictionaryAttr::get(builder.getContext(), FuncAttrs); +} + +static mlir::cir::CIRCallOpInterface +buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, + mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, + mlir::cir::FuncOp directFuncOp, + SmallVectorImpl &CIRCallArgs, bool InvokeDest) { + auto &builder = CGF.getBuilder(); + + if (InvokeDest) { + auto addr = CGF.currLexScope->getExceptionInfo().addr; + if (indirectFuncTy) + return builder.create( + callLoc, addr, indirectFuncVal, indirectFuncTy, CIRCallArgs); + return builder.create(callLoc, directFuncOp, addr, + CIRCallArgs); + } + + if (indirectFuncTy) + return builder.create(callLoc, indirectFuncVal, + indirectFuncTy, CIRCallArgs); + return builder.create(callLoc, directFuncOp, CIRCallArgs); +} + +RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, + const CallArgList &CallArgs, + mlir::cir::CIRCallOpInterface *callOrTryCall, + bool IsMustTail, mlir::Location loc, + std::optional E) { + auto builder = CGM.getBuilder(); + // FIXME: We no longer need the types from CallArgs; lift up and simplify + + assert(Callee.isOrdinary() || Callee.isVirtual()); + + // Handle struct-return functions by passing a pointer to the location that we + // would like to return info. + QualType RetTy = CallInfo.getReturnType(); + const auto &RetAI = CallInfo.getReturnInfo(); + + mlir::cir::FuncType CIRFuncTy = getTypes().GetFunctionType(CallInfo); + + const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); + // This is not always tied to a FunctionDecl (e.g. builtins that are xformed + // into calls to other functions) + if (const FunctionDecl *FD = dyn_cast_or_null(TargetDecl)) { + // We can only guarantee that a function is called from the correct + // context/function based on the appropriate target attributes, + // so only check in the case where we have both always_inline and target + // since otherwise we could be making a conditional call after a check for + // the proper cpu features (and it won't cause code generation issues due to + // function based code generation). + if (TargetDecl->hasAttr() && + (TargetDecl->hasAttr() || + (CurFuncDecl && CurFuncDecl->hasAttr()))) { + // FIXME(cir): somehow refactor this function to use SourceLocation? + SourceLocation Loc; + checkTargetFeatures(Loc, FD); + } + + // Some architectures (such as x86-64) have the ABI changed based on + // attribute-target/features. Give them a chance to diagnose. + assert(!UnimplementedFeature::checkFunctionCallABI()); + } + + // TODO: add DNEBUG code + + // 1. Set up the arguments + + // If we're using inalloca, insert the allocation after the stack save. + // FIXME: Do this earlier rather than hacking it in here! 
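+  // (inalloca is the Windows/x86 mechanism for constructing arguments
+  //  directly in the outgoing call frame; CIRGen does not model it yet, as
+  //  the asserts below enforce.)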
+ Address ArgMemory = Address::invalid(); + assert(!CallInfo.getArgStruct() && "NYI"); + + ClangToCIRArgMapping CIRFunctionArgs(CGM.getASTContext(), CallInfo); + SmallVector CIRCallArgs(CIRFunctionArgs.totalCIRArgs()); + + // If the call returns a temporary with struct return, create a temporary + // alloca to hold the result, unless one is given to us. + assert(!RetAI.isIndirect() && !RetAI.isInAlloca() && + !RetAI.isCoerceAndExpand() && "NYI"); + + // When passing arguments using temporary allocas, we need to add the + // appropriate lifetime markers. This vector keeps track of all the lifetime + // markers that need to be ended right after the call. + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + + // Translate all of the arguments as necessary to match the CIR lowering. + assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); + unsigned ArgNo = 0; + CIRGenFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); + for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); + I != E; ++I, ++info_it, ++ArgNo) { + const ABIArgInfo &ArgInfo = info_it->info; + + // Insert a padding argument to ensure proper alignment. + assert(!CIRFunctionArgs.hasPaddingArg(ArgNo) && "Padding args NYI"); + + unsigned FirstCIRArg, NumCIRArgs; + std::tie(FirstCIRArg, NumCIRArgs) = CIRFunctionArgs.getCIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + case ABIArgInfo::Direct: { + if (!ArgInfo.getCoerceToType().isa() && + ArgInfo.getCoerceToType() == convertType(info_it->type) && + ArgInfo.getDirectOffset() == 0) { + assert(NumCIRArgs == 1); + mlir::Value V; + assert(!I->isAggregate() && "Aggregate NYI"); + V = I->getKnownRValue().getScalarVal(); + + assert(CallInfo.getExtParameterInfo(ArgNo).getABI() != + ParameterABI::SwiftErrorResult && + "swift NYI"); + + // We might have to widen integers, but we should never truncate. + if (ArgInfo.getCoerceToType() != V.getType() && + V.getType().isa()) + llvm_unreachable("NYI"); + + // If the argument doesn't match, perform a bitcast to coerce it. This + // can happen due to trivial type mismatches. + if (FirstCIRArg < CIRFuncTy.getNumInputs() && + V.getType() != CIRFuncTy.getInput(FirstCIRArg)) + V = builder.createBitcast(V, CIRFuncTy.getInput(FirstCIRArg)); + + CIRCallArgs[FirstCIRArg] = V; + break; + } + + // FIXME: Avoid the conversion through memory if possible. + Address Src = Address::invalid(); + if (!I->isAggregate()) { + llvm_unreachable("NYI"); + } else { + Src = I->hasLValue() ? I->getKnownLValue().getAddress() + : I->getKnownRValue().getAggregateAddress(); + } + + // If the value is offset in memory, apply the offset now. + Src = emitAddressAtOffset(*this, Src, ArgInfo); + + // Fast-isel and the optimizer generally like scalar values better than + // FCAs, so we flatten them if this is safe to do for this argument. + auto STy = dyn_cast(ArgInfo.getCoerceToType()); + if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { + auto SrcTy = Src.getElementType(); + // FIXME(cir): get proper location for each argument. + auto argLoc = loc; + + // If the source type is smaller than the destination type of the + // coerce-to logic, copy the source value into a temp alloca the size + // of the destination type to allow loading all of it. The bits past + // the source value are left undef. + // FIXME(cir): add data layout info and compare sizes instead of + // matching the types. 
+ // + // uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); + // uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); + // if (SrcSize < DstSize) { + if (SrcTy != STy) + llvm_unreachable("NYI"); + else { + // FIXME(cir): this currently only runs when the types are different, + // but should be when alloc sizes are different, fix this as soon as + // datalayout gets introduced. + Src = builder.createElementBitCast(argLoc, Src, STy); + } + + // assert(NumCIRArgs == STy.getMembers().size()); + // In LLVMGen: Still only pass the struct without any gaps but mark it + // as such somehow. + // + // In CIRGen: Emit a load from the "whole" struct, + // which shall be broken later by some lowering step into multiple + // loads. + assert(NumCIRArgs == 1 && "dont break up arguments here!"); + CIRCallArgs[FirstCIRArg] = builder.createLoad(argLoc, Src); + } else { + llvm_unreachable("NYI"); + } + + break; + } + default: + assert(false && "Only Direct support so far"); + } + } + + const CIRGenCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); + auto CalleePtr = ConcreteCallee.getFunctionPointer(); + + // If we're using inalloca, set up that argument. + assert(!ArgMemory.isValid() && "inalloca NYI"); + + // 2. Prepare the function pointer. + + // TODO: simplifyVariadicCallee + + // 3. Perform the actual call. + + // TODO: Deactivate any cleanups that we're supposed to do immediately before + // the call. + // if (!CallArgs.getCleanupsToDeactivate().empty()) + // deactivateArgCleanupsBeforeCall(*this, CallArgs); + // TODO: Update the largest vector width if any arguments have vector types. + + // Compute the calling convention and attributes. + mlir::DictionaryAttr Attrs; + StringRef FnName; + if (auto calleeFnOp = dyn_cast(CalleePtr)) + FnName = calleeFnOp.getName(); + CGM.ConstructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, + /*AttrOnCallSite=*/true, + /*IsThunk=*/false); + + // TODO: strictfp + // TODO: Add call-site nomerge, noinline, always_inline attribute if exists. + + // Apply some call-site-specific attributes. + // TODO: work this into building the attribute set. + + // Apply always_inline to all calls within flatten functions. + // FIXME: should this really take priority over __try, below? + // assert(!CurCodeDecl->hasAttr() && + // !TargetDecl->hasAttr() && "NYI"); + + // Disable inlining inside SEH __try blocks. + if (isSEHTryScope()) + llvm_unreachable("NYI"); + + // Decide whether to use a call or an invoke. + bool CannotThrow; + if (currentFunctionUsesSEHTry()) { + // SEH cares about asynchronous exceptions, so everything can "throw." + CannotThrow = false; + } else if (isCleanupPadScope() && + EHPersonality::get(*this).isMSVCXXPersonality()) { + // The MSVC++ personality will implicitly terminate the program if an + // exception is thrown during a cleanup outside of a try/catch. + // We don't need to model anything in IR to get this behavior. + CannotThrow = true; + } else { + // Otherwise, nounwind call sites will never throw. + auto noThrowAttr = mlir::cir::NoThrowAttr::get(builder.getContext()); + CannotThrow = Attrs.contains(noThrowAttr.getMnemonic()); + + if (auto fptr = dyn_cast(CalleePtr)) + if (fptr.getExtraAttrs().getElements().contains( + noThrowAttr.getMnemonic())) + CannotThrow = true; + } + auto InvokeDest = CannotThrow ? 
false : getInvokeDest();
+
+  // TODO: UnusedReturnSizePtr
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
+    assert(!FD->hasAttr() && "NYI");
+
+  // TODO: alignment attributes
+
+  // Emit the actual call op.
+  auto callLoc = loc;
+  assert(builder.getInsertionBlock() && "expected valid basic block");
+
+  mlir::cir::CIRCallOpInterface theCall = [&]() {
+    mlir::cir::FuncType indirectFuncTy;
+    mlir::Value indirectFuncVal;
+    mlir::cir::FuncOp directFuncOp;
+
+    if (auto fnOp = dyn_cast<mlir::cir::FuncOp>(CalleePtr)) {
+      directFuncOp = fnOp;
+    } else if (auto getGlobalOp = dyn_cast<mlir::cir::GetGlobalOp>(CalleePtr)) {
+      // FIXME(cir): This peephole optimization avoids indirect calls for
+      // builtins. This should be fixed in the builtin declaration instead by
+      // not emitting an unnecessary get_global in the first place.
+      auto *globalOp = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(),
+                                                         getGlobalOp.getName());
+      assert(globalOp && "undefined global function");
+      directFuncOp = llvm::dyn_cast<mlir::cir::FuncOp>(globalOp);
+      assert(directFuncOp && "operation is not a function");
+    } else {
+      [[maybe_unused]] auto resultTypes = CalleePtr->getResultTypes();
+      [[maybe_unused]] auto FuncPtrTy =
+          resultTypes.front().dyn_cast<mlir::cir::PointerType>();
+      assert((resultTypes.size() == 1) && FuncPtrTy &&
+             FuncPtrTy.getPointee().isa<mlir::cir::FuncType>() &&
+             "expected pointer to function");
+
+      indirectFuncTy = CIRFuncTy;
+      indirectFuncVal = CalleePtr->getResult(0);
+    }
+
+    mlir::cir::CIRCallOpInterface callLikeOp =
+        buildCallLikeOp(*this, callLoc, indirectFuncTy, indirectFuncVal,
+                        directFuncOp, CIRCallArgs, InvokeDest);
+
+    if (E)
+      callLikeOp->setAttr(
+          "ast", mlir::cir::ASTCallExprAttr::get(builder.getContext(), *E));
+
+    if (callOrTryCall)
+      *callOrTryCall = callLikeOp;
+    return callLikeOp;
+  }();
+
+  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
+    assert(!FD->getAttr() && "NYI");
+
+  // TODO: set attributes on callop
+  // assert(!theCall.getResults().getType().front().isSignlessInteger() &&
+  //        "Vector NYI");
+  // TODO: LLVM models indirect calls via a null callee, how should we do this?
+  assert(!CGM.getLangOpts().ObjCAutoRefCount && "Not supported");
+  assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI");
+  assert(!getDebugInfo() && "No debug info yet");
+  assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI");
+
+  // 4. Finish the call.
+
+  // If the call doesn't return, finish the basic block and clear the insertion
+  // point; this allows the rest of CIRGen to discard unreachable code.
+  // TODO: figure out how to support doesNotReturn
+
+  assert(!IsMustTail && "NYI");
+
+  // TODO: figure out writebacks? seems like ObjC only __autorelease
+
+  // TODO: cleanup argument memory at the end
+
+  // Extract the return value.
+  RValue ret = [&] {
+    switch (RetAI.getKind()) {
+    case ABIArgInfo::Direct: {
+      mlir::Type RetCIRTy = convertType(RetTy);
+      if (RetAI.getCoerceToType() == RetCIRTy && RetAI.getDirectOffset() == 0) {
+        switch (getEvaluationKind(RetTy)) {
+        case TEK_Aggregate: {
+          Address DestPtr = ReturnValue.getValue();
+          bool DestIsVolatile = ReturnValue.isVolatile();
+
+          if (!DestPtr.isValid()) {
+            DestPtr = CreateMemTemp(RetTy, callLoc, getCounterAggTmpAsString());
+            DestIsVolatile = false;
+          }
+
+          auto Results = theCall->getOpResults();
+          assert(Results.size() <= 1 && "multiple returns NYI");
+
+          SourceLocRAIIObject Loc{*this, callLoc};
+          buildAggregateStore(Results[0], DestPtr, DestIsVolatile);
+          return RValue::getAggregate(DestPtr);
+        }
+        case TEK_Scalar: {
+          // If the argument doesn't match, perform a bitcast to coerce it.
This + // can happen due to trivial type mismatches. + auto Results = theCall->getOpResults(); + assert(Results.size() <= 1 && "multiple returns NYI"); + assert(Results[0].getType() == RetCIRTy && "Bitcast support NYI"); + return RValue::get(Results[0]); + } + default: + llvm_unreachable("NYI"); + } + } else { + llvm_unreachable("No other forms implemented yet."); + } + } + + case ABIArgInfo::Ignore: + // If we are ignoring an argument that had a result, make sure to + // construct the appropriate return value for our caller. + return GetUndefRValue(RetTy); + + default: + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + return RValue{}; + }(); + + // TODO: implement assumed_aligned + + // TODO: implement lifetime extensions + + assert(RetTy.isDestructedType() != QualType::DK_nontrivial_c_struct && "NYI"); + + return ret; +} + +RValue CIRGenFunction::GetUndefRValue(QualType Ty) { + assert(Ty->isVoidType() && "Only VoidType supported so far."); + return RValue::get(nullptr); +} + +mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, + mlir::cir::FuncOp callee, + ArrayRef args) { + // TODO(cir): set the calling convention to this runtime call. + assert(!UnimplementedFeature::setCallingConv()); + + auto call = builder.create(loc, callee, args); + assert(call->getNumResults() <= 1 && + "runtime functions have at most 1 result"); + + if (call->getNumResults() == 0) + return nullptr; + + return call->getResult(0); +} + +void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, + QualType type) { + // TODO: Add the DisableDebugLocationUpdates helper + assert(!dyn_cast(E) && "NYI"); + + assert(type->isReferenceType() == E->isGLValue() && + "reference binding to unmaterialized r-value!"); + + if (E->isGLValue()) { + assert(E->getObjectKind() == OK_Ordinary); + return args.add(buildReferenceBindingToExpr(E), type); + } + + bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); + + // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. + // However, we still have to push an EH-only cleanup in case we unwind before + // we make it to the call. + if (type->isRecordType() && + type->castAs()->getDecl()->isParamDestroyedInCallee()) { + llvm_unreachable("Microsoft C++ ABI is NYI"); + } + + if (HasAggregateEvalKind && isa(E) && + cast(E)->getCastKind() == CK_LValueToRValue) { + LValue L = buildLValue(cast(E)->getSubExpr()); + assert(L.isSimple()); + args.addUncopiedAggregate(L, type); + return; + } + + args.add(buildAnyExprToTemp(E), type); +} + +QualType CIRGenFunction::getVarArgType(const Expr *Arg) { + // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC + // implicitly widens null pointer constants that are arguments to varargs + // functions to pointer-sized ints. + if (!getTarget().getTriple().isOSWindows()) + return Arg->getType(); + + if (Arg->getType()->isIntegerType() && + getContext().getTypeSize(Arg->getType()) < + getContext().getTargetInfo().getPointerWidth(LangAS::Default) && + Arg->isNullPointerConstant(getContext(), + Expr::NPC_ValueDependentIsNotNull)) { + return getContext().getIntPtrType(); + } + + return Arg->getType(); +} + +/// Similar to buildAnyExpr(), however, the result will always be accessible +/// even if no aggregate location is provided. 
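+///
+/// For example (hypothetical code, illustration only): in
+/// \code
+///   struct S { int a, b; };
+///   S make();
+///   void take(S s);
+///   void use() { take(make()); }
+/// \endcode
+/// the argument `make()` has aggregate evaluation kind and the caller provides
+/// no slot for it, so a temporary aggregate location is created below to keep
+/// the result addressable.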
+RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) {
+  AggValueSlot AggSlot = AggValueSlot::ignored();
+
+  if (hasAggregateEvaluationKind(E->getType()))
+    AggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()),
+                            getCounterAggTmpAsString());
+
+  return buildAnyExpr(E, AggSlot);
+}
+
+void CIRGenFunction::buildCallArgs(
+    CallArgList &Args, PrototypeWrapper Prototype,
+    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
+    AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
+
+  llvm::SmallVector<QualType, 16> ArgTypes;
+
+  assert((ParamsToSkip == 0 || Prototype.P) &&
+         "Can't skip parameters if type info is not provided");
+
+  // This variable only captures *explicitly* written conventions, not those
+  // applied by default via command line flags or target defaults, such as
+  // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
+  // require knowing if this is a C++ instance method or being able to see
+  // unprototyped FunctionTypes.
+  CallingConv ExplicitCC = CC_C;
+
+  // First, if a prototype was provided, use those argument types.
+  bool IsVariadic = false;
+  if (Prototype.P) {
+    const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
+    assert(!MD && "ObjCMethodDecl NYI");
+
+    const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
+    IsVariadic = FPT->isVariadic();
+    ExplicitCC = FPT->getExtInfo().getCC();
+    ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
+                    FPT->param_type_end());
+  }
+
+  // If we still have any arguments, emit them using the type of the argument.
+  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
+    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
+  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
+
+  // We must evaluate arguments from right to left in the MS C++ ABI, because
+  // arguments are destroyed left to right in the callee. As a special case,
+  // there are certain language constructs that require left-to-right
+  // evaluation, and in those cases we consider the evaluation order requirement
+  // to trump the "destruction order is reverse construction order" guarantee.
+  bool LeftToRight = true;
+  assert(!CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() &&
+         "MSABI NYI");
+  assert(!hasInAllocaArgs(CGM, ExplicitCC, ArgTypes) && "NYI");
+
+  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
+                                         RValue EmittedArg) {
+    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
+      return;
+    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
+    if (PS == nullptr)
+      return;
+
+    const auto &Context = getContext();
+    auto SizeTy = Context.getSizeType();
+    auto T = builder.getUIntNTy(Context.getTypeSize(SizeTy));
+    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
+    auto V = evaluateOrEmitBuiltinObjectSize(
+        Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
+    Args.add(RValue::get(V), SizeTy);
+    // If we're emitting args in reverse, be sure to do so with
+    // pass_object_size, as well.
+    if (!LeftToRight)
+      std::swap(Args.back(), *(&Args.back() - 1));
+  };
+
+  // Evaluate each argument in the appropriate order.
+  size_t CallArgsStart = Args.size();
+  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
+    unsigned Idx = LeftToRight ? I : E - I - 1;
+    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
+    unsigned InitialArgSize = Args.size();
+    assert(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) && "NYI");
+    assert(!isa_and_nonnull<ObjCMethodDecl>(AC.getDecl()) && "NYI");
+
+    buildCallArg(Args, *Arg, ArgTypes[Idx]);
+    // In particular, we depend on it being the last arg in Args, and the
+    // objectsize bits depend on there only being one arg if !LeftToRight.
+    assert(InitialArgSize + 1 == Args.size() &&
+           "The code below depends on only adding one arg per buildCallArg");
+    (void)InitialArgSize;
+    // Since pointer arguments are never emitted as LValue, it is safe to emit
+    // the non-null argument check for r-values only.
+    if (!Args.back().hasLValue()) {
+      RValue RVArg = Args.back().getKnownRValue();
+      assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI");
+      assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI");
+      // @llvm.objectsize should never have side-effects and shouldn't need
+      // destruction/cleanups, so we can safely "emit" it after its arg,
+      // regardless of right-to-leftness.
+      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
+    }
+  }
+
+  if (!LeftToRight) {
+    // Un-reverse the arguments we just evaluated so they match up with the CIR
+    // function.
+    std::reverse(Args.begin() + CallArgsStart, Args.end());
+  }
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+  return MD->getType()
+      ->getCanonicalTypeUnqualified()
+      .getAs<FunctionProtoType>();
+}
+
+/// TODO(cir): this should be shared with LLVM codegen
+static void addExtParameterInfosForCall(
+    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
+  assert(proto->hasExtParameterInfos());
+  assert(paramInfos.size() <= prefixArgs);
+  assert(proto->getNumParams() + prefixArgs <= totalArgs);
+
+  paramInfos.reserve(totalArgs);
+
+  // Add default infos for any prefix args that don't already have infos.
+  paramInfos.resize(prefixArgs);
+
+  // Add infos for the prototype.
+  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
+    paramInfos.push_back(ParamInfo);
+    // pass_object_size params have no parameter info.
+    if (ParamInfo.hasPassObjectSize())
+      paramInfos.emplace_back();
+  }
+
+  assert(paramInfos.size() <= totalArgs &&
+         "Did we forget to insert pass_object_size args?");
+  // Add default infos for the variadic and/or suffix arguments.
+  paramInfos.resize(totalArgs);
+}
+
+/// Adds the formal parameters in FPT to the given prefix. If any parameter in
+/// FPT has pass_object_size_attrs, then we'll add parameters for those, too.
+/// TODO(cir): this should be shared with LLVM codegen
+static void appendParameterTypes(
+    const CIRGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
+    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+    CanQual<FunctionProtoType> FPT) {
+  // Fast path: don't touch param info if we don't need to.
+  if (!FPT->hasExtParameterInfos()) {
+    assert(paramInfos.empty() &&
+           "We have paramInfos, but the prototype doesn't?");
+    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
+    return;
+  }
+
+  unsigned PrefixSize = prefix.size();
+  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
+  // parameters; the only thing that can change this is the presence of
+  // pass_object_size. So, we preallocate for the common case.
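+  // Illustrative example (hypothetical declaration, not from this patch):
+  // given
+  //   void f(void *p __attribute__((pass_object_size(0))), int n);
+  // the loop below appends {void*, size_t, int}: one extra size_t parameter
+  // is inserted right after each annotated pointer.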
+ prefix.reserve(prefix.size() + FPT->getNumParams()); + + auto ExtInfos = FPT->getExtParameterInfos(); + assert(ExtInfos.size() == FPT->getNumParams()); + for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { + prefix.push_back(FPT->getParamType(I)); + if (ExtInfos[I].hasPassObjectSize()) + prefix.push_back(CGT.getContext().getSizeType()); + } + + addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, + prefix.size()); +} + +const CIRGenFunctionInfo & +CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { + auto *MD = cast(GD.getDecl()); + + llvm::SmallVector argTypes; + SmallVector paramInfos; + argTypes.push_back(DeriveThisType(MD->getParent(), MD)); + + bool PassParams = true; + + if (auto *CD = dyn_cast(MD)) { + // A base class inheriting constructor doesn't get forwarded arguments + // needed to construct a virtual base (or base class thereof) + assert(!CD->getInheritedConstructor() && "Inheritance NYI"); + } + + CanQual FTP = GetFormalType(MD); + + if (PassParams) + appendParameterTypes(*this, argTypes, paramInfos, FTP); + + assert(paramInfos.empty() && "NYI"); + + assert(!MD->isVariadic() && "Variadic fns NYI"); + RequiredArgs required = RequiredArgs::All; + (void)required; + + FunctionType::ExtInfo extInfo = FTP->getExtInfo(); + + assert(!TheCXXABI.HasThisReturn(GD) && "NYI"); + + CanQualType resultType = Context.VoidTy; + (void)resultType; + + return arrangeCIRFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod, + argTypes, extInfo, paramInfos, required); +} + +/// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR +/// qualification. Either or both of RD and MD may be null. A null RD indicates +/// that there is no meaningful 'this' type, and a null MD can occur when +/// calling a method pointer. +CanQualType CIRGenTypes::DeriveThisType(const CXXRecordDecl *RD, + const CXXMethodDecl *MD) { + QualType RecTy; + if (RD) + RecTy = getContext().getTagDeclType(RD)->getCanonicalTypeInternal(); + else + assert(false && "CXXMethodDecl NYI"); + + if (MD) + RecTy = getContext().getAddrSpaceQualType( + RecTy, MD->getMethodQualifiers().getAddressSpace()); + return getContext().getPointerType(CanQualType::CreateUnsafe(RecTy)); +} + +/// Arrange the CIR function layout for a value of the given function type, on +/// top of any implicit parameters already stored. +static const CIRGenFunctionInfo & +arrangeCIRFunctionInfo(CIRGenTypes &CGT, FnInfoOpts instanceMethod, + SmallVectorImpl &prefix, + CanQual FTP) { + SmallVector paramInfos; + RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); + // FIXME: Kill copy. -- from codegen + appendParameterTypes(CGT, prefix, paramInfos, FTP); + CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); + + return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, prefix, + FTP->getExtInfo(), paramInfos, Required); +} + +/// Arrange the argument and result information for a value of the given +/// freestanding function type. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { + SmallVector argTypes; + return ::arrangeCIRFunctionInfo(*this, FnInfoOpts::None, argTypes, FTP); +} + +/// Arrange the argument and result information for a value of the given +/// unprototyped freestanding function type. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFreeFunctionType(CanQual FTNP) { + // When translating an unprototyped function type, always use a + // variadic type. 
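+  // e.g. (illustration only): a K&R-style declaration such as
+  //   int f();
+  // accepts any argument list at a call site, so the function info below is
+  // arranged with zero required arguments (RequiredArgs(0)).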
+  return arrangeCIRFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
+                                FnInfoOpts::None, std::nullopt,
+                                FTNP->getExtInfo(), {}, RequiredArgs(0));
+}
+
+const CIRGenFunctionInfo &
+CIRGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
+                                        const CallArgList &args) {
+  // FIXME: Kill copy.
+  SmallVector<CanQualType, 16> argTypes;
+  for (const auto &Arg : args)
+    argTypes.push_back(getContext().getCanonicalParamType(Arg.Ty));
+  llvm_unreachable("NYI");
+}
+
+/// Arrange a call to a C++ method, passing the given arguments.
+///
+/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
+/// parameter.
+/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
+/// args.
+/// PassProtoArgs indicates whether `args` has args for the parameters in the
+/// given CXXConstructorDecl.
+const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall(
+    const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind,
+    unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs) {
+
+  // FIXME: Kill copy.
+  llvm::SmallVector<CanQualType, 16> ArgTypes;
+  for (const auto &Arg : Args)
+    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+
+  // +1 for implicit this, which should always be args[0]
+  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
+
+  CanQual<FunctionProtoType> FPT = GetFormalType(D);
+  RequiredArgs Required = PassProtoArgs
+                              ? RequiredArgs::forPrototypePlus(
+                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
+                              : RequiredArgs::All;
+
+  GlobalDecl GD(D, CtorKind);
+  assert(!TheCXXABI.HasThisReturn(GD) && "ThisReturn NYI");
+  assert(!TheCXXABI.hasMostDerivedReturn(GD) && "Most derived return NYI");
+  CanQualType ResultType = Context.VoidTy;
+
+  FunctionType::ExtInfo Info = FPT->getExtInfo();
+  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
+  // If the prototype args are elided, we should only have ABI-specific args,
+  // which never have param info.
+  assert(!FPT->hasExtParameterInfos() && "NYI");
+
+  return arrangeCIRFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod,
+                                ArgTypes, Info, ParamInfos, Required);
+}
+
+bool CIRGenTypes::inheritingCtorHasParams(const InheritedConstructor &Inherited,
+                                          CXXCtorType Type) {
+
+  // Parameters are unnecessary if we're constructing a base class subobject
+  // and the inherited constructor lives in a virtual base.
+  return Type == Ctor_Complete ||
+         !Inherited.getShadowDecl()->constructsVirtualBase() ||
+         !Target.getCXXABI().hasConstructorVariants();
+}
+
+bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context,
+                                         QualType ReturnType) {
+  // We can't just discard the return value for a record type with a complex
+  // destructor or a non-trivially copyable type.
+  if (const RecordType *RT =
+          ReturnType.getCanonicalType()->getAs<RecordType>()) {
+    llvm_unreachable("NYI");
+  }
+
+  return ReturnType.isTriviallyCopyableType(Context);
+}
+
+static bool isInAllocaArgument(CIRGenCXXABI &ABI, QualType type) {
+  const auto *RD = type->getAsCXXRecordDecl();
+  return RD &&
+         ABI.getRecordArgABI(RD) == CIRGenCXXABI::RecordArgABI::DirectInMemory;
+}
+
+void CIRGenFunction::buildDelegateCallArg(CallArgList &args,
+                                          const VarDecl *param,
+                                          SourceLocation loc) {
+  // StartFunction converted the ABI-lowered parameter(s) into a local alloca.
+ // We need to turn that into an r-value suitable for buildCall + Address local = GetAddrOfLocalVar(param); + + QualType type = param->getType(); + + if (isInAllocaArgument(CGM.getCXXABI(), type)) { + llvm_unreachable("NYI"); + } + + // GetAddrOfLocalVar returns a pointer-to-pointer for references, but the + // argument needs to be the original pointer. + if (type->isReferenceType()) { + args.add( + RValue::get(builder.createLoad(getLoc(param->getSourceRange()), local)), + type); + } else if (getLangOpts().ObjCAutoRefCount) { + llvm_unreachable("NYI"); + // For the most part, we just need to load the alloca, except that aggregate + // r-values are actually pointers to temporaries. + } else { + args.add(convertTempToRValue(local, type, loc), type); + } + + // Deactivate the cleanup for the callee-destructed param that was pushed. + if (type->isRecordType() && !CurFuncIsThunk && + type->castAs()->getDecl()->isParamDestroyedInCallee() && + param->needsDestruction(getContext())) { + llvm_unreachable("NYI"); + } +} + +/// Returns the "extra-canonicalized" return type, which discards qualifiers on +/// the return type. Codegen doesn't care about them, and it makes ABI code a +/// little easier to be able to assume that all parameter and return types are +/// top-level unqualified. +/// FIXME(CIR): This should be a common helper extracted from CodeGen +static CanQualType GetReturnType(QualType RetTy) { + return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); +} + +/// Arrange a call as unto a free function, except possibly with an additional +/// number of formal parameters considered required. +static const CIRGenFunctionInfo & +arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, + const CallArgList &args, const FunctionType *fnType, + unsigned numExtraRequiredArgs, + FnInfoOpts chainCall) { + assert(args.size() >= numExtraRequiredArgs); + assert((chainCall != FnInfoOpts::IsChainCall) && "Chain call NYI"); + + llvm::SmallVector paramInfos; + + // In most cases, there are no optional arguments. + RequiredArgs required = RequiredArgs::All; + + // If we have a variadic prototype, the required arguments are the + // extra prefix plus the arguments in the prototype. + if (const FunctionProtoType *proto = dyn_cast(fnType)) { + if (proto->isVariadic()) + required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); + + if (proto->hasExtParameterInfos()) + addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, + args.size()); + } else if (llvm::isa(fnType)) { + assert(!UnimplementedFeature::targetCodeGenInfoIsProtoCallVariadic()); + required = RequiredArgs(args.size()); + } + + // FIXME: Kill copy. + SmallVector argTypes; + for (const auto &arg : args) + argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); + return CGT.arrangeCIRFunctionInfo(GetReturnType(fnType->getReturnType()), + chainCall, argTypes, fnType->getExtInfo(), + paramInfos, required); +} + +static llvm::SmallVector +getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { + llvm::SmallVector argTypes; + for (auto &arg : args) + argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); + return argTypes; +} + +static llvm::SmallVector +getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, + unsigned totalArgs) { + llvm::SmallVector result; + if (proto->hasExtParameterInfos()) { + llvm_unreachable("NYI"); + } + return result; +} + +/// Arrange a call to a C++ method, passing the given arguments. 
+///
+/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
+/// does not count `this`.
+const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall(
+    const CallArgList &args, const FunctionProtoType *proto,
+    RequiredArgs required, unsigned numPrefixArgs) {
+  assert(numPrefixArgs + 1 <= args.size() &&
+         "Emitting a call with less args than the required prefix?");
+  // Add one to account for `this`. It is a bit awkward here, but we don't
+  // count `this` in similar places elsewhere.
+  auto paramInfos =
+      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
+
+  // FIXME: Kill copy.
+  auto argTypes = getArgTypesForCall(Context, args);
+
+  auto info = proto->getExtInfo();
+  return arrangeCIRFunctionInfo(GetReturnType(proto->getReturnType()),
+                                FnInfoOpts::IsInstanceMethod, argTypes, info,
+                                paramInfos, required);
+}
+
+/// Figure out the rules for calling a function with the given formal type
+/// using the given arguments. The arguments are necessary because the function
+/// might be unprototyped, in which case it's target-dependent in crazy ways.
+const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall(
+    const CallArgList &args, const FunctionType *fnType, bool ChainCall) {
+  assert(!ChainCall && "ChainCall NYI");
+  return arrangeFreeFunctionLikeCall(
+      *this, CGM, args, fnType, ChainCall ? 1 : 0,
+      ChainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None);
+}
+
+/// Set calling convention for CUDA/HIP kernel.
+static void setCUDAKernelCallingConvention(CanQualType &FTy, CIRGenModule &CGM,
+                                           const FunctionDecl *FD) {
+  if (FD->hasAttr<CUDAGlobalAttr>()) {
+    llvm_unreachable("NYI");
+  }
+}
+
+/// Arrange the argument and result information for a declaration or definition
+/// of the given C++ non-static member function. The member function must be an
+/// ordinary function, i.e. not a constructor or destructor.
+const CIRGenFunctionInfo &
+CIRGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
+  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
+  CanQualType FT = GetFormalType(MD).getAs<Type>();
+  setCUDAKernelCallingConvention(FT, CGM, MD);
+  auto prototype = FT.getAs<FunctionProtoType>();
+
+  if (MD->isInstance()) {
+    // The abstract case is perfectly fine.
+    auto *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
+    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
+  }
+
+  return arrangeFreeFunctionType(prototype);
+}
+
+/// Arrange the argument and result information for a call to an unknown C++
+/// non-static member function of the given abstract type. (A null RD means we
+/// don't have any meaningful "this" argument type, so fall back to a generic
+/// pointer type). The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CIRGenFunctionInfo &
+CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+                                  const FunctionProtoType *FTP,
+                                  const CXXMethodDecl *MD) {
+  llvm::SmallVector<CanQualType, 16> argTypes;
+
+  // Add the 'this' pointer.
+  argTypes.push_back(DeriveThisType(RD, MD));
+
+  return ::arrangeCIRFunctionInfo(
+      *this, FnInfoOpts::IsInstanceMethod, argTypes,
+      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CIRGenFunctionInfo & +CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { + if (const auto *MD = dyn_cast(FD)) + if (MD->isInstance()) + return arrangeCXXMethodDeclaration(MD); + + auto FTy = FD->getType()->getCanonicalTypeUnqualified(); + + assert(isa(FTy)); + // TODO: setCUDAKernelCallingConvention + + // When declaring a function without a prototype, always use a non-variadic + // type. + if (CanQual noProto = FTy.getAs()) { + return arrangeCIRFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, + std::nullopt, noProto->getExtInfo(), {}, + RequiredArgs::All); + } + + return arrangeFreeFunctionType(FTy.castAs()); +} + +RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { + if (!HasLV) + return RV; + LValue Copy = CGF.makeAddrLValue(CGF.CreateMemTemp(Ty, loc), Ty); + CGF.buildAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, + LV.isVolatile()); + IsUsed = true; + return RValue::getAggregate(Copy.getAddress()); +} + +void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, + SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum) { + if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || + SanOpts.has(SanitizerKind::NullabilityArg))) + return; + llvm_unreachable("non-null arg check is NYI"); +} + +/* VarArg handling */ + +// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. We +// need to decide how to handle va_arg target-specific codegen. +mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { + assert(!VE->isMicrosoftABI() && "NYI"); + auto loc = CGM.getLoc(VE->getExprLoc()); + auto type = ConvertType(VE->getType()); + auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); + return builder.create(loc, type, vaList); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h new file mode 100644 index 000000000000..a192c6e1db80 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -0,0 +1,298 @@ +//===----- CIRGenCall.h - Encapsulate calling convention details ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENCALL_H +#define LLVM_CLANG_LIB_CODEGEN_CIRGENCALL_H + +#include "CIRGenValue.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Type.h" + +#include "llvm/ADT/SmallVector.h" + +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +#include "mlir/IR/BuiltinOps.h" + +namespace cir { +class CIRGenFunction; + +/// Abstract information about a function or function prototype. 
+class CIRGenCalleeInfo {
+  const clang::FunctionProtoType *CalleeProtoTy;
+  clang::GlobalDecl CalleeDecl;
+
+public:
+  explicit CIRGenCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+  CIRGenCalleeInfo(const clang::FunctionProtoType *calleeProtoTy,
+                   clang::GlobalDecl calleeDecl)
+      : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
+  CIRGenCalleeInfo(const clang::FunctionProtoType *calleeProtoTy)
+      : CalleeProtoTy(calleeProtoTy) {}
+  CIRGenCalleeInfo(clang::GlobalDecl calleeDecl)
+      : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
+
+  const clang::FunctionProtoType *getCalleeFunctionProtoType() const {
+    return CalleeProtoTy;
+  }
+  const clang::GlobalDecl getCalleeDecl() const { return CalleeDecl; }
+};
+
+/// All available information about a concrete callee.
+class CIRGenCallee {
+  enum class SpecialKind : uintptr_t {
+    Invalid,
+    Builtin,
+    PseudoDestructor,
+    Virtual,
+
+    Last = Virtual
+  };
+
+  struct BuiltinInfoStorage {
+    const clang::FunctionDecl *Decl;
+    unsigned ID;
+  };
+  struct PseudoDestructorInfoStorage {
+    const clang::CXXPseudoDestructorExpr *Expr;
+  };
+  struct VirtualInfoStorage {
+    const clang::CallExpr *CE;
+    clang::GlobalDecl MD;
+    Address Addr;
+    mlir::cir::FuncType FTy;
+  };
+
+  SpecialKind KindOrFunctionPointer;
+
+  union {
+    CIRGenCalleeInfo AbstractInfo;
+    BuiltinInfoStorage BuiltinInfo;
+    PseudoDestructorInfoStorage PseudoDestructorInfo;
+    VirtualInfoStorage VirtualInfo;
+  };
+
+  explicit CIRGenCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}
+
+public:
+  CIRGenCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}
+
+  // Construct a callee. Call this constructor directly when this isn't a
+  // direct call.
+  CIRGenCallee(const CIRGenCalleeInfo &abstractInfo,
+               mlir::Operation *functionPtr)
+      : KindOrFunctionPointer(
+            SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
+    AbstractInfo = abstractInfo;
+    assert(functionPtr && "configuring callee without function pointer");
+    // TODO: codegen asserts functionPtr is a pointer
+    // TODO: codegen asserts functionPtr is either an opaque pointer type or a
+    // pointer to a function
+  }
+
+  static CIRGenCallee
+  forDirect(mlir::Operation *functionPtr,
+            const CIRGenCalleeInfo &abstractInfo = CIRGenCalleeInfo()) {
+    return CIRGenCallee(abstractInfo, functionPtr);
+  }
+
+  bool isBuiltin() const {
+    return KindOrFunctionPointer == SpecialKind::Builtin;
+  }
+
+  const clang::FunctionDecl *getBuiltinDecl() const {
+    assert(isBuiltin());
+    return BuiltinInfo.Decl;
+  }
+  unsigned getBuiltinID() const {
+    assert(isBuiltin());
+    return BuiltinInfo.ID;
+  }
+
+  static CIRGenCallee forBuiltin(unsigned builtinID,
+                                 const clang::FunctionDecl *builtinDecl) {
+    CIRGenCallee result(SpecialKind::Builtin);
+    result.BuiltinInfo.Decl = builtinDecl;
+    result.BuiltinInfo.ID = builtinID;
+    return result;
+  }
+
+  bool isPseudoDestructor() const {
+    return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
+  }
+
+  bool isOrdinary() const {
+    return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
+  }
+
+  /// If this is a delayed callee computation of some sort, prepare a concrete
+  /// callee
+  CIRGenCallee prepareConcreteCallee(CIRGenFunction &CGF) const;
+
+  mlir::Operation *getFunctionPointer() const {
+    assert(isOrdinary());
+    return reinterpret_cast<mlir::Operation *>(KindOrFunctionPointer);
+  }
+
+  CIRGenCalleeInfo getAbstractInfo() const {
+    if (isVirtual())
+      return VirtualInfo.MD;
+    assert(isOrdinary());
+    return AbstractInfo;
+  }
+
+  bool isVirtual() const {
+    return KindOrFunctionPointer == SpecialKind::Virtual;
} + + static CIRGenCallee forVirtual(const clang::CallExpr *CE, + clang::GlobalDecl MD, Address Addr, + mlir::cir::FuncType FTy) { + CIRGenCallee result(SpecialKind::Virtual); + result.VirtualInfo.CE = CE; + result.VirtualInfo.MD = MD; + result.VirtualInfo.Addr = Addr; + result.VirtualInfo.FTy = FTy; + return result; + } + + const clang::CallExpr *getVirtualCallExpr() const { + assert(isVirtual()); + return VirtualInfo.CE; + } + + clang::GlobalDecl getVirtualMethodDecl() const { + assert(isVirtual()); + return VirtualInfo.MD; + } + Address getThisAddress() const { + assert(isVirtual()); + return VirtualInfo.Addr; + } + mlir::cir::FuncType getVirtualFunctionType() const { + assert(isVirtual()); + return VirtualInfo.FTy; + } + + void setFunctionPointer(mlir::Operation *functionPtr) { + assert(isOrdinary()); + KindOrFunctionPointer = + SpecialKind(reinterpret_cast(functionPtr)); + } +}; + +struct CallArg { +private: + union { + RValue RV; + LValue LV; /// This argument is semantically a load from this l-value + }; + bool HasLV; + + /// A data-flow flag to make sure getRValue and/or copyInto are not + /// called twice for duplicated IR emission. + mutable bool IsUsed; + +public: + clang::QualType Ty; + CallArg(RValue rv, clang::QualType ty) + : RV(rv), HasLV(false), IsUsed(false), Ty(ty) { + (void)IsUsed; + } + CallArg(LValue lv, clang::QualType ty) + : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {} + + /// \returns an independent RValue. If the CallArg contains an LValue, + /// a temporary copy is returned. + RValue getRValue(CIRGenFunction &CGF, mlir::Location loc) const; + + bool hasLValue() const { return HasLV; } + + LValue getKnownLValue() const { + assert(HasLV && !IsUsed); + return LV; + } + + RValue getKnownRValue() const { + assert(!HasLV && !IsUsed); + return RV; + } + + bool isAggregate() const { return HasLV || RV.isAggregate(); } +}; + +class CallArgList : public llvm::SmallVector { +public: + CallArgList() {} + + struct Writeback { + LValue Source; + }; + + void add(RValue rvalue, clang::QualType type) { + push_back(CallArg(rvalue, type)); + } + + void addUncopiedAggregate(LValue LV, clang::QualType type) { + push_back(CallArg(LV, type)); + } + + /// Add all the arguments from another CallArgList to this one. After doing + /// this, the old CallArgList retains its list of arguments, but must not + /// be used to emit a call. + void addFrom(const CallArgList &other) { + insert(end(), other.begin(), other.end()); + // TODO: Writebacks, CleanupsToDeactivate, StackBase??? + } +}; + +/// Type for representing both the decl and type of parameters to a function. +/// The decl must be either a ParmVarDecl or ImplicitParamDecl. +class FunctionArgList : public llvm::SmallVector {}; + +/// Contains the address where the return value of a function can be stored, and +/// whether the address is volatile or not. 
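+///
+/// For instance (hypothetical caller, illustration only): when emitting
+/// \code
+///   S s = g(); // S returned indirectly through a hidden pointer
+/// \endcode
+/// the caller can pass ReturnValueSlot(addressOfS, /*IsVolatile=*/false) so
+/// the callee's result is constructed directly in `s` instead of a temporary.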
+class ReturnValueSlot {
+  Address Addr = Address::invalid();
+
+  // Return value slot flags
+  unsigned IsVolatile : 1;
+  unsigned IsUnused : 1;
+  unsigned IsExternallyDestructed : 1;
+
+public:
+  ReturnValueSlot()
+      : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
+  ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
+                  bool IsExternallyDestructed = false)
+      : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
+        IsExternallyDestructed(IsExternallyDestructed) {}
+
+  bool isNull() const { return !Addr.isValid(); }
+  bool isVolatile() const { return IsVolatile; }
+  Address getValue() const { return Addr; }
+  bool isUnused() const { return IsUnused; }
+  bool isExternallyDestructed() const { return IsExternallyDestructed; }
+};
+
+enum class FnInfoOpts {
+  None = 0,
+  IsInstanceMethod = 1 << 0,
+  IsChainCall = 1 << 1,
+  IsDelegateCall = 1 << 2,
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
new file mode 100644
index 000000000000..a209e4e8866c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -0,0 +1,1647 @@
+//===--- CIRGenClass.cpp - Emit CIR Code for C++ classes --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunction.h"
+#include "UnimplementedFeatureGuarding.h"
+
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/NoSanitizeList.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace cir;
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e. emitting the
+/// complete constructor as a simple call to the base constructor.
+bool CIRGenFunction::IsConstructorDelegationValid(
+    const CXXConstructorDecl *Ctor) {
+
+  // Currently we disable the optimization for classes with virtual bases
+  // because (1) the addresses of parameter variables need to be consistent
+  // across all initializers but (2) the delegate function call necessarily
+  // creates a second copy of the parameter variable.
+  //
+  // The limiting example (purely theoretical AFAIK):
+  //   struct A { A(int &c) { c++; } };
+  //   struct B : virtual A {
+  //     B(int count) : A(count) { printf("%d\n", count); }
+  //   };
+  // ...although even this example could in principle be emitted as a
+  // delegation since the address of the parameter doesn't escape.
+  if (Ctor->getParent()->getNumVBases())
+    return false;
+
+  // We also disable the optimization for variadic functions because it's
+  // impossible to "re-pass" varargs.
+  if (Ctor->getType()->castAs<FunctionType>()->isVariadic())
+    return false;
+
+  // FIXME: Decide if we can do a delegation of a delegating constructor.
+  if (Ctor->isDelegatingConstructor())
+    llvm_unreachable("NYI");
+
+  return true;
+}
+
+/// TODO(cir): strong candidate for AST helper to be shared between LLVM and CIR
+/// codegen.
+static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
+  auto *CD = dyn_cast<CXXConstructorDecl>(D);
+  if (!(CD && CD->isCopyOrMoveConstructor()) &&
+      !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
+    return false;
+
+  // We can emit a memcpy for a trivial copy or move constructor/assignment.
+  if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
+    return true;
+
+  // We *must* emit a memcpy for a defaulted union copy or move op.
+  if (D->getParent()->isUnion() && D->isDefaulted())
+    return true;
+
+  return false;
+}
+
+namespace {
+/// TODO(cir): a lot of what we see under this namespace is a strong candidate
+/// to be shared between LLVM and CIR codegen.
+
+/// RAII object to indicate that codegen is copying the value representation
+/// instead of the object representation. Useful when copying a struct or
+/// class which has uninitialized members and we're only performing
+/// lvalue-to-rvalue conversion on the object but not its members.
+class CopyingValueRepresentation {
+public:
+  explicit CopyingValueRepresentation(CIRGenFunction &CGF)
+      : CGF(CGF), OldSanOpts(CGF.SanOpts) {
+    CGF.SanOpts.set(SanitizerKind::Bool, false);
+    CGF.SanOpts.set(SanitizerKind::Enum, false);
+  }
+  ~CopyingValueRepresentation() { CGF.SanOpts = OldSanOpts; }
+
+private:
+  CIRGenFunction &CGF;
+  SanitizerSet OldSanOpts;
+};
+
+class FieldMemcpyizer {
+public:
+  FieldMemcpyizer(CIRGenFunction &CGF, const CXXRecordDecl *ClassDecl,
+                  const VarDecl *SrcRec)
+      : CGF(CGF), ClassDecl(ClassDecl),
+        // SrcRec(SrcRec),
+        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
+        FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
+        LastFieldOffset(0), LastAddedFieldIndex(0) {
+    (void)SrcRec;
+  }
+
+  bool isMemcpyableField(FieldDecl *F) const {
+    // Never memcpy fields when we are adding poisoned paddings.
+    if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
+      return false;
+    Qualifiers Qual = F->getType().getQualifiers();
+    if (Qual.hasVolatile() || Qual.hasObjCLifetime())
+      return false;
+
+    return true;
+  }
+
+  void addMemcpyableField(FieldDecl *F) {
+    if (F->isZeroSize(CGF.getContext()))
+      return;
+    if (!FirstField)
+      addInitialField(F);
+    else
+      addNextField(F);
+  }
+
+  CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
+    ASTContext &Ctx = CGF.getContext();
+    unsigned LastFieldSize =
+        LastField->isBitField()
+            ? LastField->getBitWidthValue(Ctx)
+            : Ctx.toBits(
+                  Ctx.getTypeInfoDataSizeInChars(LastField->getType()).Width);
+    uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
+                              FirstByteOffset + Ctx.getCharWidth() - 1;
+    CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits);
+    return MemcpySize;
+  }
+
+  void buildMemcpy() {
+    // Give the subclass a chance to bail out if it feels the memcpy isn't
+    // worth it (e.g. Hasn't aggregated enough data).
+    if (!FirstField) {
+      return;
+    }
+
+    llvm_unreachable("NYI");
+  }
+
+  void reset() { FirstField = nullptr; }
+
+protected:
+  CIRGenFunction &CGF;
+  const CXXRecordDecl *ClassDecl;
+
+private:
+  void buildMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
+    llvm_unreachable("NYI");
+  }
+
+  void addInitialField(FieldDecl *F) {
+    FirstField = F;
+    LastField = F;
+    FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
+    LastFieldOffset = FirstFieldOffset;
+    LastAddedFieldIndex = F->getFieldIndex();
+  }
+
+  void addNextField(FieldDecl *F) {
+    // For the most part, the following invariant will hold:
+    //   F->getFieldIndex() == LastAddedFieldIndex + 1
+    // The one exception is that Sema won't add a copy-initializer for an
+    // unnamed bitfield, which will show up here as a gap in the sequence.
+    assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
+           "Cannot aggregate fields out of order.");
+    LastAddedFieldIndex = F->getFieldIndex();
+
+    // The 'first' and 'last' fields are chosen by offset, rather than field
+    // index. This allows the code to support bitfields, as well as regular
+    // fields.
+    uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
+    if (FOffset < FirstFieldOffset) {
+      FirstField = F;
+      FirstFieldOffset = FOffset;
+    } else if (FOffset >= LastFieldOffset) {
+      LastField = F;
+      LastFieldOffset = FOffset;
+    }
+  }
+
+  // const VarDecl *SrcRec;
+  const ASTRecordLayout &RecLayout;
+  FieldDecl *FirstField;
+  FieldDecl *LastField;
+  uint64_t FirstFieldOffset, LastFieldOffset;
+  unsigned LastAddedFieldIndex;
+};
+
+static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF,
+                                                 CXXCtorInitializer *MemberInit,
+                                                 LValue &LHS) {
+  FieldDecl *Field = MemberInit->getAnyMember();
+  if (MemberInit->isIndirectMemberInitializer()) {
+    llvm_unreachable("NYI");
+  } else {
+    LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName());
+  }
+}
+
+static void buildMemberInitializer(CIRGenFunction &CGF,
+                                   const CXXRecordDecl *ClassDecl,
+                                   CXXCtorInitializer *MemberInit,
+                                   const CXXConstructorDecl *Constructor,
+                                   FunctionArgList &Args) {
+  // TODO: ApplyDebugLocation
+  assert(MemberInit->isAnyMemberInitializer() &&
+         "Must have member initializer!");
+  assert(MemberInit->getInit() && "Must have initializer!");
+
+  // non-static data member initializers
+  FieldDecl *Field = MemberInit->getAnyMember();
+  QualType FieldType = Field->getType();
+
+  auto ThisPtr = CGF.LoadCXXThis();
+  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+  LValue LHS;
+
+  // If a base constructor is being emitted, create an LValue that has the
+  // non-virtual alignment.
+  if (CGF.CurGD.getCtorType() == Ctor_Base)
+    LHS = CGF.MakeNaturalAlignPointeeAddrLValue(ThisPtr, RecordTy);
+  else
+    LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+
+  buildLValueForAnyFieldInitialization(CGF, MemberInit, LHS);
+
+  // Special case: If we are in a copy or move constructor, and we are copying
+  // an array of PODs or classes with trivial copy constructors, ignore the
+  // AST and perform the copy we know is equivalent.
+  // FIXME: This is hacky at best... if we had a bit more explicit information
+  // in the AST, we could generalize it more easily.
+  const ConstantArrayType *Array =
+      CGF.getContext().getAsConstantArrayType(FieldType);
+  if (Array && Constructor->isDefaulted() &&
+      Constructor->isCopyOrMoveConstructor()) {
+    llvm_unreachable("NYI");
+  }
+
+  CGF.buildInitializerForField(Field, LHS, MemberInit->getInit());
+}
+
+class ConstructorMemcpyizer : public FieldMemcpyizer {
+private:
+  /// Get source argument for copy constructor. Returns null if not a copy
+  /// constructor.
+  static const VarDecl *getTrivialCopySource(CIRGenFunction &CGF,
+                                             const CXXConstructorDecl *CD,
+                                             FunctionArgList &Args) {
+    if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
+      return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
+
+    return nullptr;
+  }
+
+  // Returns true if a CXXCtorInitializer represents a member initialization
+  // that can be rolled into a memcpy.
+  bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
+    if (!MemcpyableCtor)
+      return false;
+
+    assert(!UnimplementedFeature::fieldMemcpyizerBuildMemcpy());
+    return false;
+  }
+
+public:
+  ConstructorMemcpyizer(CIRGenFunction &CGF, const CXXConstructorDecl *CD,
+                        FunctionArgList &Args)
+      : FieldMemcpyizer(CGF, CD->getParent(),
+                        getTrivialCopySource(CGF, CD, Args)),
+        ConstructorDecl(CD),
+        MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() &&
+                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
+        Args(Args) {}
+
+  void addMemberInitializer(CXXCtorInitializer *MemberInit) {
+    if (isMemberInitMemcpyable(MemberInit)) {
+      AggregatedInits.push_back(MemberInit);
+      addMemcpyableField(MemberInit->getMember());
+    } else {
+      buildAggregatedInits();
+      buildMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
+                             ConstructorDecl, Args);
+    }
+  }
+
+  void buildAggregatedInits() {
+    if (AggregatedInits.size() <= 1) {
+      // This memcpy is too small to be worthwhile. Fall back on default
+      // codegen.
+      if (!AggregatedInits.empty()) {
+        llvm_unreachable("NYI");
+      }
+      reset();
+      return;
+    }
+
+    pushEHDestructors();
+    buildMemcpy();
+    AggregatedInits.clear();
+  }
+
+  void pushEHDestructors() {
+    Address ThisPtr = CGF.LoadCXXThisAddress();
+    QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+    LValue LHS = CGF.makeAddrLValue(ThisPtr, RecordTy);
+    (void)LHS;
+
+    for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
+      CXXCtorInitializer *MemberInit = AggregatedInits[i];
+      QualType FieldType = MemberInit->getAnyMember()->getType();
+      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
+      if (!CGF.needsEHCleanup(dtorKind))
+        continue;
+      LValue FieldLHS = LHS;
+      buildLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
+      CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
+    }
+  }
+
+  void finish() { buildAggregatedInits(); }
+
+private:
+  const CXXConstructorDecl *ConstructorDecl;
+  bool MemcpyableCtor;
+  FunctionArgList &Args;
+  SmallVector<CXXCtorInitializer *, 16> AggregatedInits;
+};
+
+class AssignmentMemcpyizer : public FieldMemcpyizer {
+private:
+  // Returns the memcpyable field copied by the given statement, if one
+  // exists. Otherwise returns null.
+  FieldDecl *getMemcpyableField(Stmt *S) {
+    if (!AssignmentsMemcpyable)
+      return nullptr;
+    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
+      // Recognise trivial assignments.
+      if (BO->getOpcode() != BO_Assign)
+        return nullptr;
+      MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
+      if (!ME)
+        return nullptr;
+      FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
+      if (!Field || !isMemcpyableField(Field))
+        return nullptr;
+      Stmt *RHS = BO->getRHS();
+      if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
+        RHS = EC->getSubExpr();
+      if (!RHS)
+        return nullptr;
+      if (MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS)) {
+        if (ME2->getMemberDecl() == Field)
+          return Field;
+      }
+      return nullptr;
+    } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
+      CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
+      if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
+        return nullptr;
+      MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
+      if (!IOA)
+        return nullptr;
+      FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
+      if (!Field || !isMemcpyableField(Field))
+        return nullptr;
+      MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
+      if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
+        return nullptr;
+      return Field;
+    } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+      FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+      if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
+        return nullptr;
+      Expr *DstPtr = CE->getArg(0);
+      if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
+        DstPtr = DC->getSubExpr();
+      UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
+      if (!DUO || DUO->getOpcode() != UO_AddrOf)
+        return nullptr;
+      MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
+      if (!ME)
+        return nullptr;
+      FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
+      if (!Field || !isMemcpyableField(Field))
+        return nullptr;
+      Expr *SrcPtr = CE->getArg(1);
+      if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
+        SrcPtr = SC->getSubExpr();
+      UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
+      if (!SUO || SUO->getOpcode() != UO_AddrOf)
+        return nullptr;
+      MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
+      if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
+        return nullptr;
+      return Field;
+    }
+
+    return nullptr;
+  }
+
+  bool AssignmentsMemcpyable;
+  SmallVector<Stmt *, 16> AggregatedStmts;
+
+public:
+  AssignmentMemcpyizer(CIRGenFunction &CGF, const CXXMethodDecl *AD,
+                       FunctionArgList &Args)
+      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
+        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
+    assert(Args.size() == 2);
+  }
+
+  void emitAssignment(Stmt *S) {
+    FieldDecl *F = getMemcpyableField(S);
+    if (F) {
+      addMemcpyableField(F);
+      AggregatedStmts.push_back(S);
+    } else {
+      emitAggregatedStmts();
+      if (CGF.buildStmt(S, /*useCurrentScope=*/true).failed())
+        llvm_unreachable("Should not get here!");
+    }
+  }
+
+  void emitAggregatedStmts() {
+    if (AggregatedStmts.size() <= 1) {
+      if (!AggregatedStmts.empty()) {
+        CopyingValueRepresentation CVR(CGF);
+        if (CGF.buildStmt(AggregatedStmts[0], /*useCurrentScope=*/true)
+                .failed())
+          llvm_unreachable("Should not get here!");
+      }
+      reset();
+    }
+
+    buildMemcpy();
+    AggregatedStmts.clear();
+  }
+
+  void finish() { emitAggregatedStmts(); }
+};
+} // namespace
+
+static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
+  const Type *BaseType = BaseInit->getBaseClass();
+  const auto *BaseClassDecl =
+      cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+  return BaseClassDecl->isDynamicClass();
+}
+
+namespace {
+/// Call the destructor for a direct base class.
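+///
+/// For example (hypothetical hierarchy, illustration only): in
+/// \code
+///   struct Base { ~Base(); };
+///   struct Derived : Base { ~Derived(); };
+/// \endcode
+/// emitting ~Derived() pushes this cleanup so that Base::~Base() runs on the
+/// Base subobject, including when unwinding from an exception.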
+struct CallBaseDtor final : EHScopeStack::Cleanup { + const CXXRecordDecl *BaseClass; + bool BaseIsVirtual; + CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) + : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + const CXXRecordDecl *DerivedClass = + cast(CGF.CurCodeDecl)->getParent(); + + const CXXDestructorDecl *D = BaseClass->getDestructor(); + // We are already inside a destructor, so presumably the object being + // destroyed should have the expected type. + QualType ThisTy = D->getFunctionObjectParameterType(); + assert(CGF.currSrcLoc && "expected source location"); + Address Addr = CGF.getAddressOfDirectBaseInCompleteClass( + *CGF.currSrcLoc, CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, + BaseIsVirtual); + CGF.buildCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, + /*Delegating=*/false, Addr, ThisTy); + } +}; + +/// A visitor which checks whether an initializer uses 'this' in a +/// way which requires the vtable to be properly set. +struct DynamicThisUseChecker + : ConstEvaluatedExprVisitor { + typedef ConstEvaluatedExprVisitor super; + + bool UsesThis; + + DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {} + + // Black-list all explicit and implicit references to 'this'. + // + // Do we need to worry about external references to 'this' derived + // from arbitrary code? If so, then anything which runs arbitrary + // external code might potentially access the vtable. + void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; } +}; +} // end anonymous namespace + +static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { + DynamicThisUseChecker Checker(C); + Checker.Visit(Init); + return Checker.UsesThis; +} + +/// Gets the address of a direct base class within a complete object. +/// This should only be used for (1) non-virtual bases or (2) virtual bases +/// when the type is known to be complete (e.g. in complete destructors). +/// +/// The object pointed to by 'This' is assumed to be non-null. +Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( + mlir::Location loc, Address This, const CXXRecordDecl *Derived, + const CXXRecordDecl *Base, bool BaseIsVirtual) { + // 'this' must be a pointer (in some address space) to Derived. + assert(This.getElementType() == ConvertType(Derived)); + + // Compute the offset of the virtual base. + CharUnits Offset; + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); + if (BaseIsVirtual) + Offset = Layout.getVBaseClassOffset(Base); + else + Offset = Layout.getBaseClassOffset(Base); + + // Shift and cast down to the base type. + // TODO: for complete types, this should be possible with a GEP. 
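+  // Sketch of the computation below (hypothetical layout, illustration only):
+  // for `struct D : A, B {}` where the B subobject is laid out at offset 8,
+  // the address of the B base is `this` advanced by 8 bytes, produced by a
+  // pointer-stride op over the raw `this` pointer followed by a bitcast to
+  // B's type.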
+ Address V = This; + if (!Offset.isZero()) { + mlir::Value OffsetVal = builder.getSInt32(Offset.getQuantity(), loc); + mlir::Value VBaseThisPtr = builder.create( + loc, This.getPointer().getType(), This.getPointer(), OffsetVal); + V = Address(VBaseThisPtr, CXXABIThisAlignment); + } + V = builder.createElementBitCast(loc, V, ConvertType(Base)); + return V; +} + +static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *BaseInit) { + assert(BaseInit->isBaseInitializer() && "Must have base initializer!"); + + Address ThisPtr = CGF.LoadCXXThisAddress(); + + const Type *BaseType = BaseInit->getBaseClass(); + const auto *BaseClassDecl = + cast(BaseType->castAs()->getDecl()); + + bool isBaseVirtual = BaseInit->isBaseVirtual(); + + // If the initializer for the base (other than the constructor + // itself) accesses 'this' in any way, we need to initialize the + // vtables. + if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) + CGF.initializeVTablePointers(loc, ClassDecl); + + // We can pretend to be a complete class because it only matters for + // virtual bases, and we only do virtual bases for complete ctors. + Address V = CGF.getAddressOfDirectBaseInCompleteClass( + loc, ThisPtr, ClassDecl, BaseClassDecl, isBaseVirtual); + AggValueSlot AggSlot = AggValueSlot::forAddr( + V, Qualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + CGF.getOverlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual)); + + CGF.buildAggExpr(BaseInit->getInit(), AggSlot); + + if (CGF.CGM.getLangOpts().Exceptions && + !BaseClassDecl->hasTrivialDestructor()) + CGF.EHStack.pushCleanup(EHCleanup, BaseClassDecl, + isBaseVirtual); +} + +/// This routine generates necessary code to initialize base classes and +/// non-static data members belonging to this constructor. +void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, + CXXCtorType CtorType, + FunctionArgList &Args) { + if (CD->isDelegatingConstructor()) + llvm_unreachable("NYI"); + + const CXXRecordDecl *ClassDecl = CD->getParent(); + + CXXConstructorDecl::init_const_iterator B = CD->init_begin(), + E = CD->init_end(); + + // Virtual base initializers first, if any. They aren't needed if: + // - This is a base ctor variant + // - There are no vbases + // - The class is abstract, so a complete object of it cannot be constructed + // + // The check for an abstract class is necessary because sema may not have + // marked virtual base destructors referenced. + bool ConstructVBases = CtorType != Ctor_Base && + ClassDecl->getNumVBases() != 0 && + !ClassDecl->isAbstract(); + + // In the Microsoft C++ ABI, there are no constructor variants. Instead, the + // constructor of a class with virtual bases takes an additional parameter to + // conditionally construct the virtual bases. Emit that check here. 
+ mlir::Block *BaseCtorContinueBB = nullptr; + if (ConstructVBases && + !CGM.getTarget().getCXXABI().hasConstructorVariants()) { + llvm_unreachable("NYI"); + } + + auto const OldThis = CXXThisValue; + for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { + if (!ConstructVBases) + continue; + if (CGM.getCodeGenOpts().StrictVTablePointers && + CGM.getCodeGenOpts().OptimizationLevel > 0 && + isInitializerOfDynamicClass(*B)) + llvm_unreachable("NYI"); + buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + } + + if (BaseCtorContinueBB) { + llvm_unreachable("NYI"); + } + + // Then, non-virtual base initializers. + for (; B != E && (*B)->isBaseInitializer(); B++) { + assert(!(*B)->isBaseVirtual()); + + if (CGM.getCodeGenOpts().StrictVTablePointers && + CGM.getCodeGenOpts().OptimizationLevel > 0 && + isInitializerOfDynamicClass(*B)) + llvm_unreachable("NYI"); + buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + } + + CXXThisValue = OldThis; + + initializeVTablePointers(getLoc(CD->getBeginLoc()), ClassDecl); + + // And finally, initialize class members. + FieldConstructionScope FCS(*this, LoadCXXThisAddress()); + ConstructorMemcpyizer CM(*this, CD, Args); + for (; B != E; B++) { + CXXCtorInitializer *Member = (*B); + assert(!Member->isBaseInitializer()); + assert(Member->isAnyMemberInitializer() && + "Delegating initializer on non-delegating constructor"); + CM.addMemberInitializer(Member); + } + CM.finish(); +} + +static Address ApplyNonVirtualAndVirtualOffset( + CIRGenFunction &CGF, Address addr, CharUnits nonVirtualOffset, + mlir::Value virtualOffset, const CXXRecordDecl *derivedClass, + const CXXRecordDecl *nearestVBase) { + llvm_unreachable("NYI"); + return Address::invalid(); +} + +void CIRGenFunction::initializeVTablePointer(mlir::Location loc, + const VPtr &Vptr) { + // Compute the address point. + auto VTableAddressPoint = CGM.getCXXABI().getVTableAddressPointInStructor( + *this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase); + + if (!VTableAddressPoint) + return; + + // Compute where to store the address point. + mlir::Value VirtualOffset{}; + CharUnits NonVirtualOffset = CharUnits::Zero(); + + if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) { + llvm_unreachable("NYI"); + } else { + // We can just use the base offset in the complete class. + NonVirtualOffset = Vptr.Base.getBaseOffset(); + } + + // Apply the offsets. + Address VTableField = LoadCXXThisAddress(); + if (!NonVirtualOffset.isZero() || VirtualOffset) { + VTableField = ApplyNonVirtualAndVirtualOffset( + *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass, + Vptr.NearestVBase); + } + + // Finally, store the address point. Use the same CIR types as the field. + // + // vtable field is derived from `this` pointer, therefore they should be in + // the same addr space. + assert(!UnimplementedFeature::addressSpace()); + VTableField = builder.createElementBitCast(loc, VTableField, + VTableAddressPoint.getType()); + builder.createStore(loc, VTableAddressPoint, VTableField); + assert(!UnimplementedFeature::tbaa()); +} + +void CIRGenFunction::initializeVTablePointers(mlir::Location loc, + const CXXRecordDecl *RD) { + // Ignore classes without a vtable. + if (!RD->isDynamicClass()) + return; + + // Initialize the vtable pointers for this class and all of its bases. 
+  if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD))
+    for (const auto &Vptr : getVTablePointers(RD))
+      initializeVTablePointer(loc, Vptr);
+
+  if (RD->getNumVBases())
+    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
+}
+
+CIRGenFunction::VPtrsVector
+CIRGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) {
+  CIRGenFunction::VPtrsVector VPtrsResult;
+  VisitedVirtualBasesSetTy VBases;
+  getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()),
+                    /*NearestVBase=*/nullptr,
+                    /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+                    /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases,
+                    VPtrsResult);
+  return VPtrsResult;
+}
+
+void CIRGenFunction::getVTablePointers(BaseSubobject Base,
+                                       const CXXRecordDecl *NearestVBase,
+                                       CharUnits OffsetFromNearestVBase,
+                                       bool BaseIsNonVirtualPrimaryBase,
+                                       const CXXRecordDecl *VTableClass,
+                                       VisitedVirtualBasesSetTy &VBases,
+                                       VPtrsVector &Vptrs) {
+  // If this base is a non-virtual primary base the address point has already
+  // been set.
+  if (!BaseIsNonVirtualPrimaryBase) {
+    // Initialize the vtable pointer for this base.
+    VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass};
+    Vptrs.push_back(Vptr);
+  }
+
+  const CXXRecordDecl *RD = Base.getBase();
+
+  // Traverse bases.
+  for (const auto &I : RD->bases()) {
+    auto *BaseDecl =
+        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+    // Ignore classes without a vtable.
+    if (!BaseDecl->isDynamicClass())
+      continue;
+
+    CharUnits BaseOffset;
+    CharUnits BaseOffsetFromNearestVBase;
+    bool BaseDeclIsNonVirtualPrimaryBase;
+
+    if (I.isVirtual()) {
+      llvm_unreachable("NYI");
+    } else {
+      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+      BaseOffsetFromNearestVBase =
+          OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+    }
+
+    getVTablePointers(
+        BaseSubobject(BaseDecl, BaseOffset),
+        I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase,
+        BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs);
+  }
+}
+
+Address CIRGenFunction::LoadCXXThisAddress() {
+  assert(CurFuncDecl && "loading 'this' without a func declaration?");
+  assert(isa<CXXMethodDecl>(CurFuncDecl));
+
+  // Lazily compute CXXThisAlignment.
+  if (CXXThisAlignment.isZero()) {
+    // Just use the best known alignment for the parent.
+    // TODO: if we're currently emitting a complete-object ctor/dtor, we can
+    // always use the complete-object alignment.
+    auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
+    CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+  }
+
+  return Address(LoadCXXThis(), CXXThisAlignment);
+}
+
+void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS,
+                                              Expr *Init) {
+  QualType FieldType = Field->getType();
+  switch (getEvaluationKind(FieldType)) {
+  case TEK_Scalar:
+    if (LHS.isSimple()) {
+      buildExprAsInit(Init, Field, LHS, false);
+    } else {
+      llvm_unreachable("NYI");
+    }
+    break;
+  case TEK_Complex:
+    llvm_unreachable("NYI");
+    break;
+  case TEK_Aggregate: {
+    AggValueSlot Slot = AggValueSlot::forLValue(
+        LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+        AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field),
+        AggValueSlot::IsNotZeroed,
+        // Checks are made by the code that calls constructor.
+        AggValueSlot::IsSanitizerChecked);
+    buildAggExpr(Init, Slot);
+    break;
+  }
+  }
+
+  // Ensure that we destroy this object if an exception is thrown later in the
+  // constructor.
+  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
+  (void)dtorKind;
+  if (UnimplementedFeature::cleanups())
+    llvm_unreachable("NYI");
+}
+
+void CIRGenFunction::buildDelegateCXXConstructorCall(
+    const CXXConstructorDecl *Ctor, CXXCtorType CtorType,
+    const FunctionArgList &Args, SourceLocation Loc) {
+  CallArgList DelegateArgs;
+
+  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+  assert(I != E && "no parameters to constructor");
+
+  // this
+  Address This = LoadCXXThisAddress();
+  DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType());
+  ++I;
+
+  // FIXME: The location of the VTT parameter in the parameter list is specific
+  // to the Itanium ABI and shouldn't be hardcoded here.
+  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
+    llvm_unreachable("NYI");
+  }
+
+  // Explicit arguments.
+  for (; I != E; ++I) {
+    const VarDecl *param = *I;
+    // FIXME: per-argument source location
+    buildDelegateCallArg(DelegateArgs, param, Loc);
+  }
+
+  buildCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false,
+                          /*Delegating=*/true, This, DelegateArgs,
+                          AggValueSlot::MayOverlap, Loc,
+                          /*NewPointerIsChecked=*/true);
+}
+
+void CIRGenFunction::buildImplicitAssignmentOperatorBody(
+    FunctionArgList &Args) {
+  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
+  const Stmt *RootS = AssignOp->getBody();
+  assert(isa<CompoundStmt>(RootS) &&
+         "Body of an implicit assignment operator should be compound stmt.");
+  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
+
+  // LexicalScope Scope(*this, RootCS->getSourceRange());
+  // FIXME(cir): add all of the below under a new scope.
+
+  assert(!UnimplementedFeature::incrementProfileCounter());
+  AssignmentMemcpyizer AM(*this, AssignOp, Args);
+  for (auto *I : RootCS->body())
+    AM.emitAssignment(I);
+  AM.finish();
+}
+
+void CIRGenFunction::buildForwardingCallToLambda(
+    const CXXMethodDecl *callOperator, CallArgList &callArgs) {
+  // Get the address of the call operator.
+  const auto &calleeFnInfo =
+      CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
+  auto calleePtr = CGM.GetAddrOfFunction(
+      GlobalDecl(callOperator), CGM.getTypes().GetFunctionType(calleeFnInfo));
+
+  // Prepare the return slot.
+  const FunctionProtoType *FPT =
+      callOperator->getType()->castAs<FunctionProtoType>();
+  QualType resultType = FPT->getReturnType();
+  ReturnValueSlot returnSlot;
+  if (!resultType->isVoidType() &&
+      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+      !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) {
+    llvm_unreachable("NYI");
+  }
+
+  // We don't need to separately arrange the call arguments because
+  // the call can't be variadic anyway --- it's impossible to forward
+  // variadic arguments.
+
+  // Now emit our call.
+  auto callee = CIRGenCallee::forDirect(calleePtr, GlobalDecl(callOperator));
+  RValue RV = buildCall(calleeFnInfo, callee, returnSlot, callArgs);
+
+  // If necessary, copy the returned value into the slot.
+  if (!resultType->isVoidType() && returnSlot.isNull()) {
+    if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType())
+      llvm_unreachable("NYI");
+    buildReturnOfRValue(*currSrcLoc, RV, resultType);
+  } else {
+    llvm_unreachable("NYI");
+  }
+}
+
+void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+  const CXXRecordDecl *Lambda = MD->getParent();
+
+  // Start building arguments for forwarding call
+  CallArgList CallArgs;
+
+  QualType LambdaType = getContext().getRecordType(Lambda);
+  QualType ThisType = getContext().getPointerType(LambdaType);
+  Address ThisPtr =
+      CreateMemTemp(LambdaType, getLoc(MD->getSourceRange()), "unused.capture");
+  CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+
+  // Add the rest of the parameters.
+  for (auto *Param : MD->parameters())
+    buildDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
+
+  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
+  // For a generic lambda, find the corresponding call operator specialization
+  // to which the call to the static-invoker shall be forwarded.
+  if (Lambda->isGenericLambda()) {
+    assert(MD->isFunctionTemplateSpecialization());
+    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
+    FunctionTemplateDecl *CallOpTemplate =
+        CallOp->getDescribedFunctionTemplate();
+    void *InsertPos = nullptr;
+    FunctionDecl *CorrespondingCallOpSpecialization =
+        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
+    assert(CorrespondingCallOpSpecialization);
+    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
+  }
+  buildForwardingCallToLambda(CallOp, CallArgs);
+}
+
+void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
+  if (MD->isVariadic()) {
+    // LLVM codegen doesn't emit code for this either; it says:
+    // FIXME: Making this work correctly is nasty because it requires either
+    // cloning the body of the call operator or making the call operator
+    // forward.
+    llvm_unreachable("NYI");
+  }
+
+  buildLambdaDelegatingInvokeBody(MD);
+}
+
+void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr,
+                                      QualType type) {
+  const RecordType *rtype = type->castAs<RecordType>();
+  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+  const CXXDestructorDecl *dtor = record->getDestructor();
+  // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
+  // dtors which shall be removed on later CIR passes. However, only remove
+  // this assertion once we get a testcase to exercise this path.
+  assert(!dtor->isTrivial());
+  CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+                             /*Delegating=*/false, addr, type);
+}
+
+static bool FieldHasTrivialDestructorBody(ASTContext &Context,
+                                          const FieldDecl *Field);
+
+// FIXME(cir): this should be shared with traditional codegen.
+static bool
+HasTrivialDestructorBody(ASTContext &Context,
+                         const CXXRecordDecl *BaseClassDecl,
+                         const CXXRecordDecl *MostDerivedClassDecl) {
+  // If the destructor is trivial we don't have to check anything else.
+  if (BaseClassDecl->hasTrivialDestructor())
+    return true;
+
+  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
+    return false;
+
+  // Check fields.
+  for (const auto *Field : BaseClassDecl->fields())
+    if (!FieldHasTrivialDestructorBody(Context, Field))
+      return false;
+
+  // Check non-virtual bases.
+  for (const auto &I : BaseClassDecl->bases()) {
+    if (I.isVirtual())
+      continue;
+
+    const CXXRecordDecl *NonVirtualBase =
+        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
+                                  MostDerivedClassDecl))
+      return false;
+  }
+
+  if (BaseClassDecl == MostDerivedClassDecl) {
+    // Check virtual bases.
+    for (const auto &I : BaseClassDecl->vbases()) {
+      const CXXRecordDecl *VirtualBase =
+          cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+      if (!HasTrivialDestructorBody(Context, VirtualBase,
+                                    MostDerivedClassDecl))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+// FIXME(cir): this should be shared with traditional codegen.
+static bool FieldHasTrivialDestructorBody(ASTContext &Context,
+                                          const FieldDecl *Field) {
+  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
+
+  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
+  if (!RT)
+    return true;
+
+  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+
+  // The destructor for an implicit anonymous union member is never invoked.
+  if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
+    return false;
+
+  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
+}
+
+/// Check whether we need to initialize any vtable pointers before calling this
+/// destructor.
+/// FIXME(cir): this should be shared with traditional codegen.
+static bool CanSkipVTablePointerInitialization(CIRGenFunction &CGF,
+                                               const CXXDestructorDecl *Dtor) {
+  const CXXRecordDecl *ClassDecl = Dtor->getParent();
+  if (!ClassDecl->isDynamicClass())
+    return true;
+
+  // For a final class, the vtable pointer is known to already point to the
+  // class's vtable.
+  if (ClassDecl->isEffectivelyFinal())
+    return true;
+
+  if (!Dtor->hasTrivialBody())
+    return false;
+
+  // Check the fields.
+  for (const auto *Field : ClassDecl->fields())
+    if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field))
+      return false;
+
+  return true;
+}
+
+/// Emits the body of the current destructor.
+void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) {
+  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+  CXXDtorType DtorType = CurGD.getDtorType();
+
+  // For an abstract class, non-base destructors are never used (and can't
+  // be emitted in general, because vbase dtors may not have been validated
+  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
+  // in fact emit references to them from other compilations, so emit them
+  // as functions containing a trap instruction.
+  if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) {
+    llvm_unreachable("NYI");
+  }
+
+  Stmt *Body = Dtor->getBody();
+  if (Body)
+    assert(!UnimplementedFeature::incrementProfileCounter());
+
+  // The call to operator delete in a deleting destructor happens
+  // outside of the function-try-block, which means it's always
+  // possible to delegate the destructor body to the complete
+  // destructor. Do so.
+  if (DtorType == Dtor_Deleting) {
+    RunCleanupsScope DtorEpilogue(*this);
+    EnterDtorCleanups(Dtor, Dtor_Deleting);
+    if (HaveInsertPoint()) {
+      QualType ThisTy = Dtor->getFunctionObjectParameterType();
+      buildCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+                             /*Delegating=*/false, LoadCXXThisAddress(),
+                             ThisTy);
+    }
+    return;
+  }
+
+  // If the body is a function-try-block, enter the try before
+  // anything else.
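+  // Illustrative example, not part of the original change, of a dtor body
+  // that is a function-try-block and would take the isTryBody path below:
+  //
+  //   struct S {
+  //     ~S() try { may_throw(); } catch (...) { /* handle */ }
+  //   };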
+  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+  if (isTryBody) {
+    llvm_unreachable("NYI");
+    // EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+  }
+  if (UnimplementedFeature::emitAsanPrologueOrEpilogue())
+    llvm_unreachable("NYI");
+
+  // Enter the epilogue cleanups.
+  RunCleanupsScope DtorEpilogue(*this);
+
+  // If this is the complete variant, just invoke the base variant;
+  // the epilogue will destruct the virtual bases. But we can't do
+  // this optimization if the body is a function-try-block, because
+  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
+  // always delegate because we might not have a definition in this TU.
+  switch (DtorType) {
+  case Dtor_Comdat:
+    llvm_unreachable("not expecting a COMDAT");
+  case Dtor_Deleting:
+    llvm_unreachable("already handled deleting case");
+
+  case Dtor_Complete:
+    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
+           "can't emit a dtor without a body for non-Microsoft ABIs");
+
+    // Enter the cleanup scopes for virtual bases.
+    EnterDtorCleanups(Dtor, Dtor_Complete);
+
+    if (!isTryBody) {
+      QualType ThisTy = Dtor->getFunctionObjectParameterType();
+      buildCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+                             /*Delegating=*/false, LoadCXXThisAddress(),
+                             ThisTy);
+      break;
+    }
+
+    // Fallthrough: act like we're in the base variant.
+    [[fallthrough]];
+
+  case Dtor_Base:
+    assert(Body);
+
+    // Enter the cleanup scopes for fields and non-virtual bases.
+    EnterDtorCleanups(Dtor, Dtor_Base);
+
+    // Initialize the vtable pointers before entering the body.
+    if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
+      // Insert the llvm.launder.invariant.group intrinsic before initializing
+      // the vptrs to cancel any previous assumptions we might have made.
+      if (CGM.getCodeGenOpts().StrictVTablePointers &&
+          CGM.getCodeGenOpts().OptimizationLevel > 0)
+        llvm_unreachable("NYI");
+      llvm_unreachable("NYI");
+    }
+
+    if (isTryBody)
+      llvm_unreachable("NYI");
+    else if (Body)
+      (void)buildStmt(Body, /*useCurrentScope=*/true);
+    else {
+      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+      // nothing to do besides what's in the epilogue
+    }
+    // -fapple-kext must inline any call to this dtor into
+    // the caller's body.
+    if (getLangOpts().AppleKext)
+      llvm_unreachable("NYI");
+
+    break;
+  }
+
+  // Jump out through the epilogue cleanups.
+  DtorEpilogue.ForceCleanup();
+
+  // Exit the try if applicable.
+  if (isTryBody)
+    llvm_unreachable("NYI");
+}
+
+namespace {
+[[maybe_unused]] mlir::Value
+LoadThisForDtorDelete(CIRGenFunction &CGF, const CXXDestructorDecl *DD) {
+  if (Expr *ThisArg = DD->getOperatorDeleteThisArg())
+    return CGF.buildScalarExpr(ThisArg);
+  return CGF.LoadCXXThis();
+}
+
+/// Call the operator delete associated with the current destructor.
+struct CallDtorDelete final : EHScopeStack::Cleanup {
+  CallDtorDelete() {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+    const CXXRecordDecl *ClassDecl = Dtor->getParent();
+    CGF.buildDeleteCall(Dtor->getOperatorDelete(),
+                        LoadThisForDtorDelete(CGF, Dtor),
+                        CGF.getContext().getTagDeclType(ClassDecl));
+  }
+};
+} // namespace
+
+/// Emit all code that comes at the end of class's destructor. This is to call
+/// destructors on members and base classes in reverse order of their
+/// construction.
+///
+/// For a deleting destructor, this also handles the case where a destroying
+/// operator delete completely overrides the definition.
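+///
+/// Illustrative example, not part of the original change:
+///
+///   struct S {
+///     ~S();
+///     void operator delete(S *, std::destroying_delete_t);
+///   };
+///
+/// For `delete p` only the destroying operator delete is called; it is then
+/// responsible for invoking ~S() itself.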
+void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+                                       CXXDtorType DtorType) {
+  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
+         "Should not emit dtor epilogue for non-exported trivial dtor!");
+
+  // The deleting-destructor phase just needs to call the appropriate
+  // operator delete that Sema picked up.
+  if (DtorType == Dtor_Deleting) {
+    assert(DD->getOperatorDelete() &&
+           "operator delete missing - EnterDtorCleanups");
+    if (CXXStructorImplicitParamValue) {
+      llvm_unreachable("NYI");
+    } else {
+      if (DD->getOperatorDelete()->isDestroyingOperatorDelete()) {
+        llvm_unreachable("NYI");
+      } else {
+        EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+      }
+    }
+    return;
+  }
+
+  const CXXRecordDecl *ClassDecl = DD->getParent();
+
+  // Unions have no bases and do not call field destructors.
+  if (ClassDecl->isUnion())
+    return;
+
+  // The complete-destructor phase just destructs all the virtual bases.
+  if (DtorType == Dtor_Complete) {
+    // Poison the vtable pointer such that access after the base
+    // and member destructors are invoked is invalid.
+    if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+        SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() &&
+        ClassDecl->isPolymorphic())
+      assert(!UnimplementedFeature::sanitizeDtor());
+
+    // We push them in the forward order so that they'll be popped in
+    // the reverse order.
+    for (const auto &Base : ClassDecl->vbases()) {
+      auto *BaseClassDecl =
+          cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+
+      if (BaseClassDecl->hasTrivialDestructor()) {
+        // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
+        // memory. For non-trivial base classes the same is done in the class
+        // destructor.
+        assert(!UnimplementedFeature::sanitizeDtor());
+      } else {
+        EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+                                          /*BaseIsVirtual*/ true);
+      }
+    }
+
+    return;
+  }
+
+  assert(DtorType == Dtor_Base);
+  // Poison the vtable pointer if it has no virtual bases, but inherits
+  // virtual functions.
+  if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+      SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() &&
+      ClassDecl->isPolymorphic())
+    assert(!UnimplementedFeature::sanitizeDtor());
+
+  // Destroy non-virtual bases.
+  for (const auto &Base : ClassDecl->bases()) {
+    // Ignore virtual bases.
+    if (Base.isVirtual())
+      continue;
+
+    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+    if (BaseClassDecl->hasTrivialDestructor()) {
+      if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+          SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
+        assert(!UnimplementedFeature::sanitizeDtor());
+    } else {
+      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+                                        /*BaseIsVirtual*/ false);
+    }
+  }
+
+  // Poison fields such that access after their destructors are
+  // invoked, and before the base class destructor runs, is invalid.
+  bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+                        SanOpts.has(SanitizerKind::Memory);
+  assert(!UnimplementedFeature::sanitizeDtor());
+
+  // Destroy direct fields.
+  for (const auto *Field : ClassDecl->fields()) {
+    if (SanitizeFields)
+      assert(!UnimplementedFeature::sanitizeDtor());
+
+    QualType type = Field->getType();
+    QualType::DestructionKind dtorKind = type.isDestructedType();
+    if (!dtorKind)
+      continue;
+
+    // Anonymous union members do not have their destructors called.
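+    // Illustrative example, not part of the original change:
+    //
+    //   struct S {
+    //     union { std::string s; };  // anonymous union member
+    //     ~S() {}
+    //   };
+    //
+    // No cleanup is pushed for `s`; the check below skips such members and
+    // leaves disposal of the active member to the user-written dtor body.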
+    const RecordType *RT = type->getAsUnionType();
+    if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+      continue;
+
+    [[maybe_unused]] CleanupKind cleanupKind = getCleanupKind(dtorKind);
+    llvm_unreachable("EHStack.pushCleanup(...) NYI");
+  }
+
+  if (SanitizeFields)
+    assert(!UnimplementedFeature::sanitizeDtor());
+}
+
+void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD,
+                                            CXXDtorType Type,
+                                            bool ForVirtualBase,
+                                            bool Delegating, Address This,
+                                            QualType ThisTy) {
+  CGM.getCXXABI().buildDestructorCall(*this, DD, Type, ForVirtualBase,
+                                      Delegating, This, ThisTy);
+}
+
+mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
+                                            bool Delegating) {
+  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
+    // This constructor/destructor does not need a VTT parameter.
+    return nullptr;
+  }
+
+  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
+  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+  if (Delegating) {
+    llvm_unreachable("NYI");
+  } else if (RD == Base) {
+    llvm_unreachable("NYI");
+  } else {
+    llvm_unreachable("NYI");
+  }
+
+  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
+    llvm_unreachable("NYI");
+  } else {
+    llvm_unreachable("NYI");
+  }
+}
+
+Address
+CIRGenFunction::getAddressOfBaseClass(Address Value,
+                                      const CXXRecordDecl *Derived,
+                                      CastExpr::path_const_iterator PathBegin,
+                                      CastExpr::path_const_iterator PathEnd,
+                                      bool NullCheckValue, SourceLocation Loc) {
+  assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+  CastExpr::path_const_iterator Start = PathBegin;
+  const CXXRecordDecl *VBase = nullptr;
+
+  // Sema has done some convenient canonicalization here: if the
+  // access path involved any virtual steps, the conversion path will
+  // *start* with a step down to the correct virtual base subobject,
+  // and hence will not require any further steps.
+  if ((*Start)->isVirtual()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Compute the static offset of the ultimate destination within its
+  // allocating subobject (the virtual base, if there is one, or else
+  // the "complete" object that we see).
+  CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset(
+      VBase ? VBase : Derived, Start, PathEnd);
+
+  // If there's a virtual step, we can sometimes "devirtualize" it.
+  // For now, that's limited to when the derived type is final.
+  // TODO: "devirtualize" this for accesses to known-complete objects.
+  if (VBase && Derived->hasAttr<FinalAttr>()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Get the base pointer type.
+  auto BaseValueTy = convertType((PathEnd[-1])->getType());
+  assert(!UnimplementedFeature::addressSpace());
+  // auto BasePtrTy = builder.getPointerTo(BaseValueTy);
+  // QualType DerivedTy = getContext().getRecordType(Derived);
+  // CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
+
+  // If the static offset is zero and we don't have a virtual step,
+  // just do a bitcast; null checks are unnecessary.
+  if (NonVirtualOffset.isZero() && !VBase) {
+    if (sanitizePerformTypeCheck()) {
+      llvm_unreachable("NYI");
+    }
+    return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy);
+  }
+
+  // Skip over the offset (and the vtable load) if we're supposed to
+  // null-check the pointer.
+  if (NullCheckValue) {
+    llvm_unreachable("NYI");
+  }
+
+  if (sanitizePerformTypeCheck()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Compute the virtual offset.
+  mlir::Value VirtualOffset{};
+  if (VBase) {
+    llvm_unreachable("NYI");
+  }
+
+  // Apply both offsets.
+  Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+                                          VirtualOffset, Derived, VBase);
+  // Cast to the destination type.
+  Value = builder.createElementBitCast(Value.getPointer().getLoc(), Value,
+                                       BaseValueTy);
+
+  // Build a phi if we needed a null check.
+  if (NullCheckValue) {
+    llvm_unreachable("NYI");
+  }
+
+  llvm_unreachable("NYI");
+  return Value;
+}
+
+// TODO(cir): this can be shared with LLVM codegen.
+bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
+  if (!CGM.getCodeGenOpts().WholeProgramVTables ||
+      !CGM.HasHiddenLTOVisibility(RD))
+    return false;
+
+  if (CGM.getCodeGenOpts().VirtualFunctionElimination)
+    return true;
+
+  if (!SanOpts.has(SanitizerKind::CFIVCall) ||
+      !CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIVCall))
+    return false;
+
+  std::string TypeName = RD->getQualifiedNameAsString();
+  return !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall,
+                                                        TypeName);
+}
+
+void CIRGenFunction::buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
+                                                   mlir::Value VTable,
+                                                   SourceLocation Loc) {
+  if (SanOpts.has(SanitizerKind::CFIVCall)) {
+    llvm_unreachable("NYI");
+  } else if (CGM.getCodeGenOpts().WholeProgramVTables &&
+             // Don't insert type test assumes if we are forcing public
+             // visibility.
+             !CGM.AlwaysHasLTOVisibilityPublic(RD)) {
+    llvm_unreachable("NYI");
+  }
+}
+
+mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This,
+                                         mlir::Type VTableTy,
+                                         const CXXRecordDecl *RD) {
+  Address VTablePtrSrc = builder.createElementBitCast(Loc, This, VTableTy);
+  auto VTable = builder.createLoad(Loc, VTablePtrSrc);
+  assert(!UnimplementedFeature::tbaa());
+
+  if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+      CGM.getCodeGenOpts().StrictVTablePointers) {
+    assert(!UnimplementedFeature::createInvariantGroup());
+  }
+
+  return VTable;
+}
+
+Address CIRGenFunction::buildCXXMemberDataPointerAddress(
+    const Expr *E, Address base, mlir::Value memberPtr,
+    const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) {
+  assert(!UnimplementedFeature::cxxABI());
+
+  auto op = builder.createGetIndirectMember(getLoc(E->getSourceRange()),
+                                            base.getPointer(), memberPtr);
+
+  QualType memberType = memberPtrType->getPointeeType();
+  CharUnits memberAlign = CGM.getNaturalTypeAlignment(memberType, baseInfo);
+  memberAlign = CGM.getDynamicOffsetAlignment(
+      base.getAlignment(), memberPtrType->getClass()->getAsCXXRecordDecl(),
+      memberAlign);
+
+  return Address(op, convertTypeForMem(memberPtrType->getPointeeType()),
+                 memberAlign);
+}
+
+clang::CharUnits
+CIRGenModule::getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign,
+                                        const clang::CXXRecordDecl *baseDecl,
+                                        clang::CharUnits expectedTargetAlign) {
+  // If the base is an incomplete type (which is, alas, possible with
+  // member pointers), be pessimistic.
+  if (!baseDecl->isCompleteDefinition())
+    return std::min(actualBaseAlign, expectedTargetAlign);
+
+  auto &baseLayout = getASTContext().getASTRecordLayout(baseDecl);
+  CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment();
+
+  // If the class is properly aligned, assume the target offset is, too.
+  //
+  // This actually isn't necessarily the right thing to do --- if the
+  // class is a complete object, but it's only properly aligned for a
+  // base subobject, then the alignments of things relative to it are
+  // probably off as well. (Note that this requires the alignment of
+  // the target to be greater than the NV alignment of the derived
+  // class.)
+  //
+  // However, our approach to this kind of under-alignment can only
+  // ever be best effort; after all, we're never going to propagate
+  // alignments through variables or parameters. Note, in particular,
+  // that constructing a polymorphic type in an address that's less
+  // than pointer-aligned will generally trap in the constructor,
+  // unless we someday add some sort of attribute to change the
+  // assumed alignment of 'this'. So our goal here is pretty much
+  // just to allow the user to explicitly say that a pointer is
+  // under-aligned and then safely access its fields and vtables.
+  if (actualBaseAlign >= expectedBaseAlign) {
+    return expectedTargetAlign;
+  }
+
+  // Otherwise, we might be offset by an arbitrary multiple of the
+  // actual alignment. The correct adjustment is to take the min of
+  // the two alignments.
+  return std::min(actualBaseAlign, expectedTargetAlign);
+}
+
+/// Emit a loop to call a particular constructor for each of several members of
+/// an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+///   zero-initialized before it is constructed
+void CIRGenFunction::buildCXXAggrConstructorCall(
+    const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType,
+    Address arrayBegin, const CXXConstructExpr *E, bool NewPointerIsChecked,
+    bool zeroInitialize) {
+  QualType elementType;
+  auto numElements = buildArrayLength(arrayType, elementType, arrayBegin);
+  buildCXXAggrConstructorCall(ctor, numElements, arrayBegin, E,
+                              NewPointerIsChecked, zeroInitialize);
+}
+
+/// Emit a loop to call a particular constructor for each of several members of
+/// an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+///   may be zero
+/// \param arrayBase a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+///   zero-initialized before it is constructed
+void CIRGenFunction::buildCXXAggrConstructorCall(
+    const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase,
+    const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) {
+  // It's legal for numElements to be zero. This can happen both
+  // dynamically, because x can be zero in 'new A[x]', and statically,
+  // because of GCC extensions that permit zero-length arrays. There
+  // are probably legitimate places where we could assume that this
+  // doesn't happen, but it's not clear that it's worth it.
+  // llvm::BranchInst *zeroCheckBranch = nullptr;
+
+  // Optimize for a constant count.
+  auto constantCount =
+      dyn_cast<mlir::cir::ConstantOp>(numElements.getDefiningOp());
+  if (constantCount) {
+    auto constIntAttr =
+        constantCount.getValue().dyn_cast<mlir::cir::IntAttr>();
+    // Just skip out if the constant count is zero.
+    if (constIntAttr && constIntAttr.getUInt() == 0)
+      return;
+    // Otherwise, emit the check.
+  } else {
+    llvm_unreachable("NYI");
+  }
+
+  auto arrayTy = arrayBase.getElementType().dyn_cast<mlir::cir::ArrayType>();
+  assert(arrayTy && "expected array type");
+  auto elementType = arrayTy.getEltType();
+  auto ptrToElmType = builder.getPointerTo(elementType);
+
+  // Traditional LLVM codegen emits a loop here.
+  // TODO(cir): Lower to a loop as part of LoweringPrepare.
+
+  // The alignment of the base, adjusted by the size of a single element,
+  // provides a conservative estimate of the alignment of every element.
+  // (This assumes we never start tracking offsetted alignments.)
+  //
+  // Note that these are complete objects and so we don't need to
+  // use the non-virtual size or alignment.
+  QualType type = getContext().getTypeDeclType(ctor->getParent());
+  CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
+      getContext().getTypeSizeInChars(type));
+
+  // Zero initialize the storage, if requested.
+  if (zeroInitialize) {
+    llvm_unreachable("NYI");
+  }
+
+  // C++ [class.temporary]p4:
+  // There are two contexts in which temporaries are destroyed at a different
+  // point than the end of the full-expression. The first context is when a
+  // default constructor is called to initialize an element of an array.
+  // If the constructor has one or more default arguments, the destruction of
+  // every temporary created in a default argument expression is sequenced
+  // before the construction of the next array element, if any.
+  {
+    RunCleanupsScope Scope(*this);
+
+    // Evaluate the constructor and its arguments in a regular
+    // partial-destroy cleanup.
+    if (getLangOpts().Exceptions &&
+        !ctor->getParent()->hasTrivialDestructor()) {
+      llvm_unreachable("NYI");
+    }
+
+    // Emit the constructor call that will execute for every array element.
+    builder.create<mlir::cir::ArrayCtor>(
+        *currSrcLoc, arrayBase.getPointer(),
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+          Address curAddr = Address(arg, ptrToElmType, eltAlignment);
+          auto currAVS = AggValueSlot::forAddr(
+              curAddr, type.getQualifiers(), AggValueSlot::IsDestructed,
+              AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+              AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed,
+              NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked
+                                  : AggValueSlot::IsNotSanitizerChecked);
+          buildCXXConstructorCall(ctor, Ctor_Complete,
+                                  /*ForVirtualBase=*/false,
+                                  /*Delegating=*/false, currAVS, E);
+          builder.create<mlir::cir::YieldOp>(loc);
+        });
+  }
+
+  if (constantCount.use_empty())
+    constantCount.erase();
+}
\ No newline at end of file
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
new file mode 100644
index 000000000000..7dc94348368b
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -0,0 +1,524 @@
+//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
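+//
+// Illustrative example, not part of the original change:
+//
+//   void f(bool b) {
+//     std::string s = "tmp";  // s's dtor is registered as a cleanup
+//     if (b)
+//       return;               // cleanup runs on this normal edge
+//     g();                    // cleanup also runs if g() throws
+//   }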
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/SaveAndRestore.h"
+
+#include "CIRGenCleanup.h"
+#include "CIRGenFunction.h"
+
+using namespace cir;
+using namespace clang;
+using namespace mlir::cir;
+
+//===----------------------------------------------------------------------===//
+// CIRGenFunction cleanup related
+//===----------------------------------------------------------------------===//
+
+/// Build an unconditional branch to the lexical scope cleanup block
+/// or to the labeled block if one has already been materialized.
+///
+/// Track, on a per-scope basis, the goto's we need to fix later.
+mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc,
+                                                          JumpDest Dest) {
+  // Remove this once we go for making sure unreachable code is
+  // well modeled (or not).
+  assert(builder.getInsertionBlock() && "not yet implemented");
+  assert(!UnimplementedFeature::ehStack());
+
+  // Insert a branch: to the cleanup block (unsolved) or to the already
+  // materialized label. Keep track of unsolved goto's.
+  return builder.create<BrOp>(Loc, Dest.isValid() ? Dest.getBlock()
+                                                  : ReturnBlock().getBlock());
+}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary,
+                                       QualType TempType, Address Ptr) {
+  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+              /*useEHCleanup*/ true);
+}
+
+Address CIRGenFunction::createCleanupActiveFlag() { llvm_unreachable("NYI"); }
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CIRGenFunction &CGF, RValue rv) {
+  llvm_unreachable("NYI");
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CIRGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+                                            mlir::Operation *dominatingIP) {
+  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+  assert(Scope.isActive() && "double deactivation");
+
+  // If it's the top of the stack, just pop it, but do so only if it belongs
+  // to the current RunCleanupsScope.
+  if (C == EHStack.stable_begin() &&
+      CurrentCleanupScopeDepth.strictlyEncloses(C)) {
+    // Per the comment below, checking EHAsynch is not really necessary;
+    // it's there to assure zero impact w/o the EHAsynch option.
+    if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) {
+      llvm_unreachable("NYI");
+    } else {
+      // From LLVM: If it's a normal cleanup, we need to pretend that the
+      // fallthrough is unreachable.
+      // CIR remarks: LLVM uses an empty insertion point to signal behavior
+      // change to other codegen paths (triggered by PopCleanupBlock).
+      // CIRGen doesn't do that yet, but let's mimic just in case.
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      builder.clearInsertionPoint();
+      PopCleanupBlock();
+    }
+    return;
+  }
+
+  llvm_unreachable("NYI");
+}
+
+void CIRGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
+  // Set that as the active flag in the cleanup.
+  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
+  cleanup.setActiveFlag(ActiveFlag);
+
+  if (cleanup.isNormalCleanup())
+    cleanup.setTestFlagInNormalCleanup();
+  if (cleanup.isEHCleanup())
+    cleanup.setTestFlagInEHCleanup();
+}
+
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause these blocks to come into
+/// existence anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CIRGenFunction &CGF,
+                                         EHCleanupScope &scope) {
+  auto *entry = scope.getNormalBlock();
+  if (!entry)
+    return;
+
+  llvm_unreachable("NYI");
+}
+
+static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
+                         EHScopeStack::Cleanup::Flags flags,
+                         Address ActiveFlag) {
+  // If there's an active flag, load it and skip the cleanup if it's
+  // false.
+  if (ActiveFlag.isValid()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Ask the cleanup to emit itself.
+  Fn->Emit(CGF, flags);
+  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+  // Emit the continuation block if there was an active flag.
+  if (ActiveFlag.isValid()) {
+    llvm_unreachable("NYI");
+  }
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+  assert(!EHStack.empty() && "cleanup stack is empty!");
+  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+  [[maybe_unused]] EHCleanupScope &Scope =
+      cast<EHCleanupScope>(*EHStack.begin());
+  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+  // Remember activation information.
+  bool IsActive = Scope.isActive();
+  Address NormalActiveFlag = Scope.shouldTestFlagInNormalCleanup()
+                                 ? Scope.getActiveFlag()
+                                 : Address::invalid();
+  [[maybe_unused]] Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup()
+                                              ? Scope.getActiveFlag()
+                                              : Address::invalid();
+
+  // Check whether we need an EH cleanup. This is only true if we've
+  // generated a lazy EH cleanup block.
+  auto *EHEntry = Scope.getCachedEHDispatchBlock();
+  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
+  bool RequiresEHCleanup = (EHEntry != nullptr);
+  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();
+
+  // Check the three conditions which might require a normal cleanup:
+
+  // - whether there are branch fix-ups through this cleanup
+  unsigned FixupDepth = Scope.getFixupDepth();
+  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+  // - whether there are branch-throughs or branch-afters
+  bool HasExistingBranches = Scope.hasBranches();
+
+  // - whether there's a fallthrough
+  auto *FallthroughSource = builder.getInsertionBlock();
+  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+
+  // Branch-through fall-throughs leave the insertion point set to the
+  // end of the last cleanup, which points to the current scope. The
+  // rest of CIR gen doesn't need to worry about this; it only happens
+  // during the execution of PopCleanupBlocks().
+  bool HasTerminator = FallthroughSource &&
+                       FallthroughSource->mightHaveTerminator() &&
+                       FallthroughSource->getTerminator();
+  bool HasPrebranchedFallthrough =
+      HasTerminator &&
+      !isa<YieldOp>(FallthroughSource->getTerminator());
+
+  // If this is a normal cleanup, then having a prebranched
+  // fallthrough implies that the fallthrough source unconditionally
+  // jumps here.
+  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+         (Scope.getNormalBlock() &&
+          FallthroughSource->getTerminator()->getSuccessor(0) ==
+              Scope.getNormalBlock()));
+
+  bool RequiresNormalCleanup = false;
+  if (Scope.isNormalCleanup() &&
+      (HasFixups || HasExistingBranches || HasFallthrough)) {
+    RequiresNormalCleanup = true;
+  }
+
+  // If we have a prebranched fallthrough into an inactive normal
+  // cleanup, rewrite it so that it leads to the appropriate place.
+  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+    llvm_unreachable("NYI");
+  }
+
+  // If we don't need the cleanup at all, we're done.
+  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+    destroyOptimisticNormalEntry(*this, Scope);
+    EHStack.popCleanup(); // safe because there are no fixups
+    assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
+    return;
+  }
+
+  // Copy the cleanup emission data out. This uses either a stack
+  // array or malloc'd memory, depending on the size, which is
+  // behavior that SmallVector would provide, if we could use it
+  // here. Unfortunately, if you ask for a SmallVector<char>, the
+  // alignment isn't sufficient.
+  auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
+  alignas(EHScopeStack::ScopeStackAlignment) char
+      CleanupBufferStack[8 * sizeof(void *)];
+  std::unique_ptr<char[]> CleanupBufferHeap;
+  size_t CleanupSize = Scope.getCleanupSize();
+  EHScopeStack::Cleanup *Fn;
+
+  if (CleanupSize <= sizeof(CleanupBufferStack)) {
+    memcpy(CleanupBufferStack, CleanupSource, CleanupSize);
+    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack);
+  } else {
+    CleanupBufferHeap.reset(new char[CleanupSize]);
+    memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
+    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
+  }
+
+  EHScopeStack::Cleanup::Flags cleanupFlags;
+  if (Scope.isNormalCleanup())
+    cleanupFlags.setIsNormalCleanupKind();
+  if (Scope.isEHCleanup())
+    cleanupFlags.setIsEHCleanupKind();
+
+  // Under -EHa, invoke seh.scope.end() to mark scope end before dtor
+  bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker();
+  // const EHPersonality &Personality = EHPersonality::get(*this);
+  if (!RequiresNormalCleanup) {
+    llvm_unreachable("NYI");
+  } else {
+    // If we have a fallthrough and no other need for the cleanup,
+    // emit it directly.
+    if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups &&
+        !HasExistingBranches) {
+
+      // mark SEH scope end for fall-through flow
+      if (IsEHa) {
+        llvm_unreachable("NYI");
+      }
+
+      destroyOptimisticNormalEntry(*this, Scope);
+      EHStack.popCleanup();
+      buildCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+      // Otherwise, the best approach is to thread everything through
+      // the cleanup block and then try to clean up after ourselves.
+    } else {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+  // Emit the EH cleanup if required.
+  if (RequiresEHCleanup) {
+    // FIXME(cir): should we guard insertion point here?
+    auto *NextAction = getEHDispatchBlock(EHParent);
+    (void)NextAction;
+
+    // Push a terminate scope or cleanupendpad scope around the potentially
+    // throwing cleanups. For funclet EH personalities, the cleanupendpad
+    // models program termination when cleanups throw.
+    bool PushedTerminate = false;
+    SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
+    mlir::Operation *CPI = nullptr;
+
+    const EHPersonality &Personality = EHPersonality::get(*this);
+    if (Personality.usesFuncletPads()) {
+      llvm_unreachable("NYI");
+    }
+
+    // Non-MSVC personalities need to terminate when an EH cleanup throws.
+    if (!Personality.isMSVCPersonality()) {
+      EHStack.pushTerminate();
+      PushedTerminate = true;
+    } else if (IsEHa && getInvokeDest()) {
+      llvm_unreachable("NYI");
+    }
+
+    // We only actually emit the cleanup code if the cleanup is either
+    // active or was used before it was deactivated.
+    if (EHActiveFlag.isValid() || IsActive) {
+      cleanupFlags.setIsForEHCleanup();
+      buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+    }
+
+    // In LLVM traditional codegen, here's where it branches off to
+    // NextAction.
+    if (CPI)
+      llvm_unreachable("NYI");
+
+    // Leave the terminate scope.
+    if (PushedTerminate)
+      EHStack.popTerminate();
+
+    // FIXME(cir): LLVM traditional codegen tries to simplify some of the
+    // codegen here. Once we are further down with EH support revisit whether
+    // we need to do this during lowering.
+    assert(!UnimplementedFeature::simplifyCleanupEntry());
+  }
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CIRGenFunction::PopCleanupBlocks(
+    EHScopeStack::stable_iterator Old,
+    std::initializer_list<mlir::Value *> ValuesToReload) {
+  assert(Old.isValid());
+
+  bool HadBranches = false;
+  while (EHStack.stable_begin() != Old) {
+    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+    HadBranches |= Scope.hasBranches();
+
+    // As long as Old strictly encloses the scope's enclosing normal
+    // cleanup, we're going to emit another normal cleanup which
+    // fallthrough can propagate through.
+    bool FallThroughIsBranchThrough =
+        Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+    PopCleanupBlock(FallThroughIsBranchThrough);
+  }
+
+  // If we didn't have any branches, the insertion point before cleanups must
+  // dominate the current insertion point and we don't need to reload any
+  // values.
+  if (!HadBranches)
+    return;
+
+  llvm_unreachable("NYI");
+}
+
+/// Pops cleanup blocks until the given savepoint is reached, then add the
+/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
+void CIRGenFunction::PopCleanupBlocks(
+    EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize,
+    std::initializer_list<mlir::Value *> ValuesToReload) {
+  PopCleanupBlocks(Old, ValuesToReload);
+
+  // Move our deferred cleanups onto the EH stack.
+  for (size_t I = OldLifetimeExtendedSize,
+              E = LifetimeExtendedCleanupStack.size();
+       I != E;
+       /**/) {
+    // Alignment should be guaranteed by the vptrs in the individual cleanups.
+    assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) &&
+           "misaligned cleanup stack entry");
+
+    LifetimeExtendedCleanupHeader &Header =
+        reinterpret_cast<LifetimeExtendedCleanupHeader &>(
+            LifetimeExtendedCleanupStack[I]);
+    I += sizeof(Header);
+
+    EHStack.pushCopyOfCleanup(
+        Header.getKind(), &LifetimeExtendedCleanupStack[I], Header.getSize());
+    I += Header.getSize();
+
+    if (Header.isConditional()) {
+      Address ActiveFlag = reinterpret_cast<Address &>
(LifetimeExtendedCleanupStack[I]);
+      initFullExprCleanupWithFlag(ActiveFlag);
+      I += sizeof(ActiveFlag);
+    }
+  }
+  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
+}
+
+//===----------------------------------------------------------------------===//
+// EHScopeStack
+//===----------------------------------------------------------------------===//
+
+void EHScopeStack::Cleanup::anchor() {}
+
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+  Size = llvm::alignTo(Size, ScopeStackAlignment);
+  if (!StartOfBuffer) {
+    unsigned Capacity = 1024;
+    while (Capacity < Size)
+      Capacity *= 2;
+    StartOfBuffer = new char[Capacity];
+    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+    unsigned NewCapacity = CurrentCapacity;
+    do {
+      NewCapacity *= 2;
+    } while (NewCapacity < UsedCapacity + Size);
+
+    char *NewStartOfBuffer = new char[NewCapacity];
+    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+    memcpy(NewStartOfData, StartOfData, UsedCapacity);
+    delete[] StartOfBuffer;
+    StartOfBuffer = NewStartOfBuffer;
+    EndOfBuffer = NewEndOfBuffer;
+    StartOfData = NewStartOfData;
+  }
+
+  assert(StartOfBuffer + Size <= StartOfData);
+  StartOfData -= Size;
+  return StartOfData;
+}
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+  bool IsNormalCleanup = Kind & NormalCleanup;
+  bool IsEHCleanup = Kind & EHCleanup;
+  bool IsLifetimeMarker = Kind & LifetimeMarker;
+
+  // Per C++ [except.terminate], it is implementation-defined whether none,
+  // some, or all cleanups are called before std::terminate. Thus, when
+  // terminate is the current EH scope, we may skip adding any EH cleanup
+  // scopes.
+  if (InnermostEHScope != stable_end() &&
+      find(InnermostEHScope)->getKind() == EHScope::Terminate)
+    IsEHCleanup = false;
+
+  EHCleanupScope *Scope = new (Buffer)
+      EHCleanupScope(IsNormalCleanup, IsEHCleanup, Size, BranchFixups.size(),
+                     InnermostNormalCleanup, InnermostEHScope);
+  if (IsNormalCleanup)
+    InnermostNormalCleanup = stable_begin();
+  if (IsEHCleanup)
+    InnermostEHScope = stable_begin();
+  if (IsLifetimeMarker)
+    llvm_unreachable("NYI");
+
+  // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
+  if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker &&
+      CGF->getTarget().getCXXABI().isMicrosoft())
+    llvm_unreachable("NYI");
+
+  return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+  assert(!empty() && "popping exception stack when not empty");
+
+  assert(isa<EHCleanupScope>(*begin()));
+  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+  InnermostEHScope = Cleanup.getEnclosingEHScope();
+  deallocate(Cleanup.getAllocatedSize());
+
+  // Destroy the cleanup.
+  Cleanup.Destroy();
+
+  // Check whether we can shrink the branch-fixups stack.
+  if (!BranchFixups.empty()) {
+    // If we no longer have any normal cleanups, all the fixups are
+    // complete.
+    if (!hasNormalCleanups())
+      BranchFixups.clear();
+
+    // Otherwise we can still trim out unnecessary nulls.
+    else
+      popNullFixups();
+  }
+}
+
+void EHScopeStack::deallocate(size_t Size) {
+  StartOfData += llvm::alignTo(Size, ScopeStackAlignment);
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+  // We expect this to only be called when there's still an innermost
+  // normal cleanup; otherwise there really shouldn't be any fixups.
+  llvm_unreachable("NYI");
+}
+
+bool EHScopeStack::requiresLandingPad() const {
+  for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
+    // Skip lifetime markers.
+    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si)))
+      if (cleanup->isLifetimeMarker()) {
+        si = cleanup->getEnclosingEHScope();
+        continue;
+      }
+    return true;
+  }
+
+  return false;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+  EHCatchScope *scope =
+      new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
+  InnermostEHScope = stable_begin();
+  return scope;
+}
+
+void EHScopeStack::pushTerminate() {
+  char *Buffer = allocate(EHTerminateScope::getSize());
+  new (Buffer) EHTerminateScope(InnermostEHScope);
+  InnermostEHScope = stable_begin();
+}
\ No newline at end of file
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
new file mode 100644
index 000000000000..4627b60d1c63
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -0,0 +1,620 @@
+//===-- CIRGenCleanup.h - Classes for cleanups CIR generation ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of CIR for cleanups, initially based
+// on LLVM IR cleanup handling, but ought to change as CIR evolves.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H
+#define LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H
+
+#include "Address.h"
+#include "EHScopeStack.h"
+#include "mlir/IR/Value.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+class FunctionDecl;
+}
+
+namespace cir {
+class CIRGenModule;
+class CIRGenFunction;
+
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+  mlir::TypedAttr RTTI;
+  unsigned Flags;
+};
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+  mlir::Operation *CachedLandingPad;
+  mlir::Block *CachedEHDispatchBlock;
+
+  EHScopeStack::stable_iterator EnclosingEHScope;
+
+  class CommonBitFields {
+    friend class EHScope;
+    unsigned Kind : 3;
+  };
+  enum { NumCommonBits = 3 };
+
+protected:
+  class CatchBitFields {
+    friend class EHCatchScope;
+    unsigned : NumCommonBits;
+
+    unsigned NumHandlers : 32 - NumCommonBits;
+  };
+
+  class CleanupBitFields {
+    friend class EHCleanupScope;
+    unsigned : NumCommonBits;
+
+    /// Whether this cleanup needs to be run along normal edges.
+    unsigned IsNormalCleanup : 1;
+
+    /// Whether this cleanup needs to be run along exception edges.
+    unsigned IsEHCleanup : 1;
+
+    /// Whether this cleanup is currently active.
+    unsigned IsActive : 1;
+
+    /// Whether this cleanup is a lifetime marker.
+    unsigned IsLifetimeMarker : 1;
+
+    /// Whether the normal cleanup should test the activation flag.
+    unsigned TestFlagInNormalCleanup : 1;
+
+    /// Whether the EH cleanup should test the activation flag.
+    unsigned TestFlagInEHCleanup : 1;
+
+    /// The amount of extra storage needed by the Cleanup.
+    /// Always a multiple of the scope-stack alignment.
+    unsigned CleanupSize : 12;
+  };
+
+  class FilterBitFields {
+    friend class EHFilterScope;
+    unsigned : NumCommonBits;
+
+    unsigned NumFilters : 32 - NumCommonBits;
+  };
+
+  union {
+    CommonBitFields CommonBits;
+    CatchBitFields CatchBits;
+    CleanupBitFields CleanupBits;
+    FilterBitFields FilterBits;
+  };
+
+public:
+  enum Kind { Cleanup, Catch, Terminate, Filter };
+
+  EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
+      : CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
+        EnclosingEHScope(enclosingEHScope) {
+    CommonBits.Kind = kind;
+  }
+
+  Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
+
+  mlir::Operation *getCachedLandingPad() const { return CachedLandingPad; }
+
+  void setCachedLandingPad(mlir::Operation *op) { CachedLandingPad = op; }
+
+  mlir::Block *getCachedEHDispatchBlock() const {
+    return CachedEHDispatchBlock;
+  }
+
+  void setCachedEHDispatchBlock(mlir::Block *block) {
+    CachedEHDispatchBlock = block;
+  }
+
+  bool hasEHBranches() const {
+    // Traditional LLVM codegen also checks for `!block->use_empty()`, but
+    // in CIRGen the block content is not important, just used as a way to
+    // signal `hasEHBranches`.
+    if (mlir::Block *block = getCachedEHDispatchBlock())
+      return true;
+    return false;
+  }
+
+  EHScopeStack::stable_iterator getEnclosingEHScope() const {
+    return EnclosingEHScope;
+  }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C \@finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+  // In effect, we have a flexible array member
+  //   Handler Handlers[0];
+  // But that's only standard in C99, not C++, so we have to do
+  // annoying pointer arithmetic instead.
+
+public:
+  struct Handler {
+    /// A type info value, or null (C++ null, not an LLVM null pointer)
+    /// for a catch-all.
+    CatchTypeInfo Type;
+
+    /// The catch handler for this type.
+    mlir::Block *Block;
+
+    bool isCatchAll() const { return Type.RTTI == nullptr; }
+  };
+
+private:
+  friend class EHScopeStack;
+
+  Handler *getHandlers() { return reinterpret_cast<Handler *>(this + 1); }
+
+  const Handler *getHandlers() const {
+    return reinterpret_cast<const Handler *>(this + 1);
+  }
+
+public:
+  static size_t getSizeForNumHandlers(unsigned N) {
+    return sizeof(EHCatchScope) + N * sizeof(Handler);
+  }
+
+  EHCatchScope(unsigned numHandlers,
+               EHScopeStack::stable_iterator enclosingEHScope)
+      : EHScope(Catch, enclosingEHScope) {
+    CatchBits.NumHandlers = numHandlers;
+    assert(CatchBits.NumHandlers == numHandlers && "NumHandlers overflow?");
+  }
+
+  unsigned getNumHandlers() const { return CatchBits.NumHandlers; }
+
+  void setCatchAllHandler(unsigned I, mlir::Block *Block) {
+    setHandler(I, CatchTypeInfo{nullptr, 0}, Block);
+  }
+
+  void setHandler(unsigned I, mlir::TypedAttr Type, mlir::Block *Block) {
+    assert(I < getNumHandlers());
+    getHandlers()[I].Type = CatchTypeInfo{Type, 0};
+    getHandlers()[I].Block = Block;
+  }
+
+  void setHandler(unsigned I, CatchTypeInfo Type, mlir::Block *Block) {
+    assert(I < getNumHandlers());
+    getHandlers()[I].Type = Type;
+    getHandlers()[I].Block = Block;
+  }
+
+  const Handler &getHandler(unsigned I) const {
+    assert(I < getNumHandlers());
+    return getHandlers()[I];
+  }
+
+  // Clear all handler blocks.
+  // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+  // 'takeHandler' or some such function which removes ownership from the
+  // EHCatchScope object if the handlers should live longer than EHCatchScope.
+  void clearHandlerBlocks() {
+    for (unsigned I = 0, N = getNumHandlers(); I != N; ++I)
+      delete getHandler(I).Block;
+  }
+
+  typedef const Handler *iterator;
+  iterator begin() const { return getHandlers(); }
+  iterator end() const { return getHandlers() + getNumHandlers(); }
+
+  static bool classof(const EHScope *Scope) {
+    return Scope->getKind() == Catch;
+  }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class alignas(8) EHCleanupScope : public EHScope {
+  /// The nearest normal cleanup scope enclosing this one.
+  EHScopeStack::stable_iterator EnclosingNormal;
+
+  /// The nearest EH scope enclosing this one.
+  EHScopeStack::stable_iterator EnclosingEH;
+
+  /// The dual entry/exit block along the normal edge. This is lazily
+  /// created if needed before the cleanup is popped.
+  mlir::Block *NormalBlock;
+
+  /// An optional i1 variable indicating whether this cleanup has been
+  /// activated yet.
+  Address ActiveFlag;
+
+  /// Extra information required for cleanups that have resolved
+  /// branches through them. This has to be allocated on the side
+  /// because everything on the cleanup stack has to be trivially
+  /// movable.
+  struct ExtInfo {
+    /// The destinations of normal branch-afters and branch-throughs.
+    llvm::SmallPtrSet<mlir::Block *, 4> Branches;
+
+    /// Normal branch-afters.
+    llvm::SmallVector<std::pair<mlir::Block *, mlir::Value>, 4> BranchAfters;
+  };
+  mutable struct ExtInfo *ExtInfo;
+
+  /// The number of fixups required by enclosing scopes (not including
+  /// this one). If this is the top cleanup scope, all the fixups
+  /// from this index onwards belong to this scope.
+  unsigned FixupDepth;
+
+  struct ExtInfo &getExtInfo() {
+    if (!ExtInfo)
+      ExtInfo = new struct ExtInfo();
+    return *ExtInfo;
+  }
+
+  const struct ExtInfo &getExtInfo() const {
+    if (!ExtInfo)
+      ExtInfo = new struct ExtInfo();
+    return *ExtInfo;
+  }
+
+public:
+  /// Gets the size required for a lazy cleanup scope with the given
+  /// cleanup-data requirements.
+  static size_t getSizeForCleanupSize(size_t Size) {
+    return sizeof(EHCleanupScope) + Size;
+  }
+
+  size_t getAllocatedSize() const {
+    return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
+  }
+
+  EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize,
+                 unsigned fixupDepth,
+                 EHScopeStack::stable_iterator enclosingNormal,
+                 EHScopeStack::stable_iterator enclosingEH)
+      : EHScope(EHScope::Cleanup, enclosingEH),
+        EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
+        ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+        FixupDepth(fixupDepth) {
+    CleanupBits.IsNormalCleanup = isNormal;
+    CleanupBits.IsEHCleanup = isEH;
+    CleanupBits.IsActive = true;
+    CleanupBits.IsLifetimeMarker = false;
+    CleanupBits.TestFlagInNormalCleanup = false;
+    CleanupBits.TestFlagInEHCleanup = false;
+    CleanupBits.CleanupSize = cleanupSize;
+
+    assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
+  }
+
+  void Destroy() { delete ExtInfo; }
+  // Objects of EHCleanupScope are not destructed. Use Destroy().
+  ~EHCleanupScope() = delete;
+
+  bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
+  mlir::Block *getNormalBlock() const { return NormalBlock; }
+  void setNormalBlock(mlir::Block *BB) { NormalBlock = BB; }
+
+  bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
+
+  bool isActive() const { return CleanupBits.IsActive; }
+  void setActive(bool A) { CleanupBits.IsActive = A; }
+
+  bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
+  void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
+
+  bool hasActiveFlag() const { return ActiveFlag.isValid(); }
+  Address getActiveFlag() const { return ActiveFlag; }
+  void setActiveFlag(Address Var) {
+    assert(Var.getAlignment().isOne());
+    ActiveFlag = Var;
+  }
+
+  void setTestFlagInNormalCleanup() {
+    CleanupBits.TestFlagInNormalCleanup = true;
+  }
+  bool shouldTestFlagInNormalCleanup() const {
+    return CleanupBits.TestFlagInNormalCleanup;
+  }
+
+  void setTestFlagInEHCleanup() { CleanupBits.TestFlagInEHCleanup = true; }
+  bool shouldTestFlagInEHCleanup() const {
+    return CleanupBits.TestFlagInEHCleanup;
+  }
+
+  unsigned getFixupDepth() const { return FixupDepth; }
+  EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+    return EnclosingNormal;
+  }
+
+  size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
+  void *getCleanupBuffer() { return this + 1; }
+
+  EHScopeStack::Cleanup *getCleanup() {
+    return reinterpret_cast<EHScopeStack::Cleanup *>(getCleanupBuffer());
+  }
+
+  /// True if this cleanup scope has any branch-afters or branch-throughs.
+  bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+  /// Add a branch-after to this cleanup scope. A branch-after is a
+  /// branch from a point protected by this (normal) cleanup to a
+  /// point in the normal cleanup scope immediately containing it.
+  /// For example,
+  ///   for (;;) { A a; break; }
+  /// contains a branch-after.
+  ///
+  /// Branch-afters each have their own destination out of the
+  /// cleanup, guaranteed distinct from anything else threaded through
+  /// it. Therefore branch-afters usually force a switch after the
+  /// cleanup.
+  void addBranchAfter(mlir::Value Index, mlir::Block *Block) {
+    struct ExtInfo &ExtInfo = getExtInfo();
+    if (ExtInfo.Branches.insert(Block).second)
+      ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+  }
+
+  /// Return the number of unique branch-afters on this scope.
+  unsigned getNumBranchAfters() const {
+    return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+  }
+
+  mlir::Block *getBranchAfterBlock(unsigned I) const {
+    assert(I < getNumBranchAfters());
+    return ExtInfo->BranchAfters[I].first;
+  }
+
+  mlir::Value getBranchAfterIndex(unsigned I) const {
+    assert(I < getNumBranchAfters());
+    return ExtInfo->BranchAfters[I].second;
+  }
+
+  /// Add a branch-through to this cleanup scope. A branch-through is
+  /// a branch from a scope protected by this (normal) cleanup to an
+  /// enclosing scope other than the immediately-enclosing normal
+  /// cleanup scope.
+  ///
+  /// In the following example, the branch through B's scope is a
+  /// branch-through, while the branch through A's scope is a
+  /// branch-after:
+  ///   for (;;) { A a; B b; break; }
+  ///
+  /// All branch-throughs have a common destination out of the
+  /// cleanup, one possibly shared with the fall-through. Therefore
+  /// branch-throughs usually don't force a switch after the cleanup.
+  ///
+  /// \return true if the branch-through was new to this scope
+  bool addBranchThrough(mlir::Block *Block) {
+    return getExtInfo().Branches.insert(Block).second;
+  }
+
+  /// Determines if this cleanup scope has any branch throughs.
+  bool hasBranchThroughs() const {
+    if (!ExtInfo)
+      return false;
+    return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+  }
+
+  static bool classof(const EHScope *Scope) {
+    return (Scope->getKind() == Cleanup);
+  }
+};
+// NOTE: there's a bunch of different data classes tacked on after an
+// EHCleanupScope. It is asserted (in EHScopeStack::pushCleanup*) that
+// they don't require greater alignment than ScopeStackAlignment. So,
+// EHCleanupScope ought to have alignment equal to that -- not more
+// (would be misaligned by the stack allocator), and not less (would
+// break the appended classes).
+static_assert(alignof(EHCleanupScope) == EHScopeStack::ScopeStackAlignment,
+              "EHCleanupScope expected alignment");
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+  // Essentially ends in a flexible array member:
+  //   mlir::Value FilterTypes[0];
+
+  mlir::Value *getFilters() {
+    return reinterpret_cast<mlir::Value *>(this + 1);
+  }
+
+  mlir::Value const *getFilters() const {
+    return reinterpret_cast<mlir::Value const *>(this + 1);
+  }
+
+public:
+  EHFilterScope(unsigned numFilters)
+      : EHScope(Filter, EHScopeStack::stable_end()) {
+    FilterBits.NumFilters = numFilters;
+    assert(FilterBits.NumFilters == numFilters && "NumFilters overflow");
+  }
+
+  static size_t getSizeForNumFilters(unsigned numFilters) {
+    return sizeof(EHFilterScope) + numFilters * sizeof(mlir::Value);
+  }
+
+  unsigned getNumFilters() const { return FilterBits.NumFilters; }
+
+  void setFilter(unsigned i, mlir::Value filterValue) {
+    assert(i < getNumFilters());
+    getFilters()[i] = filterValue;
+  }
+
+  mlir::Value getFilter(unsigned i) const {
+    assert(i < getNumFilters());
+    return getFilters()[i];
+  }
+
+  static bool classof(const EHScope *scope) {
+    return scope->getKind() == Filter;
+  }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+  EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
+      : EHScope(Terminate, enclosingEHScope) {}
+  static size_t getSize() { return sizeof(EHTerminateScope); }
+
+  static bool classof(const EHScope *scope) {
+    return scope->getKind() == Terminate;
+  }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+  char *Ptr;
+
+  friend class EHScopeStack;
+  explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+  iterator() : Ptr(nullptr) {}
+
+  EHScope *get() const { return reinterpret_cast<EHScope *>(Ptr); }
+
+  EHScope *operator->() const { return get(); }
+  EHScope &operator*() const { return *get(); }
+
+  iterator &operator++() {
+    size_t Size;
+    switch (get()->getKind()) {
+    case EHScope::Catch:
+      Size = EHCatchScope::getSizeForNumHandlers(
+          static_cast<const EHCatchScope *>(get())->getNumHandlers());
+      break;
+
+    case EHScope::Filter:
+      Size = EHFilterScope::getSizeForNumFilters(
+          static_cast<const EHFilterScope *>(get())->getNumFilters());
+      break;
+
+    case EHScope::Cleanup:
+      Size = static_cast<const EHCleanupScope *>(get())->getAllocatedSize();
+      break;
+
+    case EHScope::Terminate:
+      Size = EHTerminateScope::getSize();
+      break;
+    }
+    Ptr += llvm::alignTo(Size, ScopeStackAlignment);
+    return *this;
+  }
+
+  iterator next() {
+    iterator copy = *this;
+    ++copy;
+    return copy;
+  }
+
+  iterator operator++(int) {
+    iterator copy = *this;
+    operator++();
+    return copy;
+  }
+
+  bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+  bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+  bool operator==(iterator other) const { return Ptr == other.Ptr; }
+  bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+  return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+  return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+  assert(!empty() && "popping exception stack when not empty");
+
+  EHCatchScope &scope = llvm::cast<EHCatchScope>(*begin());
+  InnermostEHScope = scope.getEnclosingEHScope();
+  deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
+}
+
+inline void EHScopeStack::popTerminate() {
+  assert(!empty() && "popping exception stack when not empty");
+
+  EHTerminateScope &scope = llvm::cast<EHTerminateScope>(*begin());
+  InnermostEHScope = scope.getEnclosingEHScope();
+  deallocate(EHTerminateScope::getSize());
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+  assert(sp.isValid() && "finding invalid savepoint");
+  assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+  return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+  assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+  return stable_iterator(EndOfBuffer - ir.Ptr);
+}
+
+/// The exceptions personality for a function.
+struct EHPersonality {
+  const char *PersonalityFn;
+
+  // If this is non-null, this personality requires a non-standard
+  // function for rethrowing an exception after a catchall cleanup.
+  // This function must have prototype void(void*).
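+  // For instance, in traditional LLVM codegen the GNU Objective-C
+  // personality pairs with "objc_exception_throw" as its catchall rethrow
+  // function (illustrative; see clang's CGException.cpp).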
+  const char *CatchallRethrowFn;
+
+  static const EHPersonality &get(CIRGenModule &CGM,
+                                  const clang::FunctionDecl *FD);
+  static const EHPersonality &get(CIRGenFunction &CGF);
+
+  static const EHPersonality GNU_C;
+  static const EHPersonality GNU_C_SJLJ;
+  static const EHPersonality GNU_C_SEH;
+  static const EHPersonality GNU_ObjC;
+  static const EHPersonality GNU_ObjC_SJLJ;
+  static const EHPersonality GNU_ObjC_SEH;
+  static const EHPersonality GNUstep_ObjC;
+  static const EHPersonality GNU_ObjCXX;
+  static const EHPersonality NeXT_ObjC;
+  static const EHPersonality GNU_CPlusPlus;
+  static const EHPersonality GNU_CPlusPlus_SJLJ;
+  static const EHPersonality GNU_CPlusPlus_SEH;
+  static const EHPersonality MSVC_except_handler;
+  static const EHPersonality MSVC_C_specific_handler;
+  static const EHPersonality MSVC_CxxFrameHandler3;
+  static const EHPersonality GNU_Wasm_CPlusPlus;
+  static const EHPersonality XL_CPlusPlus;
+
+  /// Does this personality use landingpads or the family of pad instructions
+  /// designed to form funclets?
+  bool usesFuncletPads() const {
+    return isMSVCPersonality() || isWasmPersonality();
+  }
+
+  bool isMSVCPersonality() const {
+    return this == &MSVC_except_handler || this == &MSVC_C_specific_handler ||
+           this == &MSVC_CxxFrameHandler3;
+  }
+
+  bool isWasmPersonality() const { return this == &GNU_Wasm_CPlusPlus; }
+
+  bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; }
+};
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
new file mode 100644
index 000000000000..2a23fd1a73ed
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -0,0 +1,563 @@
+//===----- CGCoroutine.cpp - Emit CIR Code for C++ coroutines -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of coroutines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "llvm/ADT/ScopeExit.h"
+
+using namespace clang;
+using namespace cir;
+
+struct cir::CGCoroData {
+  // What is the current await expression kind and how many
+  // await/yield expressions were encountered so far.
+  // These are used to generate pretty labels for await expressions in LLVM IR.
+  mlir::cir::AwaitKind CurrentAwaitKind = mlir::cir::AwaitKind::init;
+
+  // Stores the __builtin_coro_id emitted in the function so that we can supply
+  // it as the first argument to other builtins.
+  mlir::cir::CallOp CoroId = nullptr;
+
+  // Stores the result of __builtin_coro_begin call.
+  mlir::Value CoroBegin = nullptr;
+
+  // Stores the insertion point for final suspend, this happens after the
+  // promise call (return_xxx promise member) but before a cir.br to the return
+  // block.
+  mlir::Operation *FinalSuspendInsPoint;
+
+  // How many co_return statements are in the coroutine. Used to decide whether
+  // we need to add a co_return; equivalent at the end of the user authored
+  // body.
+  unsigned CoreturnCount = 0;
+
+  // The promise type's 'unhandled_exception' handler, if it defines one.
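+  // For a promise type sketched as:
+  //   struct promise_type {
+  //     ...
+  //     void unhandled_exception();
+  //   };
+  // this is the statement invoking promise.unhandled_exception() from the
+  // catch-all handler that wraps the coroutine body.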
+  Stmt *ExceptionHandler = nullptr;
+};
+
+// Defining these here allows us to keep CGCoroData private to this file.
+CIRGenFunction::CGCoroInfo::CGCoroInfo() {}
+CIRGenFunction::CGCoroInfo::~CGCoroInfo() {}
+
+static void createCoroData(CIRGenFunction &CGF,
+                           CIRGenFunction::CGCoroInfo &CurCoro,
+                           mlir::cir::CallOp CoroId) {
+  if (CurCoro.Data) {
+    llvm_unreachable("EmitCoroutineBodyStatement called twice?");
+
+    return;
+  }
+
+  CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
+  CurCoro.Data->CoroId = CoroId;
+}
+
+namespace {
+// FIXME: both GetParamRef and ParamReferenceReplacerRAII are good template
+// candidates to be shared among LLVM / CIR codegen.
+
+// Hunts for the parameter reference in the parameter copy/move declaration.
+struct GetParamRef : public StmtVisitor<GetParamRef> {
+public:
+  DeclRefExpr *Expr = nullptr;
+  GetParamRef() {}
+  void VisitDeclRefExpr(DeclRefExpr *E) {
+    assert(Expr == nullptr && "multiple declrefs in param move");
+    Expr = E;
+  }
+  void VisitStmt(Stmt *S) {
+    for (auto *C : S->children()) {
+      if (C)
+        Visit(C);
+    }
+  }
+};
+
+// This class replaces references to parameters with their copies by changing
+// the addresses in CGF.LocalDeclMap and restoring back the original values in
+// its destructor.
+struct ParamReferenceReplacerRAII {
+  CIRGenFunction::DeclMapTy SavedLocals;
+  CIRGenFunction::DeclMapTy &LocalDeclMap;
+
+  ParamReferenceReplacerRAII(CIRGenFunction::DeclMapTy &LocalDeclMap)
+      : LocalDeclMap(LocalDeclMap) {}
+
+  void addCopy(DeclStmt const *PM) {
+    // Figure out what param it refers to.
+
+    assert(PM->isSingleDecl());
+    VarDecl const *VD = static_cast<VarDecl const *>(PM->getSingleDecl());
+    Expr const *InitExpr = VD->getInit();
+    GetParamRef Visitor;
+    Visitor.Visit(const_cast<Expr *>(InitExpr));
+    assert(Visitor.Expr);
+    DeclRefExpr *DREOrig = Visitor.Expr;
+    auto *PD = DREOrig->getDecl();
+
+    auto it = LocalDeclMap.find(PD);
+    assert(it != LocalDeclMap.end() && "parameter is not found");
+    SavedLocals.insert({PD, it->second});
+
+    auto copyIt = LocalDeclMap.find(VD);
+    assert(copyIt != LocalDeclMap.end() && "parameter copy is not found");
+    it->second = copyIt->getSecond();
+  }
+
+  ~ParamReferenceReplacerRAII() {
+    for (auto &&SavedLocal : SavedLocals) {
+      LocalDeclMap.insert({SavedLocal.first, SavedLocal.second});
+    }
+  }
+};
+} // namespace
+
+// Emit coroutine intrinsic and patch up arguments of the token type.
+RValue CIRGenFunction::buildCoroutineIntrinsic(const CallExpr *E,
+                                               unsigned int IID) {
+  llvm_unreachable("NYI");
+}
+
+RValue CIRGenFunction::buildCoroutineFrame() {
+  if (CurCoro.Data && CurCoro.Data->CoroBegin) {
+    return RValue::get(CurCoro.Data->CoroBegin);
+  }
+  llvm_unreachable("NYI");
+}
+
+static mlir::LogicalResult
+buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S,
+                        Stmt *Body,
+                        const CIRGenFunction::LexicalScope *currLexScope) {
+  if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed())
+    return mlir::failure();
+  // Note that LLVM checks CanFallthrough by looking into the availability
+  // of the insert block, which is kinda brittle and unintuitive, and seems
+  // to be related with how landing pads are handled.
+  //
+  // CIRGen handles this by checking pre-existing co_returns in the current
+  // scope instead. Are we missing anything?
+  //
+  // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
+  const bool CanFallthrough = !currLexScope->hasCoreturn();
+  if (CanFallthrough)
+    if (Stmt *OnFallthrough = S.getFallthroughHandler())
+      if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+
+  return mlir::success();
+}
+
+mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc,
+                                                         mlir::Value nullPtr) {
+  auto int32Ty = builder.getUInt32Ty();
+
+  auto &TI = CGM.getASTContext().getTargetInfo();
+  unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();
+
+  mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroId);
+
+  mlir::cir::FuncOp fnOp;
+  if (!builtin) {
+    fnOp = CGM.createCIRFunction(
+        loc, CGM.builtinCoroId,
+        mlir::cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy},
+                                 int32Ty),
+        /*FD=*/nullptr);
+    assert(fnOp && "should always succeed");
+    fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext()));
+  } else
+    fnOp = cast<mlir::cir::FuncOp>(builtin);
+
+  return builder.create<mlir::cir::CallOp>(
+      loc, fnOp,
+      mlir::ValueRange{builder.getUInt32(NewAlign, loc), nullPtr, nullPtr,
+                       nullPtr});
+}
+
+mlir::cir::CallOp
+CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) {
+  auto boolTy = builder.getBoolTy();
+  auto int32Ty = builder.getUInt32Ty();
+
+  mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroAlloc);
+
+  mlir::cir::FuncOp fnOp;
+  if (!builtin) {
+    fnOp = CGM.createCIRFunction(loc, CGM.builtinCoroAlloc,
+                                 mlir::cir::FuncType::get({int32Ty}, boolTy),
+                                 /*FD=*/nullptr);
+    assert(fnOp && "should always succeed");
+    fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext()));
+  } else
+    fnOp = cast<mlir::cir::FuncOp>(builtin);
+
+  return builder.create<mlir::cir::CallOp>(
+      loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(0)});
+}
+
+mlir::cir::CallOp
+CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc,
+                                          mlir::Value coroframeAddr) {
+  auto int32Ty = builder.getUInt32Ty();
+  mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin);
+
+  mlir::cir::FuncOp fnOp;
+  if (!builtin) {
+    fnOp = CGM.createCIRFunction(
+        loc, CGM.builtinCoroBegin,
+        mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, VoidPtrTy),
+        /*FD=*/nullptr);
+    assert(fnOp && "should always succeed");
+    fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext()));
+  } else
+    fnOp = cast<mlir::cir::FuncOp>(builtin);
+
+  return builder.create<mlir::cir::CallOp>(
+      loc, fnOp,
+      mlir::ValueRange{CurCoro.Data->CoroId.getResult(0), coroframeAddr});
+}
+
+mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc,
+                                                          mlir::Value nullPtr) {
+  auto boolTy = builder.getBoolTy();
+  mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd);
+
+  mlir::cir::FuncOp fnOp;
+  if (!builtin) {
+    fnOp = CGM.createCIRFunction(
+        loc, CGM.builtinCoroEnd,
+        mlir::cir::FuncType::get({VoidPtrTy, boolTy}, boolTy),
+        /*FD=*/nullptr);
+    assert(fnOp && "should always succeed");
+    fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext()));
+  } else
+    fnOp = cast<mlir::cir::FuncOp>(builtin);
+
+  return builder.create<mlir::cir::CallOp>(
+      loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)});
+}
+
+mlir::LogicalResult
+CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) {
+  auto openCurlyLoc = getLoc(S.getBeginLoc());
+  auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc);
+
+  auto Fn = dyn_cast<mlir::cir::FuncOp>(CurFn);
+  assert(Fn && "other callables NYI");
+  Fn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext()));
+  auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
+  createCoroData(*this, CurCoro, coroId);
+
+  // The backend is allowed to elide memory allocations; to help it, emit
+  //   auto mem = coro.alloc() ? 0 : ... allocation code ...;
+  auto coroAlloc = buildCoroAllocBuiltinCall(openCurlyLoc);
+
+  // Initialize address of coroutine frame to null
+  auto astVoidPtrTy = CGM.getASTContext().VoidPtrTy;
+  auto allocaTy = getTypes().convertTypeForMem(astVoidPtrTy);
+  Address coroFrame =
+      CreateTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
+                       openCurlyLoc, "__coro_frame_addr",
+                       /*ArraySize=*/nullptr);
+
+  auto storeAddr = coroFrame.getPointer();
+  builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
+  builder.create<mlir::cir::IfOp>(
+      openCurlyLoc, coroAlloc.getResult(0),
+      /*withElseRegion=*/false,
+      /*thenBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        builder.CIRBaseBuilderTy::createStore(
+            loc, buildScalarExpr(S.getAllocate()), storeAddr);
+        builder.create<mlir::cir::YieldOp>(loc);
+      });
+
+  CurCoro.Data->CoroBegin =
+      buildCoroBeginBuiltinCall(
+          openCurlyLoc,
+          builder.create<mlir::cir::LoadOp>(openCurlyLoc, allocaTy, storeAddr))
+          .getResult(0);
+
+  // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
+  if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure())
+    llvm_unreachable("NYI");
+
+  {
+    // FIXME(cir): create a new scope to copy out the params?
+    // LLVM creates scope cleanups here, but that might be due to the use
+    // of many basic blocks?
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+    ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap);
+
+    // Create mapping between parameters and copy-params for coroutine
+    // function.
+    llvm::ArrayRef<const Stmt *> ParamMoves = S.getParamMoves();
+    assert((ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) &&
+           "ParamMoves and FnArgs should be the same size for coroutine "
+           "function");
+    // For zipping the arg map into debug info.
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+
+    // Create parameter copies. We do it before creating a promise, since an
+    // evolution of coroutine TS may allow promise constructor to observe
+    // parameter copies.
+    for (auto *PM : S.getParamMoves()) {
+      if (buildStmt(PM, /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+      ParamReplacer.addCopy(cast<DeclStmt>(PM));
+    }
+
+    if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+
+    // ReturnValue should be valid as long as the coroutine's return type
+    // is not void. The assertion could help us to reduce the check later.
+    assert(ReturnValue.isValid() == (bool)S.getReturnStmt());
+    // Now we have the promise, initialize the GRO.
+    // We need to emit `get_return_object` first. According to:
+    // [dcl.fct.def.coroutine]p7
+    //   The call to get_return_object is sequenced before the call to
+    //   initial_suspend and is invoked at most once.
+    //
+    // So we can't emit the return value when we emit the return statement;
+    // otherwise the call to get_return_object wouldn't be in front
+    // of initial_suspend.
+    if (ReturnValue.isValid()) {
+      buildAnyExprToMem(S.getReturnValue(), ReturnValue,
+                        S.getReturnValue()->getType().getQualifiers(),
+                        /*IsInit*/ true);
+    }
+
+    // FIXME(cir): EHStack.pushCleanup<CallCoroEnd>(EHCleanup);
+    CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::init;
+    if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+
+    CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::user;
+
+    // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits.
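+    // In traditional LLVM codegen that wrapping roughly amounts to:
+    //   try {
+    //     <coroutine body + fallthrough>
+    //   } catch (...) {
+    //     promise.unhandled_exception();
+    //   }
+    // (a sketch only; the CIR counterpart below is still NYI).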
+    if (S.getExceptionHandler())
+      assert(!UnimplementedFeature::unhandledException() && "NYI");
+    if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed())
+      return mlir::failure();
+
+    // Note that LLVM checks CanFallthrough by looking into the availability
+    // of the insert block, which is kinda brittle and unintuitive, and seems
+    // to be related with how landing pads are handled.
+    //
+    // CIRGen handles this by checking pre-existing co_returns in the current
+    // scope instead. Are we missing anything?
+    //
+    // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
+    const bool CanFallthrough = currLexScope->hasCoreturn();
+    const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0;
+    if (CanFallthrough || HasCoreturns) {
+      CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::final;
+      {
+        mlir::OpBuilder::InsertionGuard guard(builder);
+        builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint);
+        if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true)
+                .failed())
+          return mlir::failure();
+      }
+    }
+  }
+  return mlir::success();
+}
+
+static bool memberCallExpressionCanThrow(const Expr *E) {
+  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
+    if (const auto *Proto =
+            CE->getMethodDecl()->getType()->getAs<FunctionProtoType>())
+      if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) &&
+          Proto->canThrow() == CT_Cannot)
+        return false;
+  return true;
+}
+
+// Given a suspend expression which roughly looks like:
+//
+//   auto && x = CommonExpr();
+//   if (!x.await_ready()) {
+//     x.await_suspend(...); (*)
+//   }
+//   x.await_resume();
+//
+// where the result of the entire expression is the result of x.await_resume()
+//
+// (*) If x.await_suspend's return type is bool, it can veto a suspend:
+//   if (x.await_suspend(...))
+//     llvm_coro_suspend();
+//
+// This is at a higher level than LLVM codegen; for the latter, see llvm's
+// docs/Coroutines.rst for more details.
namespace {
+struct LValueOrRValue {
+  LValue LV;
+  RValue RV;
+};
+} // namespace
+static LValueOrRValue
+buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
+                       CoroutineSuspendExpr const &S, mlir::cir::AwaitKind Kind,
+                       AggValueSlot aggSlot, bool ignoreResult,
+                       mlir::Block *scopeParentBlock,
+                       mlir::Value &tmpResumeRValAddr, bool forLValue) {
+  auto *E = S.getCommonExpr();
+
+  auto awaitBuild = mlir::success();
+  LValueOrRValue awaitRes;
+
+  auto Binder =
+      CIRGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
+  auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });
+  auto &builder = CGF.getBuilder();
+
+  [[maybe_unused]] auto awaitOp = builder.create<mlir::cir::AwaitOp>(
+      CGF.getLoc(S.getSourceRange()), Kind,
+      /*readyBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        Expr *condExpr = S.getReadyExpr()->IgnoreParens();
+        builder.createCondition(CGF.evaluateExprAsBool(condExpr));
+      },
+      /*suspendBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        // Note that differently from LLVM codegen we do not emit coro.save
+        // and coro.suspend here; that should be done as part of lowering this
+        // to LLVM dialect (or some other MLIR dialect).
+
+        // An invalid suspendRet indicates a "void returning await_suspend".
+        auto suspendRet = CGF.buildScalarExpr(S.getSuspendExpr());
+
+        // Veto suspension if requested by bool returning await_suspend.
+        if (suspendRet) {
+          // From LLVM codegen:
+          // if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1))
+          llvm_unreachable("NYI");
+        }
+
+        // Signals the parent that execution flows to next region.
+        builder.create<mlir::cir::YieldOp>(loc);
+      },
+      /*resumeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        // Exception handling requires additional IR. If the 'await_resume'
+        // function is marked as 'noexcept', we avoid generating this
+        // additional IR.
+        CXXTryStmt *TryStmt = nullptr;
+        if (Coro.ExceptionHandler && Kind == mlir::cir::AwaitKind::init &&
+            memberCallExpressionCanThrow(S.getResumeExpr())) {
+          llvm_unreachable("NYI");
+        }
+
+        // FIXME(cir): the alloca for the resume expr should be placed in the
+        // enclosing cir.scope instead.
+        if (forLValue)
+          awaitRes.LV = CGF.buildLValue(S.getResumeExpr());
+        else {
+          awaitRes.RV =
+              CGF.buildAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
+          if (!awaitRes.RV.isIgnored()) {
+            // Create the alloca in the block before the scope wrapping
+            // cir.await.
+            tmpResumeRValAddr = CGF.buildAlloca(
+                "__coawait_resume_rval", awaitRes.RV.getScalarVal().getType(),
+                loc, CharUnits::One(),
+                builder.getBestAllocaInsertPoint(scopeParentBlock));
+            // Store the rvalue so we can reload it before the promise call.
+            builder.CIRBaseBuilderTy::createStore(
+                loc, awaitRes.RV.getScalarVal(), tmpResumeRValAddr);
+          }
+        }
+
+        if (TryStmt) {
+          llvm_unreachable("NYI");
+        }
+
+        // Returns control back to parent.
+        builder.create<mlir::cir::YieldOp>(loc);
+      });
+
+  assert(awaitBuild.succeeded() && "Should know how to codegen");
+  return awaitRes;
+}
+
+RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E,
+                                        AggValueSlot aggSlot,
+                                        bool ignoreResult) {
+  RValue rval;
+  auto scopeLoc = getLoc(E.getSourceRange());
+
+  // Since we model suspend / resume as an inner region, we must store
+  // resume scalar results in a tmp alloca, and load it after we build the
+  // suspend expression. An alternative way to do this would be to make
+  // every region return a value when promise.return_value() is used, but
+  // it's a bit awkward given that resume is the only region that actually
+  // returns a value.
+  mlir::Block *currEntryBlock = currLexScope->getEntryBlock();
+  [[maybe_unused]] mlir::Value tmpResumeRValAddr;
+
+  // No need to explicitly wrap this into a scope since the AST already uses
+  // an ExprWithCleanups, which will wrap this into a cir.scope anyways.
+  rval = buildSuspendExpression(*this, *CurCoro.Data, E,
+                                CurCoro.Data->CurrentAwaitKind, aggSlot,
+                                ignoreResult, currEntryBlock, tmpResumeRValAddr,
+                                /*forLValue*/ false)
+             .RV;
+
+  if (ignoreResult || rval.isIgnored())
+    return rval;
+
+  if (rval.isScalar()) {
+    rval = RValue::get(builder.create<mlir::cir::LoadOp>(
+        scopeLoc, rval.getScalarVal().getType(), tmpResumeRValAddr));
+  } else if (rval.isAggregate()) {
+    // This is probably already handled via AggSlot, remove this assertion
+    // once we have a testcase and prove all pieces work.
+    llvm_unreachable("NYI");
+  } else { // complex
+    llvm_unreachable("NYI");
+  }
+  return rval;
+}
+
+mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) {
+  ++CurCoro.Data->CoreturnCount;
+  currLexScope->setCoreturn();
+
+  const Expr *RV = S.getOperand();
+  if (RV && RV->getType()->isVoidType() && !isa<InitListExpr>(RV)) {
+    // Make sure to evaluate the non-initlist expression of a co_return
+    // with a void expression for side effects.
+    // FIXME(cir): add scope
+    // RunCleanupsScope cleanupScope(*this);
+    buildIgnoredExpr(RV);
+  }
+  if (buildStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed())
+    return mlir::failure();
+  // Create a new return block (if not existent) and add a branch to
+  // it. The actual return instruction is only inserted during current
+  // scope cleanup handling.
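+  // As a sketch (block names illustrative), `co_return <expr>;` becomes:
+  //   <promise return_value(<expr>) / return_void() call>
+  //   cir.br ^ret    // FinalSuspendInsPoint; final suspend is emitted here
+  // ^cont:           // fresh block where codegen continues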
+  auto loc = getLoc(S.getSourceRange());
+  auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
+  CurCoro.Data->FinalSuspendInsPoint =
+      builder.create<mlir::cir::BrOp>(loc, retBlock);
+
+  // Insert the new block to continue codegen after the branch to the ret
+  // block; this will likely be an empty block.
+  builder.createBlock(builder.getBlock()->getParent());
+
+  // TODO(cir): LLVM codegen for a cleanup on cleanupScope here.
+  return mlir::success();
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h
new file mode 100644
index 000000000000..086c68baec9c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h
@@ -0,0 +1,160 @@
+//===--- CIRGenCstEmitter.h - CIR constant emission -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A helper class for emitting expressions and values as mlir::cir::ConstantOp
+// and as initializers for global variables.
+//
+// Note: this is based on LLVM's codegen in ConstantEmitter.h; reusing this
+// class interface makes it easier to move forward with bringing CIR codegen
+// to completion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGEN_CONSTANTEMITTER_H
+#define LLVM_CLANG_LIB_CODEGEN_CIRGEN_CONSTANTEMITTER_H
+
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+
+namespace cir {
+
+class ConstantEmitter {
+public:
+  CIRGenModule &CGM;
+  CIRGenFunction *const CGF;
+
+private:
+  bool Abstract = false;
+
+  /// Whether non-abstract components of the emitter have been initialized.
+  bool InitializedNonAbstract = false;
+
+  /// Whether the emitter has been finalized.
+  bool Finalized = false;
+
+  /// Whether the constant-emission failed.
+  bool Failed = false;
+
+  /// Whether we're in a constant context.
+  bool InConstantContext = false;
+
+  /// The AST address space where this (non-abstract) initializer is going.
+  /// Used for generating appropriate placeholders.
+  clang::LangAS DestAddressSpace;
+
+  llvm::SmallVector<std::pair<mlir::Attribute, mlir::cir::GlobalOp>, 4>
+      PlaceholderAddresses;
+
+public:
+  ConstantEmitter(CIRGenModule &CGM, CIRGenFunction *CGF = nullptr)
+      : CGM(CGM), CGF(CGF) {}
+
+  /// Initialize this emission in the context of the given function.
+  /// Use this if the expression might contain contextual references like
+  /// block addresses or PredefinedExprs.
+  ConstantEmitter(CIRGenFunction &CGF) : CGM(CGF.CGM), CGF(&CGF) {}
+
+  ConstantEmitter(const ConstantEmitter &other) = delete;
+  ConstantEmitter &operator=(const ConstantEmitter &other) = delete;
+
+  ~ConstantEmitter();
+
+  /// Is the current emission context abstract?
+  bool isAbstract() const { return Abstract; }
+
+  bool isInConstantContext() const { return InConstantContext; }
+  void setInConstantContext(bool var) { InConstantContext = var; }
+
+  /// Try to emit the initializer of the given declaration as an abstract
+  /// constant. If this succeeds, the emission must be finalized.
+  mlir::Attribute tryEmitForInitializer(const VarDecl &D);
+  mlir::Attribute tryEmitForInitializer(const Expr *E, LangAS destAddrSpace,
+                                        QualType destType);
+
+  void finalize(mlir::cir::GlobalOp global);
+
+  // All of the "abstract" emission methods below permit the emission to
+  // be immediately discarded without finalizing anything. Therefore, they
+  // must also promise not to do anything that will, in the future, require
+  // finalization:
+  //
+  //   - using the CGF (if present) for anything other than establishing
+  //     semantic context; for example, an expression with ignored
+  //     side-effects must not be emitted as an abstract expression
+  //
+  //   - doing anything that would not be safe to duplicate within an
+  //     initializer or to propagate to another context; for example,
+  //     side effects, or emitting an initialization that requires a
+  //     reference to its current location.
+  mlir::Attribute emitForMemory(mlir::Attribute C, QualType T) {
+    return emitForMemory(CGM, C, T);
+  }
+
+  // static llvm::Constant *emitNullForMemory(CodeGenModule &CGM, QualType T);
+  static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::Attribute C,
+                                       clang::QualType T);
+
+  /// Try to emit the initializer of the given declaration as an abstract
+  /// constant.
+  mlir::Attribute tryEmitAbstractForInitializer(const VarDecl &D);
+
+  /// Emit the result of the given expression as an abstract constant,
+  /// asserting that it succeeded. This is only safe to do when the
+  /// expression is known to be a constant expression with either a fairly
+  /// simple type or a known simple form.
+  mlir::Attribute emitAbstract(const Expr *E, QualType T);
+  mlir::Attribute emitAbstract(SourceLocation loc, const APValue &value,
+                               QualType T);
+
+  mlir::Attribute tryEmitConstantExpr(const ConstantExpr *CE);
+
+  // These are private helper routines of the constant emitter that
+  // can't actually be private because things are split out into helper
+  // functions and classes.
+
+  mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &D);
+  mlir::TypedAttr tryEmitPrivate(const Expr *E, QualType T);
+  mlir::TypedAttr tryEmitPrivateForMemory(const Expr *E, QualType T);
+
+  mlir::Attribute tryEmitPrivate(const APValue &value, QualType T);
+  mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T);
+
+  mlir::Attribute tryEmitAbstract(const Expr *E, QualType destType);
+  mlir::Attribute tryEmitAbstractForMemory(const Expr *E, QualType destType);
+
+  mlir::Attribute tryEmitAbstract(const APValue &value, QualType destType);
+  mlir::Attribute tryEmitAbstractForMemory(const APValue &value,
+                                           QualType destType);
+
+private:
+  void initializeNonAbstract(clang::LangAS destAS) {
+    assert(!InitializedNonAbstract);
+    InitializedNonAbstract = true;
+    DestAddressSpace = destAS;
+  }
+  mlir::Attribute markIfFailed(mlir::Attribute init) {
+    if (!init)
+      Failed = true;
+    return init;
+  }
+
+  struct AbstractState {
+    bool OldValue;
+    size_t OldPlaceholdersSize;
+  };
+  AbstractState pushAbstract() {
+    AbstractState saved = {Abstract, PlaceholderAddresses.size()};
+    Abstract = true;
+    return saved;
+  }
+  mlir::Attribute validateAndPopAbstract(mlir::Attribute C, AbstractState save);
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
new file mode 100644
index 000000000000..7200d8949f4c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -0,0 +1,1216 @@
+//===--- CIRGenDecl.cpp - Emit CIR Code for declarations ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as CIR code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRDataLayout.h"
+#include "CIRGenBuilder.h"
+#include "CIRGenCstEmitter.h"
+#include "CIRGenFunction.h"
+#include "CIRGenOpenMPRuntime.h"
+#include "EHScopeStack.h"
+#include "UnimplementedFeatureGuarding.h"
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributeInterfaces.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/SymbolTable.h"
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include
+
+using namespace cir;
+using namespace clang;
+
+CIRGenFunction::AutoVarEmission
+CIRGenFunction::buildAutoVarAlloca(const VarDecl &D,
+                                   mlir::OpBuilder::InsertPoint ip) {
+  QualType Ty = D.getType();
+  // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private &&
+  //        getLangOpts().OpenCL))
+  assert(!UnimplementedFeature::openCL());
+  assert(Ty.getAddressSpace() == LangAS::Default);
+  assert(!D.hasAttr() && "not implemented");
+
+  auto loc = getLoc(D.getSourceRange());
+  bool NRVO =
+      getContext().getLangOpts().ElideConstructors && D.isNRVOVariable();
+  AutoVarEmission emission(D);
+  bool isEscapingByRef = D.isEscapingByref();
+  emission.IsEscapingByRef = isEscapingByRef;
+
+  CharUnits alignment = getContext().getDeclAlign(&D);
+
+  // If the type is variably-modified, emit all the VLA sizes for it.
+  if (Ty->isVariablyModifiedType())
+    buildVariablyModifiedType(Ty);
+
+  assert(!UnimplementedFeature::generateDebugInfo());
+  assert(!UnimplementedFeature::cxxABI());
+
+  Address address = Address::invalid();
+  Address allocaAddr = Address::invalid();
+  Address openMPLocalAddr =
+      getCIRGenModule().getOpenMPRuntime().getAddressOfLocalVariable(*this, &D);
+  assert(!getLangOpts().OpenMPIsTargetDevice && "NYI");
+  if (getLangOpts().OpenMP && openMPLocalAddr.isValid()) {
+    llvm_unreachable("NYI");
+  } else if (Ty->isConstantSizeType()) {
+    // If this value is an array or struct with a statically determinable
+    // constant initializer, there are optimizations we can do.
+    //
+    // TODO: We should constant-evaluate the initializer of any variable,
+    // as long as it is initialized by a constant expression. Currently,
+    // isConstantInitializer produces wrong answers for structs with
+    // reference or bitfield members, and a few other cases, and checking
+    // for POD-ness protects us from some of these.
+    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
+        (D.isConstexpr() ||
+         ((Ty.isPODType(getContext()) ||
+           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+          D.getInit()->isConstantInitializer(getContext(), false)))) {
+
+      // If the variable's a const type, and it's neither an NRVO
+      // candidate nor a __block variable and has no mutable members,
+      // emit it as a global instead.
+      // Exception is if a variable is located in non-constant address space
+      // in OpenCL.
+      // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants
+      // TODO: perhaps we don't need this at all at CIR since this can
+      // be done as part of lowering down to LLVM.
+      if ((!getContext().getLangOpts().OpenCL ||
+           Ty.getAddressSpace() == LangAS::opencl_constant) &&
+          (!NRVO && !D.isEscapingByref() &&
+           CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true,
+                              /*ExcludeDtor=*/false))) {
+        buildStaticVarDecl(D, mlir::cir::GlobalLinkageKind::InternalLinkage);
+
+        // Signal this condition to later callbacks.
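+        // (An invalid address is exactly what wasEmittedAsGlobal() tests
+        // for, as asserted right below.)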
+        emission.Addr = Address::invalid();
+        assert(emission.wasEmittedAsGlobal());
+        return emission;
+      }
+      // Otherwise, tell the initialization code that we're in this case.
+      emission.IsConstantAggregate = true;
+    }
+
+    // A normal fixed sized variable becomes an alloca in the entry block,
+    // unless:
+    // - it's an NRVO variable.
+    // - we are compiling OpenMP and it's an OpenMP local variable.
+    if (NRVO) {
+      // The named return value optimization: allocate this variable in the
+      // return slot, so that we can elide the copy when returning this
+      // variable (C++0x [class.copy]p34).
+      address = ReturnValue;
+      allocaAddr = ReturnValue;
+
+      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+        const auto *RD = RecordTy->getDecl();
+        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
+            RD->isNonTrivialToPrimitiveDestroy()) {
+          // In LLVM: Create a flag that is used to indicate when the NRVO was
+          // applied to this variable. Set it to zero to indicate that NRVO
+          // was not applied. For now, use the same approach for CIRGen until
+          // we can be sure it's worth doing something more aggressive.
+          auto falseNRVO = builder.getFalse(loc);
+          Address NRVOFlag = CreateTempAlloca(
+              falseNRVO.getType(), CharUnits::One(), loc, "nrvo",
+              /*ArraySize=*/nullptr, &allocaAddr);
+          assert(builder.getInsertionBlock());
+          builder.createStore(loc, falseNRVO, NRVOFlag);
+
+          // Record the NRVO flag for this variable.
+          NRVOFlags[&D] = NRVOFlag.getPointer();
+          emission.NRVOFlag = NRVOFlag.getPointer();
+        }
+      }
+    } else {
+      if (isEscapingByRef)
+        llvm_unreachable("NYI");
+
+      mlir::Type allocaTy = getTypes().convertTypeForMem(Ty);
+      CharUnits allocaAlignment = alignment;
+      // Create the temp alloca and declare variable using it.
+      mlir::Value addrVal;
+      address = CreateTempAlloca(allocaTy, allocaAlignment, loc, D.getName(),
+                                 /*ArraySize=*/nullptr, &allocaAddr, ip);
+      if (failed(declare(address, &D, Ty, getLoc(D.getSourceRange()),
+                         alignment, addrVal))) {
+        CGM.emitError("Cannot declare variable");
+        return emission;
+      }
+      // TODO: what about emitting lifetime markers for MSVC catch parameters?
+      // TODO: something like @llvm.lifetime.start/end here? revisit this
+      // later.
+      assert(!UnimplementedFeature::shouldEmitLifetimeMarkers());
+    }
+  } else { // not openmp nor constant sized type
+    bool VarAllocated = false;
+    if (getLangOpts().OpenMPIsTargetDevice)
+      llvm_unreachable("NYI");
+
+    if (!VarAllocated) {
+      if (!DidCallStackSave) {
+        // Save the stack.
+        auto defaultTy = AllocaInt8PtrTy;
+        CharUnits Align = CharUnits::fromQuantity(
+            CGM.getDataLayout().getAlignment(defaultTy, false));
+        Address Stack = CreateTempAlloca(defaultTy, Align, loc, "saved_stack");
+
+        mlir::Value V = builder.createStackSave(loc, defaultTy);
+        assert(V.getType() == AllocaInt8PtrTy);
+        builder.createStore(loc, V, Stack);
+
+        DidCallStackSave = true;
+
+        // Push a cleanup block and restore the stack there.
+        // FIXME: in general circumstances, this should be an EH cleanup.
+        pushStackRestore(NormalCleanup, Stack);
+      }
+
+      auto VlaSize = getVLASize(Ty);
+      mlir::Type mTy = convertTypeForMem(VlaSize.Type);
+
+      // Allocate memory for the array.
+      address = CreateTempAlloca(mTy, alignment, loc, "vla", VlaSize.NumElts,
+                                 &allocaAddr, builder.saveInsertionPoint());
+    }
+
+    // If we have debug info enabled, properly describe the VLA dimensions for
+    // this type by registering the vla size expression for each of the
+    // dimensions.
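+    // For example, for `int vla[n][m]` both the `n` and `m` size expressions
+    // would be described to the debugger (a sketch; debug info is NYI here).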
+    assert(!UnimplementedFeature::generateDebugInfo());
+  }
+
+  emission.Addr = address;
+  setAddrOfLocalVar(&D, emission.Addr);
+  return emission;
+}
+
+/// Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+bool CIRGenFunction::isTrivialInitializer(const Expr *Init) {
+  if (!Init)
+    return true;
+
+  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+      if (Constructor->isTrivial() && Constructor->isDefaultConstructor() &&
+          !Construct->requiresZeroInitialization())
+        return true;
+
+  return false;
+}
+
+static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D,
+                                  Address addr, bool isVolatile,
+                                  CIRGenBuilderTy &builder,
+                                  mlir::TypedAttr constant, bool IsAutoInit) {
+  auto Ty = constant.getType();
+  cir::CIRDataLayout layout{CGM.getModule()};
+  uint64_t ConstantSize = layout.getTypeAllocSize(Ty);
+  if (!ConstantSize)
+    return;
+  assert(!UnimplementedFeature::addAutoInitAnnotation());
+  assert(!UnimplementedFeature::vectorConstants());
+  assert(!UnimplementedFeature::shouldUseBZeroPlusStoresToInitialize());
+  assert(!UnimplementedFeature::shouldUseMemSetToInitialize());
+  assert(!UnimplementedFeature::shouldSplitConstantStore());
+  assert(!UnimplementedFeature::shouldCreateMemCpyFromGlobal());
+  // In CIR we want to emit a store for the whole thing; later lowering
+  // prep to LLVM should unwrap this into the best policy (see asserts
+  // above).
+  //
+  // FIXME(cir): This is closer to memcpy behavior but less optimal; instead
+  // of copying from a global, we just create a cir.const out of it.
+
+  if (addr.getElementType() != Ty) {
+    auto ptr = addr.getPointer();
+    ptr = builder.createBitcast(ptr.getLoc(), ptr, builder.getPointerTo(Ty));
+    addr = addr.withPointer(ptr, addr.isKnownNonNull());
+  }
+
+  auto loc = CGM.getLoc(D.getSourceRange());
+  builder.createStore(loc, builder.getConstant(loc, constant), addr);
+}
+
+void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) {
+  assert(emission.Variable && "emission was not valid!");
+
+  // If this was emitted as a global constant, we're done.
+  if (emission.wasEmittedAsGlobal())
+    return;
+
+  const VarDecl &D = *emission.Variable;
+  QualType type = D.getType();
+
+  // If this local has an initializer, emit it now.
+  const Expr *Init = D.getInit();
+
+  // TODO: in LLVM codegen, if we are at an unreachable point, the initializer
+  // isn't emitted unless it contains a label. What do we want for CIR?
+  assert(builder.getInsertionBlock());
+
+  // Initialize the variable here if it doesn't have an initializer and it is
+  // a C struct that is non-trivial to initialize or an array containing such
+  // a struct.
+  if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() ==
+                   QualType::PDIK_Struct) {
+    assert(0 && "not implemented");
+    return;
+  }
+
+  const Address Loc = emission.Addr;
+  // Check whether this is a byref variable that's potentially
+  // captured and moved by its own initializer. If so, we'll need to
+  // emit the initializer first, then copy into the variable.
+  assert(!UnimplementedFeature::capturedByInit() && "NYI");
+
+  // Note: constexpr already initializes everything correctly.
+  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
+      (D.isConstexpr()
+           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+           : (D.getAttr<UninitializedAttr>()
+                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+                  : getContext().getLangOpts().getTrivialAutoVarInit()));
+
+  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
+    if (trivialAutoVarInit ==
+        LangOptions::TrivialAutoVarInitKind::Uninitialized)
+      return;
+
+    assert(0 && "unimplemented");
+  };
+
+  if (isTrivialInitializer(Init))
+    return initializeWhatIsTechnicallyUninitialized(Loc);
+
+  mlir::Attribute constant;
+  if (emission.IsConstantAggregate ||
+      D.mightBeUsableInConstantExpressions(getContext())) {
+    // FIXME: Differently from LLVM, we try not to emit / lower too much
+    // here for CIR since we are interested in seeing the ctor in some
+    // analysis later on. So CIR's implementation of ConstantEmitter will
+    // frequently return an empty Attribute, to signal we want to codegen
+    // some trivial ctor calls and whatnots.
+    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
+    if (constant && !constant.isa<mlir::cir::ZeroAttr>() &&
+        (trivialAutoVarInit !=
+         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  // NOTE(cir): In case we have a constant initializer, we can just emit a
+  // store. But, in CIR, we wish to retain any ctor calls, so if it is a
+  // CXX temporary object creation, we ensure the ctor call is used, deferring
+  // its removal/optimization to the CIR lowering.
+  if (!constant || isa<CXXTemporaryObjectExpr>(Init)) {
+    initializeWhatIsTechnicallyUninitialized(Loc);
+    LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl);
+    buildExprAsInit(Init, &D, lv);
+    // In case lv has uses it means we indeed initialized something
+    // out of it while trying to build the expression, mark it as such.
+    auto addr = lv.getAddress().getPointer();
+    assert(addr && "Should have an address");
+    auto allocaOp =
+        dyn_cast_or_null<mlir::cir::AllocaOp>(addr.getDefiningOp());
+    assert(allocaOp && "Address should come straight out of the alloca");
+
+    if (!allocaOp.use_empty())
+      allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext()));
+    return;
+  }
+
+  // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
+  auto typedConstant = constant.dyn_cast<mlir::TypedAttr>();
+  assert(typedConstant && "expected typed attribute");
+  if (!emission.IsConstantAggregate) {
+    // For simple scalar/complex initialization, store the value directly.
+    LValue lv = makeAddrLValue(Loc, type);
+    assert(Init && "expected initializer");
+    auto initLoc = getLoc(Init->getSourceRange());
+    lv.setNonGC(true);
+    return buildStoreThroughLValue(
+        RValue::get(builder.getConstant(initLoc, typedConstant)), lv);
+  }
+
+  emitStoresForConstant(CGM, D, Loc, type.isVolatileQualified(), builder,
+                        typedConstant, /*IsAutoInit=*/false);
+}
+
+void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) {
+  assert(emission.Variable && "emission was not valid!");
+
+  // If this was emitted as a global constant, we're done.
+  if (emission.wasEmittedAsGlobal())
+    return;
+
+  // TODO: in LLVM codegen, if we are at an unreachable point, codegen
+  // is ignored. What do we want for CIR?
+  assert(builder.getInsertionBlock());
+  const VarDecl &D = *emission.Variable;
+
+  // Check the type for a cleanup.
+  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
+    buildAutoVarTypeCleanup(emission, dtorKind);
+
+  // In GC mode, honor objc_precise_lifetime.
+  if (getContext().getLangOpts().getGC() != LangOptions::NonGC &&
+      D.hasAttr<ObjCPreciseLifetimeAttr>())
+    assert(0 && "not implemented");
+
+  // Handle the cleanup attribute.
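+  // e.g. the GCC extension:
+  //   void dtor(int *);
+  //   int x __attribute__((cleanup(dtor)));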
+  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>())
+    assert(0 && "not implemented");
+
+  // TODO: handle block variable
+}
+
+/// Emit code and set up symbol table for a variable declaration with auto,
+/// register, or no storage class specifier. These turn into simple stack
+/// objects, or globals depending on target.
+void CIRGenFunction::buildAutoVarDecl(const VarDecl &D) {
+  AutoVarEmission emission = buildAutoVarAlloca(D);
+  buildAutoVarInit(emission);
+  buildAutoVarCleanups(emission);
+}
+
+void CIRGenFunction::buildVarDecl(const VarDecl &D) {
+  if (D.hasExternalStorage()) {
+    // Don't emit it now, allow it to be emitted lazily on its first use.
+    return;
+  }
+
+  // Some function-scope variable does not have static storage but still
+  // needs to be emitted like a static variable, e.g. a function-scope
+  // variable in constant address space in OpenCL.
+  if (D.getStorageDuration() != SD_Automatic) {
+    // Static sampler variables translated to function calls.
+    if (D.getType()->isSamplerT())
+      return;
+
+    auto Linkage = CGM.getCIRLinkageVarDefinition(&D, /*IsConstant=*/false);
+
+    // FIXME: We need to force the emission/use of a guard variable for
+    // some variables even if we can constant-evaluate them because
+    // we can't guarantee every translation unit will constant-evaluate them.
+
+    return buildStaticVarDecl(D, Linkage);
+  }
+
+  if (D.getType().getAddressSpace() == LangAS::opencl_local)
+    llvm_unreachable("OpenCL and address space are NYI");
+
+  assert(D.hasLocalStorage());
+
+  CIRGenFunction::VarDeclContext varDeclCtx{*this, &D};
+  return buildAutoVarDecl(D);
+}
+
+static std::string getStaticDeclName(CIRGenModule &CGM, const VarDecl &D) {
+  if (CGM.getLangOpts().CPlusPlus)
+    return CGM.getMangledName(&D).str();
+
+  // If this isn't C++, we don't need a mangled name, just a pretty one.
+  assert(!D.isExternallyVisible() && "name shouldn't matter");
+  std::string ContextName;
+  const DeclContext *DC = D.getDeclContext();
+  if (auto *CD = dyn_cast<CapturedDecl>(DC))
+    DC = cast<DeclContext>(CD->getNonClosureContext());
+  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+    ContextName = std::string(CGM.getMangledName(FD));
+  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
+    llvm_unreachable("block decl context for static var is NYI");
+  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
+    llvm_unreachable("ObjC decl context for static var is NYI");
+  else
+    llvm_unreachable("Unknown context for static var decl");
+
+  ContextName += "." + D.getNameAsString();
+  return ContextName;
+}
+
+// TODO(cir): LLVM uses a Constant base class. Maybe CIR could leverage an
+// interface for all constants?
+mlir::cir::GlobalOp
+CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D,
+                                       mlir::cir::GlobalLinkageKind Linkage) {
+  // In general, we don't always emit static var decls once before we reference
+  // them. It is possible to reference them before emitting the function that
+  // contains them, and it is possible to emit the containing function multiple
+  // times.
+  if (mlir::cir::GlobalOp ExistingGV = StaticLocalDeclMap[&D])
+    return ExistingGV;
+
+  QualType Ty = D.getType();
+  assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+  // Use the label if the variable is renamed with the asm-label extension.
+  std::string Name;
+  if (D.hasAttr<AsmLabelAttr>())
+    llvm_unreachable("asm label is NYI");
+  else
+    Name = getStaticDeclName(*this, D);
+
+  mlir::Type LTy = getTypes().convertTypeForMem(Ty);
+  assert(!UnimplementedFeature::addressSpace());
+
+  // OpenCL variables in local address space and CUDA shared
+  // variables cannot have an initializer.
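+  // e.g. `__shared__ int smem[256];` in CUDA device code, or a `__local`
+  // buffer in an OpenCL kernel.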
+  mlir::Attribute Init = nullptr;
+  if (Ty.getAddressSpace() == LangAS::opencl_local ||
+      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
+    llvm_unreachable("OpenCL & CUDA are NYI");
+  else
+    Init = builder.getZeroInitAttr(getTypes().ConvertType(Ty));
+
+  mlir::cir::GlobalOp GV = builder.createVersionedGlobal(
+      getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage);
+  // TODO(cir): infer visibility from linkage in global op builder.
+  GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage));
+  GV.setInitialValueAttr(Init);
+  GV.setAlignment(getASTContext().getDeclAlign(&D).getAsAlign().value());
+
+  if (supportsCOMDAT() && GV.isWeakForLinker())
+    llvm_unreachable("COMDAT globals are NYI");
+
+  if (D.getTLSKind())
+    llvm_unreachable("TLS mode is NYI");
+
+  setGVProperties(GV, &D);
+
+  // Make sure the result is of the correct type.
+  assert(!UnimplementedFeature::addressSpace());
+
+  // Ensure that the static local gets initialized by making sure the parent
+  // function gets emitted eventually.
+  const Decl *DC = cast<Decl>(D.getDeclContext());
+
+  // We can't name blocks or captured statements directly, so try to emit their
+  // parents.
+  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
+    DC = DC->getNonClosureContext();
+    // FIXME: Ensure that global blocks get emitted.
+    if (!DC)
+      llvm_unreachable("address space is NYI");
+  }
+
+  GlobalDecl GD;
+  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+    llvm_unreachable("C++ constructors static var context is NYI");
+  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+    llvm_unreachable("C++ destructors static var context is NYI");
+  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+    GD = GlobalDecl(FD);
+  else {
+    // Don't do anything for Obj-C method decls or global closures. We should
+    // never defer them.
+    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
+  }
+  if (GD.getDecl() && UnimplementedFeature::openMP()) {
+    // Disable emission of the parent function for the OpenMP device codegen.
+    llvm_unreachable("OpenMP is NYI");
+  }
+
+  return GV;
+}
+
+/// Add the initializer for 'D' to the global variable that has already been
+/// created for it. If the initializer has a different type than GV does, this
+/// may free GV and return a different one. Otherwise it just returns GV.
+mlir::cir::GlobalOp
+CIRGenFunction::addInitializerToStaticVarDecl(const VarDecl &D,
+                                              mlir::cir::GlobalOp GV) {
+  ConstantEmitter emitter(*this);
+  mlir::TypedAttr Init =
+      emitter.tryEmitForInitializer(D).dyn_cast<mlir::TypedAttr>();
+  assert(Init && "Expected typed attribute");
+
+  // If constant emission failed, then this should be a C++ static
+  // initializer.
+  if (!Init) {
+    if (!getLangOpts().CPlusPlus)
+      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+    else if (D.hasFlexibleArrayInit(getContext()))
+      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
+    else {
+      // Since we have a static initializer, this global variable can't
+      // be constant.
+      GV.setConstant(false);
+      llvm_unreachable("C++ guarded init is NYI");
+    }
+    return GV;
+  }
+
+#ifndef NDEBUG
+  CharUnits VarSize = CGM.getASTContext().getTypeSizeInChars(D.getType()) +
+                      D.getFlexibleArrayInitChars(getContext());
+  CharUnits CstSize = CharUnits::fromQuantity(
+      CGM.getDataLayout().getTypeAllocSize(Init.getType()));
+  assert(VarSize == CstSize && "Emitted constant has unexpected size");
+#endif
+
+  // The initializer may differ in type from the global. Rewrite
+  // the global to match the initializer. (We have to do this
+  // because some types, like unions, can't be completely represented
+  // in the LLVM type system.)
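+  // A classic case is a static local union initialized through one member,
+  // e.g. `static union { int i; float f; } u = {1};`.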
+  if (GV.getSymType() != Init.getType()) {
+    llvm_unreachable("static decl initializer type mismatch is NYI");
+  }
+
+  bool NeedsDtor =
+      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
+
+  GV.setConstant(
+      CGM.isTypeConstant(D.getType(), /*ExcludeCtor=*/true, !NeedsDtor));
+  GV.setInitialValueAttr(Init);
+
+  emitter.finalize(GV);
+
+  if (NeedsDtor) {
+    // We have a constant initializer, but a nontrivial destructor. We still
+    // need to perform a guarded "initialization" in order to register the
+    // destructor.
+    llvm_unreachable("C++ guarded init is NYI");
+  }
+
+  return GV;
+}
+
+void CIRGenFunction::buildStaticVarDecl(const VarDecl &D,
+                                        mlir::cir::GlobalLinkageKind Linkage) {
+  // Check to see if we already have a global variable for this
+  // declaration. This can happen when double-emitting function
+  // bodies, e.g. with complete and base constructors.
+  auto globalOp = CGM.getOrCreateStaticVarDecl(D, Linkage);
+  // TODO(cir): we should have a way to represent global ops as values without
+  // having to emit a get global op. Sometimes these emissions are not used.
+  auto addr = getBuilder().createGetGlobal(globalOp);
+  CharUnits alignment = getContext().getDeclAlign(&D);
+
+  // Store into LocalDeclMap before generating initializer to handle
+  // circular references.
+  mlir::Type elemTy = getTypes().convertTypeForMem(D.getType());
+  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
+
+  // We can't have a VLA here, but we can have a pointer to a VLA,
+  // even though that doesn't really make any sense.
+  // Make sure to evaluate VLA bounds now so that we have them for later.
+  if (D.getType()->isVariablyModifiedType())
+    llvm_unreachable("VLAs are NYI");
+
+  // Save the type in case adding the initializer forces a type change.
+  mlir::Type expectedType = addr.getType();
+
+  auto var = globalOp;
+
+  // CUDA's local and local static __shared__ variables should not
+  // have any non-empty initializers. This is ensured by Sema.
+  // Whatever initializer such variable may have when it gets here is
+  // a no-op and should not be emitted.
+  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
+                         D.hasAttr<CUDASharedAttr>();
+  // If this value has an initializer, emit it.
+  if (D.getInit() && !isCudaSharedVar)
+    var = addInitializerToStaticVarDecl(D, var);
+
+  var.setAlignment(alignment.getAsAlign().value());
+
+  if (D.hasAttr<AnnotateAttr>())
+    llvm_unreachable("Global annotations are NYI");
+
+  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
+    llvm_unreachable("CIR global BSS section attribute is NYI");
+  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
+    llvm_unreachable("CIR global Data section attribute is NYI");
+  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
+    llvm_unreachable("CIR global Rodata section attribute is NYI");
+  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
+    llvm_unreachable("CIR global Relro section attribute is NYI");
+
+  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+    llvm_unreachable("CIR global object file section attribute is NYI");
+
+  if (D.hasAttr<UsedAttr>())
+    llvm_unreachable("llvm.used metadata is NYI");
+  else if (D.hasAttr<RetainAttr>())
+    llvm_unreachable("llvm.compiler.used metadata is NYI");
+
+  // We may have to cast the constant because of the initializer
+  // mismatch above.
+  //
+  // FIXME: It is really dangerous to store this in the map; if anyone
+  // RAUW's the GV uses of this constant will be invalid.
+  // TODO(cir): it's supposed to be possible that the initializer does not
+  // match the static var type. When this happens, there should be a cast
+  // here.
+  assert(getBuilder().getPointerTo(var.getSymType()) == expectedType &&
+         "static var init type mismatch is NYI");
+  CGM.setStaticLocalDeclAddress(&D, var);
+
+  assert(!UnimplementedFeature::reportGlobalToASan());
+
+  // Emit global variable debug descriptor for static vars.
+  auto *DI = getDebugInfo();
+  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
+    llvm_unreachable("Debug info is NYI");
+  }
+}
+
+void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS,
+                                           SourceLocation Loc) {
+  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
+    return;
+
+  llvm_unreachable("NYI");
+}
+
+void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc,
+                                     LValue lvalue, bool capturedByInit) {
+  // TODO: this is where a lot of ObjC lifetime stuff would be done.
+  SourceLocRAIIObject Loc{*this, loc};
+  mlir::Value value = buildScalarExpr(init);
+  buildStoreThroughLValue(RValue::get(value), lvalue);
+  return;
+}
+
+void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D,
+                                     LValue lvalue, bool capturedByInit) {
+  SourceLocRAIIObject Loc{*this, getLoc(init->getSourceRange())};
+  if (capturedByInit)
+    llvm_unreachable("NYI");
+
+  QualType type = D->getType();
+
+  if (type->isReferenceType()) {
+    RValue rvalue = buildReferenceBindingToExpr(init);
+    if (capturedByInit)
+      llvm_unreachable("NYI");
+    buildStoreThroughLValue(rvalue, lvalue);
+    return;
+  }
+  switch (CIRGenFunction::getEvaluationKind(type)) {
+  case TEK_Scalar:
+    buildScalarInit(init, getLoc(D->getSourceRange()), lvalue);
+    return;
+  case TEK_Complex: {
+    assert(0 && "not implemented");
+    return;
+  }
+  case TEK_Aggregate:
+    assert(!type->isAtomicType() && "NYI");
+    AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
+    if (isa<VarDecl>(D))
+      Overlap = AggValueSlot::DoesNotOverlap;
+    else if (auto *FD = dyn_cast<FieldDecl>(D))
+      assert(false && "Field decl NYI");
+    else
+      assert(false && "Only VarDecl implemented so far");
+    // TODO: how can we delay here if D is captured by its initializer?
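+    // Illustrative example of what reaches this aggregate path (hypothetical
+    // source): `struct S { int a, b; }; S s = {1, 2};` - the braced init is
+    // emitted by buildAggExpr directly into the slot for `s`.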
+ buildAggExpr(init, + AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, Overlap)); + return; + } + llvm_unreachable("bad evaluation kind"); +} + +void CIRGenFunction::buildDecl(const Decl &D) { + switch (D.getKind()) { + case Decl::ImplicitConceptSpecialization: + case Decl::HLSLBuffer: + case Decl::TopLevelStmt: + llvm_unreachable("NYI"); + case Decl::BuiltinTemplate: + case Decl::TranslationUnit: + case Decl::ExternCContext: + case Decl::Namespace: + case Decl::UnresolvedUsingTypename: + case Decl::ClassTemplateSpecialization: + case Decl::ClassTemplatePartialSpecialization: + case Decl::VarTemplateSpecialization: + case Decl::VarTemplatePartialSpecialization: + case Decl::TemplateTypeParm: + case Decl::UnresolvedUsingValue: + case Decl::NonTypeTemplateParm: + case Decl::CXXDeductionGuide: + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + case Decl::CXXConversion: + case Decl::Field: + case Decl::MSProperty: + case Decl::IndirectField: + case Decl::ObjCIvar: + case Decl::ObjCAtDefsField: + case Decl::ParmVar: + case Decl::ImplicitParam: + case Decl::ClassTemplate: + case Decl::VarTemplate: + case Decl::FunctionTemplate: + case Decl::TypeAliasTemplate: + case Decl::TemplateTemplateParm: + case Decl::ObjCMethod: + case Decl::ObjCCategory: + case Decl::ObjCProtocol: + case Decl::ObjCInterface: + case Decl::ObjCCategoryImpl: + case Decl::ObjCImplementation: + case Decl::ObjCProperty: + case Decl::ObjCCompatibleAlias: + case Decl::PragmaComment: + case Decl::PragmaDetectMismatch: + case Decl::AccessSpec: + case Decl::LinkageSpec: + case Decl::Export: + case Decl::ObjCPropertyImpl: + case Decl::FileScopeAsm: + case Decl::Friend: + case Decl::FriendTemplate: + case Decl::Block: + case Decl::Captured: + case Decl::UsingShadow: + case Decl::ConstructorUsingShadow: + case Decl::ObjCTypeParam: + case Decl::Binding: + case Decl::UnresolvedUsingIfExists: + llvm_unreachable("Declaration should not be in declstmts!"); + case Decl::Record: // struct/union/class X; + case Decl::CXXRecord: // struct/union/class X; [C++] + if (auto *DI = getDebugInfo()) + llvm_unreachable("NYI"); + return; + case Decl::Enum: // enum X; + if (auto *DI = getDebugInfo()) + llvm_unreachable("NYI"); + return; + case Decl::Function: // void X(); + case Decl::EnumConstant: // enum ? { X = ? } + case Decl::StaticAssert: // static_assert(X, ""); [C++0x] + case Decl::Label: // __label__ x; + case Decl::Import: + case Decl::MSGuid: // __declspec(uuid("...")) + case Decl::TemplateParamObject: + case Decl::OMPThreadPrivate: + case Decl::OMPAllocate: + case Decl::OMPCapturedExpr: + case Decl::OMPRequires: + case Decl::Empty: + case Decl::Concept: + case Decl::LifetimeExtendedTemporary: + case Decl::RequiresExprBody: + case Decl::UnnamedGlobalConstant: + // None of these decls require codegen support. 
+    return;
+
+  case Decl::NamespaceAlias:
+  case Decl::Using:          // using X; [C++]
+  case Decl::UsingEnum:      // using enum X; [C++]
+  case Decl::UsingDirective: // using namespace X; [C++]
+    assert(!UnimplementedFeature::generateDebugInfo());
+    return;
+  case Decl::UsingPack:
+    assert(0 && "Not implemented");
+    return;
+  case Decl::Var:
+  case Decl::Decomposition: {
+    const VarDecl &VD = cast<VarDecl>(D);
+    assert(VD.isLocalVarDecl() &&
+           "Should not see file-scope variables inside a function!");
+    buildVarDecl(VD);
+    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
+      assert(0 && "Not implemented");
+
+    // FIXME: add this
+    // if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
+    //   for (auto *B : DD->bindings())
+    //     if (auto *HD = B->getHoldingVar())
+    //       EmitVarDecl(*HD);
+    return;
+  }
+
+  case Decl::OMPDeclareReduction:
+  case Decl::OMPDeclareMapper:
+    assert(0 && "Not implemented");
+
+  case Decl::Typedef:     // typedef int X;
+  case Decl::TypeAlias: { // using X = int; [C++0x]
+    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
+    if (auto *DI = getDebugInfo())
+      assert(!UnimplementedFeature::generateDebugInfo());
+    if (Ty->isVariablyModifiedType())
+      buildVariablyModifiedType(Ty);
+    return;
+  }
+  }
+}
+
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+  DestroyObject(Address addr, QualType type,
+                CIRGenFunction::Destroyer *destroyer,
+                bool useEHCleanupForArray)
+      : addr(addr), type(type), destroyer(destroyer),
+        useEHCleanupForArray(useEHCleanupForArray) {}
+
+  Address addr;
+  QualType type;
+  CIRGenFunction::Destroyer *destroyer;
+  bool useEHCleanupForArray;
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    // Don't use an EH cleanup recursively from an EH cleanup.
+    [[maybe_unused]] bool useEHCleanupForArray =
+        flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+  }
+};
+
+template <class Derived> struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+  DestroyNRVOVariable(Address addr, QualType type, mlir::Value NRVOFlag)
+      : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
+
+  mlir::Value NRVOFlag;
+  Address Loc;
+  QualType Ty;
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("NYI");
+  }
+
+  virtual ~DestroyNRVOVariable() = default;
+};
+
+struct DestroyNRVOVariableCXX final
+    : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
+  DestroyNRVOVariableCXX(Address addr, QualType type,
+                         const CXXDestructorDecl *Dtor, mlir::Value NRVOFlag)
+      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
+        Dtor(Dtor) {}
+
+  const CXXDestructorDecl *Dtor;
+
+  void emitDestructorCall(CIRGenFunction &CGF) { llvm_unreachable("NYI"); }
+};
+
+struct DestroyNRVOVariableC final : DestroyNRVOVariable<DestroyNRVOVariableC> {
+  DestroyNRVOVariableC(Address addr, mlir::Value NRVOFlag, QualType Ty)
+      : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
+
+  void emitDestructorCall(CIRGenFunction &CGF) { llvm_unreachable("NYI"); }
+};
+
+struct CallStackRestore final : EHScopeStack::Cleanup {
+  Address Stack;
+  CallStackRestore(Address Stack) : Stack(Stack) {}
+  bool isRedundantBeforeReturn() override { return true; }
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    auto loc = Stack.getPointer().getLoc();
+    mlir::Value V = CGF.getBuilder().createLoad(loc, Stack);
+    CGF.getBuilder().createStackRestore(loc, V);
+  }
+};
+
+struct ExtendGCLifetime final : EHScopeStack::Cleanup {
+  const VarDecl &Var;
+  ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("NYI");
+  }
+};
+
+struct CallCleanupFunction final : EHScopeStack::Cleanup {
+  // FIXME: mlir::Value used as placeholder, check options before implementing
+  // Emit below.
+  mlir::Value CleanupFn;
+  const CIRGenFunctionInfo &FnInfo;
+  const VarDecl &Var;
+
+  CallCleanupFunction(mlir::Value CleanupFn, const CIRGenFunctionInfo *Info,
+                      const VarDecl *Var)
+      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("NYI");
+  }
+};
+} // end anonymous namespace
+
+/// Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+                                 Address addr, QualType type) {
+  assert(dtorKind && "cannot push destructor for trivial type");
+
+  CleanupKind cleanupKind = getCleanupKind(dtorKind);
+  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+              cleanupKind & EHCleanup);
+}
+
+void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
+                                 QualType type, Destroyer *destroyer,
+                                 bool useEHCleanupForArray) {
+  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+                                     useEHCleanupForArray);
+}
+
+namespace {
+/// A cleanup which performs a partial array destroy where the end pointer is
+/// regularly determined and does not need to be loaded from a local.
+class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
+  mlir::Value ArrayBegin;
+  mlir::Value ArrayEnd;
+  QualType ElementType;
+  [[maybe_unused]] CIRGenFunction::Destroyer *Destroyer;
+  CharUnits ElementAlign;
+
+public:
+  RegularPartialArrayDestroy(mlir::Value arrayBegin, mlir::Value arrayEnd,
+                             QualType elementType, CharUnits elementAlign,
+                             CIRGenFunction::Destroyer *destroyer)
+      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd), ElementType(elementType),
+        Destroyer(destroyer), ElementAlign(elementAlign) {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("NYI");
+  }
+};
+
+/// A cleanup which performs a partial array destroy where the end pointer is
+/// irregularly determined and must be loaded from a local.
+class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
+  mlir::Value ArrayBegin;
+  Address ArrayEndPointer;
+  QualType ElementType;
+  [[maybe_unused]] CIRGenFunction::Destroyer *Destroyer;
+  CharUnits ElementAlign;
+
+public:
+  IrregularPartialArrayDestroy(mlir::Value arrayBegin, Address arrayEndPointer,
+                               QualType elementType, CharUnits elementAlign,
+                               CIRGenFunction::Destroyer *destroyer)
+      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+        ElementType(elementType), Destroyer(destroyer),
+        ElementAlign(elementAlign) {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("NYI");
+  }
+};
+} // end anonymous namespace
+
+/// Push an EH cleanup to destroy already-constructed elements of the given
+/// array. The cleanup may be popped with DeactivateCleanupBlock or
+/// PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+void CIRGenFunction::pushIrregularPartialArrayCleanup(mlir::Value arrayBegin,
+                                                      Address arrayEndPointer,
+                                                      QualType elementType,
+                                                      CharUnits elementAlign,
+                                                      Destroyer *destroyer) {
+  pushFullExprCleanup<IrregularPartialArrayDestroy>(
+      EHCleanup, arrayBegin, arrayEndPointer, elementType, elementAlign,
+      destroyer);
+}
+
+/// Push an EH cleanup to destroy already-constructed elements of the given
+/// array. The cleanup may be popped with DeactivateCleanupBlock or
+/// PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+void CIRGenFunction::pushRegularPartialArrayCleanup(mlir::Value arrayBegin,
+                                                    mlir::Value arrayEnd,
+                                                    QualType elementType,
+                                                    CharUnits elementAlign,
+                                                    Destroyer *destroyer) {
+  pushFullExprCleanup<RegularPartialArrayDestroy>(
+      EHCleanup, arrayBegin, arrayEnd, elementType, elementAlign, destroyer);
+}
+
+/// Destroys all the elements of the given array, beginning from last to first.
+/// The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+///   the remaining elements in case the destruction of a single
+///   element throws
+void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end,
+                                       QualType elementType,
+                                       CharUnits elementAlign,
+                                       Destroyer *destroyer,
+                                       bool checkZeroLength,
+                                       bool useEHCleanup) {
+  assert(!elementType->isArrayType());
+  if (checkZeroLength) {
+    llvm_unreachable("NYI");
+  }
+
+  // Differently from LLVM traditional codegen, use a higher level
+  // representation instead of lowering directly to a loop.
+  mlir::Type cirElementType = convertTypeForMem(elementType);
+  auto ptrToElmType = builder.getPointerTo(cirElementType);
+
+  // Emit the dtor call that will execute for every array element.
+  builder.create<mlir::cir::ArrayDtor>(
+      *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) {
+        auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+        Address curAddr = Address(arg, ptrToElmType, elementAlign);
+        if (useEHCleanup) {
+          pushRegularPartialArrayCleanup(arg, arg, elementType, elementAlign,
+                                         destroyer);
+        }
+
+        // Perform the actual destruction there.
+        destroyer(*this, curAddr, elementType);
+
+        if (useEHCleanup)
+          PopCleanupBlock();
+
+        builder.create<mlir::cir::YieldOp>(loc);
+      });
+}
+
+/// Immediately perform the destruction of the given object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+///   objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+///   elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+///   used when destroying array elements, in case one of the
+///   destructions throws an exception
+void CIRGenFunction::emitDestroy(Address addr, QualType type,
+                                 Destroyer *destroyer,
+                                 bool useEHCleanupForArray) {
+  const ArrayType *arrayType = getContext().getAsArrayType(type);
+  if (!arrayType)
+    return destroyer(*this, addr, type);
+
+  auto length = buildArrayLength(arrayType, type, addr);
+
+  CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
+      getContext().getTypeSizeInChars(type));
+
+  // Normally we have to check whether the array is zero-length.
+  bool checkZeroLength = true;
+
+  // But if the array length is constant, we can suppress that.
+  auto constantCount = dyn_cast<mlir::cir::ConstantOp>(length.getDefiningOp());
+  if (constantCount) {
+    auto constIntAttr =
+        constantCount.getValue().dyn_cast<mlir::cir::IntAttr>();
+    // ...and if it's constant zero, we can just skip the entire thing.
+    if (constIntAttr && constIntAttr.getUInt() == 0)
+      return;
+    checkZeroLength = false;
+  } else {
+    llvm_unreachable("NYI");
+  }
+
+  auto begin = addr.getPointer();
+  mlir::Value end; // Use this for future non-constant counts.
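+  // Sketch of the higher-level form produced below (approximate CIR syntax,
+  // assuming a struct S with a non-trivial destructor and `S arr[4]`):
+  //
+  //   cir.array.dtor %arr : !cir.ptr<!ty_S> {
+  //   ^bb0(%elt: !cir.ptr<!ty_S>):
+  //     cir.call @_ZN1SD1Ev(%elt) : (!cir.ptr<!ty_S>) -> ()
+  //     cir.yield
+  //   }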
+  buildArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength,
+                    useEHCleanupForArray);
+  if (constantCount.use_empty())
+    constantCount.erase();
+}
+
+CIRGenFunction::Destroyer *
+CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
+  switch (kind) {
+  case QualType::DK_none:
+    llvm_unreachable("no destroyer for trivial dtor");
+  case QualType::DK_cxx_destructor:
+    return destroyCXXObject;
+  case QualType::DK_objc_strong_lifetime:
+  case QualType::DK_objc_weak_lifetime:
+  case QualType::DK_nontrivial_c_struct:
+    llvm_unreachable("NYI");
+  }
+  llvm_unreachable("Unknown DestructionKind");
+}
+
+void CIRGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
+  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
+}
+
+/// Enter a destroy cleanup for the given local variable.
+void CIRGenFunction::buildAutoVarTypeCleanup(
+    const CIRGenFunction::AutoVarEmission &emission,
+    QualType::DestructionKind dtorKind) {
+  assert(dtorKind != QualType::DK_none);
+
+  // Note that for __block variables, we want to destroy the
+  // original stack object, not the possibly forwarded object.
+  Address addr = emission.getObjectAddress(*this);
+
+  const VarDecl *var = emission.Variable;
+  QualType type = var->getType();
+
+  CleanupKind cleanupKind = NormalAndEHCleanup;
+  CIRGenFunction::Destroyer *destroyer = nullptr;
+
+  switch (dtorKind) {
+  case QualType::DK_none:
+    llvm_unreachable("no cleanup for trivially-destructible variable");
+
+  case QualType::DK_cxx_destructor:
+    // If there's an NRVO flag on the emission, we need a different
+    // cleanup.
+    if (emission.NRVOFlag) {
+      assert(!type->isArrayType());
+      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type,
+                                                  dtor, emission.NRVOFlag);
+      return;
+    }
+    break;
+
+  case QualType::DK_objc_strong_lifetime:
+    llvm_unreachable("NYI");
+    break;
+
+  case QualType::DK_objc_weak_lifetime:
+    break;
+
+  case QualType::DK_nontrivial_c_struct:
+    llvm_unreachable("NYI");
+  }
+
+  // If we haven't chosen a more specific destroyer, use the default.
+  if (!destroyer)
+    destroyer = getDestroyer(dtorKind);
+
+  // Use an EH cleanup in array destructors iff the destructor itself
+  // is being pushed as an EH cleanup.
+  bool useEHCleanup = (cleanupKind & EHCleanup);
+  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+                                     useEHCleanup);
+}
+
+/// Push the standard destructor for the given type as an EH-only cleanup.
+void CIRGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
+                                   Address addr, QualType type) {
+  assert(dtorKind && "cannot push destructor for trivial type");
+  assert(needsEHCleanup(dtorKind));
+
+  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp
new file mode 100644
index 000000000000..3d8c72dd7f5e
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp
@@ -0,0 +1,86 @@
+//===--- CIRGenDeclCXX.cpp - Build CIR Code for C++ declarations ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/LangOptions.h"
+
+using namespace clang;
+using namespace mlir::cir;
+using namespace cir;
+
+void CIRGenModule::buildCXXGlobalInitFunc() {
+  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+    CXXGlobalInits.pop_back();
+
+  if (CXXGlobalInits.empty()) // TODO(cir): &&
+                              // PrioritizedCXXGlobalInits.empty())
+    return;
+
+  assert(0 && "NYI");
+}
+
+void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D,
+                                          mlir::cir::GlobalOp Addr,
+                                          bool PerformInit) {
+  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
+  // __constant__ and __shared__ variables defined in namespace scope,
+  // that are of class type, cannot have a non-empty constructor. All
+  // the checks have been done in Sema by now. Whatever initializers
+  // are allowed are empty and we just need to ignore them here.
+  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
+      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+       D->hasAttr<CUDASharedAttr>()))
+    return;
+
+  assert(!getLangOpts().OpenMP && "OpenMP global var init not implemented");
+
+  // Check if we've already initialized this decl.
+  auto I = DelayedCXXInitPosition.find(D);
+  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
+    return;
+
+  if (PerformInit) {
+    QualType T = D->getType();
+
+    // TODO: handle address space
+    // The address space of a static local variable (DeclPtr) may be different
+    // from the address space of the "this" argument of the constructor. In
+    // that case, we need an addrspacecast before calling the constructor.
+    //
+    //   struct StructWithCtor {
+    //     __device__ StructWithCtor() {...}
+    //   };
+    //   __device__ void foo() {
+    //     __shared__ StructWithCtor s;
+    //     ...
+    //   }
+    //
+    // For example, in the above CUDA code, the static local variable s has a
+    // "shared" address space qualifier, but the constructor of StructWithCtor
+    // expects "this" in the "generic" address space.
+    assert(!UnimplementedFeature::addressSpace());
+
+    if (!T->isReferenceType()) {
+      bool NeedsDtor =
+          D->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor;
+      assert(!isTypeConstant(D->getType(), true, !NeedsDtor) &&
+             "invariant-typed initialization NYI");
+
+      if (PerformInit || NeedsDtor)
+        codegenGlobalInitCxxStructor(D, Addr, PerformInit, NeedsDtor);
+      return;
+    }
+  }
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
new file mode 100644
index 000000000000..1c0b686154f4
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -0,0 +1,889 @@
+//===--- CIRGenException.cpp - Emit CIR Code for C++ exceptions -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+// +//===----------------------------------------------------------------------===// + +#include "CIRDataLayout.h" +#include "CIRGenCXXABI.h" +#include "CIRGenCleanup.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" + +#include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" +#include "llvm/Support/SaveAndRestore.h" + +using namespace cir; +using namespace clang; + +const EHPersonality EHPersonality::GNU_C = {"__gcc_personality_v0", nullptr}; +const EHPersonality EHPersonality::GNU_C_SJLJ = {"__gcc_personality_sj0", + nullptr}; +const EHPersonality EHPersonality::GNU_C_SEH = {"__gcc_personality_seh0", + nullptr}; +const EHPersonality EHPersonality::NeXT_ObjC = {"__objc_personality_v0", + nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus = {"__gxx_personality_v0", + nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ = { + "__gxx_personality_sj0", nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus_SEH = { + "__gxx_personality_seh0", nullptr}; +const EHPersonality EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", + "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjC_SJLJ = { + "__gnu_objc_personality_sj0", "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjC_SEH = { + "__gnu_objc_personality_seh0", "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjCXX = { + "__gnustep_objcxx_personality_v0", nullptr}; +const EHPersonality EHPersonality::GNUstep_ObjC = { + "__gnustep_objc_personality_v0", nullptr}; +const EHPersonality EHPersonality::MSVC_except_handler = {"_except_handler3", + nullptr}; +const EHPersonality EHPersonality::MSVC_C_specific_handler = { + "__C_specific_handler", nullptr}; +const EHPersonality EHPersonality::MSVC_CxxFrameHandler3 = { + "__CxxFrameHandler3", nullptr}; +const EHPersonality EHPersonality::GNU_Wasm_CPlusPlus = { + "__gxx_wasm_personality_v0", nullptr}; +const EHPersonality EHPersonality::XL_CPlusPlus = {"__xlcxx_personality_v1", + nullptr}; + +static const EHPersonality &getCPersonality(const TargetInfo &Target, + const LangOptions &L) { + const llvm::Triple &T = Target.getTriple(); + if (T.isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + if (L.hasSjLjExceptions()) + return EHPersonality::GNU_C_SJLJ; + if (L.hasDWARFExceptions()) + return EHPersonality::GNU_C; + if (L.hasSEHExceptions()) + return EHPersonality::GNU_C_SEH; + return EHPersonality::GNU_C; +} + +static const EHPersonality &getObjCPersonality(const TargetInfo &Target, + const LangOptions &L) { + const llvm::Triple &T = Target.getTriple(); + if (T.isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + + switch (L.ObjCRuntime.getKind()) { + case ObjCRuntime::FragileMacOSX: + return getCPersonality(Target, L); + case ObjCRuntime::MacOSX: + case ObjCRuntime::iOS: + case ObjCRuntime::WatchOS: + return EHPersonality::NeXT_ObjC; + case ObjCRuntime::GNUstep: + if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7)) + return EHPersonality::GNUstep_ObjC; + [[fallthrough]]; + case ObjCRuntime::GCC: + case ObjCRuntime::ObjFW: + if (L.hasSjLjExceptions()) + return EHPersonality::GNU_ObjC_SJLJ; + if (L.hasSEHExceptions()) + return EHPersonality::GNU_ObjC_SEH; 
+    return EHPersonality::GNU_ObjC;
+  }
+  llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getCXXPersonality(const TargetInfo &Target,
+                                              const LangOptions &L) {
+  const llvm::Triple &T = Target.getTriple();
+  if (T.isWindowsMSVCEnvironment())
+    return EHPersonality::MSVC_CxxFrameHandler3;
+  if (T.isOSAIX())
+    return EHPersonality::XL_CPlusPlus;
+  if (L.hasSjLjExceptions())
+    return EHPersonality::GNU_CPlusPlus_SJLJ;
+  if (L.hasDWARFExceptions())
+    return EHPersonality::GNU_CPlusPlus;
+  if (L.hasSEHExceptions())
+    return EHPersonality::GNU_CPlusPlus_SEH;
+  if (L.hasWasmExceptions())
+    return EHPersonality::GNU_Wasm_CPlusPlus;
+  return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const TargetInfo &Target,
+                                                 const LangOptions &L) {
+  if (Target.getTriple().isWindowsMSVCEnvironment())
+    return EHPersonality::MSVC_CxxFrameHandler3;
+
+  switch (L.ObjCRuntime.getKind()) {
+  // In the fragile ABI, just use C++ exception handling and hope
+  // they're not doing crazy exception mixing.
+  case ObjCRuntime::FragileMacOSX:
+    return getCXXPersonality(Target, L);
+
+  // The ObjC personality defers to the C++ personality for non-ObjC
+  // handlers. Unlike the C++ case, we use the same personality
+  // function on targets using (backend-driven) SJLJ EH.
+  case ObjCRuntime::MacOSX:
+  case ObjCRuntime::iOS:
+  case ObjCRuntime::WatchOS:
+    return getObjCPersonality(Target, L);
+
+  case ObjCRuntime::GNUstep:
+    return EHPersonality::GNU_ObjCXX;
+
+  // The GCC runtime's personality function inherently doesn't support
+  // mixed EH. Use the ObjC personality just to avoid returning null.
+  case ObjCRuntime::GCC:
+  case ObjCRuntime::ObjFW:
+    return getObjCPersonality(Target, L);
+  }
+  llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) {
+  if (T.getArch() == llvm::Triple::x86)
+    return EHPersonality::MSVC_except_handler;
+  return EHPersonality::MSVC_C_specific_handler;
+}
+
+const EHPersonality &EHPersonality::get(CIRGenModule &CGM,
+                                        const FunctionDecl *FD) {
+  const llvm::Triple &T = CGM.getTarget().getTriple();
+  const LangOptions &L = CGM.getLangOpts();
+  const TargetInfo &Target = CGM.getTarget();
+
+  // Functions using SEH get an SEH personality.
+  if (FD && FD->usesSEHTry())
+    return getSEHPersonalityMSVC(T);
+
+  if (L.ObjC)
+    return L.CPlusPlus ? getObjCXXPersonality(Target, L)
+                       : getObjCPersonality(Target, L);
+  return L.CPlusPlus ? getCXXPersonality(Target, L)
+                     : getCPersonality(Target, L);
+}
+
+const EHPersonality &EHPersonality::get(CIRGenFunction &CGF) {
+  const auto *FD = CGF.CurCodeDecl;
+  // For outlined finallys and filters, use the SEH personality in case they
+  // contain more SEH. This mostly only affects finallys. Filters could
+  // hypothetically use gnu statement expressions to sneak in nested SEH.
+  FD = FD ? FD : CGF.CurSEHParent.getDecl();
+  return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(FD));
+}
+
+void CIRGenFunction::buildCXXThrowExpr(const CXXThrowExpr *E) {
+  if (const Expr *SubExpr = E->getSubExpr()) {
+    QualType ThrowType = SubExpr->getType();
+    if (ThrowType->isObjCObjectPointerType()) {
+      llvm_unreachable("NYI");
+    } else {
+      CGM.getCXXABI().buildThrow(*this, E);
+    }
+  } else {
+    CGM.getCXXABI().buildRethrow(*this, /*isNoReturn=*/true);
+  }
+
+  // In LLVM codegen the expression emitters expect to leave this
+  // path by starting a new basic block. We do not need that in CIR.
+}
+
+namespace {
+/// A cleanup to free the exception object if its initialization
+/// throws.
+struct FreeException final : EHScopeStack::Cleanup {
+  mlir::Value exn;
+  FreeException(mlir::Value exn) : exn(exn) {}
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    llvm_unreachable("call to cxa_free or equivalent op NYI");
+  }
+};
+} // end anonymous namespace
+
+// Emits an exception expression into the given location. This
+// differs from buildAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) {
+  // Make sure the exception object is cleaned up if there's an
+  // exception during initialization.
+  pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
+  EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
+
+  // __cxa_allocate_exception returns a void*; we need to cast this
+  // to the appropriate type for the object.
+  auto ty = convertTypeForMem(e->getType());
+  Address typedAddr = addr.withElementType(ty);
+
+  // From LLVM's codegen:
+  // FIXME: this isn't quite right! If there's a final unelided call
+  // to a copy constructor, then according to [except.terminate]p1 we
+  // must call std::terminate() if that constructor throws, because
+  // technically that copy occurs after the exception expression is
+  // evaluated but before the exception is caught. But the best way
+  // to handle that is to teach EmitAggExpr to do the final copy
+  // differently if it can't be elided.
+  buildAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+                    /*IsInit*/ true);
+
+  // Deactivate the cleanup block.
+  auto op = typedAddr.getPointer().getDefiningOp();
+  assert(op &&
+         "expected valid Operation *, block arguments are not meaningful here");
+  DeactivateCleanupBlock(cleanup, op);
+}
+
+mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) {
+  // Just like some other try/catch related logic: return the basic block
+  // pointer but only use it to denote we're tracking things, but there
+  // shouldn't be any changes to that block after work done in this function.
+  auto catchOp = currLexScope->getExceptionInfo().catchOp;
+  assert(catchOp.getNumRegions() && "expected at least one region");
+  auto &fallbackRegion = catchOp.getRegion(catchOp.getNumRegions() - 1);
+
+  auto *resumeBlock = &fallbackRegion.getBlocks().back();
+  if (!resumeBlock->empty())
+    return resumeBlock;
+
+  auto ip = getBuilder().saveInsertionPoint();
+  getBuilder().setInsertionPointToStart(resumeBlock);
+
+  const EHPersonality &Personality = EHPersonality::get(*this);
+
+  // This can always be a call because we necessarily didn't find
+  // anything on the EH stack which needs our help.
+  const char *RethrowName = Personality.CatchallRethrowFn;
+  if (RethrowName != nullptr && !isCleanup) {
+    llvm_unreachable("NYI");
+  }
+
+  getBuilder().create<mlir::cir::ResumeOp>(catchOp.getLoc());
+  getBuilder().restoreInsertionPoint(ip);
+  return resumeBlock;
+}
+
+mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) {
+  auto loc = getLoc(S.getSourceRange());
+  mlir::OpBuilder::InsertPoint scopeIP;
+
+  // Create a scope to hold try local storage for catch params.
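+  // Rough shape of what gets emitted here (illustrative, syntax approximate):
+  //
+  //   cir.scope {                    // holds catch-param storage
+  //     %exn = cir.scope { ... }     // the `try {}` body, yielding the
+  //                                  // exception info pointer
+  //     cir.catch(%exn, [ ... ])     // handler regions filled in later
+  //   }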
+  [[maybe_unused]] auto s = builder.create<mlir::cir::ScopeOp>(
+      loc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        scopeIP = getBuilder().saveInsertionPoint();
+      });
+
+  auto r = mlir::success();
+  {
+    mlir::OpBuilder::InsertionGuard guard(getBuilder());
+    getBuilder().restoreInsertionPoint(scopeIP);
+    r = buildCXXTryStmtUnderScope(S);
+    getBuilder().create<mlir::cir::YieldOp>(loc);
+  }
+  return r;
+}
+
+mlir::LogicalResult
+CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) {
+  const llvm::Triple &T = getTarget().getTriple();
+  // If we encounter a try statement in an OpenMP target region offloaded to
+  // a GPU, we treat it as a basic block.
+  const bool IsTargetDevice =
+      (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN()));
+  assert(!IsTargetDevice && "NYI");
+
+  auto hasCatchAll = [&]() {
+    if (!S.getNumHandlers())
+      return false;
+    unsigned lastHandler = S.getNumHandlers() - 1;
+    if (!S.getHandler(lastHandler)->getExceptionDecl())
+      return true;
+    return false;
+  };
+
+  auto numHandlers = S.getNumHandlers();
+  auto tryLoc = getLoc(S.getBeginLoc());
+  auto scopeLoc = getLoc(S.getSourceRange());
+
+  mlir::OpBuilder::InsertPoint beginInsertTryBody;
+  auto ehPtrTy = mlir::cir::PointerType::get(
+      getBuilder().getContext(),
+      getBuilder().getType<::mlir::cir::ExceptionInfoType>());
+  mlir::Value exceptionInfoInsideTry;
+
+  // Create the scope to represent only the C/C++ `try {}` part. However,
+  // don't populate right away. Reserve some space to store the exception
+  // info but don't emit the bulk right away, for now only make sure the
+  // scope returns the exception information.
+  auto tryScope = builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
+        // Allocate space for our exception info that might be passed down
+        // to `cir.try_call` every time a call happens.
+        yieldTy = ehPtrTy;
+        exceptionInfoInsideTry = b.create<mlir::cir::AllocaOp>(
+            loc, /*addr type*/ getBuilder().getPointerTo(yieldTy),
+            /*var type*/ yieldTy, "__exception_ptr",
+            CGM.getSize(CharUnits::One()), nullptr);
+
+        beginInsertTryBody = getBuilder().saveInsertionPoint();
+      });
+
+  // The catch {} parts consume the exception information provided by a
+  // try scope. Also don't emit the code right away for catch clauses, for
+  // now create the regions and consume the try scope result.
+  // Note that clauses are later populated in
+  // CIRGenFunction::buildLandingPad.
+  auto catchOp = builder.create<mlir::cir::CatchOp>(
+      tryLoc,
+      tryScope->getResult(
+          0), // FIXME(cir): we can do better source location here.
+      [&](mlir::OpBuilder &b, mlir::Location loc,
+          mlir::OperationState &result) {
+        mlir::OpBuilder::InsertionGuard guard(b);
+        auto numRegionsToCreate = numHandlers;
+        if (!hasCatchAll())
+          numRegionsToCreate++;
+        // Once for each handler + (catch_all or unwind).
+        for (int i = 0, e = numRegionsToCreate; i != e; ++i) {
+          auto *r = result.addRegion();
+          builder.createBlock(r);
+        }
+      });
+
+  // Finally emit the body for try/catch.
+  auto emitTryCatchBody = [&]() -> mlir::LogicalResult {
+    auto loc = catchOp.getLoc();
+    mlir::OpBuilder::InsertionGuard guard(getBuilder());
+    getBuilder().restoreInsertionPoint(beginInsertTryBody);
+    CIRGenFunction::LexicalScope lexScope{*this, loc,
+                                          getBuilder().getInsertionBlock()};
+
+    {
+      lexScope.setExceptionInfo({exceptionInfoInsideTry, catchOp});
+      // Attach the basic blocks for the catchOp regions into ScopeCatch
+      // info.
+      enterCXXTryStmt(S, catchOp);
+      // Emit the body for the `try {}` part.
+      if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+
+      auto v = getBuilder().create<mlir::cir::LoadOp>(loc, ehPtrTy,
+                                                      exceptionInfoInsideTry);
+      getBuilder().create<mlir::cir::YieldOp>(loc, v.getResult());
+    }
+
+    {
+      lexScope.setExceptionInfo({tryScope->getResult(0), catchOp});
+      // Emit catch clauses.
+      exitCXXTryStmt(S);
+    }
+
+    return mlir::success();
+  };
+
+  return emitTryCatchBody();
+}
+
+/// Emit the structure of the dispatch block for the given catch scope.
+/// It is an invariant that the dispatch block already exists.
+static void buildCatchDispatchBlock(CIRGenFunction &CGF,
+                                    EHCatchScope &catchScope) {
+  if (EHPersonality::get(CGF).isWasmPersonality())
+    llvm_unreachable("NYI");
+  if (EHPersonality::get(CGF).usesFuncletPads())
+    llvm_unreachable("NYI");
+
+  auto *dispatchBlock = catchScope.getCachedEHDispatchBlock();
+  assert(dispatchBlock);
+
+  // If there's only a single catch-all, getEHDispatchBlock returned
+  // that catch-all as the dispatch block.
+  if (catchScope.getNumHandlers() == 1 &&
+      catchScope.getHandler(0).isCatchAll()) {
+    assert(dispatchBlock == catchScope.getHandler(0).Block);
+    return;
+  }
+
+  // In traditional LLVM codegen, the right handler is selected (with
+  // calls to eh_typeid_for) and the selector value is loaded. After that,
+  // blocks get connected for later codegen. In CIR, these are all
+  // implicit behaviors of cir.catch - not a lot of work to do.
+  //
+  // Test against each of the exception types we claim to catch.
+  for (unsigned i = 0, e = catchScope.getNumHandlers();; ++i) {
+    assert(i < e && "ran off end of handlers!");
+    const EHCatchScope::Handler &handler = catchScope.getHandler(i);
+
+    auto typeValue = handler.Type.RTTI;
+    assert(handler.Type.Flags == 0 && "catch handler flags not supported");
+    assert(typeValue && "fell into catch-all case!");
+    // Check for address space mismatch: if (typeValue->getType() !=
+    // argTy)
+    assert(!UnimplementedFeature::addressSpace());
+
+    bool nextIsEnd = false;
+    // If this is the last handler, we're at the end, and the next
+    // block is the block for the enclosing EH scope. Make sure to call
+    // getEHDispatchBlock for caching it.
+    if (i + 1 == e) {
+      (void)CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
+      nextIsEnd = true;
+
+      // If the next handler is a catch-all, we're at the end, and the
+      // next block is that handler.
+    } else if (catchScope.getHandler(i + 1).isCatchAll()) {
+      // Block already created when creating CatchOp, just mark this
+      // is the end.
+      nextIsEnd = true;
+    }
+
+    // If the next handler is a catch-all, we're completely done.
+    if (nextIsEnd)
+      return;
+  }
+}
+
+void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S,
+                                     mlir::cir::CatchOp catchOp,
+                                     bool IsFnTryBlock) {
+  unsigned NumHandlers = S.getNumHandlers();
+  EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+  for (unsigned I = 0; I != NumHandlers; ++I) {
+    const CXXCatchStmt *C = S.getHandler(I);
+
+    mlir::Block *Handler = &catchOp.getRegion(I).getBlocks().front();
+    if (C->getExceptionDecl()) {
+      // FIXME: Dropping the reference type from the caught type makes it
+      // impossible to correctly implement catch-by-reference
+      // semantics for pointers. Unfortunately, this is what all
+      // existing compilers do, and it's not clear that the standard
+      // personality routine is capable of doing this right. See C++ DR 388:
See C++ DR 388 : + // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388 + Qualifiers CaughtTypeQuals; + QualType CaughtType = CGM.getASTContext().getUnqualifiedArrayType( + C->getCaughtType().getNonReferenceType(), CaughtTypeQuals); + + CatchTypeInfo TypeInfo{nullptr, 0}; + if (CaughtType->isObjCObjectPointerType()) + llvm_unreachable("NYI"); + else + TypeInfo = CGM.getCXXABI().getAddrOfCXXCatchHandlerType( + getLoc(S.getSourceRange()), CaughtType, C->getCaughtType()); + CatchScope->setHandler(I, TypeInfo, Handler); + } else { + // No exception decl indicates '...', a catch-all. + CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler); + // Under async exceptions, catch(...) need to catch HW exception too + // Mark scope with SehTryBegin as a SEH __try scope + if (getLangOpts().EHAsynch) + llvm_unreachable("NYI"); + } + } +} + +void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { + unsigned NumHandlers = S.getNumHandlers(); + EHCatchScope &CatchScope = cast(*EHStack.begin()); + assert(CatchScope.getNumHandlers() == NumHandlers); + + // If the catch was not required, bail out now. + if (!CatchScope.hasEHBranches()) { + llvm_unreachable("NYI"); + CatchScope.clearHandlerBlocks(); + EHStack.popCatch(); + return; + } + + // Emit the structure of the EH dispatch for this catch. + buildCatchDispatchBlock(*this, CatchScope); + + // Copy the handler blocks off before we pop the EH stack. Emitting + // the handlers might scribble on this memory. + SmallVector Handlers( + CatchScope.begin(), CatchScope.begin() + NumHandlers); + + EHStack.popCatch(); + + // Determine if we need an implicit rethrow for all these catch handlers; + // see the comment below. + bool doImplicitRethrow = false; + if (IsFnTryBlock) + doImplicitRethrow = isa(CurCodeDecl) || + isa(CurCodeDecl); + + // Wasm uses Windows-style EH instructions, but merges all catch clauses into + // one big catchpad. So we save the old funclet pad here before we traverse + // each catch handler. + SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); + mlir::Block *WasmCatchStartBlock = nullptr; + if (EHPersonality::get(*this).isWasmPersonality()) { + llvm_unreachable("NYI"); + } + + bool HasCatchAll = false; + for (unsigned I = NumHandlers; I != 0; --I) { + HasCatchAll |= Handlers[I - 1].isCatchAll(); + mlir::Block *CatchBlock = Handlers[I - 1].Block; + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + getBuilder().setInsertionPointToStart(CatchBlock); + + // Catch the exception if this isn't a catch-all. + const CXXCatchStmt *C = S.getHandler(I - 1); + + // Enter a cleanup scope, including the catch variable and the + // end-catch. + RunCleanupsScope CatchScope(*this); + + // Initialize the catch variable and set up the cleanups. + SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); + CGM.getCXXABI().emitBeginCatch(*this, C); + + // Emit the PGO counter increment. + assert(!UnimplementedFeature::incrementProfileCounter()); + + // Perform the body of the catch. + (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); + + // [except.handle]p11: + // The currently handled exception is rethrown if control + // reaches the end of a handler of the function-try-block of a + // constructor or destructor. + + // It is important that we only do this on fallthrough and not on + // return. Note that it's illegal to put a return in a + // constructor function-try-block's catch handler (p14), so this + // really only applies to destructors. 
+    if (doImplicitRethrow && HaveInsertPoint()) {
+      llvm_unreachable("NYI");
+    }
+
+    // Fall out through the catch cleanups.
+    CatchScope.ForceCleanup();
+  }
+
+  // Because in wasm we merge all catch clauses into one big catchpad, in case
+  // none of the types in catch handlers matches after we test against each of
+  // them, we should unwind to the next EH enclosing scope. We generate a call
+  // to rethrow function here to do that.
+  if (EHPersonality::get(*this).isWasmPersonality() && !HasCatchAll) {
+    assert(WasmCatchStartBlock);
+    // Navigate for the "rethrow" block we created in emitWasmCatchPadBlock().
+    // Wasm uses landingpad-style conditional branches to compare selectors, so
+    // we follow the false destination for each of the cond branches to reach
+    // the rethrow block.
+    llvm_unreachable("NYI");
+  }
+
+  assert(!UnimplementedFeature::incrementProfileCounter());
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+  switch (S.getKind()) {
+  case EHScope::Cleanup:
+    return !cast<EHCleanupScope>(S).isEHCleanup();
+  case EHScope::Filter:
+  case EHScope::Catch:
+  case EHScope::Terminate:
+    return false;
+  }
+
+  llvm_unreachable("Invalid EHScope Kind!");
+}
+
+mlir::Operation *CIRGenFunction::buildLandingPad() {
+  assert(EHStack.requiresLandingPad());
+  assert(!CGM.getLangOpts().IgnoreExceptions &&
+         "LandingPad should not be emitted when -fignore-exceptions is in "
+         "effect.");
+  EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
+  switch (innermostEHScope.getKind()) {
+  case EHScope::Terminate:
+    return getTerminateLandingPad();
+
+  case EHScope::Catch:
+  case EHScope::Cleanup:
+  case EHScope::Filter:
+    if (auto *lpad = innermostEHScope.getCachedLandingPad())
+      return lpad;
+  }
+
+  // If there's an existing CatchOp, it means we got a `cir.try` scope
+  // that leads to this "landing pad" creation site. Otherwise, exceptions
+  // are enabled but a throwing function is called anyway.
+  auto catchOp = currLexScope->getExceptionInfo().catchOp;
+  if (!catchOp) {
+    auto loc = *currSrcLoc;
+    auto ehPtrTy = mlir::cir::PointerType::get(
+        getBuilder().getContext(),
+        getBuilder().getType<::mlir::cir::ExceptionInfoType>());
+
+    mlir::Value exceptionAddr;
+    {
+      // Get a new alloca within the current scope.
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      exceptionAddr = buildAlloca(
+          "__exception_ptr", ehPtrTy, loc, CharUnits::One(),
+          builder.getBestAllocaInsertPoint(builder.getInsertionBlock()));
+    }
+
+    {
+      // Insert catch at the end of the block, and place the insert pointer
+      // back to where it was.
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      auto exceptionPtr =
+          builder.create<mlir::cir::LoadOp>(loc, ehPtrTy, exceptionAddr);
+      catchOp = builder.create<mlir::cir::CatchOp>(
+          loc, exceptionPtr,
+          [&](mlir::OpBuilder &b, mlir::Location loc,
+              mlir::OperationState &result) {
+            // There's no source code level catch here, create one region for
+            // the resume block.
+            mlir::OpBuilder::InsertionGuard guard(b);
+            auto *r = result.addRegion();
+            builder.createBlock(r);
+          });
+    }
+    currLexScope->setExceptionInfo({exceptionAddr, catchOp});
+  }
+
+  {
+    // Save the current CIR generation state.
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+
+    // Traditional LLVM codegen creates the lpad basic block, extract
+    // values, landing pad instructions, etc.
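+    // For contrast, illustrative LLVM IR (not produced here):
+    //
+    //   %lp = landingpad { ptr, i32 }
+    //           catch ptr @_ZTIi
+    //           cleanup
+    //
+    // In CIR the same information is folded into the clause attributes
+    // collected below and attached to cir.catch.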
+
+    // Accumulate all the handlers in scope.
+    bool hasCatchAll = false;
+    bool hasCleanup = false;
+    bool hasFilter = false;
+    SmallVector<mlir::Value, 4> filterTypes;
+    llvm::SmallPtrSet<mlir::Attribute, 4> catchTypes;
+    SmallVector<mlir::Attribute, 4> clauses;
+
+    for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E;
+         ++I) {
+
+      switch (I->getKind()) {
+      case EHScope::Cleanup:
+        // If we have a cleanup, remember that.
+        hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
+        continue;
+
+      case EHScope::Filter: {
+        llvm_unreachable("NYI");
+      }
+
+      case EHScope::Terminate:
+        // Terminate scopes are basically catch-alls.
+        // assert(!hasCatchAll);
+        // hasCatchAll = true;
+        // goto done;
+        llvm_unreachable("NYI");
+
+      case EHScope::Catch:
+        break;
+      }
+
+      EHCatchScope &catchScope = cast<EHCatchScope>(*I);
+      for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
+        EHCatchScope::Handler handler = catchScope.getHandler(hi);
+        assert(handler.Type.Flags == 0 &&
+               "landingpads do not support catch handler flags");
+
+        // If this is a catch-all, register that and abort.
+        if (!handler.Type.RTTI) {
+          assert(!hasCatchAll);
+          hasCatchAll = true;
+          goto done;
+        }
+
+        // Check whether we already have a handler for this type.
+        if (catchTypes.insert(handler.Type.RTTI).second) {
+          // If not, keep track to later add to catch op.
+          clauses.push_back(handler.Type.RTTI);
+        }
+      }
+    }
+
+  done:
+    // If we have a catch-all, add null to the landingpad.
+    assert(!(hasCatchAll && hasFilter));
+    if (hasCatchAll) {
+      // Attach the catch_all region. Can't coexist with an unwind one.
+      auto catchAll = mlir::cir::CatchAllAttr::get(builder.getContext());
+      clauses.push_back(catchAll);
+
+      // If we have an EH filter, we need to add those handlers in the
+      // right place in the landingpad, which is to say, at the end.
+    } else if (hasFilter) {
+      // Create a filter expression: a constant array indicating which filter
+      // types there are. The personality routine only lands here if the filter
+      // doesn't match.
+      llvm_unreachable("NYI");
+
+      // Otherwise, signal that we at least have cleanups.
+    } else if (hasCleanup) {
+      // FIXME(cir): figure out whether and how we need this in CIR.
+      assert(!UnimplementedFeature::setLandingPadCleanup());
+    }
+
+    assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!");
+
+    // If there's no catch_all, attach the unwind region. This needs to be the
+    // last region in the CatchOp operation.
+    if (!hasCatchAll) {
+      auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext());
+      clauses.push_back(catchUnwind);
+    }
+
+    // Add the final array of clauses into catchOp.
+    catchOp.setCatchersAttr(
+        mlir::ArrayAttr::get(builder.getContext(), clauses));
+
+    // In traditional LLVM codegen, this tells the backend how to generate the
+    // landing pad by generating a branch to the dispatch block. In CIR the
+    // same function is called to gather some state, but this block info is
+    // not useful per se.
+    (void)getEHDispatchBlock(EHStack.getInnermostEHScope());
+  }
+
+  return catchOp;
+}
+
+// Differently from LLVM traditional codegen, there are no dispatch blocks
+// to look at given cir.try_call does not jump to blocks like invoke does.
+// However, we keep this around since other parts of CIRGen use
+// getCachedEHDispatchBlock to infer state.
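+// For comparison (illustrative): LLVM encodes dispatch as a branch target,
+// e.g. `invoke void @f() to label %cont unwind label %lpad`, while a
+// `cir.try_call @f(...)` leaves unwinding implicit in the enclosing cir.catch
+// regions, so the block returned here is bookkeeping only.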
+mlir::Block *
+CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+  if (EHPersonality::get(*this).usesFuncletPads())
+    llvm_unreachable("NYI");
+
+  // The dispatch block for the end of the scope chain is a block that
+  // just resumes unwinding.
+  if (si == EHStack.stable_end())
+    return getEHResumeBlock(true);
+
+  // Otherwise, we should look at the actual scope.
+  EHScope &scope = *EHStack.find(si);
+
+  auto *dispatchBlock = scope.getCachedEHDispatchBlock();
+  if (!dispatchBlock) {
+    switch (scope.getKind()) {
+    case EHScope::Catch: {
+      // Apply a special case to a single catch-all.
+      EHCatchScope &catchScope = cast<EHCatchScope>(scope);
+      if (catchScope.getNumHandlers() == 1 &&
+          catchScope.getHandler(0).isCatchAll()) {
+        dispatchBlock = catchScope.getHandler(0).Block;
+
+        // Otherwise, make a dispatch block.
+      } else {
+        // As said in the function comment, just signal back we
+        // have something - even though the block value doesn't
+        // have any real meaning.
+        dispatchBlock = catchScope.getHandler(0).Block;
+        assert(dispatchBlock && "find another approach to signal");
+      }
+      break;
+    }
+
+    case EHScope::Cleanup:
+      assert(!UnimplementedFeature::setLandingPadCleanup());
+      dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder);
+      break;
+
+    case EHScope::Filter:
+      llvm_unreachable("NYI");
+      break;
+
+    case EHScope::Terminate:
+      llvm_unreachable("NYI");
+      break;
+    }
+    scope.setCachedEHDispatchBlock(dispatchBlock);
+  }
+  return dispatchBlock;
+}
+
+mlir::Operation *CIRGenFunction::getInvokeDestImpl() {
+  assert(EHStack.requiresLandingPad());
+  assert(!EHStack.empty());
+
+  // If exceptions are disabled/ignored and SEH is not in use, then there is no
+  // invoke destination. SEH "works" even if exceptions are off. In practice,
+  // this means that C++ destructors and other EH cleanups don't run, which is
+  // consistent with MSVC's behavior, except in the presence of -EHa.
+  const LangOptions &LO = CGM.getLangOpts();
+  if (!LO.Exceptions || LO.IgnoreExceptions) {
+    if (!LO.Borland && !LO.MicrosoftExt)
+      return nullptr;
+    if (!currentFunctionUsesSEHTry())
+      return nullptr;
+  }
+
+  // CUDA device code doesn't have exceptions.
+  if (LO.CUDA && LO.CUDAIsDevice)
+    return nullptr;
+
+  // Check the innermost scope for a cached landing pad. If this is
+  // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+  auto *LP = EHStack.begin()->getCachedLandingPad();
+  if (LP)
+    return LP;
+
+  const EHPersonality &Personality = EHPersonality::get(*this);
+
+  // FIXME(cir): add personality function
+  // if (!CurFn->hasPersonalityFn())
+  //   CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
+
+  if (Personality.usesFuncletPads()) {
+    // We don't need separate landing pads in the funclet model.
+    llvm_unreachable("NYI");
+  } else {
+    // Build the landing pad for this scope.
+    LP = buildLandingPad();
+  }
+
+  assert(LP);
+
+  // Cache the landing pad on the innermost scope. If this is a
+  // non-EH scope, cache the landing pad on the enclosing scope, too.
+  for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+    ir->setCachedLandingPad(LP);
+    if (!isNonEHScope(*ir))
+      break;
+  }
+
+  return LP;
+}
+
+mlir::Operation *CIRGenFunction::getTerminateLandingPad() {
+  llvm_unreachable("NYI");
+}
\ No newline at end of file
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
new file mode 100644
index 000000000000..dbb65d92290e
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -0,0 +1,2891 @@
+//===--- CIRGenExpr.cpp - Emit CIR Code from Expressions ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as CIR code.
+//
+//===----------------------------------------------------------------------===//
+#include "CIRGenBuilder.h"
+#include "CIRGenCXXABI.h"
+#include "CIRGenCall.h"
+#include "CIRGenCstEmitter.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "CIRGenOpenMPRuntime.h"
+#include "CIRGenValue.h"
+#include "UnimplementedFeatureGuarding.h"
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/IR/Value.h"
+
+using namespace cir;
+using namespace clang;
+using namespace mlir::cir;
+
+static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM,
+                                                  GlobalDecl GD) {
+  const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+  if (FD->hasAttr<WeakRefAttr>()) {
+    mlir::Operation *aliasee = CGM.getWeakRefReference(FD);
+    return dyn_cast<mlir::cir::FuncOp>(aliasee);
+  }
+
+  auto V = CGM.GetAddrOfFunction(GD);
+
+  return V;
+}
+
+static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base,
+                                         Address addr,
+                                         const FieldDecl *field) {
+  llvm_unreachable("NYI");
+}
+
+/// Drill down to the storage of a field without walking into reference types.
+/// The resulting address doesn't necessarily have the right type.
+static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base,
+                                       const FieldDecl *field,
+                                       llvm::StringRef fieldName,
+                                       unsigned fieldIndex) {
+  if (field->isZeroSize(CGF.getContext()))
+    llvm_unreachable("NYI");
+
+  auto loc = CGF.getLoc(field->getLocation());
+
+  auto fieldType = CGF.convertType(field->getType());
+  auto fieldPtr =
+      mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fieldType);
+  // For most cases fieldName is the same as field->getName(), but for lambdas,
+  // which do not currently carry the name, it can be passed down from the
+  // CaptureStmt.
+  auto memberAddr = CGF.getBuilder().createGetMember(
+      loc, fieldPtr, Base.getPointer(), fieldName, fieldIndex);
+
+  // Retrieve layout information, compute alignment and return the final
+  // address.
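+  // Illustrative result (approximate CIR): for `struct P { int x; int y; } p;`
+  // an access to `p.y` becomes roughly
+  //
+  //   %1 = cir.get_member %0[1] {name = "y"}
+  //          : !cir.ptr<!ty_P> -> !cir.ptr<!s32i>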
+  const RecordDecl *rec = field->getParent();
+  auto &layout = CGF.CGM.getTypes().getCIRGenRecordLayout(rec);
+  unsigned idx = layout.getCIRFieldNo(field);
+  auto offset = CharUnits::fromQuantity(layout.getCIRType().getElementOffset(
+      CGF.CGM.getDataLayout().layout, idx));
+  auto addr =
+      Address(memberAddr, Base.getAlignment().alignmentAtOffset(offset));
+  return addr;
+}
+
+static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
+  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
+  if (!RD)
+    return false;
+
+  if (RD->isDynamicClass())
+    return true;
+
+  for (const auto &Base : RD->bases())
+    if (hasAnyVptr(Base.getType(), Context))
+      return true;
+
+  for (const FieldDecl *Field : RD->fields())
+    if (hasAnyVptr(Field->getType(), Context))
+      return true;
+
+  return false;
+}
+
+static Address buildPointerWithAlignment(const Expr *E,
+                                         LValueBaseInfo *BaseInfo,
+                                         KnownNonNull_t IsKnownNonNull,
+                                         CIRGenFunction &CGF) {
+  // We allow this with ObjC object pointers because of fragile ABIs.
+  assert(E->getType()->isPointerType() ||
+         E->getType()->isObjCObjectPointerType());
+  E = E->IgnoreParens();
+
+  // Casts:
+  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
+      CGF.CGM.buildExplicitCastExprType(ECE, &CGF);
+
+    switch (CE->getCastKind()) {
+    default: {
+      llvm::errs() << CE->getCastKindName() << "\n";
+      assert(0 && "not implemented");
+    }
+    // Non-converting casts (but not C's implicit conversion from void*).
+    case CK_BitCast:
+    case CK_NoOp:
+    case CK_AddressSpaceConversion:
+      if (auto PtrTy =
+              CE->getSubExpr()->getType()->getAs<clang::PointerType>()) {
+        if (PtrTy->getPointeeType()->isVoidType())
+          break;
+        assert(!UnimplementedFeature::tbaa());
+
+        LValueBaseInfo InnerBaseInfo;
+        Address Addr = CGF.buildPointerWithAlignment(
+            CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
+        if (BaseInfo)
+          *BaseInfo = InnerBaseInfo;
+
+        if (isa<ExplicitCastExpr>(CE)) {
+          assert(!UnimplementedFeature::tbaa());
+          LValueBaseInfo TargetTypeBaseInfo;
+
+          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
+              E->getType(), &TargetTypeBaseInfo);
+
+          // If the source l-value is opaque, honor the alignment of the
+          // casted-to type.
+          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
+            if (BaseInfo)
+              BaseInfo->mergeForCast(TargetTypeBaseInfo);
+            Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
+                           IsKnownNonNull);
+          }
+        }
+
+        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
+            CE->getCastKind() == CK_BitCast) {
+          if (auto PT = E->getType()->getAs<clang::PointerType>())
+            llvm_unreachable("NYI");
+        }
+
+        auto ElemTy =
+            CGF.getTypes().convertTypeForMem(E->getType()->getPointeeType());
+        Addr = CGF.getBuilder().createElementBitCast(
+            CGF.getLoc(E->getSourceRange()), Addr, ElemTy);
+        if (CE->getCastKind() == CK_AddressSpaceConversion) {
+          assert(!UnimplementedFeature::addressSpace());
+          llvm_unreachable("NYI");
+        }
+        return Addr;
+      }
+      break;
+
+    // Nothing to do here...
+    case CK_LValueToRValue:
+    case CK_NullToPointer:
+    case CK_IntegralToPointer:
+      break;
+
+    // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
+    case CK_ArrayToPointerDecay:
+      return CGF.buildArrayToPointerDecay(CE->getSubExpr());
+
+    case CK_UncheckedDerivedToBase:
+    case CK_DerivedToBase: {
+      // TODO: Support accesses to members of base classes in TBAA. For now, we
+      // conservatively pretend that the complete object is of the base class
+      // type.
+      assert(!UnimplementedFeature::tbaa());
+      Address Addr = CGF.buildPointerWithAlignment(CE->getSubExpr(), BaseInfo);
+      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
+      return CGF.getAddressOfBaseClass(
+          Addr, Derived, CE->path_begin(), CE->path_end(),
+          CGF.shouldNullCheckClassCastValue(CE), CE->getExprLoc());
+    }
+    }
+  }
+
+  // Unary &.
+  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+    // TODO(cir): maybe we should use cir.unary for pointers here instead.
+    if (UO->getOpcode() == UO_AddrOf) {
+      LValue LV = CGF.buildLValue(UO->getSubExpr());
+      if (BaseInfo)
+        *BaseInfo = LV.getBaseInfo();
+      assert(!UnimplementedFeature::tbaa());
+      return LV.getAddress();
+    }
+  }
+
+  // std::addressof and variants.
+  if (auto *Call = dyn_cast<CallExpr>(E)) {
+    switch (Call->getBuiltinCallee()) {
+    default:
+      break;
+    case Builtin::BIaddressof:
+    case Builtin::BI__addressof:
+    case Builtin::BI__builtin_addressof: {
+      llvm_unreachable("NYI");
+    }
+    }
+  }
+
+  // TODO: conditional operators, comma.
+
+  // Otherwise, use the alignment of the type.
+  return CGF.makeNaturalAddressForPointer(
+      CGF.buildScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
+      /*ForPointeeType=*/true, BaseInfo, IsKnownNonNull);
+}
+
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+  return TargetInfo.getABI().starts_with("aapcs");
+}
+
+Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
+                                                 const FieldDecl *field,
+                                                 mlir::Type fieldType,
+                                                 unsigned index) {
+  if (index == 0)
+    return base.getAddress();
+  auto loc = getLoc(field->getLocation());
+  auto fieldPtr =
+      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
+  auto sea = getBuilder().createGetMember(loc, fieldPtr, base.getPointer(),
+                                          field->getName(), index);
+  return Address(sea, CharUnits::One());
+}
+
+static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
+                                   const CIRGenBitFieldInfo &info,
+                                   const FieldDecl *field) {
+  return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+         info.VolatileStorageSize != 0 &&
+         field->getType()
+             .withCVRQualifiers(base.getVRQualifiers())
+             .isVolatileQualified();
+}
+
+LValue CIRGenFunction::buildLValueForBitField(LValue base,
+                                              const FieldDecl *field) {
+
+  LValueBaseInfo BaseInfo = base.getBaseInfo();
+  const RecordDecl *rec = field->getParent();
+  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
+  auto &info = layout.getBitFieldInfo(field);
+  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
+  unsigned Idx = layout.getCIRFieldNo(field);
+
+  if (useVolatile ||
+      (IsInPreservedAIRegion ||
+       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
+    llvm_unreachable("NYI");
+  }
+
+  Address Addr = getAddrOfBitFieldStorage(base, field, info.StorageType, Idx);
+
+  auto loc = getLoc(field->getLocation());
+  if (Addr.getElementType() != info.StorageType)
+    Addr = builder.createElementBitCast(loc, Addr, info.StorageType);
+
+  QualType fieldType =
+      field->getType().withCVRQualifiers(base.getVRQualifiers());
+  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
+  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
+  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
+}
+
+LValue CIRGenFunction::buildLValueForField(LValue base,
+                                           const FieldDecl *field) {
+  LValueBaseInfo BaseInfo = base.getBaseInfo();
+
+  if (field->isBitField())
+    return buildLValueForBitField(base, field);
+
+  // Fields of may-alias structures are may-alias themselves.
+  // FIXME: this should get propagated down through anonymous structs and
+  // unions.
+  QualType FieldType = field->getType();
+  const RecordDecl *rec = field->getParent();
+  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
+  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
+  if (UnimplementedFeature::tbaa() || rec->hasAttr<MayAliasAttr>() ||
+      FieldType->isVectorType()) {
+    assert(!UnimplementedFeature::tbaa() && "NYI");
+  } else if (rec->isUnion()) {
+    assert(!UnimplementedFeature::tbaa() && "NYI");
+  } else {
+    // If no base type has been assigned for the base access, then try to
+    // generate one for this base lvalue.
+    assert(!UnimplementedFeature::tbaa() && "NYI");
+  }
+
+  Address addr = base.getAddress();
+  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
+    if (CGM.getCodeGenOpts().StrictVTablePointers &&
+        ClassDef->isDynamicClass()) {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  unsigned RecordCVR = base.getVRQualifiers();
+  if (rec->isUnion()) {
+    // NOTE(cir): the element to be loaded/stored needs to type-match the
+    // source/destination, so we emit a GetMemberOp here.
+    llvm::StringRef fieldName = field->getName();
+    unsigned fieldIndex = field->getFieldIndex();
+    if (CGM.LambdaFieldToName.count(field))
+      fieldName = CGM.LambdaFieldToName[field];
+    addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex);
+
+    if (CGM.getCodeGenOpts().StrictVTablePointers &&
+        hasAnyVptr(FieldType, getContext()))
+      // Because unions can easily skip invariant.barriers, we need to add
+      // a barrier every time CXXRecord field with vptr is referenced.
+      assert(!UnimplementedFeature::createInvariantGroup());
+
+    if (IsInPreservedAIRegion ||
+        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
+      assert(!UnimplementedFeature::generateDebugInfo());
+    }
+
+    if (FieldType->isReferenceType())
+      llvm_unreachable("NYI");
+  } else {
+    if (!IsInPreservedAIRegion &&
+        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
+      llvm::StringRef fieldName = field->getName();
+      auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
+      unsigned fieldIndex = layout.getCIRFieldNo(field);
+
+      if (CGM.LambdaFieldToName.count(field))
+        fieldName = CGM.LambdaFieldToName[field];
+      addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex);
+    } else
+      // Remember the original struct field index
+      addr = buildPreserveStructAccess(*this, base, addr, field);
+  }
+
+  // If this is a reference field, load the reference right now.
+  if (FieldType->isReferenceType()) {
+    assert(!UnimplementedFeature::tbaa());
+    LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo);
+    if (RecordCVR & Qualifiers::Volatile)
+      RefLVal.getQuals().addVolatile();
+    addr = buildLoadOfReference(RefLVal, getLoc(field->getSourceRange()),
+                                &FieldBaseInfo);
+
+    // Qualifiers on the struct don't apply to the referencee.
+    RecordCVR = 0;
+    FieldType = FieldType->getPointeeType();
+  }
+
+  // Make sure that the address is pointing to the right type. This is critical
+  // for both unions and structs. A union needs a bitcast, a struct element
+  // will need a bitcast if the CIR type laid out doesn't match the desired
+  // type.
+  // TODO(CIR): CodeGen requires a bitcast here for unions or for structs where
+  // the LLVM type doesn't match the desired type. No idea when the latter
+  // might occur, though.
+
+  if (field->hasAttr<AnnotateAttr>())
+    llvm_unreachable("NYI");
+
+  if (UnimplementedFeature::tbaa())
+    // Next line should take a TBAA object
+    llvm_unreachable("NYI");
+  LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo);
+  LV.getQuals().addCVRQualifiers(RecordCVR);
+
+  // __weak attribute on a field is ignored.
+  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+    llvm_unreachable("NYI");
+
+  return LV;
+}
+
+LValue CIRGenFunction::buildLValueForFieldInitialization(
+    LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) {
+  QualType FieldType = Field->getType();
+
+  if (!FieldType->isReferenceType())
+    return buildLValueForField(Base, Field);
+
+  auto &layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent());
+  unsigned FieldIndex = layout.getCIRFieldNo(Field);
+
+  Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field,
+                                      FieldName, FieldIndex);
+
+  // Make sure that the address is pointing to the right type.
+  auto memTy = getTypes().convertTypeForMem(FieldType);
+  V = builder.createElementBitCast(getLoc(Field->getSourceRange()), V, memTy);
+
+  // TODO: Generate TBAA information that describes this access as a structure
+  // member access and not just an access to an object of the field's type.
+  // This should be similar to what we do in EmitLValueForField().
+  LValueBaseInfo BaseInfo = Base.getBaseInfo();
+  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
+  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+  return makeAddrLValue(V, FieldType, FieldBaseInfo);
+}
+
+LValue
+CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) {
+  if (E->isFileScope()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (E->getType()->isVariablyModifiedType()) {
+    llvm_unreachable("NYI");
+  }
+
+  Address DeclPtr = CreateMemTemp(E->getType(), getLoc(E->getSourceRange()),
+                                  ".compoundliteral");
+  const Expr *InitExpr = E->getInitializer();
+  LValue Result = makeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
+
+  buildAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+                    /*Init*/ true);
+
+  // Block-scope compound literals are destroyed at the end of the enclosing
+  // scope in C.
+  if (!getLangOpts().CPlusPlus)
+    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+      llvm_unreachable("NYI");
+
+  return Result;
+}
+
+// Detect the unusual situation where an inline version is shadowed by a
+// non-inline version. In that case we should pick the external one
+// everywhere. That's GCC behavior too.
+static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
+  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
+    if (!PD->isInlineBuiltinDeclaration())
+      return false;
+  return true;
+}
+
+static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) {
+  const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+  if (auto builtinID = FD->getBuiltinID()) {
+    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
+    std::string NoBuiltins = "no-builtins";
+
+    auto *A = FD->getAttr<AsmLabelAttr>();
+    StringRef Ident = A ?
A->getLabel() : FD->getName();
+    std::string FDInlineName = (Ident + ".inline").str();
+
+    auto &CGF = *CGM.getCurrCIRGenFun();
+    bool IsPredefinedLibFunction =
+        CGM.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
+    bool HasAttributeNoBuiltin = false;
+    assert(!UnimplementedFeature::attributeNoBuiltin() && "NYI");
+    // bool HasAttributeNoBuiltin =
+    //     CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
+    //     CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
+
+    // When directly calling an inline builtin, call it through its mangled
+    // name to make it clear it's not the actual builtin.
+    auto Fn = cast<mlir::cir::FuncOp>(CGF.CurFn);
+    if (Fn.getName() != FDInlineName && onlyHasInlineBuiltinDeclaration(FD)) {
+      assert(0 && "NYI");
+    }
+
+    // Replaceable builtins provide their own implementation of a builtin. If
+    // we are in an inline builtin implementation, avoid trivial infinite
+    // recursion. Honor __attribute__((no_builtin("foo"))) or
+    // __attribute__((no_builtin)) on the current function unless foo is
+    // not a predefined library function which means we must generate the
+    // builtin no matter what.
+    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
+      return CIRGenCallee::forBuiltin(builtinID, FD);
+  }
+
+  auto CalleePtr = buildFunctionDeclPointer(CGM, GD);
+
+  assert(!CGM.getLangOpts().CUDA && "NYI");
+
+  return CIRGenCallee::forDirect(CalleePtr, GD);
+}
+
+// TODO: this can also be abstracted into common AST helpers
+bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) {
+
+  if (Ty->isBooleanType())
+    return true;
+
+  if (const EnumType *ET = Ty->getAs<EnumType>())
+    return ET->getDecl()->getIntegerType()->isBooleanType();
+
+  if (const AtomicType *AT = Ty->getAs<AtomicType>())
+    return hasBooleanRepresentation(AT->getValueType());
+
+  return false;
+}
+
+CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) {
+  E = E->IgnoreParens();
+
+  // Look through function-to-pointer decay.
+  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
+        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
+      return buildCallee(ICE->getSubExpr());
+    }
+    // Resolve direct calls.
+  } else if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+    const auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+    assert(FD &&
+           "DeclRef referring to FunctionDecl only thing supported so far");
+    return buildDirectCallee(CGM, FD);
+  }
+
+  assert(!dyn_cast<MemberExpr>(E) && "NYI");
+  assert(!dyn_cast<SubstNonTypeTemplateParmExpr>(E) && "NYI");
+  assert(!dyn_cast<CXXPseudoDestructorExpr>(E) && "NYI");
+
+  // Otherwise, we have an indirect reference.
+  mlir::Value calleePtr;
+  QualType functionType;
+  if (auto ptrType = E->getType()->getAs<clang::PointerType>()) {
+    calleePtr = buildScalarExpr(E);
+    functionType = ptrType->getPointeeType();
+  } else {
+    functionType = E->getType();
+    calleePtr = buildLValue(E).getPointer();
+  }
+  assert(functionType->isFunctionType());
+
+  GlobalDecl GD;
+  if (const auto *VD =
+          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
+    GD = GlobalDecl(VD);
+
+  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
+  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
+  return callee;
+
+  assert(false && "Nothing else supported yet!");
+}
+
+mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) {
+  // Bool has a different representation in memory than in registers.
+  return Value;
+}
+
+void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) {
+  // TODO: constant matrix type, no init, non temporal, TBAA
+  buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+                     lvalue.getType(), lvalue.getBaseInfo(), false, false);
+}
+
+void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr,
+                                        bool Volatile, QualType Ty,
+                                        LValueBaseInfo BaseInfo, bool isInit,
+                                        bool isNontemporal) {
+  Value = buildToMemory(Value, Ty);
+
+  if (Ty->isAtomicType()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (const auto *ClangVecTy = Ty->getAs<clang::VectorType>()) {
+    if (!CGM.getCodeGenOpts().PreserveVec3Type &&
+        ClangVecTy->getNumElements() == 3)
+      llvm_unreachable("NYI: Special treatment of 3-element vector store");
+  }
+
+  // Update the alloca with more info on initialization.
+  assert(Addr.getPointer() && "expected pointer to exist");
+  auto SrcAlloca =
+      dyn_cast_or_null<mlir::cir::AllocaOp>(Addr.getPointer().getDefiningOp());
+  if (currVarDecl && SrcAlloca) {
+    const VarDecl *VD = currVarDecl;
+    assert(VD && "VarDecl expected");
+    if (VD->hasInit())
+      SrcAlloca.setInitAttr(mlir::UnitAttr::get(builder.getContext()));
+  }
+
+  assert(currSrcLoc && "must pass in source location");
+  builder.createStore(*currSrcLoc, Value, Addr, Volatile);
+
+  if (isNontemporal) {
+    llvm_unreachable("NYI");
+  }
+
+  if (UnimplementedFeature::tbaa())
+    llvm_unreachable("NYI");
+}
+
+void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
+                                        bool isInit) {
+  if (lvalue.getType()->isConstantMatrixType()) {
+    llvm_unreachable("NYI");
+  }
+
+  buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+                     lvalue.getType(), lvalue.getBaseInfo(), isInit,
+                     lvalue.isNontemporal());
+}
+
+/// Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
+  assert(!LV.getType()->isFunctionType());
+  assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
+
+  if (LV.isBitField())
+    return buildLoadOfBitfieldLValue(LV, Loc);
+
+  if (LV.isSimple())
+    return RValue::get(buildLoadOfScalar(LV, Loc));
+  llvm_unreachable("NYI");
+}
+
+RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
+                                                 SourceLocation Loc) {
+  const CIRGenBitFieldInfo &info = LV.getBitFieldInfo();
+
+  // Get the output type.
+  mlir::Type resLTy = convertType(LV.getType());
+  Address ptr = LV.getBitFieldAddress();
+
+  bool useVolatile = LV.isVolatileQualified() &&
+                     info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+
+  auto field = builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(),
+                                         ptr.getElementType(), info,
+                                         LV.isVolatile(), useVolatile);
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+  return RValue::get(field);
+}
+
+void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
+  if (!Dst.isSimple()) {
+    if (Dst.isVectorElt()) {
+      // Read/modify/write the vector, inserting the new element
+      mlir::Location loc = Dst.getVectorPointer().getLoc();
+      mlir::Value Vector = builder.createLoad(loc, Dst.getVectorAddress());
+      Vector = builder.create<mlir::cir::VecInsertOp>(
+          loc, Vector, Src.getScalarVal(), Dst.getVectorIdx());
+      builder.createStore(loc, Vector, Dst.getVectorAddress());
+      return;
+    }
+    assert(Dst.isBitField() && "NYI LValue type");
+    mlir::Value result;
+    return buildStoreThroughBitfieldLValue(Src, Dst, result);
+  }
+  assert(Dst.isSimple() && "only implemented simple");
+
+  // There's special magic for assigning into an ARC-qualified l-value.
+  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+    llvm_unreachable("NYI");
+  }
+
+  assert(Src.isScalar() && "Can't emit an agg store with this method");
+  buildStoreOfScalar(Src.getScalarVal(), Dst);
+}
+
+void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                     mlir::Value &Result) {
+  // According to the AAPCS:
+  // When a volatile bit-field is written, and its container does not overlap
+  // with any non-bit-field member, its container must be read exactly once
+  // and written exactly once using the access width appropriate to the type
+  // of the container. The two accesses are not atomic.
+  if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
+      CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
+    llvm_unreachable("volatile bit-field is not implemented for the AAPCS");
+
+  const CIRGenBitFieldInfo &info = Dst.getBitFieldInfo();
+  mlir::Type resLTy = getTypes().convertTypeForMem(Dst.getType());
+  Address ptr = Dst.getBitFieldAddress();
+
+  const bool useVolatile =
+      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
+      info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+
+  mlir::Value dstAddr = Dst.getAddress().getPointer();
+
+  Result = builder.createSetBitfield(
+      dstAddr.getLoc(), resLTy, dstAddr, ptr.getElementType(),
+      Src.getScalarVal(), info, Dst.isVolatileQualified(), useVolatile);
+}
+
+static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
+                                       const VarDecl *VD) {
+  QualType T = E->getType();
+
+  // If it's thread_local, emit a call to its wrapper function instead.
+  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
+      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
+    assert(0 && "not implemented");
+
+  // Check if the variable is marked as declare target with link clause in
+  // device codegen.
+  if (CGF.getLangOpts().OpenMP)
+    llvm_unreachable("not implemented");
+
+  // Traditional LLVM codegen handles thread local separately, CIR handles it
+  // as part of getAddrOfGlobalVar.
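To make the AAPCS rule quoted in buildStoreThroughBitfieldLValue concrete, here is a hypothetical layout it governs: both bit-fields share a single 32-bit container, so a volatile store must access that container exactly once with its natural width. The global-variable address computation continues below.

```cpp
struct Ctrl {
  volatile unsigned mode : 3; // both fields live in one 32-bit container
  volatile unsigned rate : 5;
};

// Writing 'mode' must read the container once and write it once, using
// 32-bit accesses; the read-modify-write pair is not atomic.
void setMode(Ctrl &c) { c.mode = 2; }
```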
+  auto V = CGF.CGM.getAddrOfGlobalVar(VD);
+
+  auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType());
+  auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy);
+  if (realPtrTy != V.getType())
+    V = CGF.getBuilder().createBitcast(V.getLoc(), V, realPtrTy);
+
+  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+  Address Addr(V, RealVarTy, Alignment);
+  // Emit reference to the private copy of the variable if it is an OpenMP
+  // threadprivate variable.
+  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
+      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+    assert(0 && "NYI");
+  }
+  LValue LV;
+  if (VD->getType()->isReferenceType())
+    assert(0 && "NYI");
+  else
+    LV = CGF.makeAddrLValue(Addr, T, AlignmentSource::Decl);
+  assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI");
+  return LV;
+}
+
+static LValue buildCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD,
+                                       mlir::Value ThisValue) {
+  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
+  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
+  return CGF.buildLValueForField(LV, FD);
+}
+
+static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E,
+                                      GlobalDecl GD) {
+  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+  auto funcOp = buildFunctionDeclPointer(CGF.CGM, GD);
+  auto loc = CGF.getLoc(E->getSourceRange());
+  CharUnits align = CGF.getContext().getDeclAlign(FD);
+
+  auto fnTy = funcOp.getFunctionType();
+  auto ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy);
+  auto addr = CGF.getBuilder().create<mlir::cir::GetGlobalOp>(
+      loc, ptrTy, funcOp.getSymName());
+  return CGF.makeAddrLValue(Address(addr, fnTy, align), E->getType(),
+                            AlignmentSource::Decl);
+}
+
+LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
+  const NamedDecl *ND = E->getDecl();
+  QualType T = E->getType();
+
+  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
+         "should not emit an unevaluated operand");
+
+  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
+    // Global Named registers access via intrinsics only
+    if (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
+        !VD->isLocalVarDecl())
+      llvm_unreachable("NYI");
+
+    assert(E->isNonOdrUse() != NOUR_Constant && "not implemented");
+
+    // Check for captured variables.
+    if (E->refersToEnclosingVariableOrCapture()) {
+      VD = VD->getCanonicalDecl();
+      if (auto *FD = LambdaCaptureFields.lookup(VD))
+        return buildCapturedFieldLValue(*this, FD, CXXABIThisValue);
+      assert(!UnimplementedFeature::CGCapturedStmtInfo() && "NYI");
+      // TODO[OpenMP]: Find the appropriate captured variable value and return
+      // it.
+      // TODO[OpenMP]: Set non-temporal information in the captured LVal.
+      // LLVM codegen:
+      assert(!UnimplementedFeature::openMP());
+      // Address addr = GetAddrOfBlockDecl(VD);
+      // return MakeAddrLValue(addr, T, AlignmentSource::Decl);
+    }
+  }
+
+  // FIXME(CIR): We should be able to assert this for FunctionDecls as well!
+  // FIXME(CIR): We should be able to assert this for all DeclRefExprs, not
+  // just those with a valid source location.
+  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
+          !E->getLocation().isValid()) &&
+         "Should not use decl without marking it used!");
+
+  if (ND->hasAttr<WeakRefAttr>()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
+    // Check if this is a global variable
+    if (VD->hasLinkage() || VD->isStaticDataMember())
+      return buildGlobalVarDeclLValue(*this, E, VD);
+
+    Address addr = Address::invalid();
+
+    // The variable should generally be present in the local decl map.
+    auto iter = LocalDeclMap.find(VD);
+    if (iter != LocalDeclMap.end()) {
+      addr = iter->second;
+    }
+    // Otherwise, it might be static local we haven't emitted yet for some
+    // reason; most likely, because it's in an outer function.
+    else if (VD->isStaticLocal()) {
+      mlir::cir::GlobalOp var = CGM.getOrCreateStaticVarDecl(
+          *VD, CGM.getCIRLinkageVarDefinition(VD, /*IsConstant=*/false));
+      addr = Address(builder.createGetGlobal(var), convertType(VD->getType()),
+                     getContext().getDeclAlign(VD));
+    } else {
+      llvm_unreachable("DeclRefExpr for decl not entered in LocalDeclMap?");
+    }
+
+    // Handle threadlocal function locals.
+    if (VD->getTLSKind() != VarDecl::TLS_None)
+      llvm_unreachable("thread-local storage is NYI");
+
+    // Check for OpenMP threadprivate variables.
+    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
+        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+      llvm_unreachable("NYI");
+    }
+
+    // Drill into block byref variables.
+    bool isBlockByref = VD->isEscapingByref();
+    if (isBlockByref) {
+      llvm_unreachable("NYI");
+    }
+
+    // Drill into reference types.
+    LValue LV =
+        VD->getType()->isReferenceType()
+            ? buildLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()),
+                                         VD->getType(), AlignmentSource::Decl)
+            : makeAddrLValue(addr, T, AlignmentSource::Decl);
+
+    // Statics are defined as globals, so they are not included in the
+    // function's symbol table.
+    assert((VD->isStaticLocal() || symbolTable.count(VD)) &&
+           "non-static locals should be already mapped");
+
+    bool isLocalStorage = VD->hasLocalStorage();
+
+    bool NonGCable =
+        isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref;
+
+    if (NonGCable && UnimplementedFeature::setNonGC()) {
+      llvm_unreachable("garbage collection is NYI");
+    }
+
+    bool isImpreciseLifetime =
+        (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
+    if (isImpreciseLifetime && UnimplementedFeature::ARC())
+      llvm_unreachable("imprecise lifetime is NYI");
+    assert(!UnimplementedFeature::setObjCGCLValueClass());
+
+    // Statics are defined as globals, so they are not included in the
+    // function's symbol table.
+    assert((VD->isStaticLocal() || symbolTable.lookup(VD)) &&
+           "Name lookup must succeed for non-static local variables");
+
+    return LV;
+  }
+
+  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
+    LValue LV = buildFunctionDeclLValue(*this, E, FD);
+
+    // Emit debuginfo for the function declaration if the target wants to.
+    if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
+      assert(!UnimplementedFeature::generateDebugInfo());
+
+    return LV;
+  }
+
+  // FIXME: While we're emitting a binding from an enclosing scope, all other
+  // DeclRefExprs we see should be implicitly treated as if they also refer to
+  // an enclosing scope.
+  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
+    llvm_unreachable("NYI");
+  }
+
+  // We can form DeclRefExprs naming GUID declarations when reconstituting
+  // non-type template parameters into expressions.
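The static-local path in buildDeclRefLValue above covers variables with static storage that are referenced from a function whose LocalDeclMap never saw them, for example from a lambda in the enclosing function. A small illustration (the GUID check the last comment refers to follows below):

```cpp
int outer() {
  static int counter = 0; // emitted once, with static storage
  // 'counter' needs no capture; inside the lambda's body it is not in the
  // LocalDeclMap, so it is resolved through getOrCreateStaticVarDecl.
  auto bump = [] { return ++counter; };
  return bump();
}
```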
+  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
+    llvm_unreachable("NYI");
+
+  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
+    llvm_unreachable("NYI");
+
+  llvm_unreachable("Unhandled DeclRefExpr");
+}
+
+LValue
+CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+  assert((E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) &&
+         "unexpected binary operator opcode");
+
+  auto baseAddr = Address::invalid();
+  if (E->getOpcode() == BO_PtrMemD)
+    baseAddr = buildLValue(E->getLHS()).getAddress();
+  else
+    baseAddr = buildPointerWithAlignment(E->getLHS());
+
+  const auto *memberPtrTy = E->getRHS()->getType()->castAs<MemberPointerType>();
+
+  auto memberPtr = buildScalarExpr(E->getRHS());
+
+  LValueBaseInfo baseInfo;
+  // TODO(cir): add TBAA
+  assert(!UnimplementedFeature::tbaa());
+  auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr,
+                                                     memberPtrTy, &baseInfo);
+
+  return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
+}
+
+LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
+  // Comma expressions just emit their LHS then their RHS as an l-value.
+  if (E->getOpcode() == BO_Comma) {
+    buildIgnoredExpr(E->getLHS());
+    return buildLValue(E->getRHS());
+  }
+
+  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
+    return buildPointerToDataMemberBinaryExpr(E);
+
+  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+  // Note that in all of these cases, __block variables need the RHS
+  // evaluated first just in case the variable gets moved by the RHS.
+
+  switch (CIRGenFunction::getEvaluationKind(E->getType())) {
+  case TEK_Scalar: {
+    assert(E->getLHS()->getType().getObjCLifetime() ==
+               clang::Qualifiers::ObjCLifetime::OCL_None &&
+           "not implemented");
+
+    RValue RV = buildAnyExpr(E->getRHS());
+    LValue LV = buildLValue(E->getLHS());
+
+    SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
+    if (LV.isBitField()) {
+      mlir::Value result;
+      buildStoreThroughBitfieldLValue(RV, LV, result);
+    } else {
+      buildStoreThroughLValue(RV, LV);
+    }
+    if (getLangOpts().OpenMP)
+      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
+                                                                E->getLHS());
+    return LV;
+  }
+
+  case TEK_Complex:
+    assert(0 && "not implemented");
+  case TEK_Aggregate:
+    assert(0 && "not implemented");
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+/// Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CIRGenFunction::buildPointerWithAlignment(
+    const Expr *E, LValueBaseInfo *BaseInfo, KnownNonNull_t IsKnownNonNull) {
+  Address Addr =
+      ::buildPointerWithAlignment(E, BaseInfo, IsKnownNonNull, *this);
+  if (IsKnownNonNull && !Addr.isKnownNonNull())
+    Addr.setKnownNonNull();
+  return Addr;
+}
+
+/// Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) {
+  // TODO(cir): PGO
+  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+    assert(0 && "not implemented");
+  }
+
+  QualType BoolTy = getContext().BoolTy;
+  SourceLocation Loc = E->getExprLoc();
+  // TODO(cir): CGFPOptionsRAII for FP stuff.
+  if (!E->getType()->isAnyComplexType())
+    return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc);
+
+  llvm_unreachable("complex to scalar not implemented");
+}
+
+LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) {
+  // __extension__ doesn't affect lvalue-ness.
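For reference, the two operator forms buildPointerToDataMemberBinaryExpr distinguishes above look like this in source (the unary-operator handling resumes below):

```cpp
struct S { int x; };

int readDot(S &s, int S::*m) { return s.*m; }    // BO_PtrMemD: LHS is an lvalue
int readArrow(S *s, int S::*m) { return s->*m; } // BO_PtrMemI: LHS is a pointer
```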
+  if (E->getOpcode() == UO_Extension)
+    return buildLValue(E->getSubExpr());
+
+  switch (E->getOpcode()) {
+  default:
+    llvm_unreachable("Unknown unary operator lvalue!");
+  case UO_Deref: {
+    QualType T = E->getSubExpr()->getType()->getPointeeType();
+    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+    LValueBaseInfo BaseInfo;
+    // TODO: add TBAAInfo
+    Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo);
+
+    // Tag 'load' with deref attribute.
+    if (auto loadOp =
+            dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) {
+      loadOp.setIsDerefAttr(mlir::UnitAttr::get(builder.getContext()));
+    }
+
+    LValue LV = LValue::makeAddr(Addr, T, BaseInfo);
+    // TODO: set addr space
+    // TODO: ObjC/GC/__weak write barrier stuff.
+    return LV;
+  }
+  case UO_Real:
+  case UO_Imag: {
+    assert(0 && "not implemented");
+  }
+  case UO_PreInc:
+  case UO_PreDec: {
+    bool isInc = E->isIncrementOp();
+    bool isPre = E->isPrefix();
+    LValue LV = buildLValue(E->getSubExpr());
+
+    if (E->getType()->isAnyComplexType()) {
+      assert(0 && "not implemented");
+    } else {
+      buildScalarPrePostIncDec(E, LV, isInc, isPre);
+    }
+
+    return LV;
+  }
+  }
+}
+
+/// Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot,
+                                    bool ignoreResult) {
+  switch (CIRGenFunction::getEvaluationKind(E->getType())) {
+  case TEK_Scalar:
+    return RValue::get(buildScalarExpr(E));
+  case TEK_Complex:
+    assert(0 && "not implemented");
+  case TEK_Aggregate: {
+    if (!ignoreResult && aggSlot.isIgnored())
+      aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()),
+                              getCounterAggTmpAsString());
+    buildAggExpr(E, aggSlot);
+    return aggSlot.asRValue();
+  }
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E,
+                                     ReturnValueSlot ReturnValue) {
+  assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI");
+
+  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
+    return buildCXXMemberCallExpr(CE, ReturnValue);
+
+  assert(!dyn_cast<CUDAKernelCallExpr>(E) && "CUDA NYI");
+  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
+    if (const CXXMethodDecl *MD =
+            dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
+      return buildCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+  CIRGenCallee callee = buildCallee(E->getCallee());
+
+  if (callee.isBuiltin())
+    return buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E,
+                            ReturnValue);
+
+  assert(!callee.isPseudoDestructor() && "NYI");
+
+  return buildCall(E->getCallee()->getType(), callee, E, ReturnValue);
+}
+
+LValue CIRGenFunction::buildStmtExprLValue(const StmtExpr *E) {
+  // Can only get l-value for message expression returning aggregate type
+  RValue RV = buildAnyExprToTemp(E);
+  return makeAddrLValue(RV.getAggregateAddress(), E->getType(),
+                        AlignmentSource::Decl);
+}
+
+RValue CIRGenFunction::buildCall(clang::QualType CalleeType,
+                                 const CIRGenCallee &OrigCallee,
+                                 const clang::CallExpr *E,
+                                 ReturnValueSlot ReturnValue,
+                                 mlir::Value Chain) {
+  // Get the actual function type. The callee type will always be a pointer to
+  // function type or a block pointer type.
+  assert(CalleeType->isFunctionPointerType() &&
+         "Call must have function pointer type!");
+
+  auto *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
+  (void)TargetDecl;
+
+  CalleeType = getContext().getCanonicalType(CalleeType);
+
+  auto PointeeType = cast<clang::PointerType>(CalleeType)->getPointeeType();
+
+  CIRGenCallee Callee = OrigCallee;
+
+  if (getLangOpts().CPlusPlus)
+    assert(!SanOpts.has(SanitizerKind::Function) && "Sanitizers NYI");
+
+  const auto *FnType = cast<FunctionType>(PointeeType);
+
+  assert(!SanOpts.has(SanitizerKind::CFIICall) && "Sanitizers NYI");
+
+  CallArgList Args;
+
+  assert(!Chain && "FIX THIS");
+
+  // C++17 requires that we evaluate arguments to a call using assignment
+  // syntax right-to-left, and that we evaluate arguments to certain other
+  // operators left-to-right. Note that we allow this to override the order
+  // dictated by the calling convention on the MS ABI, which means that
+  // parameter destruction order is not necessarily reverse construction order.
+  // FIXME: Revisit this based on C++ committee response to unimplementability.
+  EvaluationOrder Order = EvaluationOrder::Default;
+  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
+    if (OCE->isAssignmentOp())
+      Order = EvaluationOrder::ForceRightToLeft;
+    else {
+      switch (OCE->getOperator()) {
+      case OO_LessLess:
+      case OO_GreaterGreater:
+      case OO_AmpAmp:
+      case OO_PipePipe:
+      case OO_Comma:
+      case OO_ArrowStar:
+        Order = EvaluationOrder::ForceLeftToRight;
+        break;
+      default:
+        break;
+      }
+    }
+  }
+
+  buildCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
+                E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
+
+  const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
+      Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer());
+
+  // C99 6.5.2.2p6:
+  // If the expression that denotes the called function has a type that does
+  // not include a prototype, [the default argument promotions are performed].
+  // If the number of arguments does not equal the number of parameters, the
+  // behavior is undefined. If the function is defined with a type that
+  // includes a prototype, and either the prototype ends with an ellipsis (,
+  // ...) or the types of the arguments after promotion are not compatible
+  // with the types of the parameters, the behavior is undefined. If the
+  // function is defined with a type that does not include a prototype, and
+  // the types of the arguments after promotion are not compatible with those
+  // of the parameters after promotion, the behavior is undefined [except in
+  // some trivial cases].
+  // That is, in the general case, we should assume that a call through an
+  // unprototyped function type works like a *non-variadic* call. The way we
+  // make this work is to cast to the exact type of the promoted arguments.
+  //
+  // Chain calls use the same code path to add the invisible chain parameter
+  // to the function type.
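A quick illustration of the C++17 sequencing the code above enforces for overloaded operators; the built-in assignment shown here follows the same right-to-left rule. The no-prototype handling resumes below.

```cpp
#include <iostream>

static int observe(const char *tag, int v) {
  std::cout << tag;
  return v;
}

void demo() {
  int a[2] = {};
  // C++17: the RHS of an assignment is sequenced before the LHS,
  // so this prints "RL".
  a[observe("L", 0)] = observe("R", 1);
}
```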
+  if (isa<FunctionNoProtoType>(FnType) || Chain) {
+    assert(!UnimplementedFeature::chainCalls());
+    assert(!UnimplementedFeature::addressSpace());
+    auto CalleeTy = getTypes().GetFunctionType(FnInfo);
+    // get non-variadic function type
+    CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(),
+                                        CalleeTy.getReturnType(), false);
+    auto CalleePtrTy =
+        mlir::cir::PointerType::get(builder.getContext(), CalleeTy);
+
+    auto *Fn = Callee.getFunctionPointer();
+    mlir::Value Addr;
+    if (auto funcOp = llvm::dyn_cast<mlir::cir::FuncOp>(Fn)) {
+      Addr = builder.create<mlir::cir::GetGlobalOp>(
+          getLoc(E->getSourceRange()),
+          mlir::cir::PointerType::get(builder.getContext(),
+                                      funcOp.getFunctionType()),
+          funcOp.getSymName());
+    } else {
+      Addr = Fn->getResult(0);
+    }
+
+    Fn = builder.createBitcast(Addr, CalleePtrTy).getDefiningOp();
+    Callee.setFunctionPointer(Fn);
+  }
+
+  assert(!CGM.getLangOpts().HIP && "HIP NYI");
+
+  assert(!MustTailCall && "Must tail NYI");
+  mlir::cir::CIRCallOpInterface callOP;
+  RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP,
+                          E == MustTailCall, getLoc(E->getExprLoc()), E);
+
+  assert(!getDebugInfo() && "Debug Info NYI");
+
+  return Call;
+}
+
+/// Emit code to compute the specified expression, ignoring the result.
+void CIRGenFunction::buildIgnoredExpr(const Expr *E) {
+  if (E->isPRValue())
+    return (void)buildAnyExpr(E, AggValueSlot::ignored(), true);
+
+  // Just emit it as an l-value and drop the result.
+  buildLValue(E);
+}
+
+static mlir::Value maybeBuildArrayDecay(mlir::OpBuilder &builder,
+                                        mlir::Location loc,
+                                        mlir::Value arrayPtr,
+                                        mlir::Type eltTy) {
+  auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>();
+  assert(arrayPtrTy && "expected pointer type");
+  auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>();
+
+  if (arrayTy) {
+    mlir::cir::PointerType flatPtrTy =
+        mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType());
+    return builder.create<mlir::cir::CastOp>(
+        loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr);
+  }
+
+  assert(arrayPtrTy.getPointee() == eltTy &&
+         "flat pointee type must match original array element type");
+  return arrayPtr;
+}
+
+Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E,
+                                                 LValueBaseInfo *BaseInfo) {
+  assert(E->getType()->isArrayType() &&
+         "Array to pointer decay must have array source type!");
+
+  // Expressions of array type can't be bitfields or vector elements.
+  LValue LV = buildLValue(E);
+  Address Addr = LV.getAddress();
+
+  // If the array type was an incomplete type, we need to make sure
+  // the decay ends up being the right type.
+  auto lvalueAddrTy =
+      Addr.getPointer().getType().dyn_cast<mlir::cir::PointerType>();
+  assert(lvalueAddrTy && "expected pointer");
+
+  if (E->getType()->isVariableArrayType())
+    return Addr;
+
+  auto pointeeTy = lvalueAddrTy.getPointee().dyn_cast<mlir::cir::ArrayType>();
+  assert(pointeeTy && "expected array");
+
+  mlir::Type arrayTy = convertType(E->getType());
+  assert(arrayTy.isa<mlir::cir::ArrayType>() && "expected array");
+  assert(pointeeTy == arrayTy);
+
+  // The result of this decay conversion points to an array element within the
+  // base lvalue. However, since TBAA currently does not support representing
+  // accesses to elements of member arrays, we conservatively represent
+  // accesses to the pointee object as if it had no base lvalue specified.
+  // TODO: Support TBAA for member arrays.
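The decay that maybeBuildArrayDecay and buildArrayToPointerDecay implement corresponds to the ordinary C/C++ conversion sketched below; in CIR it is modeled explicitly as a cast with the array_to_ptrdecay kind rather than a GEP. The function resumes below with the element-type computation.

```cpp
int sum(const int *p, int n); // hypothetical callee

int use() {
  int a[4] = {1, 2, 3, 4};
  return sum(a, 4); // 'a' decays from int[4] to const int*
}
```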
+  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
+  if (BaseInfo)
+    *BaseInfo = LV.getBaseInfo();
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+
+  mlir::Value ptr = maybeBuildArrayDecay(
+      CGM.getBuilder(), CGM.getLoc(E->getSourceRange()), Addr.getPointer(),
+      getTypes().convertTypeForMem(EltType));
+  return Address(ptr, Addr.getAlignment());
+}
+
+/// If the specified expr is a simple decay from an array to pointer,
+/// return the array subexpression.
+/// FIXME: this could be abstracted into a common AST helper.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+  // If this isn't just an array->pointer decay, bail out.
+  const auto *CE = dyn_cast<CastExpr>(E);
+  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
+    return nullptr;
+
+  // If this is a decay from variable width array, bail out.
+  const Expr *SubExpr = CE->getSubExpr();
+  if (SubExpr->getType()->isVariableArrayType())
+    return nullptr;
+
+  return SubExpr;
+}
+
+/// Given an array base, check whether its member access belongs to a record
+/// with preserve_access_index attribute or not.
+/// TODO(cir): doesn't need to be specific to LLVM's codegen, refactor into
+/// common AST helpers.
+static bool isPreserveAIArrayBase(CIRGenFunction &CGF, const Expr *ArrayBase) {
+  if (!ArrayBase || !CGF.getDebugInfo())
+    return false;
+
+  // Only support base as either a MemberExpr or DeclRefExpr.
+  // DeclRefExpr to cover cases like:
+  //    struct s { int a; int b[10]; };
+  //    struct s *p;
+  //    p[1].a
+  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
+  // p->b[5] is a MemberExpr example.
+  const Expr *E = ArrayBase->IgnoreImpCasts();
+  if (const auto *ME = dyn_cast<MemberExpr>(E))
+    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
+
+  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
+    if (!VarDef)
+      return false;
+
+    const auto *PtrT = VarDef->getType()->getAs<clang::PointerType>();
+    if (!PtrT)
+      return false;
+
+    const auto *PointeeT =
+        PtrT->getPointeeType()->getUnqualifiedDesugaredType();
+    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
+      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
+    return false;
+  }
+
+  return false;
+}
+
+static mlir::IntegerAttr getConstantIndexOrNull(mlir::Value idx) {
+  // TODO(cir): should we consider using MLIR's IndexType instead of
+  // IntegerAttr?
+  if (auto constantOp = dyn_cast<mlir::cir::ConstantOp>(idx.getDefiningOp()))
+    return constantOp.getValue().dyn_cast<mlir::IntegerAttr>();
+  return {};
+}
+
+static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
+                                      CharUnits eltSize) {
+  // If we have a constant index, we can use the exact offset of the
+  // element we're accessing.
+  auto constantIdx = getConstantIndexOrNull(idx);
+  if (constantIdx) {
+    CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
+    return arrayAlign.alignmentAtOffset(offset);
+    // Otherwise, use the worst-case alignment for any element.
+  } else {
+    return arrayAlign.alignmentOfArrayElement(eltSize);
+  }
+}
+
+static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder,
+                                      mlir::Location arrayLocBegin,
+                                      mlir::Location arrayLocEnd,
+                                      mlir::Value arrayPtr, mlir::Type eltTy,
+                                      mlir::Value idx, bool shouldDecay) {
+  mlir::Value basePtr = arrayPtr;
+  if (shouldDecay)
+    basePtr = maybeBuildArrayDecay(builder, arrayLocBegin, arrayPtr, eltTy);
+  mlir::Type flatPtrTy = basePtr.getType();
+
+  return builder.create<mlir::cir::PtrStrideOp>(arrayLocEnd, flatPtrTy,
+                                                basePtr, idx);
+}
+
+static mlir::Value
+buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc,
+                       mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy,
+                       ArrayRef<mlir::Value> indices, bool inbounds,
+                       bool signedIndices, bool shouldDecay,
+                       const llvm::Twine &name = "arrayidx") {
+  assert(indices.size() == 1 && "cannot handle multiple indices yet");
+  auto idx = indices.back();
+  auto &CGM = CGF.getCIRGenModule();
+  // TODO(cir): LLVM codegen emits an inbounds GEP here, is there anything
+  // that would enhance tracking this later in CIR?
+  if (inbounds)
+    assert(!UnimplementedFeature::emitCheckedInBoundsGEP() && "NYI");
+  return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, idx,
+                            shouldDecay);
+}
+
+static QualType getFixedSizeElementType(const ASTContext &ctx,
+                                        const VariableArrayType *vla) {
+  QualType eltType;
+  do {
+    eltType = vla->getElementType();
+  } while ((vla = ctx.getAsVariableArrayType(eltType)));
+  return eltType;
+}
+
+static Address buildArraySubscriptPtr(
+    CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc,
+    Address addr, ArrayRef<mlir::Value> indices, QualType eltType,
+    bool inbounds, bool signedIndices, mlir::Location loc, bool shouldDecay,
+    QualType *arrayType = nullptr, const Expr *Base = nullptr,
+    const llvm::Twine &name = "arrayidx") {
+  // Determine the element size of the statically-sized base. This is
+  // the thing that the indices are expressed in terms of.
+  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
+    eltType = getFixedSizeElementType(CGF.getContext(), vla);
+  }
+
+  // We can use that to compute the best alignment of the element.
+  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+  CharUnits eltAlign =
+      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+
+  mlir::Value eltPtr;
+  auto LastIndex = getConstantIndexOrNull(indices.back());
+  if (!LastIndex ||
+      (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) {
+    eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(),
+                                    addr.getElementType(), indices, inbounds,
+                                    signedIndices, shouldDecay, name);
+  } else {
+    // assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+    // assert(indices.size() == 1 && "cannot handle multiple indices yet");
+    // auto idx = indices.back();
+    // auto &CGM = CGF.getCIRGenModule();
+    // eltPtr = buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc,
+    //                             addr.getPointer(), addr.getElementType(),
+    //                             idx);
+    assert(0 && "NYI");
+  }
+
+  return Address(eltPtr, CGF.getTypes().convertTypeForMem(eltType), eltAlign);
+}
+
+LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E,
+                                               bool Accessed) {
+  // The index must always be an integer, which is not an aggregate. Emit it
+  // in lexical order (this complexity is, sadly, required by C++17).
+  mlir::Value IdxPre =
+      (E->getLHS() == E->getIdx()) ? buildScalarExpr(E->getIdx()) : nullptr;
+  bool SignedIndices = false;
+  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> mlir::Value {
+    mlir::Value Idx = IdxPre;
+    if (E->getLHS() != E->getIdx()) {
+      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
+      Idx = buildScalarExpr(E->getIdx());
+    }
+
+    QualType IdxTy = E->getIdx()->getType();
+    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+    SignedIndices |= IdxSigned;
+
+    if (SanOpts.has(SanitizerKind::ArrayBounds))
+      llvm_unreachable("array bounds sanitizer is NYI");
+
+    // Extend or truncate the index type to 32 or 64-bits.
+    auto ptrTy = Idx.getType().dyn_cast<mlir::cir::PointerType>();
+    if (Promote && ptrTy && ptrTy.getPointee().isa<mlir::cir::IntType>())
+      llvm_unreachable("index type cast is NYI");
+
+    return Idx;
+  };
+  IdxPre = nullptr;
+
+  // If the base is a vector type, then we are forming a vector element
+  // with this subscript.
+  if (E->getBase()->getType()->isVectorType() &&
+      !isa<ExtVectorElementExpr>(E->getBase())) {
+    LValue LHS = buildLValue(E->getBase());
+    auto Index = EmitIdxAfterBase(/*Promote=*/false);
+    return LValue::MakeVectorElt(LHS.getAddress(), Index,
+                                 E->getBase()->getType(), LHS.getBaseInfo());
+  }
+
+  // All the other cases basically behave like simple offsetting.
+
+  // Handle the extvector case we ignored above.
+  if (isa<ExtVectorElementExpr>(E->getBase())) {
+    llvm_unreachable("extvector subscript is NYI");
+  }
+
+  assert(!UnimplementedFeature::tbaa() && "TBAA is NYI");
+  LValueBaseInfo EltBaseInfo;
+  Address Addr = Address::invalid();
+  if (const VariableArrayType *vla =
+          getContext().getAsVariableArrayType(E->getType())) {
+    // The base must be a pointer, which is not an aggregate. Emit
+    // it. It needs to be emitted first in case it's what captures
+    // the VLA bounds.
+    Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo);
+    auto Idx = EmitIdxAfterBase(/*Promote*/ true);
+
+    // The element count here is the total number of non-VLA elements.
+    mlir::Value numElements = getVLASize(vla).NumElts;
+    Idx = builder.createCast(mlir::cir::CastKind::integral, Idx,
+                             numElements.getType());
+    Idx = builder.createMul(Idx, numElements);
+
+    QualType ptrType = E->getBase()->getType();
+    Addr = buildArraySubscriptPtr(
+        *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr,
+        {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(),
+        SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false,
+        &ptrType, E->getBase());
+  } else if (const ObjCObjectType *OIT =
+                 E->getType()->getAs<ObjCObjectType>()) {
+    llvm_unreachable("ObjC object type subscript is NYI");
+  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+    // If this is A[i] where A is an array, the frontend will have decayed the
+    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then
+    // a "gep x, i" here. Emit one "gep A, 0, i".
+    assert(Array->getType()->isArrayType() &&
+           "Array to pointer decay must have array source type!");
+    LValue ArrayLV;
+    // For simple multidimensional array indexing, set the 'accessed' flag
+    // for better bounds-checking of the base expression.
+    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
+      ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed=*/true);
+    else
+      ArrayLV = buildLValue(Array);
+    auto Idx = EmitIdxAfterBase(/*Promote=*/true);
+
+    // Propagate the alignment from the array itself to the result.
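A sketch of the alignment arithmetic behind getArrayElementAlign, with CharUnits reduced to plain integers (this assumes the alignment is a power of two, matching what CharUnits::alignmentAtOffset relies on); the subscript lowering continues below:

```cpp
#include <cstdint>

// For a constant index the exact byte offset is known, so the guaranteed
// alignment is bounded by the lowest set bit of (align | offset).
uint64_t alignmentAtOffset(uint64_t align, uint64_t offset) {
  if (offset == 0)
    return align;
  uint64_t v = align | offset;
  return v & -v;
}
// e.g. a 16-byte aligned array of 8-byte elements: index 1 gives offset 8,
// so alignmentAtOffset(16, 8) == 8.
```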
+    QualType arrayType = Array->getType();
+    Addr = buildArraySubscriptPtr(
+        *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()),
+        ArrayLV.getAddress(), {Idx}, E->getType(),
+        !getLangOpts().isSignedOverflowDefined(), SignedIndices,
+        CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/true, &arrayType,
+        E->getBase());
+    EltBaseInfo = ArrayLV.getBaseInfo();
+    // TODO(cir): EltTBAAInfo
+    assert(!UnimplementedFeature::tbaa() && "TBAA is NYI");
+  } else {
+    // The base must be a pointer; emit it with an estimate of its alignment.
+    // TODO(cir): EltTBAAInfo
+    assert(!UnimplementedFeature::tbaa() && "TBAA is NYI");
+    Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo);
+    auto Idx = EmitIdxAfterBase(/*Promote*/ true);
+    QualType ptrType = E->getBase()->getType();
+    Addr = buildArraySubscriptPtr(
+        *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr,
+        Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(),
+        SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false,
+        &ptrType, E->getBase());
+  }
+
+  LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo);
+
+  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
+    llvm_unreachable("ObjC is NYI");
+  }
+
+  return LV;
+}
+
+LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) {
+  auto sym = CGM.getAddrOfConstantStringFromLiteral(E).getSymbol();
+
+  auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym);
+  assert(cstGlobal && "Expected global");
+
+  auto g = dyn_cast<mlir::cir::GlobalOp>(cstGlobal);
+  assert(g && "unaware of other symbol providers");
+
+  auto ptrTy = mlir::cir::PointerType::get(CGM.getBuilder().getContext(),
+                                           g.getSymType());
+  assert(g.getAlignment() && "expected alignment for string literal");
+  auto align = *g.getAlignment();
+  auto addr = builder.create<mlir::cir::GetGlobalOp>(
+      getLoc(E->getSourceRange()), ptrTy, g.getSymName());
+  return makeAddrLValue(
+      Address(addr, g.getSymType(), CharUnits::fromQuantity(align)),
+      E->getType(), AlignmentSource::Decl);
+}
+
+/// Casts are never lvalues unless that cast is to a reference type. If the
+/// cast is to a reference, we can have the usual lvalue result, otherwise if
+/// a cast is needed by the code generator in an lvalue context, then it must
+/// mean that we need the address of an aggregate in order to access one of
+/// its members. This can happen for all the reasons that casts are permitted
+/// with aggregate result, including noop aggregate casts, and cast from
+/// scalar to union.
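Before the function body below, an example of the one cast family that does yield an lvalue, the derived-to-base case handled under CK_DerivedToBase:

```cpp
struct B { int v; };
struct D : B {};

void touch(D &d) {
  static_cast<B &>(d).v = 1; // cast to reference type: result is an lvalue
}
```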
+LValue CIRGenFunction::buildCastLValue(const CastExpr *E) {
+  switch (E->getCastKind()) {
+  case CK_HLSLArrayRValue:
+  case CK_HLSLVectorTruncation:
+  case CK_ToVoid:
+  case CK_BitCast:
+  case CK_LValueToRValueBitCast:
+  case CK_ArrayToPointerDecay:
+  case CK_FunctionToPointerDecay:
+  case CK_NullToMemberPointer:
+  case CK_NullToPointer:
+  case CK_IntegralToPointer:
+  case CK_PointerToIntegral:
+  case CK_PointerToBoolean:
+  case CK_VectorSplat:
+  case CK_IntegralCast:
+  case CK_BooleanToSignedIntegral:
+  case CK_IntegralToBoolean:
+  case CK_IntegralToFloating:
+  case CK_FloatingToIntegral:
+  case CK_FloatingToBoolean:
+  case CK_FloatingCast:
+  case CK_FloatingRealToComplex:
+  case CK_FloatingComplexToReal:
+  case CK_FloatingComplexToBoolean:
+  case CK_FloatingComplexCast:
+  case CK_FloatingComplexToIntegralComplex:
+  case CK_IntegralRealToComplex:
+  case CK_IntegralComplexToReal:
+  case CK_IntegralComplexToBoolean:
+  case CK_IntegralComplexCast:
+  case CK_IntegralComplexToFloatingComplex:
+  case CK_DerivedToBaseMemberPointer:
+  case CK_BaseToDerivedMemberPointer:
+  case CK_MemberPointerToBoolean:
+  case CK_ReinterpretMemberPointer:
+  case CK_AnyPointerToBlockPointerCast:
+  case CK_ARCProduceObject:
+  case CK_ARCConsumeObject:
+  case CK_ARCReclaimReturnedObject:
+  case CK_ARCExtendBlockObject:
+  case CK_CopyAndAutoreleaseBlockObject:
+  case CK_IntToOCLSampler:
+  case CK_FloatingToFixedPoint:
+  case CK_FixedPointToFloating:
+  case CK_FixedPointCast:
+  case CK_FixedPointToBoolean:
+  case CK_FixedPointToIntegral:
+  case CK_IntegralToFixedPoint:
+  case CK_MatrixCast:
+    llvm_unreachable("NYI");
+
+  case CK_Dependent:
+    llvm_unreachable("dependent cast kind in IR gen!");
+
+  case CK_BuiltinFnToFnPtr:
+    llvm_unreachable("builtin functions are handled elsewhere");
+
+  // These are never l-values; just use the aggregate emission code.
+  case CK_NonAtomicToAtomic:
+  case CK_AtomicToNonAtomic:
+    assert(0 && "NYI");
+
+  case CK_Dynamic: {
+    LValue LV = buildLValue(E->getSubExpr());
+    Address V = LV.getAddress();
+    const auto *DCE = cast<CXXDynamicCastExpr>(E);
+    return MakeNaturalAlignAddrLValue(buildDynamicCast(V, DCE), E->getType());
+  }
+
+  case CK_ConstructorConversion:
+  case CK_UserDefinedConversion:
+  case CK_CPointerToObjCPointerCast:
+  case CK_BlockPointerToObjCPointerCast:
+  case CK_LValueToRValue:
+    return buildLValue(E->getSubExpr());
+
+  case CK_NoOp: {
+    // CK_NoOp can model a qualification conversion, which can remove an array
+    // bound and change the IR type.
+    LValue LV = buildLValue(E->getSubExpr());
+    if (LV.isSimple()) {
+      Address V = LV.getAddress();
+      if (V.isValid()) {
+        auto T = getTypes().convertTypeForMem(E->getType());
+        if (V.getElementType() != T)
+          LV.setAddress(
+              builder.createElementBitCast(getLoc(E->getSourceRange()), V, T));
+      }
+    }
+    return LV;
+  }
+
+  case CK_UncheckedDerivedToBase:
+  case CK_DerivedToBase: {
+    const auto *DerivedClassTy =
+        E->getSubExpr()->getType()->castAs<RecordType>();
+    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+    LValue LV = buildLValue(E->getSubExpr());
+    Address This = LV.getAddress();
+
+    // Perform the derived-to-base conversion
+    Address Base = getAddressOfBaseClass(
+        This, DerivedClassDecl, E->path_begin(), E->path_end(),
+        /*NullCheckValue=*/false, E->getExprLoc());
+
+    // TODO: Support accesses to members of base classes in TBAA. For now, we
+    // conservatively pretend that the complete object is of the base class
+    // type.
+    assert(!UnimplementedFeature::tbaa());
+    return makeAddrLValue(Base, E->getType(), LV.getBaseInfo());
+  }
+  case CK_ToUnion:
+    assert(0 && "NYI");
+  case CK_BaseToDerived: {
+    assert(0 && "NYI");
+  }
+  case CK_LValueBitCast: {
+    assert(0 && "NYI");
+  }
+  case CK_AddressSpaceConversion: {
+    assert(0 && "NYI");
+  }
+  case CK_ObjCObjectLValueCast: {
+    assert(0 && "NYI");
+  }
+  case CK_ZeroToOCLOpaqueType:
+    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
+  }
+
+  llvm_unreachable("Unhandled lvalue cast kind?");
+}
+
+// TODO(cir): candidate for common helper between LLVM and CIR codegen.
+static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &CGF,
+                                                        const MemberExpr *ME) {
+  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
+    // Try to emit static variable member expressions as DREs.
+    return DeclRefExpr::Create(
+        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
+        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
+        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
+  }
+  return nullptr;
+}
+
+LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) {
+  LValue LV;
+  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
+    assert(0 && "not implemented");
+  else
+    LV = buildLValue(E);
+  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
+    SanitizerSet SkippedChecks;
+    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+      bool IsBaseCXXThis = isWrappedCXXThis(ME->getBase());
+      if (IsBaseCXXThis)
+        SkippedChecks.set(SanitizerKind::Alignment, true);
+      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
+        SkippedChecks.set(SanitizerKind::Null, true);
+    }
+    buildTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(),
+                   LV.getAlignment(), SkippedChecks);
+  }
+  return LV;
+}
+
+// TODO(cir): candidate for common AST helper for LLVM and CIR codegen
+bool CIRGenFunction::isWrappedCXXThis(const Expr *Obj) {
+  const Expr *Base = Obj;
+  while (!isa<CXXThisExpr>(Base)) {
+    // The result of a dynamic_cast can be null.
+    if (isa<CXXDynamicCastExpr>(Base))
+      return false;
+
+    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
+      Base = CE->getSubExpr();
+    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
+      Base = PE->getSubExpr();
+    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
+      if (UO->getOpcode() == UO_Extension)
+        Base = UO->getSubExpr();
+      else
+        return false;
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) {
+  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
+    buildIgnoredExpr(E->getBase());
+    return buildDeclRefLValue(DRE);
+  }
+
+  Expr *BaseExpr = E->getBase();
+  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+  LValue BaseLV;
+  if (E->isArrow()) {
+    LValueBaseInfo BaseInfo;
+    Address Addr = buildPointerWithAlignment(BaseExpr, &BaseInfo);
+    QualType PtrTy = BaseExpr->getType()->getPointeeType();
+    SanitizerSet SkippedChecks;
+    bool IsBaseCXXThis = isWrappedCXXThis(BaseExpr);
+    if (IsBaseCXXThis)
+      SkippedChecks.set(SanitizerKind::Alignment, true);
+    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
+      SkippedChecks.set(SanitizerKind::Null, true);
+    buildTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
+                   /*Alignment=*/CharUnits::Zero(), SkippedChecks);
+    BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo);
+  } else
+    BaseLV = buildCheckedLValue(BaseExpr, TCK_MemberAccess);
+
+  NamedDecl *ND = E->getMemberDecl();
+  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
+    LValue LV = buildLValueForField(BaseLV, Field);
+    assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI");
+    if (getLangOpts().OpenMP) {
+      // If the member was explicitly marked as nontemporal, mark it as
+      // nontemporal. If the base lvalue is marked as nontemporal, mark access
+      // to children as nontemporal too.
+      assert(0 && "not implemented");
+    }
+    return LV;
+  }
+
+  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+    assert(0 && "not implemented");
+
+  llvm_unreachable("Unhandled member declaration!");
+}
+
+LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) {
+  RValue RV = buildCallExpr(E);
+
+  if (!RV.isScalar())
+    return makeAddrLValue(RV.getAggregateAddress(), E->getType(),
+                          AlignmentSource::Decl);
+
+  assert(E->getCallReturnType(getContext())->isReferenceType() &&
+         "Can't have a scalar return unless the return type is a "
+         "reference type!");
+
+  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+/// Evaluate an expression into a given memory location.
+void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location,
+                                       Qualifiers Quals, bool IsInit) {
+  // FIXME: This function should take an LValue as an argument.
+  switch (getEvaluationKind(E->getType())) {
+  case TEK_Complex:
+    assert(0 && "NYI");
+    return;
+
+  case TEK_Aggregate: {
+    buildAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+                                          AggValueSlot::IsDestructed_t(IsInit),
+                                          AggValueSlot::DoesNotNeedGCBarriers,
+                                          AggValueSlot::IsAliased_t(!IsInit),
+                                          AggValueSlot::MayOverlap));
+    return;
+  }
+
+  case TEK_Scalar: {
+    RValue RV = RValue::get(buildScalarExpr(E));
+    LValue LV = makeAddrLValue(Location, E->getType());
+    buildStoreThroughLValue(RV, LV);
+    return;
+  }
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+static Address createReferenceTemporary(CIRGenFunction &CGF,
+                                        const MaterializeTemporaryExpr *M,
+                                        const Expr *Inner,
+                                        Address *Alloca = nullptr) {
+  // TODO(cir): CGF.getTargetHooks();
+  switch (M->getStorageDuration()) {
+  case SD_FullExpression:
+  case SD_Automatic: {
+    // TODO(cir): probably not needed / too LLVM specific?
+    // If we have a constant temporary array or record try to promote it into a
+    // constant global under the same rules a normal constant would've been
+    // promoted. This is easier on the optimizer and generally emits fewer
+    // instructions.
+    QualType Ty = Inner->getType();
+    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
+        (Ty->isArrayType() || Ty->isRecordType()) &&
+        CGF.CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))
+      assert(0 && "NYI");
+    return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()),
+                             CGF.getCounterRefTmpAsString(), Alloca);
+  }
+  case SD_Thread:
+  case SD_Static:
+    assert(0 && "NYI");
+
+  case SD_Dynamic:
+    llvm_unreachable("temporary can't have dynamic storage duration");
+  }
+  llvm_unreachable("unknown storage duration");
+}
+
+static void pushTemporaryCleanup(CIRGenFunction &CGF,
+                                 const MaterializeTemporaryExpr *M,
+                                 const Expr *E, Address ReferenceTemporary) {
+  // Objective-C++ ARC:
+  //   If we are binding a reference to a temporary that has ownership, we
+  //   need to perform retain/release operations on the temporary.
+  //
+  // FIXME: This should be looking at E, not M.
+  if (auto Lifetime = M->getType().getObjCLifetime()) {
+    assert(0 && "NYI");
+  }
+
+  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
+  if (const RecordType *RT =
+          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+    // Get the destructor for the reference temporary.
+    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+    if (!ClassDecl->hasTrivialDestructor())
+      ReferenceTemporaryDtor = ClassDecl->getDestructor();
+  }
+
+  if (!ReferenceTemporaryDtor)
+    return;
+
+  // TODO(cir): Call the destructor for the temporary.
+  assert(0 && "NYI");
+}
+
+LValue CIRGenFunction::buildMaterializeTemporaryExpr(
+    const MaterializeTemporaryExpr *M) {
+  const Expr *E = M->getSubExpr();
+
+  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
+          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
+         "Reference should never be pseudo-strong!");
+
+  // FIXME: ideally this would use buildAnyExprToMem, however, we cannot do so
+  // as that will cause the lifetime adjustment to be lost for ARC
+  auto ownership = M->getType().getObjCLifetime();
+  if (ownership != Qualifiers::OCL_None &&
+      ownership != Qualifiers::OCL_ExplicitNone) {
+    assert(0 && "NYI");
+  }
+
+  SmallVector<const Expr *, 2> CommaLHSs;
+  SmallVector<SubobjectAdjustment, 2> Adjustments;
+  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+
+  for (const auto &Ignored : CommaLHSs)
+    buildIgnoredExpr(Ignored);
+
+  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E))
+    assert(0 && "NYI");
+
+  // Create and initialize the reference temporary.
+  Address Alloca = Address::invalid();
+  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
+
+  if (auto Var =
+          dyn_cast<mlir::cir::GlobalOp>(Object.getPointer().getDefiningOp())) {
+    // TODO(cir): add something akin to stripPointerCasts() to ptr above
+    assert(0 && "NYI");
+  } else {
+    switch (M->getStorageDuration()) {
+    case SD_Automatic:
+      assert(0 && "NYI");
+      break;
+
+    case SD_FullExpression: {
+      if (!ShouldEmitLifetimeMarkers)
+        break;
+      assert(0 && "NYI");
+      break;
+    }
+
+    default:
+      break;
+    }
+
+    buildAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
+  }
+  pushTemporaryCleanup(*this, M, E, Object);
+
+  // Perform derived-to-base casts and/or field accesses, to get from the
+  // temporary object we created (and, potentially, for which we extended
+  // the lifetime) to the subobject we're binding the reference to.
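+  // E.g. (illustration only): given `const Base &r = Derived();`, the
+  // temporary created above holds a Derived, and the adjustments below would
+  // step down to its Base subobject before binding the reference.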
+  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
+    (void)Adjustment;
+    assert(0 && "NYI");
+  }
+
+  return makeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
+}
+
+LValue CIRGenFunction::buildOpaqueValueLValue(const OpaqueValueExpr *e) {
+  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+  return getOrCreateOpaqueLValueMapping(e);
+}
+
+LValue
+CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
+  assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+  llvm::DenseMap<const OpaqueValueExpr *, LValue>::iterator it =
+      OpaqueLValues.find(e);
+
+  if (it != OpaqueLValues.end())
+    return it->second;
+
+  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
+  return buildLValue(e->getSourceExpr());
+}
+
+RValue
+CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
+  assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+  llvm::DenseMap<const OpaqueValueExpr *, RValue>::iterator it =
+      OpaqueRValues.find(e);
+
+  if (it != OpaqueRValues.end())
+    return it->second;
+
+  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
+  return buildAnyExpr(e->getSourceExpr());
+}
+
+namespace {
+// Handle the case where the condition is a constant evaluatable simple
+// integer, which means we don't have to separately handle the true/false
+// blocks.
+std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
+    CIRGenFunction &CGF, const AbstractConditionalOperator *E) {
+  const Expr *condExpr = E->getCond();
+  bool CondExprBool;
+  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
+    if (!CondExprBool)
+      std::swap(Live, Dead);
+
+    if (!CGF.ContainsLabel(Dead)) {
+      // If the true case is live, we need to track its region.
+      if (CondExprBool) {
+        assert(!UnimplementedFeature::incrementProfileCounter());
+      }
+      // If the live case is a throw expression, emit it and return an
+      // undefined lvalue because it can't be used.
+      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
+        llvm_unreachable("NYI");
+      }
+      return CGF.buildLValue(Live);
+    }
+  }
+  return std::nullopt;
+}
+} // namespace
+
+/// Emit the operand of a glvalue conditional operator. This is either a
+/// glvalue or a (possibly-parenthesized) throw-expression. If this is a
+/// throw, no LValue is returned and the current block has been terminated.
+static std::optional<LValue> buildLValueOrThrowExpression(CIRGenFunction &CGF,
+                                                          const Expr *Operand) {
+  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
+    llvm_unreachable("NYI");
+  }
+
+  return CGF.buildLValue(Operand);
+}
+
+// Create and generate the 3 blocks for a conditional operator. Leaves the
+// 'current block' in the continuation basic block.
+template <typename FuncTy>
+CIRGenFunction::ConditionalInfo
+CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E,
+                                       const FuncTy &BranchGenFunc) {
+  ConditionalInfo Info;
+  auto &CGF = *this;
+  ConditionalEvaluation eval(CGF);
+  auto loc = CGF.getLoc(E->getSourceRange());
+  auto &builder = CGF.getBuilder();
+  auto *trueExpr = E->getTrueExpr();
+  auto *falseExpr = E->getFalseExpr();
+
+  mlir::Value condV =
+      CGF.buildOpOnBoolExpr(E->getCond(), loc, trueExpr, falseExpr);
+  SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
+  mlir::Type yieldTy{};
+
+  auto patchVoidOrThrowSites = [&]() {
+    if (insertPoints.empty())
+      return;
+    // If both arms are void, so be it.
+    if (!yieldTy)
+      yieldTy = CGF.VoidTy;
+
+    // Insert required yields.
+    for (auto &toInsert : insertPoints) {
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      builder.restoreInsertionPoint(toInsert);
+
+      // Block does not return: build empty yield.
+      if (yieldTy.isa<mlir::cir::VoidType>()) {
+        builder.create<mlir::cir::YieldOp>(loc);
+      } else { // Block returns: set null yield value.
+        mlir::Value op0 = builder.getNullValue(yieldTy, loc);
+        builder.create<mlir::cir::YieldOp>(loc, op0);
+      }
+    }
+  };
+
+  Info.Result =
+      builder
+          .create<mlir::cir::TernaryOp>(
+              loc, condV, /*trueBuilder=*/
+              [&](mlir::OpBuilder &b, mlir::Location loc) {
+                CIRGenFunction::LexicalScope lexScope{*this, loc,
+                                                      b.getInsertionBlock()};
+                CGF.currLexScope->setAsTernary();
+
+                assert(!UnimplementedFeature::incrementProfileCounter());
+                eval.begin(CGF);
+                Info.LHS = BranchGenFunc(CGF, trueExpr);
+                auto lhs = Info.LHS->getPointer();
+                eval.end(CGF);
+
+                if (lhs) {
+                  yieldTy = lhs.getType();
+                  b.create<mlir::cir::YieldOp>(loc, lhs);
+                  return;
+                }
+                // If LHS or RHS is a throw or void expression we need to
+                // patch the arms so that the yield types match.
+                insertPoints.push_back(b.saveInsertionPoint());
+              },
+              /*falseBuilder=*/
+              [&](mlir::OpBuilder &b, mlir::Location loc) {
+                CIRGenFunction::LexicalScope lexScope{*this, loc,
+                                                      b.getInsertionBlock()};
+                CGF.currLexScope->setAsTernary();
+
+                assert(!UnimplementedFeature::incrementProfileCounter());
+                eval.begin(CGF);
+                Info.RHS = BranchGenFunc(CGF, falseExpr);
+                auto rhs = Info.RHS->getPointer();
+                eval.end(CGF);
+
+                if (rhs) {
+                  yieldTy = rhs.getType();
+                  b.create<mlir::cir::YieldOp>(loc, rhs);
+                } else {
+                  // If LHS or RHS is a throw or void expression we need to
+                  // patch the arms so that the yield types match.
+                  insertPoints.push_back(b.saveInsertionPoint());
+                }
+
+                patchVoidOrThrowSites();
+              })
+          .getResult();
+  return Info;
+}
+
+LValue CIRGenFunction::buildConditionalOperatorLValue(
+    const AbstractConditionalOperator *expr) {
+  if (!expr->isGLValue()) {
+    llvm_unreachable("NYI");
+  }
+
+  OpaqueValueMapping binding(*this, expr);
+  if (std::optional<LValue> Res =
+          HandleConditionalOperatorLValueSimpleCase(*this, expr))
+    return *Res;
+
+  ConditionalInfo Info =
+      buildConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) {
+        return buildLValueOrThrowExpression(CGF, E);
+      });
+
+  if ((Info.LHS && !Info.LHS->isSimple()) ||
+      (Info.RHS && !Info.RHS->isSimple()))
+    llvm_unreachable("unsupported conditional operator");
+
+  if (Info.LHS && Info.RHS) {
+    Address lhsAddr = Info.LHS->getAddress();
+    Address rhsAddr = Info.RHS->getAddress();
+    Address result(Info.Result, lhsAddr.getElementType(),
+                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+    AlignmentSource alignSource =
+        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
+                 Info.RHS->getBaseInfo().getAlignmentSource());
+    assert(!UnimplementedFeature::tbaa());
+    return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
+  } else {
+    llvm_unreachable("NYI");
+  }
+}
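+
+// For reference, a glvalue conditional such as `cond ? a : b` is expected to
+// come out roughly as below in CIR, with both arms yielding an address
+// (illustrative sketch only; types and exact syntax approximated):
+//
+//   %r = cir.ternary(%cond, true {
+//     cir.yield %a_addr : !cir.ptr<...>
+//   }, false {
+//     cir.yield %b_addr : !cir.ptr<...>
+//   }) : !cir.ptr<...>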
+
+/// Emit code to compute a designator that specifies the location
+/// of the expression.
+/// FIXME: document this function better.
+LValue CIRGenFunction::buildLValue(const Expr *E) {
+  // FIXME: ApplyDebugLocation DL(*this, E);
+  switch (E->getStmtClass()) {
+  default: {
+    emitError(getLoc(E->getExprLoc()), "l-value not implemented for '")
+        << E->getStmtClassName() << "'";
+    assert(0 && "not implemented");
+  }
+  case Expr::ConditionalOperatorClass:
+    return buildConditionalOperatorLValue(cast<ConditionalOperator>(E));
+  case Expr::ArraySubscriptExprClass:
+    return buildArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+  case Expr::BinaryOperatorClass:
+    return buildBinaryOperatorLValue(cast<BinaryOperator>(E));
+  case Expr::CompoundAssignOperatorClass: {
+    QualType Ty = E->getType();
+    if (const AtomicType *AT = Ty->getAs<AtomicType>())
+      assert(0 && "not yet implemented");
+    assert(!Ty->isAnyComplexType() && "complex types not implemented");
+    return buildCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+  }
+  case Expr::CallExprClass:
+  case Expr::CXXMemberCallExprClass:
+  case Expr::CXXOperatorCallExprClass:
+  case Expr::UserDefinedLiteralClass:
+    return buildCallExprLValue(cast<CallExpr>(E));
+  case Expr::ExprWithCleanupsClass: {
+    const auto *cleanups = cast<ExprWithCleanups>(E);
+    LValue LV;
+
+    auto scopeLoc = getLoc(E->getSourceRange());
+    [[maybe_unused]] auto scope = builder.create<mlir::cir::ScopeOp>(
+        scopeLoc, /*scopeBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          CIRGenFunction::LexicalScope lexScope{*this, loc,
+                                                builder.getInsertionBlock()};
+
+          LV = buildLValue(cleanups->getSubExpr());
+          if (LV.isSimple()) {
+            // Defend against branches out of gnu statement expressions
+            // surrounded by cleanups.
+            Address Addr = LV.getAddress();
+            auto V = Addr.getPointer();
+            LV = LValue::makeAddr(Addr.withPointer(V, NotKnownNonNull),
+                                  LV.getType(), getContext(),
+                                  LV.getBaseInfo() /*TODO(cir):TBAA*/);
+          }
+        });
+
+    // FIXME: Is it possible to create an ExprWithCleanups that produces a
+    // bitfield lvalue or some other non-simple lvalue?
+    return LV;
+  }
+  case Expr::ParenExprClass:
+    return buildLValue(cast<ParenExpr>(E)->getSubExpr());
+  case Expr::DeclRefExprClass:
+    return buildDeclRefLValue(cast<DeclRefExpr>(E));
+  case Expr::UnaryOperatorClass:
+    return buildUnaryOpLValue(cast<UnaryOperator>(E));
+  case Expr::StringLiteralClass:
+    return buildStringLiteralLValue(cast<StringLiteral>(E));
+  case Expr::MemberExprClass:
+    return buildMemberExpr(cast<MemberExpr>(E));
+  case Expr::CompoundLiteralExprClass:
+    return buildCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+  case Expr::PredefinedExprClass:
+    return buildPredefinedLValue(cast<PredefinedExpr>(E));
+  case Expr::CXXFunctionalCastExprClass:
+  case Expr::CXXReinterpretCastExprClass:
+  case Expr::CXXConstCastExprClass:
+  case Expr::CXXAddrspaceCastExprClass:
+  case Expr::ObjCBridgedCastExprClass:
+    emitError(getLoc(E->getExprLoc()), "l-value not implemented for '")
+        << E->getStmtClassName() << "'";
+    assert(0 && "Use buildCastLValue below, remove me when adding testcase");
+  case Expr::CStyleCastExprClass:
+  case Expr::CXXStaticCastExprClass:
+  case Expr::CXXDynamicCastExprClass:
+  case Expr::ImplicitCastExprClass:
+    return buildCastLValue(cast<CastExpr>(E));
+  case Expr::OpaqueValueExprClass:
+    return buildOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+
+  case Expr::MaterializeTemporaryExprClass:
+    return buildMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
+
+  case Expr::ObjCPropertyRefExprClass:
+    llvm_unreachable("cannot emit a property reference directly");
+  case Expr::StmtExprClass:
+    return buildStmtExprLValue(cast<StmtExpr>(E));
+  }
+
+  return LValue::makeAddr(Address::invalid(), E->getType());
+}
+
+/// Given the address of a temporary variable, produce an r-value of its type.
+RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type,
+                                           clang::SourceLocation loc) {
+  LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl);
+  switch (getEvaluationKind(type)) {
+  case TEK_Complex:
+    llvm_unreachable("NYI");
+  case TEK_Aggregate:
+    llvm_unreachable("NYI");
+  case TEK_Scalar:
+    return RValue::get(buildLoadOfScalar(lvalue, loc));
+  }
+  llvm_unreachable("NYI");
+}
+
+/// An LValue is a candidate for having its loads and stores be made atomic if
+/// we are operating under /volatile:ms *and* the LValue itself is volatile and
+/// performing such an operation can be performed without a libcall.
+bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
+  if (!CGM.getLangOpts().MSVolatile)
+    return false;
+
+  llvm_unreachable("NYI");
+}
+
+/// Emit an `if` on a boolean condition, filling `then` and `else` into
+/// the appropriate regions.
+mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond,
+                                                      const Stmt *thenS,
+                                                      const Stmt *elseS) {
+  auto getStmtLoc = [this](const Stmt &s) {
+    return mlir::FusedLoc::get(builder.getContext(),
+                               {getLoc(s.getSourceRange().getBegin()),
+                                getLoc(s.getSourceRange().getEnd())});
+  };
+
+  auto thenLoc = getStmtLoc(*thenS);
+  std::optional<mlir::Location> elseLoc;
+  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
+
+  if (elseS) {
+    elseLoc = getStmtLoc(*elseS);
+    ifLocs.push_back(*elseLoc);
+  }
+
+  // Attempt to be as accurate as possible with the IfOp location: generate
+  // one fused location that has either 2 or 4 total locations, depending
+  // on the else's availability.
+  auto loc = mlir::FusedLoc::get(builder.getContext(), ifLocs);
+
+  // Emit the code with the fully general case.
+  mlir::Value condV = buildOpOnBoolExpr(cond, loc, thenS, elseS);
+  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
+
+  builder.create<mlir::cir::IfOp>(
+      loc, condV, elseS,
+      /*thenBuilder=*/
+      [&](mlir::OpBuilder &, mlir::Location) {
+        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
+        resThen = buildStmt(thenS, /*useCurrentScope=*/true);
+      },
+      /*elseBuilder=*/
+      [&](mlir::OpBuilder &, mlir::Location) {
+        assert(elseLoc && "Invalid location for elseS.");
+        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
+        resElse = buildStmt(elseS, /*useCurrentScope=*/true);
+      });
+
+  return mlir::LogicalResult::success(resThen.succeeded() &&
+                                      resElse.succeeded());
+}
+
+/// TODO(cir): PGO data
+/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
+mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond,
+                                              mlir::Location loc,
+                                              const Stmt *thenS,
+                                              const Stmt *elseS) {
+  // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond);
+  // TODO(CIR): __builtin_unpredictable and profile counts?
+  cond = cond->IgnoreParens();
+
+  // if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(cond)) {
+  //   llvm_unreachable("binaryoperator ifstmt NYI");
+  // }
+
+  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(cond)) {
+    // In LLVM the condition is reversed here for efficient codegen.
+    // This should instead be done in CIR prior to LLVM lowering; doing it
+    // here would make CIR-based diagnostics misleading.
+    // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
+    // if (CondUOp->getOpcode() == UO_LNot) {
+    //   buildOpOnBoolExpr(CondUOp->getSubExpr(), loc, elseS, thenS);
+    // }
+    assert(!UnimplementedFeature::shouldReverseUnaryCondOnBoolExpr());
+  }
+
+  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(cond)) {
+    auto *trueExpr = CondOp->getTrueExpr();
+    auto *falseExpr = CondOp->getFalseExpr();
+    mlir::Value condV =
+        buildOpOnBoolExpr(CondOp->getCond(), loc, trueExpr, falseExpr);
+
+    auto ternaryOpRes =
+        builder
+            .create<mlir::cir::TernaryOp>(
+                loc, condV, /*thenBuilder=*/
+                [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
+                  auto lhs = buildScalarExpr(trueExpr);
+                  b.create<mlir::cir::YieldOp>(loc, lhs);
+                },
+                /*elseBuilder=*/
+                [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
+                  auto rhs = buildScalarExpr(falseExpr);
+                  b.create<mlir::cir::YieldOp>(loc, rhs);
+                })
+            .getResult();
+
+    return buildScalarConversion(ternaryOpRes, CondOp->getType(),
+                                 getContext().BoolTy, CondOp->getExprLoc());
+  }
+
+  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(cond)) {
+    llvm_unreachable("NYI");
+  }
+
+  // If the branch has a condition wrapped by __builtin_unpredictable,
+  // create metadata that specifies that the branch is unpredictable.
+  // Don't bother if not optimizing because that metadata would not be used.
+  auto *Call = dyn_cast<CallExpr>(cond->IgnoreImpCasts());
+  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
+    assert(!UnimplementedFeature::insertBuiltinUnpredictable());
+  }
+
+  // Emit the code with the fully general case.
+  return evaluateExprAsBool(cond);
+}
+
+mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty,
+                                        mlir::Location loc, CharUnits alignment,
+                                        bool insertIntoFnEntryBlock,
+                                        mlir::Value arraySize) {
+  mlir::Block *entryBlock = insertIntoFnEntryBlock
+                                ? getCurFunctionEntryBlock()
+                                : currLexScope->getEntryBlock();
+  return buildAlloca(name, ty, loc, alignment,
+                     builder.getBestAllocaInsertPoint(entryBlock), arraySize);
+}
+
+mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty,
+                                        mlir::Location loc, CharUnits alignment,
+                                        mlir::OpBuilder::InsertPoint ip,
+                                        mlir::Value arraySize) {
+  auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), ty);
+  auto alignIntAttr = CGM.getSize(alignment);
+
+  mlir::Value addr;
+  {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.restoreInsertionPoint(ip);
+    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
+                                /*var type*/ ty, name, alignIntAttr, arraySize);
+    if (currVarDecl) {
+      auto alloca = cast<mlir::cir::AllocaOp>(addr.getDefiningOp());
+      alloca.setAstAttr(ASTVarDeclAttr::get(builder.getContext(), currVarDecl));
+    }
+  }
+  return addr;
+}
+
+mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
+                                        mlir::Location loc, CharUnits alignment,
+                                        bool insertIntoFnEntryBlock,
+                                        mlir::Value arraySize) {
+  return buildAlloca(name, getCIRType(ty), loc, alignment,
+                     insertIntoFnEntryBlock, arraySize);
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
+                                              SourceLocation Loc) {
+  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                           lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
+                           lvalue.isNontemporal());
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
+                                              mlir::Location Loc) {
+  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                           lvalue.getType(), Loc, lvalue.getBaseInfo(),
+                           lvalue.isNontemporal());
+}
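+
+// For reference, the scalar load path below is expected to boil down to a
+// plain cir.load; e.g. for an `int` lvalue, roughly (illustrative sketch;
+// type and operand syntax approximated):
+//
+//   %v = cir.load %addr : cir.ptr <!s32i>, !s32i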
+
+mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) {
+  if (!Ty->isBooleanType() && hasBooleanRepresentation(Ty)) {
+    llvm_unreachable("NYI");
+  }
+
+  return Value;
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
+                                              QualType Ty, SourceLocation Loc,
+                                              LValueBaseInfo BaseInfo,
+                                              bool isNontemporal) {
+  return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
+                           isNontemporal);
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
+                                              QualType Ty, mlir::Location Loc,
+                                              LValueBaseInfo BaseInfo,
+                                              bool isNontemporal) {
+  // Atomic operations have to be done on integral types
+  LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo);
+  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
+    llvm_unreachable("NYI");
+  }
+
+  if (const auto *ClangVecTy = Ty->getAs<clang::VectorType>()) {
+    if (!CGM.getCodeGenOpts().PreserveVec3Type &&
+        ClangVecTy->getNumElements() == 3)
+      llvm_unreachable("NYI: Special treatment of 3-element vector load");
+  }
+
+  mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
+      Loc, Addr.getElementType(), Addr.getPointer(), /* deref */ false,
+      Volatile, ::mlir::cir::MemOrderAttr{});
+
+  if (isNontemporal) {
+    llvm_unreachable("NYI");
+  }
+
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+
+  return buildFromMemory(Load, Ty);
+}
+
+// Note: this function also emits constructor calls, to support an MSVC
+// extension allowing explicit constructor function calls.
+RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+                                              ReturnValueSlot ReturnValue) {
+
+  const Expr *callee = CE->getCallee()->IgnoreParens();
+
+  if (isa<BinaryOperator>(callee))
+    llvm_unreachable("NYI");
+
+  const auto *ME = cast<MemberExpr>(callee);
+  const auto *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+  if (MD->isStatic()) {
+    llvm_unreachable("NYI");
+  }
+
+  bool HasQualifier = ME->hasQualifier();
+  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+  bool IsArrow = ME->isArrow();
+  const Expr *Base = ME->getBase();
+
+  return buildCXXMemberOrOperatorMemberCallExpr(
+      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
+}
+
+RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) {
+  // Emit the expression as an lvalue.
+  LValue LV = buildLValue(E);
+  assert(LV.isSimple());
+  auto Value = LV.getPointer();
+
+  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
+    assert(0 && "NYI");
+  }
+
+  return RValue::get(Value);
+}
+
+Address CIRGenFunction::buildLoadOfReference(LValue RefLVal, mlir::Location Loc,
+                                             LValueBaseInfo *PointeeBaseInfo) {
+  assert(!RefLVal.isVolatile() && "NYI");
+  mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
+      Loc, RefLVal.getAddress().getElementType(),
+      RefLVal.getAddress().getPointer());
+
+  // TODO(cir): DecorateInstructionWithTBAA relevant for us?
+  assert(!UnimplementedFeature::tbaa());
+
+  QualType PointeeType = RefLVal.getType()->getPointeeType();
+  CharUnits Align = CGM.getNaturalTypeAlignment(PointeeType, PointeeBaseInfo,
+                                                /* forPointeeType= */ true);
+  return Address(Load, getTypes().convertTypeForMem(PointeeType), Align);
+}
+
+LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal,
+                                                  mlir::Location Loc) {
+  LValueBaseInfo PointeeBaseInfo;
+  Address PointeeAddr = buildLoadOfReference(RefLVal, Loc, &PointeeBaseInfo);
+  return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
+                        PointeeBaseInfo);
+}
+
+void CIRGenFunction::buildUnreachable(SourceLocation Loc) {
+  if (SanOpts.has(SanitizerKind::Unreachable))
+    llvm_unreachable("NYI");
+  builder.create<mlir::cir::UnreachableOp>(getLoc(Loc));
+}
+
+//===----------------------------------------------------------------------===//
+// CIR builder helpers
+//===----------------------------------------------------------------------===//
+
+Address CIRGenFunction::CreateMemTemp(QualType Ty, mlir::Location Loc,
+                                      const Twine &Name, Address *Alloca) {
+  // FIXME: Should we prefer the preferred type alignment here?
+  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Loc, Name,
+                       Alloca);
+}
+
+Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+                                      mlir::Location Loc, const Twine &Name,
+                                      Address *Alloca) {
+  Address Result =
+      CreateTempAlloca(getTypes().convertTypeForMem(Ty), Align, Loc, Name,
+                       /*ArraySize=*/nullptr, Alloca);
+  if (Ty->isConstantMatrixType()) {
+    assert(0 && "NYI");
+  }
+  return Result;
+}
+
+/// This creates an alloca and inserts it into the entry block of the
+/// current region.
+Address CIRGenFunction::CreateTempAllocaWithoutCast(
+    mlir::Type Ty, CharUnits Align, mlir::Location Loc, const Twine &Name,
+    mlir::Value ArraySize, mlir::OpBuilder::InsertPoint ip) {
+  auto Alloca = ip.isSet() ? CreateTempAlloca(Ty, Loc, Name, ip, ArraySize)
+                           : CreateTempAlloca(Ty, Loc, Name, ArraySize);
+  Alloca.setAlignmentAttr(CGM.getSize(Align));
+  return Address(Alloca, Ty, Align);
+}
+
+/// This creates an alloca and inserts it into the entry block. The alloca is
+/// cast to the default address space if necessary.
+Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align,
+                                         mlir::Location Loc, const Twine &Name,
+                                         mlir::Value ArraySize,
+                                         Address *AllocaAddr,
+                                         mlir::OpBuilder::InsertPoint ip) {
+  auto Alloca =
+      CreateTempAllocaWithoutCast(Ty, Align, Loc, Name, ArraySize, ip);
+  if (AllocaAddr)
+    *AllocaAddr = Alloca;
+  mlir::Value V = Alloca.getPointer();
+  // Alloca always returns a pointer in alloca address space, which may
+  // be different from the type defined by the language. For example,
+  // in C++ the auto variables are in the default address space. Therefore
+  // cast alloca to the default address space when necessary.
+  if (getASTAllocaAddressSpace() != LangAS::Default) {
+    llvm_unreachable("Requires address space cast which is NYI");
+  }
+  return Address(V, Ty, Align);
+}
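+
+// For reference, a temporary created through this path is expected to emit
+// roughly the following CIR (illustrative sketch; the variable name, type and
+// attribute spelling are approximated):
+//
+//   %0 = cir.alloca !s32i, cir.ptr <!s32i>, ["tmp"] {alignment = 4 : i64}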
+
+/// This creates an alloca and inserts it into the entry block if \p ArraySize
+/// is nullptr, otherwise inserts it at the current insertion point of the
+/// builder.
+mlir::cir::AllocaOp
+CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Location Loc,
+                                 const Twine &Name, mlir::Value ArraySize,
+                                 bool insertIntoFnEntryBlock) {
+  return cast<mlir::cir::AllocaOp>(buildAlloca(Name.str(), Ty, Loc,
+                                               CharUnits(),
+                                               insertIntoFnEntryBlock,
+                                               ArraySize)
+                                       .getDefiningOp());
+}
+
+/// This creates an alloca and inserts it into the provided insertion point.
+mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(
+    mlir::Type Ty, mlir::Location Loc, const Twine &Name,
+    mlir::OpBuilder::InsertPoint ip, mlir::Value ArraySize) {
+  assert(ip.isSet() && "Insertion point is not set");
+  return cast<mlir::cir::AllocaOp>(
+      buildAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize)
+          .getDefiningOp());
+}
+
+/// Just like CreateTempAlloca above, but place the alloca into the function
+/// entry basic block instead.
+mlir::cir::AllocaOp CIRGenFunction::CreateTempAllocaInFnEntryBlock(
+    mlir::Type Ty, mlir::Location Loc, const Twine &Name,
+    mlir::Value ArraySize) {
+  return CreateTempAlloca(Ty, Loc, Name, ArraySize,
+                          /*insertIntoFnEntryBlock=*/true);
+}
+
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+  assert(type.isCanonical());
+  assert(!type->isReferenceType());
+
+  // Must be const-qualified but non-volatile.
+  Qualifiers qs = type.getLocalQualifiers();
+  if (!qs.hasConst() || qs.hasVolatile())
+    return false;
+
+  // Otherwise, all object types satisfy this except C++ classes with
+  // mutable subobjects or non-trivial copy/destroy behavior.
+  if (const auto *RT = dyn_cast<RecordType>(type))
+    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+      if (RD->hasMutableFields() || !RD->isTrivial())
+        return false;
+
+  return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+  CEK_None,
+  CEK_AsReferenceOnly,
+  CEK_AsValueOrReference,
+  CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+  type = type.getCanonicalType();
+  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
+    if (isConstantEmittableObjectType(ref->getPointeeType()))
+      return CEK_AsValueOrReference;
+    return CEK_AsReferenceOnly;
+  }
+  if (isConstantEmittableObjectType(type))
+    return CEK_AsValueOnly;
+  return CEK_None;
+}
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is just an optimization, but it avoids us needing
+/// to emit global copies of variables if they're named without triggering
+/// a formal use in a context where we can't emit a direct reference to them,
+/// for instance if a block or lambda or a member of a local class uses a
+/// const int variable or constexpr variable from an enclosing function.
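+///
+/// For example (illustration only), in
+/// \code
+///   int f() {
+///     const int n = 4;
+///     return [] { return n; }();  // n is named without an odr-use
+///   }
+/// \endcode
+/// the reference to `n` inside the lambda can be emitted as the constant 4
+/// rather than forcing a global copy of `n` to exist.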
+CIRGenFunction::ConstantEmission
+CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+  ValueDecl *value = refExpr->getDecl();
+
+  // The value needs to be an enum constant or a constant variable.
+  ConstantEmissionKind CEK;
+  if (isa<ParmVarDecl>(value)) {
+    CEK = CEK_None;
+  } else if (auto *var = dyn_cast<VarDecl>(value)) {
+    CEK = checkVarTypeForConstantEmission(var->getType());
+  } else if (isa<EnumConstantDecl>(value)) {
+    CEK = CEK_AsValueOnly;
+  } else {
+    CEK = CEK_None;
+  }
+  if (CEK == CEK_None)
+    return ConstantEmission();
+
+  Expr::EvalResult result;
+  bool resultIsReference;
+  QualType resultType;
+
+  // It's best to evaluate all the way as an r-value if that's permitted.
+  if (CEK != CEK_AsReferenceOnly &&
+      refExpr->EvaluateAsRValue(result, getContext())) {
+    resultIsReference = false;
+    resultType = refExpr->getType();
+
+    // Otherwise, try to evaluate as an l-value.
+  } else if (CEK != CEK_AsValueOnly &&
+             refExpr->EvaluateAsLValue(result, getContext())) {
+    resultIsReference = true;
+    resultType = value->getType();
+
+    // Failure.
+  } else {
+    return ConstantEmission();
+  }
+
+  // In any case, if the initializer has side-effects, abandon ship.
+  if (result.HasSideEffects)
+    return ConstantEmission();
+
+  // In CUDA/HIP device compilation, a lambda may capture a reference variable
+  // referencing a global host variable by copy. In this case the lambda should
+  // make a copy of the value of the global host variable. The DRE of the
+  // captured reference variable cannot be emitted as load from the host
+  // global variable as compile time constant, since the host variable is not
+  // accessible on device. The DRE of the captured reference variable has to be
+  // loaded from captures.
+  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
+      refExpr->refersToEnclosingVariableOrCapture()) {
+    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
+    if (MD && MD->getParent()->isLambda() &&
+        MD->getOverloadedOperator() == OO_Call) {
+      const APValue::LValueBase &base = result.Val.getLValueBase();
+      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
+        if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+          if (!VD->hasAttr<CUDADeviceAttr>()) {
+            return ConstantEmission();
+          }
+        }
+      }
+    }
+  }
+
+  // Emit as a constant.
+  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
+  // somewhat heavy refactoring...)
+  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
+                                               result.Val, resultType);
+  mlir::TypedAttr cstToEmit = C.dyn_cast_or_null<mlir::TypedAttr>();
+  assert(cstToEmit && "expect a typed attribute");
+
+  // Make sure we emit a debug reference to the global variable.
+  // This should probably fire even for
+  if (isa<VarDecl>(value)) {
+    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+      buildDeclRefExprDbgValue(refExpr, result.Val);
+  } else {
+    assert(isa<EnumConstantDecl>(value));
+    buildDeclRefExprDbgValue(refExpr, result.Val);
+  }
+
+  // If we emitted a reference constant, we need to dereference that.
+  if (resultIsReference)
+    return ConstantEmission::forReference(cstToEmit);
+
+  return ConstantEmission::forValue(cstToEmit);
+}
+
+CIRGenFunction::ConstantEmission
+CIRGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
+  llvm_unreachable("NYI");
+}
+
+mlir::Value CIRGenFunction::buildScalarConstant(
+    const CIRGenFunction::ConstantEmission &Constant, Expr *E) {
+  assert(Constant && "not a constant");
+  if (Constant.isReference())
+    return buildLoadOfLValue(Constant.getReferenceLValue(*this, E),
+                             E->getExprLoc())
+        .getScalarVal();
+  return builder.getConstant(getLoc(E->getSourceRange()), Constant.getValue());
+}
+
+LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) {
+  const auto *SL = E->getFunctionName();
+  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
+  auto Fn = dyn_cast<mlir::cir::FuncOp>(CurFn);
+  assert(Fn && "other callables NYI");
+  StringRef FnName = Fn.getName();
+  if (FnName.starts_with("\01"))
+    FnName = FnName.substr(1);
+  StringRef NameItems[] = {PredefinedExpr::getIdentKindName(E->getIdentKind()),
+                           FnName};
+  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
+  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
+    llvm_unreachable("NYI");
+  }
+
+  return buildStringLiteralLValue(SL);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp
new file mode 100644
index 000000000000..619ef026f410
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp
@@ -0,0 +1,1570 @@
+//===--- CIRGenExprAgg.cpp - Emit CIR Code from Aggregate Expressions -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as CIR code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCall.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "CIRGenTypes.h"
+#include "CIRGenValue.h"
+#include "UnimplementedFeatureGuarding.h"
+#include "mlir/IR/Attributes.h"
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace cir;
+using namespace clang;
+
+namespace {
+
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *E) {
+  // Make sure we look through parens.
+  E = E->IgnoreParens();
+
+  // Check for a direct reference to a __block variable.
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+    return (var && var->hasAttr<BlocksAttr>());
+  }
+
+  // More complicated stuff.
+
+  // Binary operators.
+  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
+    // For an assignment or pointer-to-member operation, just care
+    // about the LHS.
+    if (op->isAssignmentOp() || op->isPtrMemOp())
+      return isBlockVarRef(op->getLHS());
+
+    // For a comma, just care about the RHS.
+    if (op->getOpcode() == BO_Comma)
+      return isBlockVarRef(op->getRHS());
+
+    // FIXME: pointer arithmetic?
+    return false;
+
+    // Check both sides of a conditional operator.
+  } else if (const AbstractConditionalOperator *op =
+                 dyn_cast<AbstractConditionalOperator>(E)) {
+    return isBlockVarRef(op->getTrueExpr()) ||
+           isBlockVarRef(op->getFalseExpr());
+
+    // OVEs are required to support BinaryConditionalOperators.
+  } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
+    if (const Expr *src = op->getSourceExpr())
+      return isBlockVarRef(src);
+
+    // Casts are necessary to get things like (*(int*)&var) = foo().
+    // We don't really care about the kind of cast here, except
+    // we don't want to look through l2r casts, because it's okay
+    // to get the *value* in a __block variable.
+  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
+    if (cast->getCastKind() == CK_LValueToRValue)
+      return false;
+    return isBlockVarRef(cast->getSubExpr());
+
+    // Handle unary operators. Again, just aggressively look through
+    // it, ignoring the operation.
+  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
+    return isBlockVarRef(uop->getSubExpr());
+
+    // Look into the base of a field access.
+  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+    return isBlockVarRef(mem->getBase());
+
+    // Look into the base of a subscript.
+  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
+    return isBlockVarRef(sub->getBase());
+  }
+
+  return false;
+}
+
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+  CIRGenFunction &CGF;
+  AggValueSlot Dest;
+  bool IsResultUnused;
+
+  // Calls `Fn` with a valid return value slot, potentially creating a
+  // temporary to do so. If a temporary is created, an appropriate copy into
+  // `Dest` will be emitted, as will lifetime markers.
+  //
+  // The given function should take a ReturnValueSlot, and return an RValue
+  // that points to said slot.
+  void withReturnValueSlot(const Expr *E,
+                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);
+
+  AggValueSlot EnsureSlot(mlir::Location loc, QualType T) {
+    if (!Dest.isIgnored())
+      return Dest;
+    return CGF.CreateAggTemp(T, loc, "agg.tmp.ensured");
+  }
+
+  void EnsureDest(mlir::Location loc, QualType T) {
+    if (!Dest.isIgnored())
+      return;
+    Dest = CGF.CreateAggTemp(T, loc, "agg.tmp.ensured");
+  }
+
+public:
+  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
+      : CGF{cgf}, Dest(Dest), IsResultUnused(IsResultUnused) {}
+
+  //===--------------------------------------------------------------------===//
+  // Utilities
+  //===--------------------------------------------------------------------===//
+
+  /// Given an expression with aggregate type that represents a value lvalue,
+  /// this method emits the address of the lvalue, then loads the result into
+  /// DestPtr.
+  void buildAggLoadOfLValue(const Expr *E);
+
+  enum ExprValueKind { EVK_RValue, EVK_NonRValue };
+
+  /// Perform the final copy to DestPtr, if desired.
+  void buildFinalDestCopy(QualType type, RValue src);
+
+  /// Perform the final copy to DestPtr, if desired. SrcIsRValue is true if
+  /// source comes from an RValue.
+  void buildFinalDestCopy(QualType type, const LValue &src,
+                          ExprValueKind SrcValueKind = EVK_NonRValue);
+  void buildCopy(QualType type, const AggValueSlot &dest,
+                 const AggValueSlot &src);
+
+  void buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType,
+                      QualType ArrayQTy, Expr *ExprToVisit,
+                      ArrayRef<Expr *> Args, Expr *ArrayFiller);
+
+  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
+      llvm_unreachable("garbage collection is NYI");
+    return AggValueSlot::DoesNotNeedGCBarriers;
+  }
+
+  bool TypeRequiresGCollection(QualType T);
+
+  //===--------------------------------------------------------------------===//
+  // Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  void Visit(Expr *E) {
+    if (CGF.getDebugInfo()) {
+      llvm_unreachable("NYI");
+    }
+    StmtVisitor<AggExprEmitter>::Visit(E);
+  }
+
+  void VisitStmt(Stmt *S) {
+    llvm::errs() << "Missing visitor for AggExprEmitter Stmt: "
+                 << S->getStmtClassName() << "\n";
+    llvm_unreachable("NYI");
+  }
+  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+    llvm_unreachable("NYI");
+  }
+  void VisitCoawaitExpr(CoawaitExpr *E) {
+    CGF.buildCoawaitExpr(*E, Dest, IsResultUnused);
+  }
+  void VisitCoyieldExpr(CoyieldExpr *E) { llvm_unreachable("NYI"); }
+  void VisitUnaryCoawait(UnaryOperator *E) { llvm_unreachable("NYI"); }
+  void VisitUnaryExtension(UnaryOperator *E) { llvm_unreachable("NYI"); }
+  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); }
+
+  // l-values
+  void VisitDeclRefExpr(DeclRefExpr *E) { buildAggLoadOfLValue(E); }
+  void VisitMemberExpr(MemberExpr *E) { buildAggLoadOfLValue(E); }
+  void VisitUnaryDeref(UnaryOperator *E) { buildAggLoadOfLValue(E); }
+  void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); }
+  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+    buildAggLoadOfLValue(E);
+  }
+  void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); }
+
+  // Operators.
+  void VisitCastExpr(CastExpr *E);
+  void VisitCallExpr(const CallExpr *E);
+
+  void VisitStmtExpr(const StmtExpr *E) {
+    assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI");
+    CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest);
+  }
+
+  void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); }
+  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitBinAssign(const BinaryOperator *E) {
+    // For an assignment to work, the value on the right has
+    // to be compatible with the value on the left.
+    assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+                                                   E->getRHS()->getType()) &&
+           "Invalid assignment");
+
+    if (isBlockVarRef(E->getLHS()) &&
+        E->getRHS()->HasSideEffects(CGF.getContext())) {
+      llvm_unreachable("NYI");
+    }
+
+    LValue lhs = CGF.buildLValue(E->getLHS());
+
+    // If we have an atomic type, evaluate into the destination and then
+    // do an atomic copy.
+    if (lhs.getType()->isAtomicType() ||
+        CGF.LValueIsSuitableForInlineAtomic(lhs)) {
+      assert(!UnimplementedFeature::atomicTypes());
+      return;
+    }
+
+    // Codegen the RHS so that it stores directly into the LHS.
+    AggValueSlot lhsSlot = AggValueSlot::forLValue(
+        lhs, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+        AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+
+    // A non-volatile aggregate destination might have a volatile member.
+    if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
+      assert(!UnimplementedFeature::atomicTypes());
+
+    CGF.buildAggExpr(E->getRHS(), lhsSlot);
+
+    // Copy into the destination if the assignment isn't ignored.
+    buildFinalDestCopy(E->getType(), lhs);
+
+    if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
+        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
+                      E->getType());
+  }
+
+  void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); }
+  void VisitBinCmp(const BinaryOperator *E);
+  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
+    llvm_unreachable("NYI");
+  }
+
+  void VisitObjCMessageExpr(ObjCMessageExpr *E) { llvm_unreachable("NYI"); }
+  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *E) { llvm_unreachable("NYI"); }
+
+  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); }
+  void VisitInitListExpr(InitListExpr *E);
+  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit,
+                                       ArrayRef<Expr *> Args,
+                                       FieldDecl *InitializedFieldInUnion,
+                                       Expr *ArrayFiller);
+  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
+                              llvm::Value *outerBegin = nullptr) {
+    llvm_unreachable("NYI");
+  }
+  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitNoInitExpr(NoInitExpr *E) { llvm_unreachable("NYI"); }
+  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    CIRGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
+    Visit(DAE->getExpr());
+  }
+  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+    CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
+    Visit(DIE->getExpr());
+  }
+  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+  void VisitCXXConstructExpr(const CXXConstructExpr *E);
+  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitLambdaExpr(LambdaExpr *E);
+  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitExprWithCleanups(ExprWithCleanups *E);
+  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { llvm_unreachable("NYI"); }
+  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+  void VisitOpaqueValueExpr(OpaqueValueExpr *E) { llvm_unreachable("NYI"); }
+
+  void VisitPseudoObjectExpr(PseudoObjectExpr *E) { llvm_unreachable("NYI"); }
+
+  void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); }
+
+  void buildInitializationToLValue(Expr *E, LValue LV);
+
+  void buildNullInitializationToLValue(mlir::Location loc, LValue Address);
+  void VisitCXXThrowExpr(const CXXThrowExpr *E) { llvm_unreachable("NYI"); }
+  void VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); }
+};
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// Given an expression with aggregate type that represents a value lvalue,
+/// this method emits the address of the lvalue, then loads the result into
+/// DestPtr.
+void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) {
+  LValue LV = CGF.buildLValue(E);
+
+  // If the type of the l-value is atomic, then do an atomic load.
+  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) ||
+      UnimplementedFeature::atomicTypes())
+    llvm_unreachable("atomic load is NYI");
+
+  buildFinalDestCopy(E->getType(), LV);
+}
+
+/// Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::buildFinalDestCopy(QualType type, RValue src) {
+  assert(src.isAggregate() && "value must be aggregate value!");
+  LValue srcLV = CGF.makeAddrLValue(src.getAggregateAddress(), type);
+  buildFinalDestCopy(type, srcLV, EVK_RValue);
+}
+
+/// Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src,
+                                        ExprValueKind SrcValueKind) {
+  // If Dest is ignored, then we're evaluating an aggregate expression
+  // in a context that doesn't care about the result. Note that loads
+  // from volatile l-values force the existence of a non-ignored
+  // destination.
+  if (Dest.isIgnored())
+    return;
+
+  // Copy non-trivial C structs here.
+  if (Dest.isVolatile())
+    assert(!UnimplementedFeature::volatileTypes());
+
+  if (SrcValueKind == EVK_RValue) {
+    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
+      llvm_unreachable("move assignment/move ctor for rvalue is NYI");
+    }
+  } else {
+    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct)
+      llvm_unreachable("non-trivial primitive copy is NYI");
+  }
+
+  AggValueSlot srcAgg = AggValueSlot::forLValue(
+      src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
+      AggValueSlot::MayOverlap);
+  buildCopy(type, Dest, srcAgg);
+}
+
+/// Perform a copy from the source into the destination.
+///
+/// \param type - the type of the aggregate being copied; qualifiers are
+/// ignored
+void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest,
+                               const AggValueSlot &src) {
+  if (dest.requiresGCollection())
+    llvm_unreachable("garbage collection is NYI");
+
+  // If the result of the assignment is used, copy the LHS there also.
+  // It's volatile if either side is. Use the minimum alignment of
+  // the two sides.
+  LValue DestLV = CGF.makeAddrLValue(dest.getAddress(), type);
+  LValue SrcLV = CGF.makeAddrLValue(src.getAddress(), type);
+  if (dest.isVolatile() || src.isVolatile() ||
+      UnimplementedFeature::volatileTypes())
+    llvm_unreachable("volatile is NYI");
+  CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), false);
+}
+
+// FIXME(cir): This function could be shared with traditional LLVM codegen
+/// Determine if E is a trivial array filler, that is, one that is
+/// equivalent to zero-initialization.
+static bool isTrivialFiller(Expr *E) {
+  if (!E)
+    return true;
+
+  if (isa<ImplicitValueInitExpr>(E))
+    return true;
+
+  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
+    if (ILE->getNumInits())
+      return false;
+    return isTrivialFiller(ILE->getArrayFiller());
+  }
+
+  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
+    return Cons->getConstructor()->isDefaultConstructor() &&
+           Cons->getConstructor()->isTrivial();
+
+  // FIXME: Are there other cases where we can avoid emitting an initializer?
+  return false;
+}
+
+void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType,
+                                    QualType ArrayQTy, Expr *ExprToVisit,
+                                    ArrayRef<Expr *> Args, Expr *ArrayFiller) {
+  uint64_t NumInitElements = Args.size();
+
+  uint64_t NumArrayElements = AType.getSize();
+  assert(NumInitElements != 0 && "expected at least one initialized value");
+  assert(NumInitElements <= NumArrayElements);
+
+  QualType elementType =
+      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
+  QualType elementPtrType = CGF.getContext().getPointerType(elementType);
+
+  auto cirElementType = CGF.convertType(elementType);
+  auto cirElementPtrType = mlir::cir::PointerType::get(
+      CGF.getBuilder().getContext(), cirElementType);
+  auto loc = CGF.getLoc(ExprToVisit->getSourceRange());
+
+  // Cast from cir.ptr<!cir.array<elementType>> to cir.ptr<elementType>
+  auto begin = CGF.getBuilder().create<mlir::cir::CastOp>(
+      loc, cirElementPtrType, mlir::cir::CastKind::array_to_ptrdecay,
+      DestPtr.getPointer());
+
+  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+  CharUnits elementAlign =
+      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+  // Exception safety requires us to destroy all the
+  // already-constructed members if an initializer throws.
+  // For that, we'll need an EH cleanup.
+  [[maybe_unused]] QualType::DestructionKind dtorKind =
+      elementType.isDestructedType();
+  [[maybe_unused]] Address endOfInit = Address::invalid();
+  assert(!CGF.needsEHCleanup(dtorKind) && "destructed types NYI");
+
+  // The 'current element to initialize'. The invariants on this
+  // variable are complicated. Essentially, after each iteration of
+  // the loop, it points to the last initialized element, except
+  // that it points to the beginning of the array before any
+  // elements have been initialized.
+  mlir::Value element = begin;
+
+  // Don't build the 'one' before the loop, to avoid emitting redundant
+  // cir.const(1) instructions.
+  mlir::Value one;
+
+  // Emit the explicit initializers.
+  for (uint64_t i = 0; i != NumInitElements; ++i) {
+    if (i == 1)
+      one = CGF.getBuilder().getConstInt(
+          loc, CGF.PtrDiffTy.cast<mlir::cir::IntType>(), 1);
+
+    // Advance to the next element.
+    if (i > 0) {
+      element = CGF.getBuilder().create<mlir::cir::PtrStrideOp>(
+          loc, cirElementPtrType, element, one);
+
+      // Tell the cleanup that it needs to destroy up to this
+      // element. TODO: some of these stores can be trivially
+      // observed to be unnecessary.
+      assert(!endOfInit.isValid() && "destructed types NYI");
+    }
+
+    LValue elementLV = CGF.makeAddrLValue(
+        Address(element, cirElementType, elementAlign), elementType);
+    buildInitializationToLValue(Args[i], elementLV);
+  }
+
+  // Check whether there's a non-trivial array-fill expression.
+  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
+
+  // Any remaining elements need to be zero-initialized, possibly
+  // using the filler expression. We can skip this if we're
+  // emitting to zeroed memory.
+  if (NumInitElements != NumArrayElements &&
+      !(Dest.isZeroed() && hasTrivialFiller &&
+        CGF.getTypes().isZeroInitializable(elementType))) {
+
+    // Use an actual loop. This is basically
+    //   do { *array++ = filler; } while (array != end);
+
+    auto &builder = CGF.getBuilder();
+
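+    // The filler loop built below is expected to take roughly this shape in
+    // CIR (illustrative sketch only; types elided):
+    //
+    //   cir.do {
+    //     %cur = cir.load %tmp
+    //     ... initialize *%cur, advance it, and store it back to %tmp ...
+    //     cir.yield
+    //   } while {
+    //     %cur = cir.load %tmp
+    //     %cmp = cir.cmp(ne, %cur, %end)
+    //     cir.condition(%cmp)
+    //   }
+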
+    if (NumInitElements) {
+      auto one =
+          builder.getConstInt(loc, CGF.PtrDiffTy.cast<mlir::cir::IntType>(), 1);
+      element = builder.create<mlir::cir::PtrStrideOp>(loc, cirElementPtrType,
+                                                       element, one);
+
+      assert(!endOfInit.isValid() && "destructed types NIY");
+    }
+
+    // Allocate the temporary variable
+    // to store the pointer to the first uninitialized element
+    auto tmpAddr = CGF.CreateTempAlloca(
+        cirElementPtrType, CGF.getPointerAlign(), loc, "arrayinit.temp");
+    LValue tmpLV = CGF.makeAddrLValue(tmpAddr, elementPtrType);
+    CGF.buildStoreThroughLValue(RValue::get(element), tmpLV);
+
+    // Compute the end of the array.
+    auto numArrayElementsConst = builder.getConstInt(
+        loc, CGF.PtrDiffTy.cast<mlir::cir::IntType>(), NumArrayElements);
+    mlir::Value end = builder.create<mlir::cir::PtrStrideOp>(
+        loc, cirElementPtrType, begin, numArrayElementsConst);
+
+    builder.createDoWhile(
+        loc,
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto currentElement = builder.createLoad(loc, tmpAddr);
+          mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy);
+          auto cmp = builder.create<mlir::cir::CmpOp>(
+              loc, boolTy, mlir::cir::CmpOpKind::ne, currentElement, end);
+          builder.createCondition(cmp);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          auto currentElement = builder.createLoad(loc, tmpAddr);
+
+          if (UnimplementedFeature::cleanups())
+            llvm_unreachable("NYI");
+
+          // Emit the actual filler expression.
+          LValue elementLV = CGF.makeAddrLValue(
+              Address(currentElement, cirElementType, elementAlign),
+              elementType);
+          if (ArrayFiller)
+            buildInitializationToLValue(ArrayFiller, elementLV);
+          else
+            buildNullInitializationToLValue(loc, elementLV);
+
+          // Tell the EH cleanup that we finished with the last element.
+          assert(!endOfInit.isValid() && "destructed types NIY");
+
+          // Advance the pointer and store it to the temporary variable.
+          auto one = builder.getConstInt(
+              loc, CGF.PtrDiffTy.cast<mlir::cir::IntType>(), 1);
+          auto nextElement = builder.create<mlir::cir::PtrStrideOp>(
+              loc, cirElementPtrType, currentElement, one);
+          CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV);
+
+          builder.createYield(loc);
+        });
+  }
+
+  // Leave the partial-array cleanup if we entered one.
+  assert(!dtorKind && "destructed types NIY");
+}
+
+/// True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+  // Only record types have members that might require garbage collection.
+  const RecordType *RecordTy = T->getAs<RecordType>();
+  if (!RecordTy)
+    return false;
+
+  // Don't mess with non-trivial C++ types.
+  RecordDecl *Record = RecordTy->getDecl();
+  if (isa<CXXRecordDecl>(Record) &&
+      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
+       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+    return false;
+
+  // Check whether the type has an object member.
+  return Record->hasObjectMember();
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+/// Determine whether the given cast kind is known to always convert values
+/// with all zero bits in their value representation to values with all zero
+/// bits in their value representation.
+/// TODO(cir): this can be shared with LLVM codegen.
+static bool castPreservesZero(const CastExpr *CE) {
+  switch (CE->getCastKind()) {
+  case CK_HLSLVectorTruncation:
+  case CK_HLSLArrayRValue:
+    llvm_unreachable("NYI");
+  // No-ops.
+  case CK_NoOp:
+  case CK_UserDefinedConversion:
+  case CK_ConstructorConversion:
+  case CK_BitCast:
+  case CK_ToUnion:
+  case CK_ToVoid:
+  // Conversions between (possibly-complex) integral, (possibly-complex)
+  // floating-point, and bool.
+  case CK_BooleanToSignedIntegral:
+  case CK_FloatingCast:
+  case CK_FloatingComplexCast:
+  case CK_FloatingComplexToBoolean:
+  case CK_FloatingComplexToIntegralComplex:
+  case CK_FloatingComplexToReal:
+  case CK_FloatingRealToComplex:
+  case CK_FloatingToBoolean:
+  case CK_FloatingToIntegral:
+  case CK_IntegralCast:
+  case CK_IntegralComplexCast:
+  case CK_IntegralComplexToBoolean:
+  case CK_IntegralComplexToFloatingComplex:
+  case CK_IntegralComplexToReal:
+  case CK_IntegralRealToComplex:
+  case CK_IntegralToBoolean:
+  case CK_IntegralToFloating:
+  // Reinterpreting integers as pointers and vice versa.
+  case CK_IntegralToPointer:
+  case CK_PointerToIntegral:
+  // Language extensions.
+  case CK_VectorSplat:
+  case CK_MatrixCast:
+  case CK_NonAtomicToAtomic:
+  case CK_AtomicToNonAtomic:
+    return true;
+
+  case CK_BaseToDerivedMemberPointer:
+  case CK_DerivedToBaseMemberPointer:
+  case CK_MemberPointerToBoolean:
+  case CK_NullToMemberPointer:
+  case CK_ReinterpretMemberPointer:
+    // FIXME: ABI-dependent.
+    return false;
+
+  case CK_AnyPointerToBlockPointerCast:
+  case CK_BlockPointerToObjCPointerCast:
+  case CK_CPointerToObjCPointerCast:
+  case CK_ObjCObjectLValueCast:
+  case CK_IntToOCLSampler:
+  case CK_ZeroToOCLOpaqueType:
+    // FIXME: Check these.
+    return false;
+
+  case CK_FixedPointCast:
+  case CK_FixedPointToBoolean:
+  case CK_FixedPointToFloating:
+  case CK_FixedPointToIntegral:
+  case CK_FloatingToFixedPoint:
+  case CK_IntegralToFixedPoint:
+    // FIXME: Do all fixed-point types represent zero as all 0 bits?
+    return false;
+
+  case CK_AddressSpaceConversion:
+  case CK_BaseToDerived:
+  case CK_DerivedToBase:
+  case CK_Dynamic:
+  case CK_NullToPointer:
+  case CK_PointerToBoolean:
+    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
+    // same representation in all involved address spaces.
+    return false;
+
+  case CK_ARCConsumeObject:
+  case CK_ARCExtendBlockObject:
+  case CK_ARCProduceObject:
+  case CK_ARCReclaimReturnedObject:
+  case CK_CopyAndAutoreleaseBlockObject:
+  case CK_ArrayToPointerDecay:
+  case CK_FunctionToPointerDecay:
+  case CK_BuiltinFnToFnPtr:
+  case CK_Dependent:
+  case CK_LValueBitCast:
+  case CK_LValueToRValue:
+  case CK_LValueToRValueBitCast:
+  case CK_UncheckedDerivedToBase:
+    return false;
+  }
+  llvm_unreachable("Unhandled clang::CastKind enum");
+}
+
+/// If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) {
+  E = E->IgnoreParens();
+  while (auto *CE = dyn_cast<CastExpr>(E)) {
+    if (!castPreservesZero(CE))
+      break;
+    E = CE->getSubExpr()->IgnoreParens();
+  }
+
+  // 0
+  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+    return IL->getValue() == 0;
+  // +0.0
+  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+    return FL->getValue().isPosZero();
+  // int()
+  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+      CGF.getTypes().isZeroInitializable(E->getType()))
+    return true;
+  // (int*)0 - Null pointer expressions.
+  if (const CastExpr *ICE = dyn_cast<CastExpr>(E)) {
+    return ICE->getCastKind() == CK_NullToPointer &&
+           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
+           !E->HasSideEffects(CGF.getContext());
+  }
+  // '\0'
+  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+    return CL->getValue() == 0;
+
+  // Otherwise, hard case: conservatively return false.
+  return false;
+}
+
+void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc,
+                                                     LValue lv) {
+  QualType type = lv.getType();
+
+  // If the destination slot is already zeroed out before the aggregate is
+  // copied into it, we don't have to emit any zeros here.
+  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
+    return;
+
+  if (CGF.hasScalarEvaluationKind(type)) {
+    // For non-aggregates, we can store the appropriate null constant.
+    auto null = CGF.CGM.buildNullConstant(type, loc);
+    // Note that the following is not equivalent to
+    // EmitStoreThroughBitfieldLValue for ARC types.
+    if (lv.isBitField()) {
+      mlir::Value result;
+      CGF.buildStoreThroughBitfieldLValue(RValue::get(null), lv, result);
+    } else {
+      assert(lv.isSimple());
+      CGF.buildStoreOfScalar(null, lv, /* isInitialization */ true);
+    }
+  } else {
+    // There's a potential optimization opportunity in combining
+    // memsets; that would be easy for arrays, but relatively
+    // difficult for structures with the current code.
+    CGF.buildNullInitialization(loc, lv.getAddress(), lv.getType());
+  }
+}
+
+void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) {
+  QualType type = LV.getType();
+  // FIXME: Ignore result?
+  // FIXME: Are initializers affected by volatile?
+  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+    // TODO(cir): LLVM codegen considers storing "i32 0" to already zeroed
+    // memory a noop. Consider emitting the store to zero in CIR anyway, to
+    // model the actual user behavior; a later pass can optimize it out.
+    return;
+  }
+
+  if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
+    auto loc = E->getSourceRange().isValid() ? CGF.getLoc(E->getSourceRange())
+                                             : *CGF.currSrcLoc;
+    return buildNullInitializationToLValue(loc, LV);
+  } else if (isa<NoInitExpr>(E)) {
+    // Do nothing.
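+    // (A NoInitExpr marks a subobject whose initialization is intentionally
+    // skipped, so no store is emitted for it.)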
+    return;
+  } else if (type->isReferenceType()) {
+    RValue RV = CGF.buildReferenceBindingToExpr(E);
+    return CGF.buildStoreThroughLValue(RV, LV);
+  }
+
+  switch (CGF.getEvaluationKind(type)) {
+  case TEK_Complex:
+    llvm_unreachable("NYI");
+    return;
+  case TEK_Aggregate:
+    CGF.buildAggExpr(
+        E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
+                                   AggValueSlot::DoesNotNeedGCBarriers,
+                                   AggValueSlot::IsNotAliased,
+                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
+    return;
+  case TEK_Scalar:
+    if (LV.isSimple()) {
+      CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV);
+    } else {
+      CGF.buildStoreThroughLValue(RValue::get(CGF.buildScalarExpr(E)), LV);
+    }
+    return;
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+void AggExprEmitter::VisitMaterializeTemporaryExpr(
+    MaterializeTemporaryExpr *E) {
+  Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+  AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType());
+  CGF.buildCXXConstructExpr(E, Slot);
+}
+
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+  if (Dest.isPotentiallyAliased() && E->getType().isPODType(CGF.getContext())) {
+    llvm_unreachable("NYI");
+  }
+
+  AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType());
+
+  // Block-scope compound literals are destroyed at the end of the enclosing
+  // scope in C.
+  bool Destruct =
+      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
+  if (Destruct)
+    llvm_unreachable("NYI");
+
+  CGF.buildAggExpr(E->getInitializer(), Slot);
+
+  if (Destruct)
+    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+      llvm_unreachable("NYI");
+}
+
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+  if (UnimplementedFeature::cleanups())
+    llvm_unreachable("NYI");
+
+  auto &builder = CGF.getBuilder();
+  auto scopeLoc = CGF.getLoc(E->getSourceRange());
+  [[maybe_unused]] auto scope = builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        CIRGenFunction::LexicalScope lexScope{CGF, loc,
+                                              builder.getInsertionBlock()};
+        Visit(E->getSubExpr());
+      });
+}
+
+void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+  CIRGenFunction::SourceLocRAIIObject loc{CGF, CGF.getLoc(E->getSourceRange())};
+  AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType());
+  LLVM_ATTRIBUTE_UNUSED LValue SlotLV =
+      CGF.makeAddrLValue(Slot.getAddress(), E->getType());
+
+  // We'll need to enter cleanup scopes in case any of the element initializers
+  // throws an exception.
+  if (UnimplementedFeature::cleanups())
+    llvm_unreachable("NYI");
+  mlir::Operation *CleanupDominator = nullptr;
+
+  auto CurField = E->getLambdaClass()->field_begin();
+  auto captureInfo = E->capture_begin();
+  for (auto &captureInit : E->capture_inits()) {
+    // Pick a name for the field.
+    llvm::StringRef fieldName = CurField->getName();
+    const LambdaCapture &capture = *captureInfo;
+    if (capture.capturesVariable()) {
+      assert(!CurField->isBitField() && "lambdas don't have bitfield members!");
+      ValueDecl *v = capture.getCapturedVar();
+      fieldName = v->getName();
+      CGF.getCIRGenModule().LambdaFieldToName[*CurField] = fieldName;
+    } else {
+      llvm_unreachable("NYI");
+    }
+
+    // Emit initialization
+    LValue LV =
+        CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName);
+    if (CurField->hasCapturedVLAType()) {
+      llvm_unreachable("NYI");
+    }
+
+    buildInitializationToLValue(captureInit, LV);
+
+    // Push a destructor if necessary.
+    if (QualType::DestructionKind DtorKind =
+            CurField->getType().isDestructedType()) {
+      llvm_unreachable("NYI");
+    }
+
+    CurField++;
+    captureInfo++;
+  }
+
+  // Deactivate all the partial cleanups in reverse order, which generally
+  // means popping them.
+  if (UnimplementedFeature::cleanups())
+    llvm_unreachable("NYI");
+
+  // Destroy the placeholder if we made one.
+  if (CleanupDominator)
+    CleanupDominator->erase();
+}
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+    CGF.CGM.buildExplicitCastExprType(ECE, &CGF);
+  switch (E->getCastKind()) {
+
+  case CK_LValueToRValue:
+    // If we're loading from a volatile type, force the destination
+    // into existence.
+    if (E->getSubExpr()->getType().isVolatileQualified() ||
+        UnimplementedFeature::volatileTypes()) {
+      llvm_unreachable("volatile is NYI");
+    }
+    [[fallthrough]];
+
+  case CK_NoOp:
+  case CK_UserDefinedConversion:
+  case CK_ConstructorConversion:
+    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+                                                   E->getType()) &&
+           "Implicit cast types must be compatible");
+    Visit(E->getSubExpr());
+    break;
+
+  case CK_LValueBitCast:
+    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
+
+  case CK_Dependent:
+  case CK_BitCast:
+  case CK_ArrayToPointerDecay:
+  case CK_FunctionToPointerDecay:
+  case CK_NullToPointer:
+  case CK_NullToMemberPointer:
+  case CK_BaseToDerivedMemberPointer:
+  case CK_DerivedToBaseMemberPointer:
+  case CK_MemberPointerToBoolean:
+  case CK_ReinterpretMemberPointer:
+  case CK_IntegralToPointer:
+  case CK_PointerToIntegral:
+  case CK_PointerToBoolean:
+  case CK_ToVoid:
+  case CK_VectorSplat:
+  case CK_IntegralCast:
+  case CK_BooleanToSignedIntegral:
+  case CK_IntegralToBoolean:
+  case CK_IntegralToFloating:
+  case CK_FloatingToIntegral:
+  case CK_FloatingToBoolean:
+  case CK_FloatingCast:
+  case CK_CPointerToObjCPointerCast:
+  case CK_BlockPointerToObjCPointerCast:
+  case CK_AnyPointerToBlockPointerCast:
+  case CK_ObjCObjectLValueCast:
+  case CK_FloatingRealToComplex:
+  case CK_FloatingComplexToReal:
+  case CK_FloatingComplexToBoolean:
+  case CK_FloatingComplexCast:
+  case CK_FloatingComplexToIntegralComplex:
+  case CK_IntegralRealToComplex:
+  case CK_IntegralComplexToReal:
+  case CK_IntegralComplexToBoolean:
+  case CK_IntegralComplexCast:
+  case CK_IntegralComplexToFloatingComplex:
+  case CK_ARCProduceObject:
+  case CK_ARCConsumeObject:
+  case CK_ARCReclaimReturnedObject:
+  case CK_ARCExtendBlockObject:
+  case CK_CopyAndAutoreleaseBlockObject:
+  case CK_BuiltinFnToFnPtr:
+  case CK_ZeroToOCLOpaqueType:
+  case CK_MatrixCast:
+
+  case CK_IntToOCLSampler:
+  case CK_FloatingToFixedPoint:
+  case CK_FixedPointToFloating:
+  case CK_FixedPointCast:
+  case CK_FixedPointToBoolean:
+  case CK_FixedPointToIntegral:
+  case CK_IntegralToFixedPoint:
+    llvm::errs() << "cast '" << E->getCastKindName()
+                 << "' invalid for aggregate types\n";
+    llvm_unreachable("cast kind invalid for aggregate types");
+  default: {
+    llvm::errs() << "cast kind not implemented: '" << E->getCastKindName()
+                 << "'\n";
+    assert(0 && "not implemented");
+    break;
+  }
+  }
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
+    llvm_unreachable("NYI");
+  }
+
+  withReturnValueSlot(
+      E, [&](ReturnValueSlot Slot) { return CGF.buildCallExpr(E, Slot); });
+}
+
+void AggExprEmitter::withReturnValueSlot(
+    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
+  QualType RetTy = E->getType();
+  bool RequiresDestruction =
+      !Dest.isExternallyDestructed() &&
+      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
+
+  // If it makes no observable difference, save a memcpy + temporary.
+  //
+  // We need to always provide our own temporary if destruction is required.
+  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
+  // its lifetime before we have the chance to emit a proper destructor call.
+  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
+                 (RequiresDestruction && !Dest.getAddress().isValid());
+
+  Address RetAddr = Address::invalid();
+  assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI");
+
+  if (!UseTemp) {
+    RetAddr = Dest.getAddress();
+  } else {
+    RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), "tmp",
+                                &RetAddr);
+    assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI");
+  }
+
+  RValue Src =
+      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
+                               Dest.isExternallyDestructed()));
+
+  if (!UseTemp)
+    return;
+
+  assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
+  buildFinalDestCopy(E->getType(), Src);
+
+  if (!RequiresDestruction) {
+    // If there's no dtor to run, the copy was the last use of our temporary.
+    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
+    // eagerly.
+    assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI");
+  }
+}
+
+void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
+  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
+                                      E->getRHS()->getType()));
+  const ComparisonCategoryInfo &CmpInfo =
+      CGF.getContext().CompCategories.getInfoForType(E->getType());
+  assert(CmpInfo.Record->isTriviallyCopyable() &&
+         "cannot copy non-trivially copyable aggregate");
+
+  QualType ArgTy = E->getLHS()->getType();
+
+  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
+      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
+      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType())
+    llvm_unreachable("aggregate three-way comparison");
+
+  auto Loc = CGF.getLoc(E->getSourceRange());
+
+  if (E->getType()->isAnyComplexType())
+    llvm_unreachable("NYI");
+
+  auto LHS = CGF.buildAnyExpr(E->getLHS()).getScalarVal();
+  auto RHS = CGF.buildAnyExpr(E->getRHS()).getScalarVal();
+
+  mlir::Value ResultScalar;
+  if (ArgTy->isNullPtrType()) {
+    ResultScalar =
+        CGF.builder.getConstInt(Loc, CmpInfo.getEqualOrEquiv()->getIntValue());
+  } else {
+    auto LtRes = CmpInfo.getLess()->getIntValue();
+    auto EqRes = CmpInfo.getEqualOrEquiv()->getIntValue();
+    auto GtRes = CmpInfo.getGreater()->getIntValue();
+    if (!CmpInfo.isPartial()) {
+      // Strong ordering.
+      ResultScalar = CGF.builder.createThreeWayCmpStrong(Loc, LHS, RHS, LtRes,
+                                                         EqRes, GtRes);
+    } else {
+      // Partial ordering.
+      auto UnorderedRes = CmpInfo.getUnordered()->getIntValue();
+      ResultScalar = CGF.builder.createThreeWayCmpPartial(
+          Loc, LHS, RHS, LtRes, EqRes, GtRes, UnorderedRes);
+    }
+  }
+
+  // Create the return value in the destination slot.
+  EnsureDest(Loc, E->getType());
+  LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType());
+
+  // Emit the address of the first (and only) field in the comparison category
+  // type, and initialize it from the constant integer value produced above.
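+  // (Comparison category types such as std::strong_ordering wrap a single
+  // integral member, so storing the computed scalar completes the result.)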
+  const FieldDecl *ResultField = *CmpInfo.Record->field_begin();
+  LValue FieldLV = CGF.buildLValueForFieldInitialization(
+      DestLV, ResultField, ResultField->getName());
+  CGF.buildStoreThroughLValue(RValue::get(ResultScalar), FieldLV);
+
+  // All done! The result is in the Dest slot.
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+  // TODO(cir): use something like CGF.ErrorUnsupported
+  if (E->hadArrayRangeDesignator())
+    llvm_unreachable("GNU array range designator extension");
+
+  if (E->isTransparent())
+    return Visit(E->getInit(0));
+
+  VisitCXXParenListOrInitListExpr(
+      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
+}
+
+void AggExprEmitter::VisitCXXParenListOrInitListExpr(
+    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
+    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
+#if 0
+  // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+  // (Length of globals? Chunks of zeroed-out space?).
+  //
+  // If we can, prefer a copy from a global; this is a lot less code for long
+  // globals, and it's easier for the current optimizers to analyze.
+  if (llvm::Constant *C =
+          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
+    llvm::GlobalVariable* GV =
+        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+                                 llvm::GlobalValue::InternalLinkage, C, "");
+    EmitFinalDestCopy(ExprToVisit->getType(),
+                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
+    return;
+  }
+#endif
+
+  AggValueSlot Dest = EnsureSlot(CGF.getLoc(ExprToVisit->getSourceRange()),
+                                 ExprToVisit->getType());
+
+  LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
+
+  // Handle initialization of an array.
+  if (ExprToVisit->getType()->isConstantArrayType()) {
+    auto AType =
+        cast<mlir::cir::ArrayType>(Dest.getAddress().getElementType());
+    buildArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(),
+                   ExprToVisit, InitExprs, ArrayFiller);
+    return;
+  } else if (ExprToVisit->getType()->isVariableArrayType()) {
+    llvm_unreachable("variable arrays NYI");
+    return;
+  }
+
+  if (ExprToVisit->getType()->isArrayType()) {
+    llvm_unreachable("NYI");
+  }
+
+  assert(ExprToVisit->getType()->isRecordType() &&
+         "Only support structs/unions here!");
+
+  // Do struct initialization; this code just sets each individual member
+  // to the appropriate value. This makes bitfield support automatic;
+  // the disadvantage is that the generated code is more difficult for
+  // the optimizer, especially with bitfields.
+  unsigned NumInitElements = InitExprs.size();
+  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+
+  // We'll need to enter cleanup scopes in case any of the element
+  // initializers throws an exception.
+  SmallVector<EHScopeStack::stable_iterator, 1> cleanups;
+  // FIXME(cir): placeholder
+  mlir::Operation *cleanupDominator = nullptr;
+  [[maybe_unused]] auto addCleanup =
+      [&](const EHScopeStack::stable_iterator &cleanup) {
+        llvm_unreachable("NYI");
+      };
+
+  unsigned curInitIndex = 0;
+
+  // Emit initialization of base classes.
+  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
+    assert(NumInitElements >= CXXRD->getNumBases() &&
+           "missing initializer for base class");
+    for ([[maybe_unused]] auto &Base : CXXRD->bases()) {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  // Prepare a 'this' for CXXDefaultInitExprs.
+  CIRGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
+
+  if (record->isUnion()) {
+    // Only initialize one field of a union. The field itself is
+    // specified by the initializer list.
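+    // e.g. `union U { int i; float f; }; U u = {.f = 1.0f};` initializes
+    // only `f`; the rest of the union's storage is left untouched.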
+    if (!InitializedFieldInUnion) {
+      // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+      // Make sure that it's really an empty union and not a failure of
+      // semantic analysis.
+      for (const auto *Field : record->fields())
+        assert(
+            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
+            "Only unnamed bitfields or anonymous classes allowed");
+#endif
+      return;
+    }
+
+    // FIXME: volatility
+    FieldDecl *Field = InitializedFieldInUnion;
+
+    LValue FieldLoc =
+        CGF.buildLValueForFieldInitialization(DestLV, Field, Field->getName());
+    if (NumInitElements) {
+      // Store the initializer into the field
+      buildInitializationToLValue(InitExprs[0], FieldLoc);
+    } else {
+      // Default-initialize to null.
+      buildNullInitializationToLValue(
+          CGF.getLoc(ExprToVisit->getSourceRange()), FieldLoc);
+    }
+
+    return;
+  }
+
+  // Here we iterate over the fields; this makes it simpler to both
+  // default-initialize fields and skip over unnamed fields.
+  for (const auto *field : record->fields()) {
+    // We're done once we hit the flexible array member.
+    if (field->getType()->isIncompleteArrayType())
+      break;
+
+    // Always skip anonymous bitfields.
+    if (field->isUnnamedBitField())
+      continue;
+
+    // We're done if we reach the end of the explicit initializers, we
+    // have a zeroed object, and the rest of the fields are
+    // zero-initializable.
+    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
+        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
+      break;
+    LValue LV =
+        CGF.buildLValueForFieldInitialization(DestLV, field, field->getName());
+    // We never generate write-barriers for initialized fields.
+    assert(!UnimplementedFeature::setNonGC());
+
+    if (curInitIndex < NumInitElements) {
+      // Store the initializer into the field.
+      CIRGenFunction::SourceLocRAIIObject loc{
+          CGF, CGF.getLoc(record->getSourceRange())};
+      buildInitializationToLValue(InitExprs[curInitIndex++], LV);
+    } else {
+      // We're out of initializers; default-initialize to null.
+      buildNullInitializationToLValue(
+          CGF.getLoc(ExprToVisit->getSourceRange()), LV);
+    }
+
+    // Push a destructor if necessary.
+    // FIXME: if we have an array of structures, all explicitly
+    // initialized, we can end up pushing a linear number of cleanups.
+    [[maybe_unused]] bool pushedCleanup = false;
+    if (QualType::DestructionKind dtorKind =
+            field->getType().isDestructedType()) {
+      llvm_unreachable("NYI");
+    }
+
+    // From LLVM codegen, maybe not useful for CIR:
+    // If the GEP didn't get used because of a dead zero init or something
+    // else, clean it up for -O0 builds and general tidiness.
+  }
+
+  // Deactivate all the partial cleanups in reverse order, which
+  // generally means popping them.
+  assert((cleanupDominator || cleanups.empty()) &&
+         "Missing cleanupDominator before deactivating cleanup blocks");
+  for (unsigned i = cleanups.size(); i != 0; --i)
+    llvm_unreachable("NYI");
+
+  // Destroy the placeholder if we made one.
+  if (cleanupDominator)
+    llvm_unreachable("NYI");
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+  // Ensure that we have a slot, but if we already do, remember
+  // whether it was externally destructed.
+  bool wasExternallyDestructed = Dest.isExternallyDestructed();
+  EnsureDest(CGF.getLoc(E->getSourceRange()), E->getType());
+
+  // We're going to push a destructor if there isn't already one.
+  Dest.setExternallyDestructed();
+
+  Visit(E->getSubExpr());
+
+  // Push that destructor we promised.
+  if (!wasExternallyDestructed)
+    CGF.buildCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers and dispatcher
+//===----------------------------------------------------------------------===//
+
+/// Get an approximate count of the number of non-zero bytes that will be
+/// stored when outputting the initializer for the specified initializer
+/// expression.
+/// FIXME(cir): this can be shared with LLVM codegen.
+static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CIRGenFunction &CGF) {
+  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+    E = MTE->getSubExpr();
+  E = E->IgnoreParenNoopCasts(CGF.getContext());
+
+  // 0 and 0.0 won't require any non-zero stores!
+  if (isSimpleZero(E, CGF))
+    return CharUnits::Zero();
+
+  // If this is an initlist expr, sum up the sizes of the (present)
+  // elements. If this is something weird, assume the whole thing is non-zero.
+  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+  while (ILE && ILE->isTransparent())
+    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
+  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+    return CGF.getContext().getTypeSizeInChars(E->getType());
+
+  // InitListExprs for structs have to be handled carefully. If there are
+  // reference members, we need to consider the size of the reference, not the
+  // referencee. InitListExprs for unions and arrays can't have references.
+  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+    if (!RT->isUnionType()) {
+      RecordDecl *SD = RT->getDecl();
+      CharUnits NumNonZeroBytes = CharUnits::Zero();
+
+      unsigned ILEElement = 0;
+      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
+        while (ILEElement != CXXRD->getNumBases())
+          NumNonZeroBytes +=
+              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
+      for (const auto *Field : SD->fields()) {
+        // We're done once we hit the flexible array member or run out of
+        // InitListExpr elements.
+        if (Field->getType()->isIncompleteArrayType() ||
+            ILEElement == ILE->getNumInits())
+          break;
+        if (Field->isUnnamedBitField())
+          continue;
+
+        const Expr *E = ILE->getInit(ILEElement++);
+
+        // Reference values are always non-null and have the width of a
+        // pointer.
+        if (Field->getType()->isReferenceType())
+          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
+              CGF.getTarget().getPointerWidth(LangAS::Default));
+        else
+          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+      }
+
+      return NumNonZeroBytes;
+    }
+  }
+
+  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
+  CharUnits NumNonZeroBytes = CharUnits::Zero();
+  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+  return NumNonZeroBytes;
+}
+
+/// If the initializer is large and has a lot of zeros in it, emit a memset and
+/// avoid storing the individual zeros.
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+                                     CIRGenFunction &CGF) {
+  // If the slot is already known to be zeroed, nothing to do. Don't mess with
+  // volatile stores.
+  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
+    return;
+
+  // C++ objects with a user-declared constructor don't need zero'ing.
+  if (CGF.getLangOpts().CPlusPlus)
+    if (const auto *RT = CGF.getContext()
+                             .getBaseElementType(E->getType())
+                             ->getAs<RecordType>()) {
+      const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
+      if (RD->hasUserDeclaredConstructor())
+        return;
+    }
+
+  // If the type is 16-bytes or smaller, prefer individual stores over memset.
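+  // (e.g. zero-initializing a 12-byte struct with a few direct stores is
+  // typically cheaper than materializing a memset call.)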
+  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
+  if (Size <= CharUnits::fromQuantity(16))
+    return;
+
+  // Check to see if over 3/4 of the initializer are known to be zero. If so,
+  // we prefer to emit memset + individual stores for the rest.
+  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+  if (NumNonZeroBytes * 4 > Size)
+    return;
+
+  // Okay, it seems like a good idea to use an initial memset, emit the call.
+  auto &builder = CGF.getBuilder();
+  auto loc = CGF.getLoc(E->getSourceRange());
+  Address slotAddr = Slot.getAddress();
+  auto zero = builder.getZero(loc, slotAddr.getElementType());
+
+  builder.createStore(loc, zero, slotAddr);
+  // Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+  // CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
+
+  // Tell the AggExprEmitter that the slot is known zero.
+  Slot.setZeroed();
+}
+
+AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit(
+    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
+  // If the most-derived object is a field declared with [[no_unique_address]],
+  // the tail padding of any virtual base could be reused for other subobjects
+  // of that field's class.
+  if (IsVirtual)
+    return AggValueSlot::MayOverlap;
+
+  // If the base class is laid out entirely within the nvsize of the derived
+  // class, its tail padding cannot yet be initialized, so we can issue
+  // stores at the full width of the base class.
+  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+  if (Layout.getBaseClassOffset(BaseRD) +
+          getContext().getASTRecordLayout(BaseRD).getSize() <=
+      Layout.getNonVirtualSize())
+    return AggValueSlot::DoesNotOverlap;
+
+  // The tail padding may contain values we need to preserve.
+  return AggValueSlot::MayOverlap;
+}
+
+void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) {
+  assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) &&
+         "Invalid aggregate expression to emit");
+  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
+         "slot has bits but no address");
+
+  // Optimize the slot if possible.
+  CheckAggExprForMemSetUse(Slot, E, *this);
+
+  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
+}
+
+void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty,
+                                        AggValueSlot::Overlap_t MayOverlap,
+                                        bool isVolatile) {
+  // TODO(cir): this function needs improvements, commented code for now since
+  // this will be touched again soon.
+  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+  Address DestPtr = Dest.getAddress();
+  Address SrcPtr = Src.getAddress();
+
+  if (getLangOpts().CPlusPlus) {
+    if (const RecordType *RT = Ty->getAs<RecordType>()) {
+      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+      assert((Record->hasTrivialCopyConstructor() ||
+              Record->hasTrivialCopyAssignment() ||
+              Record->hasTrivialMoveConstructor() ||
+              Record->hasTrivialMoveAssignment() ||
+              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
+             "Trying to aggregate-copy a type without a trivial copy/move "
+             "constructor or assignment operator");
+      // Ignore empty classes in C++.
+      if (Record->isEmpty())
+        return;
+    }
+  }
+
+  if (getLangOpts().CUDAIsDevice) {
+    llvm_unreachable("CUDA is NYI");
+  }
+
+  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+  // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+  // object, then the overlap shall be exact and the two objects shall have
+  // qualified or unqualified versions of a compatible type."
+  //
+  // memcpy is not defined if the source and destination pointers are exactly
+  // equal, but other compilers do this optimization, and almost every memcpy
+  // implementation handles this case safely. If there is a libc that does not
+  // safely handle this, we can add a target hook.
+
+  // Get data size info for this aggregate. Don't copy the tail padding if this
+  // might be a potentially-overlapping subobject, since the tail padding might
+  // be occupied by a different object. Otherwise, copying it is fine.
+  TypeInfoChars TypeInfo;
+  if (MayOverlap)
+    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
+  else
+    TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+  mlir::Attribute SizeVal = nullptr;
+  if (TypeInfo.Width.isZero()) {
+    // But note that getTypeInfo returns 0 for a VLA.
+    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
+            getContext().getAsArrayType(Ty))) {
+      llvm_unreachable("VLA is NYI");
+    }
+  }
+  if (!SizeVal) {
+    // NOTE(cir): CIR types already carry info about their sizes. This is here
+    // just for codegen parity.
+    SizeVal = builder.getI64IntegerAttr(TypeInfo.Width.getQuantity());
+  }
+
+  // FIXME: If we have a volatile struct, the optimizer can remove what might
+  // appear to be `extra' memory ops:
+  //
+  // volatile struct { int i; } a, b;
+  //
+  // int main() {
+  //   a = b;
+  //   a = b;
+  // }
+  //
+  // we need to use a different call here. We use isVolatile to indicate when
+  // either the source or the destination is volatile.
+
+  // NOTE(cir): original codegen would normally convert DestPtr and SrcPtr to
+  // i8* since memcpy operates on bytes. We don't need that in CIR because
+  // cir.copy will operate on any CIR pointer that points to a sized type.
+
+  // Don't do any of the memmove_collectable tests if GC isn't set.
+  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+    // fall through
+  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+    RecordDecl *Record = RecordTy->getDecl();
+    if (Record->hasObjectMember()) {
+      llvm_unreachable("ObjC is NYI");
+    }
+  } else if (Ty->isArrayType()) {
+    QualType BaseType = getContext().getBaseElementType(Ty);
+    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+      if (RecordTy->getDecl()->hasObjectMember()) {
+        llvm_unreachable("ObjC is NYI");
+      }
+    }
+  }
+
+  builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer());
+
+  // Determine the metadata to describe the position of any padding in this
+  // memcpy, as well as the TBAA tags for the members of the struct, in case
+  // the optimizer wishes to expand it into scalar memory operations.
+  if (CGM.getCodeGenOpts().NewStructPathTBAA || UnimplementedFeature::tbaa())
+    llvm_unreachable("TBAA is NYI");
+}
+
+AggValueSlot::Overlap_t
+CIRGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
+  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
+    return AggValueSlot::DoesNotOverlap;
+
+  // If the field lies entirely within the enclosing class's nvsize, its tail
+  // padding cannot overlap any already-initialized object. (The only subobjects
+  // with greater addresses that might already be initialized are vbases.)
+  const RecordDecl *ClassRD = FD->getParent();
+  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
+  if (Layout.getFieldOffset(FD->getFieldIndex()) +
+          getContext().getTypeSize(FD->getType()) <=
+      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
+    return AggValueSlot::DoesNotOverlap;
+
+  // The tail padding may contain values we need to preserve.
+  return AggValueSlot::MayOverlap;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
new file mode 100644
index 000000000000..7201cfa4fe8a
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -0,0 +1,1164 @@
+//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+using namespace cir;
+using namespace clang;
+
+namespace {
+struct MemberCallInfo {
+  RequiredArgs ReqArgs;
+  // Number of prefix arguments for the call. Ignores the `this` pointer.
+  unsigned PrefixSize;
+};
+} // namespace
+
+static RValue buildNewDeleteCall(CIRGenFunction &CGF,
+                                 const FunctionDecl *CalleeDecl,
+                                 const FunctionProtoType *CalleeType,
+                                 const CallArgList &Args);
+
+static MemberCallInfo
+commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD,
+                                   mlir::Value This, mlir::Value ImplicitParam,
+                                   QualType ImplicitParamTy, const CallExpr *CE,
+                                   CallArgList &Args, CallArgList *RtlArgs) {
+  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
+         isa<CXXOperatorCallExpr>(CE));
+  assert(MD->isInstance() &&
+         "Trying to emit a member or operator call expr on a static method!");
+
+  // Push the this ptr.
+  const CXXRecordDecl *RD =
+      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
+
+  // If there is an implicit parameter (e.g. VTT), emit it.
+  if (ImplicitParam) {
+    llvm_unreachable("NYI");
+  }
+
+  const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
+  unsigned PrefixSize = Args.size() - 1;
+
+  // Add the rest of the call args
+  if (RtlArgs) {
+    // Special case: if the caller emitted the arguments right-to-left already
+    // (prior to emitting the *this argument), we're done. This happens for
+    // assignment operators.
+    Args.addFrom(*RtlArgs);
+  } else if (CE) {
+    // Special case: skip first argument of CXXOperatorCall (it is "this").
+    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+    CGF.buildCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
+                      CE->getDirectCallee());
+  } else {
+    assert(
+        FPT->getNumParams() == 0 &&
+        "No CallExpr specified for function with non-zero number of arguments");
+  }
+
+  return {required, PrefixSize};
+}
+
+RValue CIRGenFunction::buildCXXMemberOrOperatorCall(
+    const CXXMethodDecl *MD, const CIRGenCallee &Callee,
+    ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam,
+    QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) {
+
+  const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+  CallArgList Args;
+  MemberCallInfo CallInfo = commonBuildCXXMemberOrOperatorCall(
+      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
+  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
+      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
+  assert((CE || currSrcLoc) && "expected source location");
+  mlir::Location loc = CE ? getLoc(CE->getExprLoc()) : *currSrcLoc;
+  return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr,
+                   CE && CE == MustTailCall, loc, CE);
+}
+
+// TODO(cir): this can be shared with LLVM codegen
+static CXXRecordDecl *getCXXRecord(const Expr *E) {
+  QualType T = E->getType();
+  if (const PointerType *PTy = T->getAs<PointerType>())
+    T = PTy->getPointeeType();
+  const RecordType *Ty = T->castAs<RecordType>();
+  return cast<CXXRecordDecl>(Ty->getDecl());
+}
+
+RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
+    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
+    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+    const Expr *Base) {
+  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
+
+  // Compute the object pointer.
+  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
+  const CXXMethodDecl *DevirtualizedMethod = nullptr;
+  if (CanUseVirtualCall &&
+      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
+    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
+    assert(DevirtualizedMethod);
+    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
+    const Expr *Inner = Base->IgnoreParenBaseCasts();
+    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
+        MD->getReturnType().getCanonicalType()) {
+      // If the return types are not the same, this might be a case where more
+      // code needs to run to compensate for it. For example, the derived
+      // method might return a type that inherits from the return
+      // type of MD and has a prefix.
+      // For now we just avoid devirtualizing these covariant cases.
+      DevirtualizedMethod = nullptr;
+    } else if (getCXXRecord(Inner) == DevirtualizedClass) {
+      // If the class of the Inner expression is where the dynamic method
+      // is defined, build the this pointer from it.
+      Base = Inner;
+    } else if (getCXXRecord(Base) != DevirtualizedClass) {
+      // If the method is defined in a class that is not the best dynamic
+      // one or the one of the full expression, we would have to build
+      // a derived-to-base cast to compute the correct this pointer, but
+      // we don't have support for that yet, so do a virtual call.
+      assert(!UnimplementedFeature::buildDerivedToBaseCastForDevirt());
+      DevirtualizedMethod = nullptr;
+    }
+  }
+
+  bool TrivialForCodegen =
+      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
+  bool TrivialAssignment =
+      TrivialForCodegen &&
+      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+      !MD->getParent()->mayInsertExtraPadding();
+  (void)TrivialAssignment;
+
+  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
+  // operator before the LHS.
+  CallArgList RtlArgStorage;
+  CallArgList *RtlArgs = nullptr;
+  LValue TrivialAssignmentRHS;
+  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+    if (OCE->isAssignmentOp()) {
+      // See further note on TrivialAssignment, we don't handle this during
+      // codegen, differently than LLVM, which early optimizes like this:
+      //  if (TrivialAssignment) {
+      //    TrivialAssignmentRHS = buildLValue(CE->getArg(1));
+      //  } else {
+      RtlArgs = &RtlArgStorage;
+      buildCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
+                    drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
+                    /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
+    }
+  }
+
+  LValue This;
+  if (IsArrow) {
+    LValueBaseInfo BaseInfo;
+    assert(!UnimplementedFeature::tbaa());
+    Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo);
+    This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo);
+  } else {
+    This = buildLValue(Base);
+  }
+
+  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
+    llvm_unreachable("NYI");
+  }
+
+  if (TrivialForCodegen) {
+    if (isa<CXXDestructorDecl>(MD))
+      return RValue::get(nullptr);
+
+    if (TrivialAssignment) {
+      // From LLVM codegen:
+      // We don't like to generate the trivial copy/move assignment operator
+      // when it isn't necessary; just produce the proper effect here.
+      // It's important that we use the result of EmitLValue here rather than
+      // emitting call arguments, in order to preserve TBAA information from
+      // the RHS.
+      //
+      // We don't early optimize like LLVM does:
+      // LValue RHS = isa<CXXOperatorCallExpr>(CE) ? TrivialAssignmentRHS
+      //                                           : buildLValue(*CE->arg_begin());
+      // buildAggregateAssign(This, RHS, CE->getType());
+      // return RValue::get(This.getPointer());
+    } else {
+      assert(MD->getParent()->mayInsertExtraPadding() &&
+             "unknown trivial member function");
+    }
+  }
+
+  // Compute the function type we're calling
+  const CXXMethodDecl *CalleeDecl =
+      DevirtualizedMethod ? DevirtualizedMethod : MD;
+  const CIRGenFunctionInfo *FInfo = nullptr;
+  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+    llvm_unreachable("NYI");
+  else
+    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
+
+  auto Ty = CGM.getTypes().GetFunctionType(*FInfo);
+
+  // C++11 [class.mfct.non-static]p2:
+  //   If a non-static member function of a class X is called for an object that
+  //   is not of type X, or of a type derived from X, the behavior is undefined.
+  SourceLocation CallLoc;
+  ASTContext &C = getContext();
+  (void)C;
+  if (CE)
+    CallLoc = CE->getExprLoc();
+
+  SanitizerSet SkippedChecks;
+  if (const auto *cmce = dyn_cast<CXXMemberCallExpr>(CE)) {
+    auto *ioa = cmce->getImplicitObjectArgument();
+    auto isImplicitObjectCXXThis = isWrappedCXXThis(ioa);
+    if (isImplicitObjectCXXThis)
+      SkippedChecks.set(SanitizerKind::Alignment, true);
+    if (isImplicitObjectCXXThis || isa<DeclRefExpr>(ioa))
+      SkippedChecks.set(SanitizerKind::Null, true);
+  }
+
+  if (UnimplementedFeature::buildTypeCheck())
+    llvm_unreachable("NYI");
+
+  // C++ [class.virtual]p12:
+  //   Explicit qualification with the scope operator (5.1) suppresses the
+  //   virtual call mechanism.
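+  // e.g. `ptr->Base::f()` calls `Base::f` directly even when `f` is virtual
+  // and `ptr` points to a derived object.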
+  //
+  // We also don't emit a virtual call if the base expression has a record type
+  // because then we know what the type is.
+  bool useVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
+
+  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
+    llvm_unreachable("NYI");
+  }
+
+  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
+  // 'CalleeDecl' instead.
+
+  CIRGenCallee Callee;
+  if (useVirtualCall) {
+    Callee = CIRGenCallee::forVirtual(CE, MD, This.getAddress(), Ty);
+  } else {
+    if (SanOpts.has(SanitizerKind::CFINVCall)) {
+      llvm_unreachable("NYI");
+    }
+
+    if (getLangOpts().AppleKext)
+      llvm_unreachable("NYI");
+    else if (!DevirtualizedMethod)
+      // TODO(cir): shouldn't this call getAddrOfCXXStructor instead?
+      Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty),
+                                       GlobalDecl(MD));
+    else {
+      Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty),
+                                       GlobalDecl(MD));
+    }
+  }
+
+  if (MD->isVirtual()) {
+    Address NewThisAddr =
+        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+            *this, CalleeDecl, This.getAddress(), useVirtualCall);
+    This.setAddress(NewThisAddr);
+  }
+
+  return buildCXXMemberOrOperatorCall(
+      CalleeDecl, Callee, ReturnValue, This.getPointer(),
+      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
+}
+
+RValue
+CIRGenFunction::buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+                                               const CXXMethodDecl *MD,
+                                               ReturnValueSlot ReturnValue) {
+  assert(MD->isInstance() &&
+         "Trying to emit a member call expr on a static method!");
+  return buildCXXMemberOrOperatorMemberCallExpr(
+      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+      /*IsArrow=*/false, E->getArg(0));
+}
+
+void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E,
+                                           AggValueSlot Dest) {
+  assert(!Dest.isIgnored() && "Must have a destination!");
+  const auto *CD = E->getConstructor();
+
+  // If we require zero initialization before (or instead of) calling the
+  // constructor, as can be the case with a non-user-provided default
+  // constructor, emit the zero initialization now, unless destination is
+  // already zeroed.
+  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
+    switch (E->getConstructionKind()) {
+    case CXXConstructionKind::Delegating:
+    case CXXConstructionKind::Complete:
+      buildNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(),
+                              E->getType());
+      break;
+    case CXXConstructionKind::VirtualBase:
+    case CXXConstructionKind::NonVirtualBase:
+      llvm_unreachable("NYI");
+      break;
+    }
+  }
+
+  // If this is a call to a trivial default constructor:
+  // In LLVM: do nothing.
+  // In CIR: emit as a regular call, other later passes should lower the
+  // ctor call into trivial initialization.
+  // if (CD->isTrivial() && CD->isDefaultConstructor())
+  //   return;
+
+  // Elide the constructor if we're constructing from a temporary
+  if (getLangOpts().ElideConstructors && E->isElidable()) {
+    // FIXME: This only handles the simplest case, where the source object is
+    //        passed directly as the first argument to the constructor. This
+    //        should also handle stepping through implicit casts and conversion
+    //        sequences which involve two steps, with a conversion operator
+    //        followed by a converting constructor.
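+    // (Illustrative) for `T t = T(x);` the intermediate temporary is elided
+    // and the inner expression is emitted directly into `t`'s storage below.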
+    const auto *SrcObj = E->getArg(0);
+    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
+    assert(
+        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
+    buildAggExpr(SrcObj, Dest);
+    return;
+  }
+
+  if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) {
+    buildCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
+                                Dest.isSanitizerChecked());
+  } else {
+    clang::CXXCtorType Type = Ctor_Complete;
+    bool ForVirtualBase = false;
+    bool Delegating = false;
+
+    switch (E->getConstructionKind()) {
+    case CXXConstructionKind::Complete:
+      Type = Ctor_Complete;
+      break;
+    case CXXConstructionKind::Delegating:
+      llvm_unreachable("NYI");
+      break;
+    case CXXConstructionKind::VirtualBase:
+      ForVirtualBase = true;
+      [[fallthrough]];
+    case CXXConstructionKind::NonVirtualBase:
+      Type = Ctor_Base;
+      break;
+    }
+
+    buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
+  }
+}
+
+namespace {
+/// The parameters to pass to a usual operator delete.
+struct UsualDeleteParams {
+  bool DestroyingDelete = false;
+  bool Size = false;
+  bool Alignment = false;
+};
+} // namespace
+
+// FIXME(cir): this should be shared with LLVM codegen
+static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
+  UsualDeleteParams Params;
+
+  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
+  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
+
+  // The first argument is always a void*.
+  ++AI;
+
+  // The next parameter may be a std::destroying_delete_t.
+  if (FD->isDestroyingOperatorDelete()) {
+    Params.DestroyingDelete = true;
+    assert(AI != AE);
+    ++AI;
+  }
+
+  // Figure out what other parameters we should be implicitly passing.
+  if (AI != AE && (*AI)->isIntegerType()) {
+    Params.Size = true;
+    ++AI;
+  }
+
+  if (AI != AE && (*AI)->isAlignValT()) {
+    Params.Alignment = true;
+    ++AI;
+  }
+
+  assert(AI == AE && "unexpected usual deallocation function parameter");
+  return Params;
+}
+
+static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF,
+                                        const CXXNewExpr *e,
+                                        unsigned minElements,
+                                        mlir::Value &numElements,
+                                        mlir::Value &sizeWithoutCookie) {
+  QualType type = e->getAllocatedType();
+
+  if (!e->isArray()) {
+    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+    sizeWithoutCookie = CGF.getBuilder().getConstant(
+        CGF.getLoc(e->getSourceRange()),
+        mlir::cir::IntAttr::get(CGF.SizeTy, typeSize.getQuantity()));
+    return sizeWithoutCookie;
+  }
+
+  llvm_unreachable("NYI");
+}
+
+namespace {
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <class Traits>
+class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+  /// Type used to hold llvm::Value*s.
+  typedef typename Traits::ValueTy ValueTy;
+  /// Type used to hold RValues.
+  typedef typename Traits::RValueTy RValueTy;
+  struct PlacementArg {
+    RValueTy ArgValue;
+    QualType ArgType;
+  };
+
+  unsigned NumPlacementArgs : 31;
+  unsigned PassAlignmentToPlacementDelete : 1;
+  const FunctionDecl *OperatorDelete;
+  ValueTy Ptr;
+  ValueTy AllocSize;
+  CharUnits AllocAlign;
+
+  PlacementArg *getPlacementArgs() {
+    return reinterpret_cast<PlacementArg *>(this + 1);
+  }
+
+public:
+  static size_t getExtraSize(size_t NumPlacementArgs) {
+    return NumPlacementArgs * sizeof(PlacementArg);
+  }
+
+  CallDeleteDuringNew(size_t NumPlacementArgs,
+                      const FunctionDecl *OperatorDelete, ValueTy Ptr,
+                      ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
+                      CharUnits AllocAlign)
+      : NumPlacementArgs(NumPlacementArgs),
+        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
+        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
+        AllocAlign(AllocAlign) {}
+
+  void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
+    assert(I < NumPlacementArgs && "index out of range");
+    getPlacementArgs()[I] = {Arg, Type};
+  }
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
+    CallArgList DeleteArgs;
+
+    // The first argument is always a void* (or C* for a destroying operator
+    // delete for class type C).
+    DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
+
+    // Figure out what other parameters we should be implicitly passing.
+    UsualDeleteParams Params;
+    if (NumPlacementArgs) {
+      // A placement deallocation function is implicitly passed an alignment
+      // if the placement allocation function was, but is never passed a size.
+      Params.Alignment = PassAlignmentToPlacementDelete;
+    } else {
+      // For a non-placement new-expression, 'operator delete' can take a
+      // size and/or an alignment if it has the right parameters.
+      Params = getUsualDeleteParams(OperatorDelete);
+    }
+
+    assert(!Params.DestroyingDelete &&
+           "should not call destroying delete in a new-expression");
+
+    // The second argument can be a std::size_t (for non-placement delete).
+    if (Params.Size)
+      DeleteArgs.add(Traits::get(CGF, AllocSize),
+                     CGF.getContext().getSizeType());
+
+    // The next (second or third) argument can be a std::align_val_t, which
+    // is an enum whose underlying type is std::size_t.
+    // FIXME: Use the right type as the parameter type. Note that in a call
+    // to operator delete(size_t, ...), we may not have it available.
+    if (Params.Alignment) {
+      llvm_unreachable("NYI");
+    }
+
+    // Pass the rest of the arguments, which must match exactly.
+    for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+      auto Arg = getPlacementArgs()[I];
+      DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
+    }
+
+    // Call 'operator delete'.
+    buildNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+  }
+};
+} // namespace
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CIRGenFunction &CGF, const CXXNewExpr *E,
+                                  Address NewPtr, mlir::Value AllocSize,
+                                  CharUnits AllocAlign,
+                                  const CallArgList &NewArgs) {
+  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
+
+  // If we're not inside a conditional branch, then the cleanup will
+  // dominate and we can do the easier (and more efficient) thing.
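+  // (i.e. the pointer and size values can be captured directly; the
+  // conditional path below must first save them as dominating values.)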
+  if (!CGF.isInConditionalBranch()) {
+    struct DirectCleanupTraits {
+      typedef mlir::Value ValueTy;
+      typedef RValue RValueTy;
+      static RValue get(CIRGenFunction &, ValueTy V) { return RValue::get(V); }
+      static RValue get(CIRGenFunction &, RValueTy V) { return V; }
+    };
+
+    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+    DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
+        EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
+        NewPtr.getPointer(), AllocSize, E->passAlignment(), AllocAlign);
+    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+      auto &Arg = NewArgs[I + NumNonPlacementArgs];
+      Cleanup->setPlacementArg(
+          I, Arg.getRValue(CGF, CGF.getLoc(E->getSourceRange())), Arg.Ty);
+    }
+
+    return;
+  }
+
+  // Otherwise, we need to save all this stuff.
+  DominatingValue<RValue>::saved_type SavedNewPtr =
+      DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
+  DominatingValue<RValue>::saved_type SavedAllocSize =
+      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+  struct ConditionalCleanupTraits {
+    typedef DominatingValue<RValue>::saved_type ValueTy;
+    typedef DominatingValue<RValue>::saved_type RValueTy;
+    static RValue get(CIRGenFunction &CGF, ValueTy V) { return V.restore(CGF); }
+  };
+  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
+
+  ConditionalCleanup *Cleanup =
+      CGF.EHStack.pushCleanupWithExtra<ConditionalCleanup>(
+          EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
+          SavedNewPtr, SavedAllocSize, E->passAlignment(), AllocAlign);
+  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+    auto &Arg = NewArgs[I + NumNonPlacementArgs];
+    Cleanup->setPlacementArg(
+        I,
+        DominatingValue<RValue>::save(
+            CGF, Arg.getRValue(CGF, CGF.getLoc(E->getSourceRange()))),
+        Arg.Ty);
+  }
+
+  CGF.initFullExprCleanup();
+}
+
+static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init,
+                                    QualType AllocType, Address NewPtr,
+                                    AggValueSlot::Overlap_t MayOverlap) {
+  // FIXME: Refactor with buildExprAsInit.
+  switch (CGF.getEvaluationKind(AllocType)) {
+  case TEK_Scalar:
+    CGF.buildScalarInit(Init, CGF.getLoc(Init->getSourceRange()),
+                        CGF.makeAddrLValue(NewPtr, AllocType), false);
+    return;
+  case TEK_Complex:
+    llvm_unreachable("NYI");
+    return;
+  case TEK_Aggregate: {
+    AggValueSlot Slot = AggValueSlot::forAddr(
+        NewPtr, AllocType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+        MayOverlap, AggValueSlot::IsNotZeroed,
+        AggValueSlot::IsSanitizerChecked);
+    CGF.buildAggExpr(Init, Slot);
+    return;
+  }
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E,
+                                QualType ElementType, mlir::Type ElementTy,
+                                Address NewPtr, mlir::Value NumElements,
+                                mlir::Value AllocSizeWithoutCookie) {
+  assert(!UnimplementedFeature::generateDebugInfo());
+  if (E->isArray()) {
+    llvm_unreachable("NYI");
+  } else if (const Expr *Init = E->getInitializer()) {
+    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
+                            AggValueSlot::DoesNotOverlap);
+  }
+}
+
+static CharUnits CalculateCookiePadding(CIRGenFunction &CGF,
+                                        const CXXNewExpr *E) {
+  if (!E->isArray())
+    return CharUnits::Zero();
+
+  // No cookie is required if the operator new[] being used is the
+  // reserved placement operator new[].
+  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
+    return CharUnits::Zero();
+
+  llvm_unreachable("NYI");
+  // return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
+}
+
+namespace {
+/// Calls the given 'operator delete' on a single object.
+struct CallObjectDelete final : EHScopeStack::Cleanup {
+  mlir::Value Ptr;
+  const FunctionDecl *OperatorDelete;
+  QualType ElementType;
+
+  CallObjectDelete(mlir::Value Ptr, const FunctionDecl *OperatorDelete,
+                   QualType ElementType)
+      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+  void Emit(CIRGenFunction &CGF, Flags flags) override {
+    CGF.buildDeleteCall(OperatorDelete, Ptr, ElementType);
+  }
+};
+} // namespace
+
+/// Emit the code for deleting a single object.
+/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
+/// if not.
+static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE,
+                             Address Ptr, QualType ElementType) {
+  // C++11 [expr.delete]p3:
+  //   If the static type of the object to be deleted is different from its
+  //   dynamic type, the static type shall be a base class of the dynamic type
+  //   of the object to be deleted and the static type shall have a virtual
+  //   destructor or the behavior is undefined.
+  CGF.buildTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(),
+                     Ptr.getPointer(), ElementType);
+
+  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
+  assert(!OperatorDelete->isDestroyingOperatorDelete());
+
+  // Find the destructor for the type, if applicable. If the
+  // destructor is virtual, we'll just emit the vcall and return.
+  const CXXDestructorDecl *Dtor = nullptr;
+  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
+      Dtor = RD->getDestructor();
+
+      if (Dtor->isVirtual()) {
+        bool UseVirtualCall = true;
+        const Expr *Base = DE->getArgument();
+        if (auto *DevirtualizedDtor = dyn_cast_or_null<const CXXDestructorDecl>(
+                Dtor->getDevirtualizedMethod(
+                    Base, CGF.CGM.getLangOpts().AppleKext))) {
+          UseVirtualCall = false;
+          const CXXRecordDecl *DevirtualizedClass =
+              DevirtualizedDtor->getParent();
+          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
+            // Devirtualized to the class of the base type (the type of the
+            // whole expression).
+            Dtor = DevirtualizedDtor;
+          } else {
+            // Devirtualized to some other type. Would need to cast the this
+            // pointer to that type but we don't have support for that yet, so
+            // do a virtual call. FIXME: handle the case where it is
+            // devirtualized to the derived type (the type of the inner
+            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
+            UseVirtualCall = true;
+          }
+        }
+        if (UseVirtualCall) {
+          llvm_unreachable("NYI");
+          return false;
+        }
+      }
+    }
+  }
+
+  // Make sure that we call delete even if the dtor throws.
+  // This doesn't have to be a conditional cleanup because we're going
+  // to pop it off in a second.
+  CGF.EHStack.pushCleanup<CallObjectDelete>(
+      NormalAndEHCleanup, Ptr.getPointer(), OperatorDelete, ElementType);
+
+  if (Dtor) {
+    llvm_unreachable("NYI");
+  } else if (auto Lifetime = ElementType.getObjCLifetime()) {
+    switch (Lifetime) {
+    case Qualifiers::OCL_None:
+    case Qualifiers::OCL_ExplicitNone:
+    case Qualifiers::OCL_Autoreleasing:
+      break;
+
+    case Qualifiers::OCL_Strong:
+      llvm_unreachable("NYI");
+      break;
+
+    case Qualifiers::OCL_Weak:
+      llvm_unreachable("NYI");
+      break;
+    }
+  }
+
+  // In traditional LLVM codegen null checks are emitted to save a delete call.
+  // In CIR we optimize for size by default; the null check should be added by
+  // this function's callers.
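+  // For comparison, classic LLVM codegen guards the whole sequence roughly as
+  //   if (ptr) { dtor(ptr); operator delete(ptr); }
+  // whereas here the branch is left to callers (or later CIR passes), keeping
+  // the emitted CIR compact by default.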
+  assert(!UnimplementedFeature::emitNullCheckForDeleteCalls());
+
+  CGF.PopCleanupBlock();
+  return false;
+}
+
+void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) {
+  const Expr *Arg = E->getArgument();
+  Address Ptr = buildPointerWithAlignment(Arg);
+
+  // Null check the pointer.
+  //
+  // We could avoid this null check if we can determine that the object
+  // destruction is trivial and doesn't require an array cookie; we can
+  // unconditionally perform the operator delete call in that case. For now, we
+  // assume that deleted pointers are null rarely enough that it's better to
+  // keep the branch. This might be worth revisiting for a -O0 code size win.
+  //
+  // CIR note: favor code size by default for now, as mentioned in
+  // `EmitObjectDelete`.
+  assert(!UnimplementedFeature::emitNullCheckForDeleteCalls());
+  QualType DeleteTy = E->getDestroyedType();
+
+  // A destroying operator delete overrides the entire operation of the
+  // delete expression.
+  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
+    llvm_unreachable("NYI");
+    return;
+  }
+
+  // We might be deleting a pointer to array. If so, GEP down to the
+  // first non-array element.
+  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+  if (DeleteTy->isConstantArrayType()) {
+    llvm_unreachable("NYI");
+  }
+
+  assert(convertTypeForMem(DeleteTy) == Ptr.getElementType());
+
+  if (E->isArrayForm()) {
+    llvm_unreachable("NYI");
+  } else {
+    (void)EmitObjectDelete(*this, E, Ptr, DeleteTy);
+  }
+}
+
+mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
+  // The element type being allocated.
+  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
+
+  // 1. Build a call to the allocation function.
+  FunctionDecl *allocator = E->getOperatorNew();
+
+  // If there is a brace-initializer, we cannot allocate fewer elements than
+  // there are initializers.
+  unsigned minElements = 0;
+  if (E->isArray() && E->hasInitializer()) {
+    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
+    if (ILE && ILE->isStringLiteralInit())
+      minElements =
+          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+              ->getSize()
+              .getZExtValue();
+    else if (ILE)
+      minElements = ILE->getNumInits();
+  }
+
+  mlir::Value numElements = nullptr;
+  mlir::Value allocSizeWithoutCookie = nullptr;
+  mlir::Value allocSize = buildCXXNewAllocSize(
+      *this, E, minElements, numElements, allocSizeWithoutCookie);
+  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
+
+  // Emit the allocation call.
+  Address allocation = Address::invalid();
+  CallArgList allocatorArgs;
+  if (allocator->isReservedGlobalPlacementOperator()) {
+    // If the allocator is a global placement operator, just
+    // "inline" it directly.
+    assert(E->getNumPlacementArgs() == 1);
+    const Expr *arg = *E->placement_arguments().begin();
+
+    LValueBaseInfo BaseInfo;
+    allocation = buildPointerWithAlignment(arg, &BaseInfo);
+
+    // The pointer expression will, in many cases, be an opaque void*.
+    // In these cases, discard the computed alignment and use the
+    // formal alignment of the allocated type.
+    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
+      allocation = allocation.withAlignment(allocAlign);
+
+    // Set up allocatorArgs for the call to operator delete if it's not
+    // the reserved global operator.
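+    // E.g. for `new (buf) T`, the reserved `operator new(size_t, void*)` is
+    // never actually called; the placement pointer is used directly, and the
+    // (size, pointer) pair below is only recorded so a matching non-reserved
+    // placement 'operator delete' cleanup could be called on an EH path.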
+    if (E->getOperatorDelete() &&
+        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
+      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
+    }
+  } else {
+    const FunctionProtoType *allocatorType =
+        allocator->getType()->castAs<FunctionProtoType>();
+    unsigned ParamsToSkip = 0;
+
+    // The allocation size is the first argument.
+    QualType sizeType = getContext().getSizeType();
+    allocatorArgs.add(RValue::get(allocSize), sizeType);
+    ++ParamsToSkip;
+
+    if (allocSize != allocSizeWithoutCookie) {
+      llvm_unreachable("NYI");
+    }
+
+    // The allocation alignment may be passed as the second argument.
+    if (E->passAlignment()) {
+      llvm_unreachable("NYI");
+    }
+
+    // FIXME: Why do we not pass a CalleeDecl here?
+    buildCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
+                  /*AC=*/AbstractCallee(), /*ParamsToSkip=*/ParamsToSkip);
+    RValue RV =
+        buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+    // Set !heapallocsite metadata on the call to operator new.
+    assert(!UnimplementedFeature::generateDebugInfo());
+
+    // If this was a call to a global replaceable allocation function that does
+    // not take an alignment argument, the allocator is known to produce
+    // storage that's suitably aligned for any object that fits, up to a known
+    // threshold. Otherwise assume it's suitably aligned for the allocated
+    // type.
+    CharUnits allocationAlign = allocAlign;
+    if (!E->passAlignment() &&
+        allocator->isReplaceableGlobalAllocationFunction()) {
+      auto &Target = CGM.getASTContext().getTargetInfo();
+      unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
+          Target.getNewAlign(), getContext().getTypeSize(allocType)));
+      allocationAlign = std::max(
+          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
+    }
+
+    allocation = Address(RV.getScalarVal(), UInt8Ty, allocationAlign);
+  }
+
+  // Emit a null check on the allocation result if the allocation
+  // function is allowed to return null (because it has a non-throwing
+  // exception spec or is the reserved placement new) and we have an
+  // interesting initializer or will be running sanitizers on the
+  // initialization.
+  bool nullCheck = E->shouldNullCheckAllocation() &&
+                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
+                    sanitizePerformTypeCheck());
+
+  // The null-check means that the initializer is conditionally
+  // evaluated.
+  ConditionalEvaluation conditional(*this);
+
+  if (nullCheck) {
+    llvm_unreachable("NYI");
+  }
+
+  // If there's an operator delete, enter a cleanup to call it if an
+  // exception is thrown.
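+  // The cleanup pushed below is deactivated once initialization completes,
+  // so 'operator delete' only runs if the initializer itself throws. E.g. in
+  //   auto *p = new Widget(mayThrow());
+  // a throw from mayThrow() frees the just-allocated storage before the
+  // exception propagates.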
+  EHScopeStack::stable_iterator operatorDeleteCleanup;
+  [[maybe_unused]] mlir::Operation *cleanupDominator = nullptr;
+  if (E->getOperatorDelete() &&
+      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
+                          allocatorArgs);
+    operatorDeleteCleanup = EHStack.stable_begin();
+    cleanupDominator =
+        builder.create<mlir::cir::UnreachableOp>(getLoc(E->getSourceRange()))
+            .getOperation();
+  }
+
+  assert((allocSize == allocSizeWithoutCookie) ==
+         CalculateCookiePadding(*this, E).isZero());
+  if (allocSize != allocSizeWithoutCookie) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Type elementTy = getTypes().convertTypeForMem(allocType);
+  Address result = builder.createElementBitCast(getLoc(E->getSourceRange()),
+                                                allocation, elementTy);
+
+  // Passing pointer through launder.invariant.group to avoid propagation of
+  // vptrs information which may be included in previous type.
+  // To not break LTO with different optimizations levels, we do it regardless
+  // of optimization level.
+  if (CGM.getCodeGenOpts().StrictVTablePointers &&
+      allocator->isReservedGlobalPlacementOperator())
+    llvm_unreachable("NYI");
+
+  // Emit sanitizer checks for the pointer value now, so that in the case of
+  // an array it is checked only once and not at each constructor call. We may
+  // have already checked that the pointer is non-null.
+  // FIXME: If we have an array cookie and a potentially-throwing allocator,
+  // we'll null check the wrong pointer here.
+  SanitizerSet SkippedChecks;
+  SkippedChecks.set(SanitizerKind::Null, nullCheck);
+  buildTypeCheck(CIRGenFunction::TCK_ConstructorCall,
+                 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+                 result.getPointer(), allocType, result.getAlignment(),
+                 SkippedChecks, numElements);
+
+  buildNewInitializer(*this, E, allocType, elementTy, result, numElements,
+                      allocSizeWithoutCookie);
+  auto resultPtr = result.getPointer();
+  if (E->isArray()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Deactivate the 'operator delete' cleanup if we finished
+  // initialization.
+  if (operatorDeleteCleanup.isValid()) {
+    // FIXME: enable cleanupDominator above before implementing this.
+    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+    if (cleanupDominator)
+      cleanupDominator->erase();
+  }
+
+  if (nullCheck) {
+    llvm_unreachable("NYI");
+  }
+
+  return resultPtr;
+}
+
+RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor,
+                                              const CIRGenCallee &Callee,
+                                              mlir::Value This, QualType ThisTy,
+                                              mlir::Value ImplicitParam,
+                                              QualType ImplicitParamTy,
+                                              const CallExpr *CE) {
+  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
+
+  assert(!ThisTy.isNull());
+  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
+         "Pointer/Object mixup");
+
+  LangAS SrcAS = ThisTy.getAddressSpace();
+  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
+  if (SrcAS != DstAS) {
+    llvm_unreachable("NYI");
+  }
+
+  CallArgList Args;
+  commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
+                                     ImplicitParamTy, CE, Args, nullptr);
+  assert((CE || Dtor.getDecl()) && "expected source location provider");
+  return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
+                   ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
+                   CE ? getLoc(CE->getExprLoc())
+                      : getLoc(Dtor.getDecl()->getSourceRange()));
+}
+
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue buildNewDeleteCall(CIRGenFunction &CGF,
+                                 const FunctionDecl *CalleeDecl,
+                                 const FunctionProtoType *CalleeType,
+                                 const CallArgList &Args) {
+  mlir::cir::CIRCallOpInterface CallOrTryCall;
+  auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
+  CIRGenCallee Callee =
+      CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
+  RValue RV = CGF.buildCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+                                Args, CalleeType, /*ChainCall=*/false),
+                            Callee, ReturnValueSlot(), Args, &CallOrTryCall);
+
+  /// C++1y [expr.new]p10:
+  ///   [In a new-expression,] an implementation is allowed to omit a call
+  ///   to a replaceable global allocation function.
+  ///
+  /// We model such elidable calls with the 'builtin' attribute.
+  assert(!UnimplementedFeature::attributeBuiltin());
+  return RV;
+}
+
+void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD,
+                                     mlir::Value Ptr, QualType DeleteTy,
+                                     mlir::Value NumElements,
+                                     CharUnits CookieSize) {
+  assert((!NumElements && CookieSize.isZero()) ||
+         DeleteFD->getOverloadedOperator() == OO_Array_Delete);
+
+  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
+  CallArgList DeleteArgs;
+
+  auto Params = getUsualDeleteParams(DeleteFD);
+  auto ParamTypeIt = DeleteFTy->param_type_begin();
+
+  // Pass the pointer itself.
+  QualType ArgTy = *ParamTypeIt++;
+  mlir::Value DeletePtr =
+      builder.createBitcast(Ptr.getLoc(), Ptr, ConvertType(ArgTy));
+  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
+
+  // Pass the std::destroying_delete tag if present.
+  mlir::Value DestroyingDeleteTag{};
+  if (Params.DestroyingDelete) {
+    llvm_unreachable("NYI");
+  }
+
+  // Pass the size if the delete function has a size_t parameter.
+  if (Params.Size) {
+    QualType SizeType = *ParamTypeIt++;
+    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+    assert(SizeTy && "expected mlir::cir::IntType");
+    auto Size = builder.getConstInt(*currSrcLoc, ConvertType(SizeType),
+                                    DeleteTypeSize.getQuantity());
+
+    // For array new, multiply by the number of elements.
+    if (NumElements) {
+      // Uncomment upon adding testcase.
+      // Size = builder.createMul(Size, NumElements);
+      llvm_unreachable("NYI");
+    }
+
+    // If there is a cookie, add the cookie size.
+    if (!CookieSize.isZero()) {
+      // Uncomment upon adding testcase.
+      // builder.createBinop(
+      //     Size, mlir::cir::BinOpKind::Add,
+      //     builder.getConstInt(*currSrcLoc, SizeTy,
+      //                         CookieSize.getQuantity()));
+      llvm_unreachable("NYI");
+    }
+
+    DeleteArgs.add(RValue::get(Size), SizeType);
+  }
+
+  // Pass the alignment if the delete function has an align_val_t parameter.
+  if (Params.Alignment) {
+    llvm_unreachable("NYI");
+  }
+
+  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
+         "unknown parameter to usual delete function");
+
+  // Emit the call to delete.
+  buildNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
+
+  // If call argument lowering didn't use the destroying_delete_t alloca,
+  // remove it again.
+  if (DestroyingDeleteTag && DestroyingDeleteTag.use_empty()) {
+    llvm_unreachable("NYI"); // DestroyingDeleteTag->eraseFromParent();
+  }
+}
+
+static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF,
+                                          mlir::Location Loc, QualType DestTy) {
+  mlir::Type DestCIRTy = CGF.ConvertType(DestTy);
+  assert(DestCIRTy.isa<mlir::cir::PointerType>() &&
+         "result of dynamic_cast should be a ptr");
+
+  mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc);
+
+  if (!DestTy->isPointerType()) {
+    /// C++ [expr.dynamic.cast]p9:
+    ///   A failed cast to reference type throws std::bad_cast
+    CGF.CGM.getCXXABI().buildBadCastCall(CGF, Loc);
+  }
+
+  return NullPtrValue;
+}
+
+mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr,
+                                             const CXXDynamicCastExpr *DCE) {
+  auto loc = getLoc(DCE->getSourceRange());
+
+  CGM.buildExplicitCastExprType(DCE, this);
+  QualType destTy = DCE->getTypeAsWritten();
+  QualType srcTy = DCE->getSubExpr()->getType();
+
+  // C++ [expr.dynamic.cast]p7:
+  //   If T is "pointer to cv void," then the result is a pointer to the most
+  //   derived object pointed to by v.
+  bool isDynCastToVoid = destTy->isVoidPointerType();
+  bool isRefCast = destTy->isReferenceType();
+
+  QualType srcRecordTy;
+  QualType destRecordTy;
+  if (isDynCastToVoid) {
+    srcRecordTy = srcTy->getPointeeType();
+    // No destRecordTy.
+  } else if (const PointerType *DestPTy = destTy->getAs<PointerType>()) {
+    srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
+    destRecordTy = DestPTy->getPointeeType();
+  } else {
+    srcRecordTy = srcTy;
+    destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
+  }
+
+  assert(srcRecordTy->isRecordType() && "source type must be a record type!");
+  buildTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
+                 srcRecordTy);
+
+  if (DCE->isAlwaysNull())
+    return buildDynamicCastToNull(*this, loc, destTy);
+
+  if (isDynCastToVoid) {
+    auto srcIsNull = builder.createPtrIsNull(ThisAddr.getPointer());
+    return builder
+        .create<mlir::cir::TernaryOp>(
+            loc, srcIsNull,
+            [&](mlir::OpBuilder &, mlir::Location) {
+              auto nullPtr =
+                  builder.getNullPtr(builder.getVoidPtrTy(), loc).getResult();
+              builder.createYield(loc, nullPtr);
+            },
+            [&](mlir::OpBuilder &, mlir::Location) {
+              auto castedPtr = CGM.getCXXABI().buildDynamicCastToVoid(
+                  *this, loc, ThisAddr, srcRecordTy);
+              builder.createYield(loc, castedPtr);
+            })
+        .getResult();
+  }
+
+  assert(destRecordTy->isRecordType() && "dest type must be a record type!");
+
+  auto destCirTy = ConvertType(destTy).cast<mlir::cir::PointerType>();
+  auto castInfo = CGM.getCXXABI().buildDynamicCastInfo(*this, loc, srcRecordTy,
+                                                       destRecordTy);
+  return builder.createDynCast(loc, ThisAddr.getPointer(), destCirTy, isRefCast,
+                               castInfo);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp
new file mode 100644
index 000000000000..08644dc163d0
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp
@@ -0,0 +1,1868 @@
+//===--- CIRGenExprConst.cpp - Emit CIR Code from Constant Expressions ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as CIR code.
+//
+//===----------------------------------------------------------------------===//
+#include "Address.h"
+#include "CIRDataLayout.h"
+#include "CIRGenCstEmitter.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributeInterfaces.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <optional>
+
+using namespace clang;
+using namespace cir;
+
+//===----------------------------------------------------------------------===//
+// ConstantAggregateBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConstExprEmitter;
+
+static mlir::Attribute
+buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType,
+                   mlir::Type CommonElementType, unsigned ArrayBound,
+                   SmallVectorImpl<mlir::TypedAttr> &Elements,
+                   mlir::TypedAttr Filler);
+
+struct ConstantAggregateBuilderUtils {
+  CIRGenModule &CGM;
+  CIRDataLayout dataLayout;
+
+  ConstantAggregateBuilderUtils(CIRGenModule &CGM)
+      : CGM(CGM), dataLayout{CGM.getModule()} {}
+
+  CharUnits getAlignment(const mlir::TypedAttr C) const {
+    return CharUnits::fromQuantity(
+        dataLayout.getAlignment(C.getType(), /*useABI=*/true));
+  }
+
+  CharUnits getSize(mlir::Type Ty) const {
+    return CharUnits::fromQuantity(dataLayout.getTypeAllocSize(Ty));
+  }
+
+  CharUnits getSize(const mlir::TypedAttr C) const {
+    return getSize(C.getType());
+  }
+
+  mlir::TypedAttr getPadding(CharUnits size) const {
+    auto eltTy = CGM.UCharTy;
+    auto arSize = size.getQuantity();
+    auto &bld = CGM.getBuilder();
+    SmallVector<mlir::Attribute> elts(arSize, bld.getZeroAttr(eltTy));
+    return bld.getConstArray(mlir::ArrayAttr::get(bld.getContext(), elts),
+                             bld.getArrayType(eltTy, arSize));
+  }
+
+  mlir::Attribute getZeroes(CharUnits ZeroSize) const {
+    llvm_unreachable("NYI");
+  }
+};
+
+/// Incremental builder for an mlir::TypedAttr holding a struct or array
+/// constant.
+class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
+  /// The elements of the constant. These two arrays must have the same size;
+  /// Offsets[i] describes the offset of Elems[i] within the constant. The
+  /// elements are kept in increasing offset order, and we ensure that there
+  /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elems[i]).
+  ///
+  /// This may contain explicit padding elements (in order to create a
+  /// natural layout), but need not. Gaps between elements are implicitly
+  /// considered to be filled with undef.
+  llvm::SmallVector<mlir::Attribute, 32> Elems;
+  llvm::SmallVector<CharUnits, 32> Offsets;
+
+  /// The size of the constant (the maximum end offset of any added element).
+  /// May be larger than the end of Elems.back() if we split the last element
+  /// and removed some trailing undefs.
+  CharUnits Size = CharUnits::Zero();
+
+  /// This is true only if laying out Elems in order as the elements of a
+  /// non-packed LLVM struct will give the correct layout.
+  bool NaturalLayout = true;
+
+  bool split(size_t Index, CharUnits Hint);
+  std::optional<size_t> splitAt(CharUnits Pos);
+
+  static mlir::Attribute
+  buildFrom(CIRGenModule &CGM, ArrayRef<mlir::Attribute> Elems,
+            ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
+            bool NaturalLayout, mlir::Type DesiredTy, bool AllowOversized);
+
+public:
+  ConstantAggregateBuilder(CIRGenModule &CGM)
+      : ConstantAggregateBuilderUtils(CGM) {}
+
+  /// Update or overwrite the value starting at \p Offset with \c C.
+  ///
+  /// \param AllowOverwrite If \c true, this constant might overwrite (part of)
+  ///        a constant that has already been added. This flag is only used to
+  ///        detect bugs.
+  bool add(mlir::Attribute C, CharUnits Offset, bool AllowOverwrite);
+
+  /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits.
+  bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite);
+
+  /// Attempt to condense the value starting at \p Offset to a constant of type
+  /// \p DesiredTy.
+  void condense(CharUnits Offset, mlir::Type DesiredTy);
+
+  /// Produce a constant representing the entire accumulated value, ideally of
+  /// the specified type. If \p AllowOversized, the constant might be larger
+  /// than implied by \p DesiredTy (eg, if there is a flexible array member).
+  /// Otherwise, the constant will be of exactly the same size as \p DesiredTy
+  /// even if we can't represent it as that type.
+  mlir::Attribute build(mlir::Type DesiredTy, bool AllowOversized) const {
+    return buildFrom(CGM, Elems, Offsets, CharUnits::Zero(), Size,
+                     NaturalLayout, DesiredTy, AllowOversized);
+  }
+};
+
+template <typename Container, typename Range = std::initializer_list<
+                                  typename Container::value_type>>
+static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) {
+  assert(BeginOff <= EndOff && "invalid replacement range");
+  llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals);
+}
+
+bool ConstantAggregateBuilder::add(mlir::Attribute A, CharUnits Offset,
+                                   bool AllowOverwrite) {
+  // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
+  mlir::TypedAttr C = A.dyn_cast<mlir::TypedAttr>();
+  assert(C && "expected typed attribute");
+  // Common case: appending to a layout.
+  if (Offset >= Size) {
+    CharUnits Align = getAlignment(C);
+    CharUnits AlignedSize = Size.alignTo(Align);
+    if (AlignedSize > Offset || Offset.alignTo(Align) != Offset)
+      NaturalLayout = false;
+    else if (AlignedSize < Offset) {
+      Elems.push_back(getPadding(Offset - Size));
+      Offsets.push_back(Size);
+    }
+    Elems.push_back(C);
+    Offsets.push_back(Offset);
+    Size = Offset + getSize(C);
+    return true;
+  }
+
+  // Uncommon case: constant overlaps what we've already created.
+  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
+  if (!FirstElemToReplace)
+    return false;
+
+  CharUnits CSize = getSize(C);
+  std::optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
+  if (!LastElemToReplace)
+    return false;
+
+  assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) &&
+         "unexpectedly overwriting field");
+
+  replace(Elems, *FirstElemToReplace, *LastElemToReplace, {C});
+  replace(Offsets, *FirstElemToReplace, *LastElemToReplace, {Offset});
+  Size = std::max(Size, Offset + CSize);
+  NaturalLayout = false;
+  return true;
+}
+
+bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
+                                       bool AllowOverwrite) {
+  const ASTContext &Context = CGM.getASTContext();
+  const uint64_t CharWidth = CGM.getASTContext().getCharWidth();
+  auto charTy = CGM.getBuilder().getUIntNTy(CharWidth);
+  // Offset of where we want the first bit to go within the bits of the
+  // current char.
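+  // E.g. with CharWidth == 8, OffsetInBits == 19 starts the store at bit 3
+  // of the third byte (OffsetWithinChar == 3).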
+  unsigned OffsetWithinChar = OffsetInBits % CharWidth;
+
+  // We split bit-fields up into individual bytes. Walk over the bytes and
+  // update them.
+  for (CharUnits OffsetInChars =
+           Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar);
+       /**/; ++OffsetInChars) {
+    // Number of bits we want to fill in this char.
+    unsigned WantedBits =
+        std::min((uint64_t)Bits.getBitWidth(), CharWidth - OffsetWithinChar);
+
+    // Get a char containing the bits we want in the right places. The other
+    // bits have unspecified values.
+    llvm::APInt BitsThisChar = Bits;
+    if (BitsThisChar.getBitWidth() < CharWidth)
+      BitsThisChar = BitsThisChar.zext(CharWidth);
+    if (CGM.getDataLayout().isBigEndian()) {
+      // Figure out how much to shift by. We may need to left-shift if we have
+      // less than one byte of Bits left.
+      int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar;
+      if (Shift > 0)
+        BitsThisChar.lshrInPlace(Shift);
+      else if (Shift < 0)
+        BitsThisChar = BitsThisChar.shl(-Shift);
+    } else {
+      BitsThisChar = BitsThisChar.shl(OffsetWithinChar);
+    }
+    if (BitsThisChar.getBitWidth() > CharWidth)
+      BitsThisChar = BitsThisChar.trunc(CharWidth);
+
+    if (WantedBits == CharWidth) {
+      // Got a full byte: just add it directly.
+      add(mlir::cir::IntAttr::get(charTy, BitsThisChar), OffsetInChars,
+          AllowOverwrite);
+    } else {
+      // Partial byte: update the existing integer if there is one. If we
+      // can't split out a 1-CharUnit range to update, then we can't add
+      // these bits and fail the entire constant emission.
+      std::optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
+      if (!FirstElemToUpdate)
+        return false;
+      std::optional<size_t> LastElemToUpdate =
+          splitAt(OffsetInChars + CharUnits::One());
+      if (!LastElemToUpdate)
+        return false;
+      assert(*LastElemToUpdate - *FirstElemToUpdate < 2 &&
+             "should have at most one element covering one byte");
+
+      // Figure out which bits we want and discard the rest.
+      llvm::APInt UpdateMask(CharWidth, 0);
+      if (CGM.getDataLayout().isBigEndian())
+        UpdateMask.setBits(CharWidth - OffsetWithinChar - WantedBits,
+                           CharWidth - OffsetWithinChar);
+      else
+        UpdateMask.setBits(OffsetWithinChar, OffsetWithinChar + WantedBits);
+      BitsThisChar &= UpdateMask;
+      bool isNull = false;
+      if (*FirstElemToUpdate < Elems.size()) {
+        auto firstEltToUpdate =
+            dyn_cast<mlir::cir::IntAttr>(Elems[*FirstElemToUpdate]);
+        isNull = firstEltToUpdate && firstEltToUpdate.isNullValue();
+      }
+
+      if (*FirstElemToUpdate == *LastElemToUpdate || isNull) {
+        // All existing bits are either zero or undef.
+        add(CGM.getBuilder().getAttr<mlir::cir::IntAttr>(charTy, BitsThisChar),
+            OffsetInChars, /*AllowOverwrite*/ true);
+      } else {
+        mlir::cir::IntAttr CI =
+            dyn_cast<mlir::cir::IntAttr>(Elems[*FirstElemToUpdate]);
+        // In order to perform a partial update, we need the existing bitwise
+        // value, which we can only extract for a constant int.
+        // auto *CI = dyn_cast<llvm::ConstantInt>(ToUpdate);
+        if (!CI)
+          return false;
+        // Because this is a 1-CharUnit range, the constant occupying it must
+        // be exactly one CharUnit wide.
+        assert(CI.getBitWidth() == CharWidth && "splitAt failed");
+        assert((!(CI.getValue() & UpdateMask) || AllowOverwrite) &&
+               "unexpectedly overwriting bitfield");
+        BitsThisChar |= (CI.getValue() & ~UpdateMask);
+        Elems[*FirstElemToUpdate] =
+            CGM.getBuilder().getAttr<mlir::cir::IntAttr>(charTy, BitsThisChar);
+      }
+    }
+
+    // Stop if we've added all the bits.
+    if (WantedBits == Bits.getBitWidth())
+      break;
+
+    // Remove the consumed bits from Bits.
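+    // E.g. (little-endian) after storing 5 bits of a 12-bit field, shift
+    // right by 5 and truncate to the 7 bits that still need a byte.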
+    if (!CGM.getDataLayout().isBigEndian())
+      Bits.lshrInPlace(WantedBits);
+    Bits = Bits.trunc(Bits.getBitWidth() - WantedBits);
+
+    // The remaining bits go at the start of the following bytes.
+    OffsetWithinChar = 0;
+  }
+
+  return true;
+}
+
+/// Returns a position within Elems and Offsets such that all elements
+/// before the returned index end before Pos and all elements at or after
+/// the returned index begin at or after Pos. Splits elements as necessary
+/// to ensure this. Returns std::nullopt if we find something we can't split.
+std::optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
+  if (Pos >= Size)
+    return Offsets.size();
+
+  while (true) {
+    auto FirstAfterPos = llvm::upper_bound(Offsets, Pos);
+    if (FirstAfterPos == Offsets.begin())
+      return 0;
+
+    // If we already have an element starting at Pos, we're done.
+    size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1;
+    if (Offsets[LastAtOrBeforePosIndex] == Pos)
+      return LastAtOrBeforePosIndex;
+
+    // We found an element starting before Pos. Check for overlap.
+    // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
+    mlir::TypedAttr C =
+        Elems[LastAtOrBeforePosIndex].dyn_cast<mlir::TypedAttr>();
+    assert(C && "expected typed attribute");
+    if (Offsets[LastAtOrBeforePosIndex] + getSize(C) <= Pos)
+      return LastAtOrBeforePosIndex + 1;
+
+    // Try to decompose it into smaller constants.
+    if (!split(LastAtOrBeforePosIndex, Pos))
+      return std::nullopt;
+  }
+}
+
+/// Split the constant at index Index, if possible. Return true if we did.
+/// Hint indicates the location at which we'd like to split, but may be
+/// ignored.
+bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
+  llvm_unreachable("NYI");
+}
+
+mlir::Attribute ConstantAggregateBuilder::buildFrom(
+    CIRGenModule &CGM, ArrayRef<mlir::Attribute> Elems,
+    ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
+    bool NaturalLayout, mlir::Type DesiredTy, bool AllowOversized) {
+  ConstantAggregateBuilderUtils Utils(CGM);
+
+  if (Elems.empty())
+    return {};
+  auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; };
+
+  // If we want an array type, see if all the elements are the same type and
+  // appropriately spaced.
+  if (auto aty = DesiredTy.dyn_cast<mlir::cir::ArrayType>()) {
+    llvm_unreachable("NYI");
+  }
+
+  // The size of the constant we plan to generate. This is usually just the
+  // size of the initialized type, but in AllowOversized mode (i.e. flexible
+  // array init), it can be larger.
+  CharUnits DesiredSize = Utils.getSize(DesiredTy);
+  if (Size > DesiredSize) {
+    assert(AllowOversized && "Elems are oversized");
+    DesiredSize = Size;
+  }
+
+  // The natural alignment of an unpacked CIR struct with the given elements.
+  CharUnits Align = CharUnits::One();
+  for (auto e : Elems) {
+    // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
+    auto C = e.dyn_cast<mlir::TypedAttr>();
+    assert(C && "expected typed attribute");
+    Align = std::max(Align, Utils.getAlignment(C));
+  }
+
+  // The natural size of an unpacked CIR struct with the given elements.
+  CharUnits AlignedSize = Size.alignTo(Align);
+
+  bool Packed = false;
+  ArrayRef<mlir::Attribute> UnpackedElems = Elems;
+  llvm::SmallVector<mlir::Attribute, 32> UnpackedElemStorage;
+  if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
+    NaturalLayout = false;
+    Packed = true;
+  } else if (DesiredSize > AlignedSize) {
+    // The natural layout would be too small. Add padding to fix it. (This
+    // is ignored if we choose a packed layout.)
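+    // E.g. elements covering 10 bytes for a 16-byte struct type get a
+    // trailing 6-byte padding element appended here.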
+    UnpackedElemStorage.assign(Elems.begin(), Elems.end());
+    UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
+    UnpackedElems = UnpackedElemStorage;
+  }
+
+  // If we don't have a natural layout, insert padding as necessary.
+  // As we go, double-check to see if we can actually just emit Elems
+  // as a non-packed struct and do so opportunistically if possible.
+  llvm::SmallVector<mlir::Attribute, 32> PackedElems;
+  if (!NaturalLayout) {
+    CharUnits SizeSoFar = CharUnits::Zero();
+    for (size_t I = 0; I != Elems.size(); ++I) {
+      mlir::TypedAttr C = Elems[I].dyn_cast<mlir::TypedAttr>();
+      assert(C && "expected typed attribute");
+
+      CharUnits Align = Utils.getAlignment(C);
+      CharUnits NaturalOffset = SizeSoFar.alignTo(Align);
+      CharUnits DesiredOffset = Offset(I);
+      assert(DesiredOffset >= SizeSoFar && "elements out of order");
+
+      if (DesiredOffset != NaturalOffset)
+        Packed = true;
+      if (DesiredOffset != SizeSoFar)
+        PackedElems.push_back(Utils.getPadding(DesiredOffset - SizeSoFar));
+      PackedElems.push_back(Elems[I]);
+      SizeSoFar = DesiredOffset + Utils.getSize(C);
+    }
+    // If we're using the packed layout, pad it out to the desired size if
+    // necessary.
+    if (Packed) {
+      assert(SizeSoFar <= DesiredSize &&
+             "requested size is too small for contents");
+
+      if (SizeSoFar < DesiredSize)
+        PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
+    }
+  }
+
+  auto &builder = CGM.getBuilder();
+  auto arrAttr = mlir::ArrayAttr::get(builder.getContext(),
+                                      Packed ? PackedElems : UnpackedElems);
+  auto strType = builder.getCompleteStructType(arrAttr, Packed);
+
+  if (auto desired = dyn_cast<mlir::cir::StructType>(DesiredTy))
+    if (desired.isLayoutIdentical(strType))
+      strType = desired;
+
+  return builder.getConstStructOrZeroAttr(arrAttr, Packed, strType);
+}
+
+void ConstantAggregateBuilder::condense(CharUnits Offset,
+                                        mlir::Type DesiredTy) {
+  CharUnits Size = getSize(DesiredTy);
+
+  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
+  if (!FirstElemToReplace)
+    return;
+  size_t First = *FirstElemToReplace;
+
+  std::optional<size_t> LastElemToReplace = splitAt(Offset + Size);
+  if (!LastElemToReplace)
+    return;
+  size_t Last = *LastElemToReplace;
+
+  size_t Length = Last - First;
+  if (Length == 0)
+    return;
+
+  // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
+  mlir::TypedAttr C = Elems[First].dyn_cast<mlir::TypedAttr>();
+  assert(C && "expected typed attribute");
+  if (Length == 1 && Offsets[First] == Offset && getSize(C) == Size) {
+    // Re-wrap single element structs if necessary. Otherwise, leave any single
+    // element constant of the right size alone even if it has the wrong type.
+ llvm_unreachable("NYI"); + } + + mlir::Attribute Replacement = buildFrom( + CGM, ArrayRef(Elems).slice(First, Length), + ArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy), + /*known to have natural layout=*/false, DesiredTy, false); + replace(Elems, First, Last, {Replacement}); + replace(Offsets, First, Last, {Offset}); +} + +//===----------------------------------------------------------------------===// +// ConstStructBuilder +//===----------------------------------------------------------------------===// + +class ConstStructBuilder { + CIRGenModule &CGM; + ConstantEmitter &Emitter; + ConstantAggregateBuilder &Builder; + CharUnits StartOffset; + +public: + static mlir::Attribute BuildStruct(ConstantEmitter &Emitter, + InitListExpr *ILE, QualType StructTy); + static mlir::Attribute BuildStruct(ConstantEmitter &Emitter, + const APValue &Value, QualType ValTy); + static bool UpdateStruct(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Const, CharUnits Offset, + InitListExpr *Updater); + +private: + ConstStructBuilder(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Builder, CharUnits StartOffset) + : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder), + StartOffset(StartOffset) {} + + bool AppendField(const FieldDecl *Field, uint64_t FieldOffset, + mlir::Attribute InitExpr, bool AllowOverwrite = false); + + bool AppendBytes(CharUnits FieldOffsetInChars, mlir::Attribute InitCst, + bool AllowOverwrite = false); + + bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, + mlir::cir::IntAttr InitExpr, bool AllowOverwrite = false); + + bool Build(InitListExpr *ILE, bool AllowOverwrite); + bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, + const CXXRecordDecl *VTableClass, CharUnits BaseOffset); + mlir::Attribute Finalize(QualType Ty); +}; + +bool ConstStructBuilder::AppendField(const FieldDecl *Field, + uint64_t FieldOffset, + mlir::Attribute InitCst, + bool AllowOverwrite) { + const ASTContext &Context = CGM.getASTContext(); + + CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset); + + return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite); +} + +bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars, + mlir::Attribute InitCst, + bool AllowOverwrite) { + return Builder.add(InitCst, StartOffset + FieldOffsetInChars, AllowOverwrite); +} + +bool ConstStructBuilder::AppendBitField(const FieldDecl *Field, + uint64_t FieldOffset, + mlir::cir::IntAttr CI, + bool AllowOverwrite) { + const auto &RL = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); + const auto &Info = RL.getBitFieldInfo(Field); + llvm::APInt FieldValue = CI.getValue(); + + // Promote the size of FieldValue if necessary + // FIXME: This should never occur, but currently it can because initializer + // constants are cast to bool, and because clang is not enforcing bitfield + // width limits. + if (Info.Size > FieldValue.getBitWidth()) + FieldValue = FieldValue.zext(Info.Size); + + // Truncate the size of FieldValue to the bit field size. 
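+  // E.g. initializing `int x : 3` with the constant 13 (0b1101) keeps only
+  // the low three bits, 0b101.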
+  if (Info.Size < FieldValue.getBitWidth())
+    FieldValue = FieldValue.trunc(Info.Size);
+
+  return Builder.addBits(FieldValue,
+                         CGM.getASTContext().toBits(StartOffset) + FieldOffset,
+                         AllowOverwrite);
+}
+
+static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
+                                      ConstantAggregateBuilder &Const,
+                                      CharUnits Offset, QualType Type,
+                                      InitListExpr *Updater) {
+  if (Type->isRecordType())
+    return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);
+
+  auto CAT = Emitter.CGM.getASTContext().getAsConstantArrayType(Type);
+  if (!CAT)
+    return false;
+  QualType ElemType = CAT->getElementType();
+  CharUnits ElemSize = Emitter.CGM.getASTContext().getTypeSizeInChars(ElemType);
+  mlir::Type ElemTy = Emitter.CGM.getTypes().convertTypeForMem(ElemType);
+
+  mlir::Attribute FillC = nullptr;
+  if (Expr *Filler = Updater->getArrayFiller()) {
+    if (!isa<NoInitExpr>(Filler)) {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  unsigned NumElementsToUpdate =
+      FillC ? CAT->getSize().getZExtValue() : Updater->getNumInits();
+  for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
+    Expr *Init = nullptr;
+    if (I < Updater->getNumInits())
+      Init = Updater->getInit(I);
+
+    if (!Init && FillC) {
+      if (!Const.add(FillC, Offset, true))
+        return false;
+    } else if (!Init || isa<NoInitExpr>(Init)) {
+      continue;
+    } else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init)) {
+      if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType,
+                                     ChildILE))
+        return false;
+      // Attempt to reduce the array element to a single constant if necessary.
+      Const.condense(Offset, ElemTy);
+    } else {
+      mlir::Attribute Val = Emitter.tryEmitPrivateForMemory(Init, ElemType);
+      if (!Const.add(Val, Offset, true))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
+  RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+  const ASTRecordLayout &Layout = CGM.getASTContext().getASTRecordLayout(RD);
+
+  unsigned FieldNo = -1;
+  unsigned ElementNo = 0;
+
+  // Bail out if we have base classes. We could support these, but they only
+  // arise in C++1z where we will have already constant folded most interesting
+  // cases. FIXME: There are still a few more cases we can handle this way.
+  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+    if (CXXRD->getNumBases())
+      return false;
+
+  for (FieldDecl *Field : RD->fields()) {
+    ++FieldNo;
+
+    // If this is a union, skip all the fields that aren't being initialized.
+    if (RD->isUnion() &&
+        !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
+      continue;
+
+    // Don't emit anonymous bitfields.
+    if (Field->isUnnamedBitField())
+      continue;
+
+    // Get the initializer. A struct can include fields without initializers,
+    // we just use explicit null values for them.
+    Expr *Init = nullptr;
+    if (ElementNo < ILE->getNumInits())
+      Init = ILE->getInit(ElementNo++);
+    if (Init && isa<NoInitExpr>(Init))
+      continue;
+
+    // Zero-sized fields are not emitted, but their initializers may still
+    // prevent emission of this struct as a constant.
+    if (Field->isZeroSize(CGM.getASTContext())) {
+      if (Init && Init->HasSideEffects(CGM.getASTContext()))
+        return false;
+      continue;
+    }
+
+    // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
+    // represents additional overwriting of our current constant value, and not
+    // a new constant to emit independently.
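+    // E.g. in C, `struct S s = { .inner = base, .inner.b = 3 };` can yield a
+    // DesignatedInitUpdateExpr whose nested list patches the constant already
+    // emitted for `inner` instead of producing a fresh one.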
+    if (AllowOverwrite &&
+        (Field->getType()->isArrayType() || Field->getType()->isRecordType())) {
+      if (auto *SubILE = dyn_cast<InitListExpr>(Init)) {
+        CharUnits Offset = CGM.getASTContext().toCharUnitsFromBits(
+            Layout.getFieldOffset(FieldNo));
+        if (!EmitDesignatedInitUpdater(Emitter, Builder, StartOffset + Offset,
+                                       Field->getType(), SubILE))
+          return false;
+        // If we split apart the field's value, try to collapse it down to a
+        // single value now.
+        llvm_unreachable("NYI");
+        continue;
+      }
+    }
+
+    mlir::Attribute EltInit;
+    if (Init)
+      EltInit = Emitter.tryEmitPrivateForMemory(Init, Field->getType());
+    else
+      llvm_unreachable("NYI");
+
+    if (!EltInit)
+      return false;
+
+    if (!Field->isBitField()) {
+      // Handle non-bitfield members.
+      if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
+                       AllowOverwrite))
+        return false;
+      // After emitting a non-empty field with [[no_unique_address]], we may
+      // need to overwrite its tail padding.
+      if (Field->hasAttr<NoUniqueAddressAttr>())
+        AllowOverwrite = true;
+    } else {
+      // Otherwise we have a bitfield.
+      if (auto constInt = dyn_cast<mlir::cir::IntAttr>(EltInit)) {
+        if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), constInt,
+                            AllowOverwrite))
+          return false;
+      } else {
+        // We are trying to initialize a bitfield with a non-trivial constant,
+        // this must require run-time code.
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+namespace {
+struct BaseInfo {
+  BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+      : Decl(Decl), Offset(Offset), Index(Index) {}
+
+  const CXXRecordDecl *Decl;
+  CharUnits Offset;
+  unsigned Index;
+
+  bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+} // namespace
+
+bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+                               bool IsPrimaryBase,
+                               const CXXRecordDecl *VTableClass,
+                               CharUnits Offset) {
+  const ASTRecordLayout &Layout = CGM.getASTContext().getASTRecordLayout(RD);
+
+  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+    // Add a vtable pointer, if we need one and it hasn't already been added.
+    if (Layout.hasOwnVFPtr())
+      llvm_unreachable("NYI");
+
+    // Accumulate and sort bases, in order to visit them in address order,
+    // which may not be the same as declaration order.
+    SmallVector<BaseInfo, 8> Bases;
+    Bases.reserve(CD->getNumBases());
+    unsigned BaseNo = 0;
+    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+                                                  BaseEnd = CD->bases_end();
+         Base != BaseEnd; ++Base, ++BaseNo) {
+      assert(!Base->isVirtual() && "should not have virtual bases here");
+      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+      CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+      Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+    }
+    llvm::stable_sort(Bases);
+
+    for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+      BaseInfo &Base = Bases[I];
+
+      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+      Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+            VTableClass, Offset + Base.Offset);
+    }
+  }
+
+  unsigned FieldNo = 0;
+  uint64_t OffsetBits = CGM.getASTContext().toBits(Offset);
+
+  bool AllowOverwrite = false;
+  for (RecordDecl::field_iterator Field = RD->field_begin(),
+                                  FieldEnd = RD->field_end();
+       Field != FieldEnd; ++Field, ++FieldNo) {
+    // If this is a union, skip all the fields that aren't being initialized.
+    if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field))
+      continue;
+
+    // Don't emit anonymous bitfields or zero-sized fields.
+    if (Field->isUnnamedBitField() || Field->isZeroSize(CGM.getASTContext()))
+      continue;
+
+    // Emit the value of the initializer.
+    const APValue &FieldValue =
+        RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+    mlir::Attribute EltInit =
+        Emitter.tryEmitPrivateForMemory(FieldValue, Field->getType());
+    if (!EltInit)
+      return false;
+
+    if (!Field->isBitField()) {
+      // Handle non-bitfield members.
+      if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+                       EltInit, AllowOverwrite))
+        return false;
+      // After emitting a non-empty field with [[no_unique_address]], we may
+      // need to overwrite its tail padding.
+      if (Field->hasAttr<NoUniqueAddressAttr>())
+        AllowOverwrite = true;
+    } else {
+      llvm_unreachable("NYI");
+    }
+  }
+
+  return true;
+}
+
+mlir::Attribute ConstStructBuilder::Finalize(QualType Type) {
+  Type = Type.getNonReferenceType();
+  RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+  mlir::Type ValTy = CGM.getTypes().ConvertType(Type);
+  return Builder.build(ValTy, RD->hasFlexibleArrayMember());
+}
+
+mlir::Attribute ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
+                                                InitListExpr *ILE,
+                                                QualType ValTy) {
+  ConstantAggregateBuilder Const(Emitter.CGM);
+  ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
+
+  if (!Builder.Build(ILE, /*AllowOverwrite*/ false))
+    return nullptr;
+
+  return Builder.Finalize(ValTy);
+}
+
+mlir::Attribute ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
+                                                const APValue &Val,
+                                                QualType ValTy) {
+  ConstantAggregateBuilder Const(Emitter.CGM);
+  ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
+
+  const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+  if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero()))
+    return nullptr;
+
+  return Builder.Finalize(ValTy);
+}
+
+bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
+                                      ConstantAggregateBuilder &Const,
+                                      CharUnits Offset, InitListExpr *Updater) {
+  return ConstStructBuilder(Emitter, Const, Offset)
+      .Build(Updater, /*AllowOverwrite*/ true);
+}
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
+// This class only needs to handle arrays, structs and unions.
+//
+// In LLVM codegen, when outside C++11 mode, those types are not constant
+// folded, while all other types are handled by constant folding.
+//
+// In CIR codegen, instead of folding things here, we should defer that work
+// to MLIR: do not attempt to do much here.
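+// For instance, `int g[4] = {1, 2, 3, 4};` is handled below and becomes
+// roughly a #cir.const_array attribute, while a scalar like `int g = 1 + 2;`
+// is expected to be folded by Evaluate() before ever reaching this visitor.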
+class ConstExprEmitter
+    : public StmtVisitor<ConstExprEmitter, mlir::Attribute, QualType> {
+  CIRGenModule &CGM;
+  LLVM_ATTRIBUTE_UNUSED ConstantEmitter &Emitter;
+
+public:
+  ConstExprEmitter(ConstantEmitter &emitter)
+      : CGM(emitter.CGM), Emitter(emitter) {}
+
+  //===--------------------------------------------------------------------===//
+  // Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  mlir::Attribute VisitStmt(Stmt *S, QualType T) { return nullptr; }
+
+  mlir::Attribute VisitConstantExpr(ConstantExpr *CE, QualType T) {
+    if (mlir::Attribute Result = Emitter.tryEmitConstantExpr(CE))
+      return Result;
+    return Visit(CE->getSubExpr(), T);
+  }
+
+  mlir::Attribute VisitParenExpr(ParenExpr *PE, QualType T) {
+    return Visit(PE->getSubExpr(), T);
+  }
+
+  mlir::Attribute
+  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE,
+                                    QualType T) {
+    return Visit(PE->getReplacement(), T);
+  }
+
+  mlir::Attribute VisitGenericSelectionExpr(GenericSelectionExpr *GE,
+                                            QualType T) {
+    return Visit(GE->getResultExpr(), T);
+  }
+
+  mlir::Attribute VisitChooseExpr(ChooseExpr *CE, QualType T) {
+    return Visit(CE->getChosenSubExpr(), T);
+  }
+
+  mlir::Attribute VisitCompoundLiteralExpr(CompoundLiteralExpr *E, QualType T) {
+    return Visit(E->getInitializer(), T);
+  }
+
+  mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) {
+    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+      CGM.buildExplicitCastExprType(ECE, Emitter.CGF);
+    Expr *subExpr = E->getSubExpr();
+
+    switch (E->getCastKind()) {
+    case CK_HLSLArrayRValue:
+    case CK_HLSLVectorTruncation:
+    case CK_ToUnion:
+      llvm_unreachable("not implemented");
+
+    case CK_AddressSpaceConversion: {
+      llvm_unreachable("not implemented");
+    }
+
+    case CK_LValueToRValue:
+    case CK_AtomicToNonAtomic:
+    case CK_NonAtomicToAtomic:
+    case CK_NoOp:
+    case CK_ConstructorConversion:
+      return Visit(subExpr, destType);
+
+    case CK_IntToOCLSampler:
+      llvm_unreachable("global sampler variables are not generated");
+
+    case CK_Dependent:
+      llvm_unreachable("saw dependent cast!");
+
+    case CK_BuiltinFnToFnPtr:
+      llvm_unreachable("builtin functions are handled elsewhere");
+
+    case CK_ReinterpretMemberPointer:
+    case CK_DerivedToBaseMemberPointer:
+    case CK_BaseToDerivedMemberPointer: {
+      llvm_unreachable("not implemented");
+    }
+
+    // These will never be supported.
+    case CK_ObjCObjectLValueCast:
+    case CK_ARCProduceObject:
+    case CK_ARCConsumeObject:
+    case CK_ARCReclaimReturnedObject:
+    case CK_ARCExtendBlockObject:
+    case CK_CopyAndAutoreleaseBlockObject:
+      return nullptr;
+
+    // These don't need to be handled here because Evaluate knows how to
+    // evaluate them in the cases where they can be folded.
+    case CK_BitCast:
+    case CK_ToVoid:
+    case CK_Dynamic:
+    case CK_LValueBitCast:
+    case CK_LValueToRValueBitCast:
+    case CK_NullToMemberPointer:
+    case CK_UserDefinedConversion:
+    case CK_CPointerToObjCPointerCast:
+    case CK_BlockPointerToObjCPointerCast:
+    case CK_AnyPointerToBlockPointerCast:
+    case CK_ArrayToPointerDecay:
+    case CK_FunctionToPointerDecay:
+    case CK_BaseToDerived:
+    case CK_DerivedToBase:
+    case CK_UncheckedDerivedToBase:
+    case CK_MemberPointerToBoolean:
+    case CK_VectorSplat:
+    case CK_FloatingRealToComplex:
+    case CK_FloatingComplexToReal:
+    case CK_FloatingComplexToBoolean:
+    case CK_FloatingComplexCast:
+    case CK_FloatingComplexToIntegralComplex:
+    case CK_IntegralRealToComplex:
+    case CK_IntegralComplexToReal:
+    case CK_IntegralComplexToBoolean:
+    case CK_IntegralComplexCast:
+    case CK_IntegralComplexToFloatingComplex:
+    case CK_PointerToIntegral:
+    case CK_PointerToBoolean:
+    case CK_NullToPointer:
+    case CK_IntegralCast:
+    case CK_BooleanToSignedIntegral:
+    case CK_IntegralToPointer:
+    case CK_IntegralToBoolean:
+    case CK_IntegralToFloating:
+    case CK_FloatingToIntegral:
+    case CK_FloatingToBoolean:
+    case CK_FloatingCast:
+    case CK_FloatingToFixedPoint:
+    case CK_FixedPointToFloating:
+    case CK_FixedPointCast:
+    case CK_FixedPointToBoolean:
+    case CK_FixedPointToIntegral:
+    case CK_IntegralToFixedPoint:
+    case CK_ZeroToOCLOpaqueType:
+    case CK_MatrixCast:
+      return nullptr;
+    }
+    llvm_unreachable("Invalid CastKind");
+  }
+
+  mlir::Attribute VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) {
+    // TODO(cir): figure out CIR story here...
+    // No need for a DefaultInitExprScope: we don't handle 'this' in a
+    // constant expression.
+    return Visit(DIE->getExpr(), T);
+  }
+
+  mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *E, QualType T) {
+    // Since this is about constant emission there is no need to wrap this
+    // under a scope.
+    return Visit(E->getSubExpr(), T);
+  }
+
+  mlir::Attribute VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E,
+                                                QualType T) {
+    return Visit(E->getSubExpr(), T);
+  }
+
+  mlir::Attribute EmitArrayInitialization(InitListExpr *ILE, QualType T) {
+    auto *CAT = CGM.getASTContext().getAsConstantArrayType(ILE->getType());
+    assert(CAT && "can't emit array init for non-constant-bound array");
+    unsigned NumInitElements = ILE->getNumInits();        // init list size
+    unsigned NumElements = CAT->getSize().getZExtValue(); // array size
+    unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+    QualType EltTy = CAT->getElementType();
+    SmallVector<mlir::TypedAttr, 16> Elts;
+    Elts.reserve(NumElements);
+
+    // Emit array filler, if there is one.
+    mlir::Attribute Filler;
+    if (ILE->hasArrayFiller()) {
+      auto *aux = ILE->getArrayFiller();
+      Filler = Emitter.tryEmitAbstractForMemory(aux, CAT->getElementType());
+      if (!Filler)
+        return {};
+    }
+
+    // Emit initializer elements as MLIR attributes and check for common type.
+    mlir::Type CommonElementType;
+    for (unsigned i = 0; i != NumInitableElts; ++i) {
+      Expr *Init = ILE->getInit(i);
+      auto C = Emitter.tryEmitPrivateForMemory(Init, EltTy);
+      if (!C)
+        return {};
+      if (i == 0)
+        CommonElementType = C.getType();
+      else if (C.getType() != CommonElementType)
+        CommonElementType = nullptr;
+      Elts.push_back(std::move(C));
+    }
+
+    auto desiredType = CGM.getTypes().ConvertType(T);
+    auto typedFiller = llvm::dyn_cast_or_null<mlir::TypedAttr>(Filler);
+    if (Filler && !typedFiller)
+      llvm_unreachable("We shouldn't be receiving untyped attrs here");
+    return buildArrayConstant(CGM, desiredType, CommonElementType, NumElements,
+                              Elts, typedFiller);
+  }
+
+  mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) {
+    return ConstStructBuilder::BuildStruct(Emitter, ILE, T);
+  }
+
+  mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E,
+                                             QualType T) {
+    return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(T));
+  }
+
+  mlir::Attribute VisitInitListExpr(InitListExpr *ILE, QualType T) {
+    if (ILE->isTransparent())
+      return Visit(ILE->getInit(0), T);
+
+    if (ILE->getType()->isArrayType())
+      return EmitArrayInitialization(ILE, T);
+
+    if (ILE->getType()->isRecordType())
+      return EmitRecordInitialization(ILE, T);
+
+    return nullptr;
+  }
+
+  mlir::Attribute VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E,
+                                                QualType destType) {
+    auto C = Visit(E->getBase(), destType);
+    if (!C)
+      return nullptr;
+
+    assert(0 && "not implemented");
+    return {};
+  }
+
+  mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) {
+    if (!E->getConstructor()->isTrivial())
+      return nullptr;
+
+    // Only default and copy/move constructors can be trivial.
+    if (E->getNumArgs()) {
+      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
+             "trivial ctor has argument but isn't a copy/move ctor");
+
+      Expr *Arg = E->getArg(0);
+      assert(CGM.getASTContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+             "argument to copy ctor is of wrong type");
+
+      // Look through the temporary; it's just converting the value to an
+      // lvalue to pass it to the constructor.
+      if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
+        return Visit(MTE->getSubExpr(), Ty);
+      // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+      return nullptr;
+    }
+
+    return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(Ty));
+  }
+
+  mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) {
+    // This is a string literal initializing an array in an initializer.
+    return CGM.getConstantArrayFromStringLiteral(E);
+  }
+
+  mlir::Attribute VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) {
+    assert(0 && "not implemented");
+    return {};
+  }
+
+  mlir::Attribute VisitUnaryExtension(const UnaryOperator *E, QualType T) {
+    return Visit(E->getSubExpr(), T);
+  }
+
+  // Utility methods
+  mlir::Type ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); }
+};
+
+static mlir::Attribute
+buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType,
+                   mlir::Type CommonElementType, unsigned ArrayBound,
+                   SmallVectorImpl<mlir::TypedAttr> &Elements,
+                   mlir::TypedAttr Filler) {
+  auto &builder = CGM.getBuilder();
+
+  // Figure out how long the initial prefix of non-zero elements is.
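+  // E.g. for `int a[100] = {1, 2};` the non-zero prefix has length 2 and the
+  // 98 trailing zeroes qualify for the zero-filler path below.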
+  unsigned NonzeroLength = ArrayBound;
+  if (Elements.size() < NonzeroLength && builder.isNullValue(Filler))
+    NonzeroLength = Elements.size();
+  if (NonzeroLength == Elements.size()) {
+    while (NonzeroLength > 0 &&
+           builder.isNullValue(Elements[NonzeroLength - 1]))
+      --NonzeroLength;
+  }
+
+  if (NonzeroLength == 0)
+    return builder.getZeroInitAttr(DesiredType);
+
+  // Add a zeroinitializer array filler if we have lots of trailing zeroes.
+  unsigned TrailingZeroes = ArrayBound - NonzeroLength;
+  if (TrailingZeroes >= 8) {
+    assert(Elements.size() >= NonzeroLength &&
+           "missing initializer for non-zero element");
+
+    SmallVector<mlir::Attribute, 4> Eles;
+    Eles.reserve(Elements.size());
+    for (auto const &Element : Elements)
+      Eles.push_back(Element);
+
+    return builder.getConstArray(
+        mlir::ArrayAttr::get(builder.getContext(), Eles),
+        mlir::cir::ArrayType::get(builder.getContext(), CommonElementType,
+                                  ArrayBound));
+    // TODO(cir): If all the elements had the same type up to the trailing
+    // zeroes, emit a struct of two arrays (the nonzero data and the
+    // zeroinitializer). Use DesiredType to get the element type.
+  } else if (Elements.size() != ArrayBound) {
+    // Otherwise pad to the right size with the filler if necessary.
+    Elements.resize(ArrayBound, Filler);
+    if (Filler.getType() != CommonElementType)
+      CommonElementType = {};
+  }
+
+  // If all elements have the same type, just emit an array constant.
+  if (CommonElementType) {
+    SmallVector<mlir::Attribute, 4> Eles;
+    Eles.reserve(Elements.size());
+    for (auto const &Element : Elements)
+      Eles.push_back(Element);
+
+    return builder.getConstArray(
+        mlir::ArrayAttr::get(builder.getContext(), Eles),
+        mlir::cir::ArrayType::get(builder.getContext(), CommonElementType,
+                                  ArrayBound));
+  }
+
+  SmallVector<mlir::Attribute, 4> Eles;
+  Eles.reserve(Elements.size());
+  for (auto const &Element : Elements)
+    Eles.push_back(Element);
+
+  auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), Eles);
+  return builder.getAnonConstStruct(arrAttr, false);
+}
+
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// ConstantLValueEmitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// A struct which can be used to peephole certain kinds of finalization
+/// that normally happen during l-value emission.
+struct ConstantLValue {
+  llvm::PointerUnion<mlir::Value, mlir::Attribute> Value;
+  bool HasOffsetApplied;
+
+  /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false)
+      : Value(value), HasOffsetApplied(hasOffsetApplied) {}
+
+  /*implicit*/ ConstantLValue(mlir::cir::GlobalViewAttr address)
+      : Value(address), HasOffsetApplied(false) {}
+
+  ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {}
+};
+
+/// A helper class for emitting constant l-values.
+class ConstantLValueEmitter
+    : public ConstStmtVisitor<ConstantLValueEmitter, ConstantLValue> {
+  CIRGenModule &CGM;
+  ConstantEmitter &Emitter;
+  const APValue &Value;
+  QualType DestType;
+
+  // Befriend StmtVisitorBase so that we don't have to expose Visit*.
+  friend StmtVisitorBase;
+
+public:
+  ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value,
+                        QualType destType)
+      : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType) {}
+
+  mlir::Attribute tryEmit();
+
+private:
+  mlir::Attribute tryEmitAbsolute(mlir::Type destTy);
+  ConstantLValue tryEmitBase(const APValue::LValueBase &base);
+
+  ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
+  ConstantLValue VisitConstantExpr(const ConstantExpr *E);
+  ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+  ConstantLValue VisitStringLiteral(const StringLiteral *E);
+  ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
+  ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
+  ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E);
+  ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E);
+  ConstantLValue VisitAddrLabelExpr(const AddrLabelExpr *E);
+  ConstantLValue VisitCallExpr(const CallExpr *E);
+  ConstantLValue VisitBlockExpr(const BlockExpr *E);
+  ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
+  ConstantLValue
+  VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+
+  bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); }
+
+  /// Return GEP-like value offset
+  mlir::ArrayAttr getOffset(mlir::Type Ty) {
+    auto Offset = Value.getLValueOffset().getQuantity();
+    CIRDataLayout Layout(CGM.getModule());
+    SmallVector<int64_t> Idx;
+    CGM.getBuilder().computeGlobalViewIndicesFromFlatOffset(Offset, Ty, Layout,
+                                                            Idx);
+
+    llvm::SmallVector<mlir::Attribute> Indices;
+    for (auto I : Idx) {
+      auto Attr = CGM.getBuilder().getI32IntegerAttr(I);
+      Indices.push_back(Attr);
+    }
+
+    if (Indices.empty())
+      return {};
+    return CGM.getBuilder().getArrayAttr(Indices);
+  }
+
+  // TODO(cir): create a proper interface to abstract CIR constant values.
+
+  /// Apply the value offset to the given constant.
+  ConstantLValue applyOffset(ConstantLValue &C) {
+
+    // Handle attribute constant LValues.
+    if (auto Attr = C.Value.dyn_cast<mlir::Attribute>()) {
+      if (auto GV = Attr.dyn_cast<mlir::cir::GlobalViewAttr>()) {
+        auto baseTy = GV.getType().cast<mlir::cir::PointerType>().getPointee();
+        auto destTy = CGM.getTypes().convertTypeForMem(DestType);
+        assert(!GV.getIndices() && "Global view is already indexed");
+        return mlir::cir::GlobalViewAttr::get(destTy, GV.getSymbol(),
+                                              getOffset(baseTy));
+      }
+      llvm_unreachable("Unsupported attribute type to offset");
+    }
+
+    // TODO(cir): use ptr_stride, or something...
+    llvm_unreachable("NYI");
+  }
+};
+
+} // namespace
+
+mlir::Attribute ConstantLValueEmitter::tryEmit() {
+  const APValue::LValueBase &base = Value.getLValueBase();
+
+  // The destination type should be a pointer or reference
+  // type, but it might also be a cast thereof.
+  //
+  // FIXME: the chain of casts required should be reflected in the APValue.
+  // We need this in order to correctly handle things like a ptrtoint of a
+  // non-zero null pointer and addrspace casts that aren't trivially
+  // represented in LLVM IR.
+  auto destTy = CGM.getTypes().convertTypeForMem(DestType);
+  assert(destTy.isa<mlir::cir::PointerType>());
+
+  // If there's no base at all, this is a null or absolute pointer,
+  // possibly cast back to an integer type.
+  if (!base) {
+    return tryEmitAbsolute(destTy);
+  }
+
+  // Otherwise, try to emit the base.
+  ConstantLValue result = tryEmitBase(base);
+
+  // If that failed, we're done.
+  auto &value = result.Value;
+  if (!value)
+    return {};
+
+  // Apply the offset if necessary and not already done.
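+  // Illustrative example: for `int arr[4]; int *p = arr + 2;` the base emits
+  // as a #cir.global_view of @arr, and applyOffset maps the byte offset 8
+  // back to an index path of [2] via getOffset() (assuming 4-byte ints and
+  // the current global_view index convention).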
+ if (!result.HasOffsetApplied) { + value = applyOffset(result).Value; + } + + // Convert to the appropriate type; this could be an lvalue for + // an integer. FIXME: performAddrSpaceCast + if (destTy.isa()) { + if (value.is()) + return value.get(); + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +/// Try to emit an absolute l-value, such as a null pointer or an integer +/// bitcast to pointer type. +mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { + // If we're producing a pointer, this is easy. + auto destPtrTy = destTy.dyn_cast(); + assert(destPtrTy && "expected !cir.ptr type"); + return CGM.getBuilder().getConstPtrAttr( + destPtrTy, Value.getLValueOffset().getQuantity()); +} + +ConstantLValue +ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { + // Handle values. + if (const ValueDecl *D = base.dyn_cast()) { + // The constant always points to the canonical declaration. We want to look + // at properties of the most recent declaration at the point of emission. + D = cast(D->getMostRecentDecl()); + + if (D->hasAttr()) + llvm_unreachable("emit pointer base for weakref is NYI"); + + if (auto *FD = dyn_cast(D)) { + auto fop = CGM.GetAddrOfFunction(FD); + auto builder = CGM.getBuilder(); + auto ctxt = builder.getContext(); + return mlir::cir::GlobalViewAttr::get( + builder.getPointerTo(fop.getFunctionType()), + mlir::FlatSymbolRefAttr::get(ctxt, fop.getSymNameAttr())); + } + + if (auto *VD = dyn_cast(D)) { + // We can never refer to a variable with local storage. + if (!VD->hasLocalStorage()) { + if (VD->isFileVarDecl() || VD->hasExternalStorage()) + return CGM.getAddrOfGlobalVarAttr(VD); + + if (VD->isLocalVarDecl()) { + auto linkage = + CGM.getCIRLinkageVarDefinition(VD, /*IsConstant=*/false); + return CGM.getBuilder().getGlobalViewAttr( + CGM.getOrCreateStaticVarDecl(*VD, linkage)); + } + } + } + } + + // Handle typeid(T). + if (TypeInfoLValue TI = base.dyn_cast()) { + assert(0 && "NYI"); + } + + // Otherwise, it must be an expression. 
+ return Visit(base.get()); +} + +static ConstantLValue +tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter, + const CompoundLiteralExpr *E) { + CIRGenModule &CGM = emitter.CGM; + + LangAS addressSpace = E->getType().getAddressSpace(); + mlir::Attribute C = emitter.tryEmitForInitializer(E->getInitializer(), + addressSpace, E->getType()); + if (!C) { + assert(!E->isFileScope() && + "file-scope compound literal did not have constant initializer!"); + return nullptr; + } + + auto GV = CIRGenModule::createGlobalOp( + CGM, CGM.getLoc(E->getSourceRange()), + CGM.createGlobalCompoundLiteralName(), + CGM.getTypes().convertTypeForMem(E->getType()), + E->getType().isConstantStorage(CGM.getASTContext(), false, false)); + GV.setInitialValueAttr(C); + GV.setLinkage(mlir::cir::GlobalLinkageKind::InternalLinkage); + CharUnits Align = CGM.getASTContext().getTypeAlignInChars(E->getType()); + GV.setAlignment(Align.getAsAlign().value()); + + emitter.finalize(GV); + return CGM.getBuilder().getGlobalViewAttr(GV); +} + +ConstantLValue ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) { + assert(0 && "NYI"); + return Visit(E->getSubExpr()); +} + +ConstantLValue +ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { + ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF); + CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext()); + return tryEmitGlobalCompoundLiteral(CompoundLiteralEmitter, E); +} + +ConstantLValue +ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *E) { + return CGM.getAddrOfConstantStringFromLiteral(E); +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitMaterializeTemporaryExpr( + const MaterializeTemporaryExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +//===----------------------------------------------------------------------===// +// ConstantEmitter +//===----------------------------------------------------------------------===// + +mlir::Attribute ConstantEmitter::validateAndPopAbstract(mlir::Attribute C, + AbstractState saved) { + Abstract = saved.OldValue; + + assert(saved.OldPlaceholdersSize == PlaceholderAddresses.size() && + "created a placeholder while doing an abstract emission?"); + + // No validation necessary for now. + // No cleanup to do for now. 
+ return C; +} + +mlir::Attribute ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { + initializeNonAbstract(D.getType().getAddressSpace()); + return markIfFailed(tryEmitPrivateForVarInit(D)); +} + +mlir::Attribute ConstantEmitter::tryEmitForInitializer(const Expr *E, + LangAS destAddrSpace, + QualType destType) { + initializeNonAbstract(destAddrSpace); + return markIfFailed(tryEmitPrivateForMemory(E, destType)); +} + +void ConstantEmitter::finalize(mlir::cir::GlobalOp global) { + assert(InitializedNonAbstract && + "finalizing emitter that was used for abstract emission?"); + assert(!Finalized && "finalizing emitter multiple times"); + assert(!global.isDeclaration()); + + // Note that we might also be Failed. + Finalized = true; + + if (!PlaceholderAddresses.empty()) { + assert(0 && "not implemented"); + } +} + +ConstantEmitter::~ConstantEmitter() { + assert((!InitializedNonAbstract || Finalized || Failed) && + "not finalized after being initialized for non-abstract emission"); + assert(PlaceholderAddresses.empty() && "unhandled placeholders"); +} + +// TODO(cir): this can be shared with LLVM's codegen +static QualType getNonMemoryType(CIRGenModule &CGM, QualType type) { + if (auto AT = type->getAs()) { + return CGM.getASTContext().getQualifiedType(AT->getValueType(), + type.getQualifiers()); + } + return type; +} + +mlir::Attribute +ConstantEmitter::tryEmitAbstractForInitializer(const VarDecl &D) { + auto state = pushAbstract(); + auto C = tryEmitPrivateForVarInit(D); + return validateAndPopAbstract(C, state); +} + +mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { + // Make a quick check if variable can be default NULL initialized + // and avoid going through rest of code which may do, for c++11, + // initialization of memory to all NULLs. + if (!D.hasLocalStorage()) { + QualType Ty = CGM.getASTContext().getBaseElementType(D.getType()); + if (Ty->isRecordType()) + if (const CXXConstructExpr *E = + dyn_cast_or_null(D.getInit())) { + const CXXConstructorDecl *CD = E->getConstructor(); + // FIXME: we should probably model this more closely to C++ than + // just emitting a global with zero init (mimic what we do for trivial + // assignments and whatnots). Since this is for globals shouldn't + // be a problem for the near future. + if (CD->isTrivial() && CD->isDefaultConstructor()) + return mlir::cir::ZeroAttr::get( + CGM.getBuilder().getContext(), + CGM.getTypes().ConvertType(D.getType())); + } + } + InConstantContext = D.hasConstantInitialization(); + + const Expr *E = D.getInit(); + assert(E && "No initializer to emit"); + + QualType destType = D.getType(); + + if (!destType->isReferenceType()) { + QualType nonMemoryDestType = getNonMemoryType(CGM, destType); + if (auto C = ConstExprEmitter(*this).Visit(const_cast(E), + nonMemoryDestType)) + return emitForMemory(C, destType); + } + + // Try to emit the initializer. Note that this can allow some things that + // are not allowed by tryEmitPrivateForMemory alone. 
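+  // For example: in `constexpr int x = square(3);` (square being some
+  // constexpr function) the initializer is not a simple constant expression
+  // node, but evaluateValue() can fold the whole call into an APValue
+  // (here, 9), which is then emitted below.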
+ if (auto value = D.evaluateValue()) + return tryEmitPrivateForMemory(*value, destType); + + return nullptr; +} + +mlir::Attribute ConstantEmitter::tryEmitAbstract(const Expr *E, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(E, destType); + return validateAndPopAbstract(C, state); +} + +mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(value, destType); + return validateAndPopAbstract(C, state); +} + +mlir::Attribute ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) { + if (!CE->hasAPValueResult()) + return nullptr; + + QualType RetType = CE->getType(); + if (CE->isGLValue()) + RetType = CGM.getASTContext().getLValueReferenceType(RetType); + + return emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType); +} + +mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitAbstract(E, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + +mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitAbstract(value, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + +mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitPrivate(E, nonMemoryDestType); + if (C) { + auto attr = emitForMemory(C, destType); + auto typedAttr = llvm::dyn_cast(attr); + if (!typedAttr) + llvm_unreachable("this should always be typed"); + return typedAttr; + } else { + return nullptr; + } +} + +mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitPrivate(value, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + +mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, + mlir::Attribute C, + QualType destType) { + // For an _Atomic-qualified constant, we may need to add tail padding. + if (auto AT = destType->getAs()) { + assert(0 && "not implemented"); + } + + // Zero-extend bool. + auto typed = C.dyn_cast(); + if (typed && typed.getType().isa()) { + // Already taken care given that bool values coming from + // integers only carry true/false. 
+ } + + return C; +} + +mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, + QualType destType) { + assert(!destType->isVoidType() && "can't emit a void constant"); + + if (auto C = ConstExprEmitter(*this).Visit(const_cast(E), destType)) { + if (auto TypedC = C.dyn_cast_or_null()) + return TypedC; + llvm_unreachable("this should always be typed"); + } + + Expr::EvalResult Result; + + bool Success; + + if (destType->isReferenceType()) + Success = E->EvaluateAsLValue(Result, CGM.getASTContext()); + else + Success = + E->EvaluateAsRValue(Result, CGM.getASTContext(), InConstantContext); + + if (Success && !Result.hasSideEffects()) { + auto C = tryEmitPrivate(Result.Val, destType); + if (auto TypedC = C.dyn_cast_or_null()) + return TypedC; + llvm_unreachable("this should always be typed"); + } + + return nullptr; +} + +mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, + QualType DestType) { + auto &builder = CGM.getBuilder(); + switch (Value.getKind()) { + case APValue::None: + case APValue::Indeterminate: + // TODO(cir): LLVM models out-of-lifetime and indeterminate values as + // 'undef'. Find out what's better for CIR. + assert(0 && "not implemented"); + case APValue::Int: { + mlir::Type ty = CGM.getCIRType(DestType); + if (ty.isa()) + return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); + assert(ty.isa() && "expected integral type"); + return CGM.getBuilder().getAttr(ty, Value.getInt()); + } + case APValue::Float: { + const llvm::APFloat &Init = Value.getFloat(); + if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() && + !CGM.getASTContext().getLangOpts().NativeHalfType && + CGM.getASTContext().getTargetInfo().useFP16ConversionIntrinsics()) + assert(0 && "not implemented"); + else { + mlir::Type ty = CGM.getCIRType(DestType); + assert(ty.isa() && + "expected floating-point type"); + return CGM.getBuilder().getAttr(ty, Init); + } + } + case APValue::Array: { + const ArrayType *ArrayTy = CGM.getASTContext().getAsArrayType(DestType); + unsigned NumElements = Value.getArraySize(); + unsigned NumInitElts = Value.getArrayInitializedElts(); + + // Emit array filler, if there is one. + mlir::Attribute Filler; + if (Value.hasArrayFiller()) { + Filler = tryEmitAbstractForMemory(Value.getArrayFiller(), + ArrayTy->getElementType()); + if (!Filler) + return {}; + } + + // Emit initializer elements. 
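+    // For example: `int a[8] = {1};` reaches this point with a single
+    // initialized element plus a zero array filler covering the other seven.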
+ SmallVector Elts; + if (Filler && builder.isNullValue(Filler)) + Elts.reserve(NumInitElts + 1); + else + Elts.reserve(NumElements); + + mlir::Type CommonElementType; + for (unsigned I = 0; I < NumInitElts; ++I) { + auto C = tryEmitPrivateForMemory(Value.getArrayInitializedElt(I), + ArrayTy->getElementType()); + if (!C) + return {}; + + assert(C.isa() && "This should always be a TypedAttr."); + auto CTyped = C.cast(); + + if (I == 0) + CommonElementType = CTyped.getType(); + else if (CTyped.getType() != CommonElementType) + CommonElementType = {}; + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should always be typed"); + Elts.push_back(typedC); + } + + auto Desired = CGM.getTypes().ConvertType(DestType); + + auto typedFiller = llvm::dyn_cast_or_null(Filler); + if (Filler && !typedFiller) + llvm_unreachable("this should always be typed"); + + return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, + Elts, typedFiller); + } + case APValue::MemberPointer: { + assert(!UnimplementedFeature::cxxABI()); + + const ValueDecl *memberDecl = Value.getMemberPointerDecl(); + assert(!Value.isMemberPointerToDerivedMember() && "NYI"); + + if (const auto *memberFuncDecl = dyn_cast(memberDecl)) + assert(0 && "not implemented"); + + auto cirTy = + CGM.getTypes().ConvertType(DestType).cast(); + + const auto *fieldDecl = cast(memberDecl); + return builder.getDataMemberAttr(cirTy, fieldDecl->getFieldIndex()); + } + case APValue::LValue: + return ConstantLValueEmitter(*this, Value, DestType).tryEmit(); + case APValue::Struct: + case APValue::Union: + return ConstStructBuilder::BuildStruct(*this, Value, DestType); + case APValue::FixedPoint: + case APValue::ComplexInt: + case APValue::ComplexFloat: + case APValue::Vector: + case APValue::AddrLabelDiff: + assert(0 && "not implemented"); + } + llvm_unreachable("Unknown APValue kind"); +} + +mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { + if (T->getAs()) { + return builder.getNullPtr(getTypes().convertTypeForMem(T), loc); + } + + if (getTypes().isZeroInitializable(T)) + return builder.getNullValue(getTypes().convertTypeForMem(T), loc); + + if (const ConstantArrayType *CAT = + getASTContext().getAsConstantArrayType(T)) { + llvm_unreachable("NYI"); + } + + if (const RecordType *RT = T->getAs()) + llvm_unreachable("NYI"); + + assert(T->isMemberDataPointerType() && + "Should only see pointers to data members here!"); + + llvm_unreachable("NYI"); + return {}; +} + +mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { + assert(!UnimplementedFeature::cxxABI()); + + auto loc = getLoc(E->getSourceRange()); + + const auto *decl = cast(E->getSubExpr())->getDecl(); + + // A member function pointer. + // Member function pointer is not supported yet. + if (const auto *methodDecl = dyn_cast(decl)) + assert(0 && "not implemented"); + + auto ty = getCIRType(E->getType()).cast(); + + // Otherwise, a member data pointer. 
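+  // Illustrative: for `struct S { int a; int b; }; auto p = &S::b;` this
+  // yields a data-member constant recording field index 1, printed roughly
+  // as #cir.data_member<1> (exact syntax may differ).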
+ const auto *fieldDecl = cast(decl); + return builder.create( + loc, ty, builder.getDataMemberAttr(ty, fieldDecl->getFieldIndex())); +} + +mlir::Attribute ConstantEmitter::emitAbstract(const Expr *E, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(E, destType).cast(); + C = validateAndPopAbstract(C, state); + if (!C) { + llvm_unreachable("NYI"); + } + return C; +} + +mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc, + const APValue &value, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(value, destType); + C = validateAndPopAbstract(C, state); + if (!C) { + CGM.Error(loc, + "internal error: could not emit constant value \"abstractly\""); + llvm_unreachable("NYI"); + } + return C; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp new file mode 100644 index 000000000000..6f143812858e --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -0,0 +1,2454 @@ +//===--- CIRGenExprScalar.cpp - Emit CIR Code for Scalar Exprs ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes with scalar CIR types as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "Address.h" +#include "CIRDataLayout.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" +#include "UnimplementedFeatureGuarding.h" + +#include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" + +using namespace cir; +using namespace clang; + +namespace { + +struct BinOpInfo { + mlir::Value LHS; + mlir::Value RHS; + SourceRange Loc; + QualType FullType; // Type of operands and result + QualType CompType; // Type used for computations. Element type + // for vectors, otherwise same as FullType. + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + FPOptions FPFeatures; + const Expr *E; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. + bool isDivremOp() const { + return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || + Opcode == BO_RemAssign; + } + + /// Check if the binop can result in integer overflow. + bool mayHaveIntegerOverflow() const { + // Without constant input, we can't rule out overflow. + auto LHSCI = dyn_cast(LHS.getDefiningOp()); + auto RHSCI = dyn_cast(RHS.getDefiningOp()); + if (!LHSCI || !RHSCI) + return true; + + llvm::APInt Result; + assert(!UnimplementedFeature::mayHaveIntegerOverflow()); + llvm_unreachable("NYI"); + return false; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. 
+    if (const auto *BinOp = llvm::dyn_cast<BinaryOperator>(E)) {
+      QualType LHSType = BinOp->getLHS()->getType();
+      QualType RHSType = BinOp->getRHS()->getType();
+      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
+    }
+    if (const auto *UnOp = llvm::dyn_cast<UnaryOperator>(E))
+      return UnOp->getSubExpr()->getType()->isFixedPointType();
+    return false;
+  }
+};
+
+static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
+    QualType SrcType, QualType DstType) {
+  return SrcType->isIntegerType() && DstType->isIntegerType();
+}
+
+class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
+  CIRGenFunction &CGF;
+  CIRGenBuilderTy &Builder;
+  bool IgnoreResultAssign;
+
+public:
+  ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
+                    bool ira = false)
+      : CGF(cgf), Builder(builder), IgnoreResultAssign(ira) {}
+
+  //===--------------------------------------------------------------------===//
+  // Utilities
+  //===--------------------------------------------------------------------===//
+
+  bool TestAndClearIgnoreResultAssign() {
+    bool I = IgnoreResultAssign;
+    IgnoreResultAssign = false;
+    return I;
+  }
+
+  mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); }
+  LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); }
+  LValue buildCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) {
+    return CGF.buildCheckedLValue(E, TCK);
+  }
+
+  /// Emit a value that corresponds to null for the given type.
+  mlir::Value buildNullValue(QualType Ty, mlir::Location loc);
+
+  //===--------------------------------------------------------------------===//
+  // Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  mlir::Value Visit(Expr *E) {
+    return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(E);
+  }
+
+  mlir::Value VisitStmt(Stmt *S) {
+    S->dump(llvm::errs(), CGF.getContext());
+    llvm_unreachable("Stmt can't have complex result type!");
+  }
+
+  mlir::Value VisitExpr(Expr *E) {
+    // Crashing here for "ScalarExprClassName"? Please implement
+    // VisitScalarExprClassName(...) to get this working.
+    emitError(CGF.getLoc(E->getExprLoc()),
+              "scalar expression not implemented: '")
+        << E->getStmtClassName() << "'";
+    llvm_unreachable("NYI");
+    return {};
+  }
+
+  mlir::Value VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); }
+  mlir::Value VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+  mlir::Value
+  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+    return Visit(E->getReplacement());
+  }
+  mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitCoawaitExpr(CoawaitExpr *S) {
+    return CGF.buildCoawaitExpr(*S).getScalarVal();
+  }
+  mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { llvm_unreachable("NYI"); }
+  mlir::Value VisitUnaryCoawait(const UnaryOperator *E) {
+    llvm_unreachable("NYI");
+  }
+
+  // Leaves.
+ mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + Builder.getAttr(Ty, E->getValue())); + } + + mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + assert(Ty.isa() && + "expect floating-point type"); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + Builder.getAttr(Ty, E->getValue())); + } + mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + auto loc = CGF.getLoc(E->getExprLoc()); + auto init = mlir::cir::IntAttr::get(Ty, E->getValue()); + return Builder.create(loc, Ty, init); + } + mlir::Value VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, Builder.getCIRBoolAttr(E->getValue())); + } + + mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { + if (E->getType()->isVoidType()) + return nullptr; + + return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); + } + mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitOffsetOfExpr(OffsetOfExpr *E) { + // Try folding the offsetof to a constant. + Expr::EvalResult EVResult; + if (E->EvaluateAsInt(EVResult, CGF.getContext())) { + llvm::APSInt Value = EVResult.Val.getInt(); + return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); + } + + llvm_unreachable("NYI"); + } + + mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); + mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *E) { + if (E->isGLValue()) + llvm_unreachable("NYI"); + + // Otherwise, assume the mapping is the scalar directly. + return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal(); + } + + /// Emits the address of the l-value, then loads and returns the result. 
+ mlir::Value buildLoadOfLValue(const Expr *E) { + LValue LV = CGF.buildLValue(E); + // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); + return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); + } + + mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { + return CGF.buildLoadOfLValue(LV, Loc).getScalarVal(); + } + + // l-values + mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { + if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) { + return CGF.buildScalarConstant(Constant, E); + } + return buildLoadOfLValue(E); + } + + mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + // Do we need anything like TestAndClearIgnoreResultAssign()? + + if (E->getBase()->getType()->isVectorType()) { + assert(!UnimplementedFeature::scalableVectors() && + "NYI: index into scalable vector"); + // Subscript of vector type. This is handled differently, with a custom + // operation. + mlir::Value VecValue = Visit(E->getBase()); + mlir::Value IndexValue = Visit(E->getIdx()); + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), VecValue, IndexValue); + } + + // Just load the lvalue formed by the subscript expression. + return buildLoadOfLValue(E); + } + + mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *E) { + if (E->getNumSubExprs() == 2) { + // The undocumented form of __builtin_shufflevector. + mlir::Value InputVec = Visit(E->getExpr(0)); + mlir::Value IndexVec = Visit(E->getExpr(1)); + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), InputVec, IndexVec); + } else { + // The documented form of __builtin_shufflevector, where the indices are + // a variable number of integer constants. The constants will be stored + // in an ArrayAttr. + mlir::Value Vec1 = Visit(E->getExpr(0)); + mlir::Value Vec2 = Visit(E->getExpr(1)); + SmallVector Indices; + for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { + Indices.push_back(mlir::cir::IntAttr::get( + CGF.builder.getSInt64Ty(), + E->getExpr(i) + ->EvaluateKnownConstInt(CGF.getContext()) + .getSExtValue())); + } + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), Vec1, + Vec2, CGF.builder.getArrayAttr(Indices)); + } + } + mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) { + // __builtin_convertvector is an element-wise cast, and is implemented as a + // regular cast. The back end handles casts of vectors correctly. 
+    return buildScalarConversion(Visit(E->getSrcExpr()),
+                                 E->getSrcExpr()->getType(), E->getType(),
+                                 E->getSourceRange().getBegin());
+  }
+  mlir::Value VisitMemberExpr(MemberExpr *E);
+  mlir::Value VisitExtVectorElementExpr(Expr *E) { llvm_unreachable("NYI"); }
+  mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Value VisitInitListExpr(InitListExpr *E);
+
+  mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+    return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
+  }
+  mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) {
+    return VisitCastExpr(E);
+  }
+  mlir::Value VisitCastExpr(CastExpr *E);
+  mlir::Value VisitCallExpr(const CallExpr *E);
+
+  mlir::Value VisitStmtExpr(StmtExpr *E) {
+    assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI");
+    Address retAlloca =
+        CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType());
+    if (!retAlloca.isValid())
+      return {};
+
+    // FIXME(cir): This is a workaround for the ScopeOp builder. If we built
+    // the ScopeOp before its body, we would be able to create the retAlloca
+    // directly in the parent scope, removing the need to hoist it.
+    assert(retAlloca.getDefiningOp() && "expected an alloca op");
+    CGF.getBuilder().hoistAllocaToParentRegion(
+        cast<mlir::cir::AllocaOp>(retAlloca.getDefiningOp()));
+
+    return CGF.buildLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()),
+                                 E->getExprLoc());
+  }
+
+  // Unary Operators.
+  mlir::Value VisitUnaryPostDec(const UnaryOperator *E) {
+    LValue LV = buildLValue(E->getSubExpr());
+    return buildScalarPrePostIncDec(E, LV, false, false);
+  }
+  mlir::Value VisitUnaryPostInc(const UnaryOperator *E) {
+    LValue LV = buildLValue(E->getSubExpr());
+    return buildScalarPrePostIncDec(E, LV, true, false);
+  }
+  mlir::Value VisitUnaryPreDec(const UnaryOperator *E) {
+    LValue LV = buildLValue(E->getSubExpr());
+    return buildScalarPrePostIncDec(E, LV, false, true);
+  }
+  mlir::Value VisitUnaryPreInc(const UnaryOperator *E) {
+    LValue LV = buildLValue(E->getSubExpr());
+    return buildScalarPrePostIncDec(E, LV, true, true);
+  }
+  mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+                                       bool isInc, bool isPre) {
+    assert(!CGF.getLangOpts().OpenMP && "Not implemented");
+    QualType type = E->getSubExpr()->getType();
+
+    int amount = (isInc ? 1 : -1);
+    bool atomicPHI = false;
+    mlir::Value value{};
+    mlir::Value input{};
+
+    if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+      llvm_unreachable("no atomics inc/dec yet");
+    } else {
+      value = buildLoadOfLValue(LV, E->getExprLoc());
+      input = value;
+    }
+
+    // NOTE: When possible, more frequent cases are handled first.
+
+    // Special case of integer increment that we have to check first: bool++.
+    // Due to promotion rules, we get:
+    //   bool++ -> bool = bool + 1
+    //          -> bool = (int)bool + 1
+    //          -> bool = ((int)bool + 1 != 0)
+    // An interesting aspect of this is that increment is always true.
+    // Decrement does not have this property.
+    if (isInc && type->isBooleanType()) {
+      value = Builder.create<mlir::cir::ConstantOp>(
+          CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type),
+          Builder.getCIRBoolAttr(true));
+    } else if (type->isIntegerType()) {
+      QualType promotedType;
+      bool canPerformLossyDemotionCheck = false;
+      if (CGF.getContext().isPromotableIntegerType(type)) {
+        promotedType = CGF.getContext().getPromotedIntegerType(type);
+        assert(promotedType != type && "Shouldn't promote to the same type.");
+        canPerformLossyDemotionCheck = true;
+        canPerformLossyDemotionCheck &=
+            CGF.getContext().getCanonicalType(type) !=
+            CGF.getContext().getCanonicalType(promotedType);
+        canPerformLossyDemotionCheck &=
+            PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
+                type, promotedType);
+
+        // TODO(cir): Currently, we store bitwidths in CIR types only for
+        // integers. This might also be required for other types.
+        auto srcCirTy = ConvertType(type).dyn_cast<mlir::cir::IntType>();
+        auto promotedCirTy =
+            ConvertType(promotedType).dyn_cast<mlir::cir::IntType>();
+        assert(srcCirTy && promotedCirTy && "Expected integer type");
+
+        assert(
+            (!canPerformLossyDemotionCheck ||
+             type->isSignedIntegerOrEnumerationType() ||
+             promotedType->isSignedIntegerOrEnumerationType() ||
+             srcCirTy.getWidth() == promotedCirTy.getWidth()) &&
+            "The following check expects that if we do promotion to different "
+            "underlying canonical type, at least one of the types (either "
+            "base or promoted) will be signed, or the bitwidths will match.");
+      }
+
+      if (CGF.SanOpts.hasOneOf(
+              SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
+          canPerformLossyDemotionCheck) {
+        llvm_unreachable(
+            "perform lossy demotion case for inc/dec not implemented yet");
+      } else if (E->canOverflow() &&
+                 type->isSignedIntegerOrEnumerationType()) {
+        value = buildIncDecConsiderOverflowBehavior(E, value, isInc);
+      } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
+                 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
+        llvm_unreachable(
+            "unsigned integer overflow sanitized inc/dec not implemented");
+      } else {
+        auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc
+                                       : mlir::cir::UnaryOpKind::Dec;
+        // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
+        value = buildUnaryOp(E, Kind, input);
+      }
+      // Next most common: pointer increment.
+    } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+      QualType type = ptr->getPointeeType();
+      if (const VariableArrayType *vla =
+              CGF.getContext().getAsVariableArrayType(type)) {
+        // VLA types don't have constant size.
+        llvm_unreachable("NYI");
+      } else if (type->isFunctionType()) {
+        // Arithmetic on function pointers (!) is just +-1.
+        llvm_unreachable("NYI");
+      } else {
+        // For everything else, we can just do a simple increment.
+        auto loc = CGF.getLoc(E->getSourceRange());
+        auto &builder = CGF.getBuilder();
+        auto amt = builder.getSInt32(amount, loc);
+        if (CGF.getLangOpts().isSignedOverflowDefined()) {
+          value = builder.create<mlir::cir::PtrStrideOp>(loc, value.getType(),
+                                                         value, amt);
+        } else {
+          value = builder.create<mlir::cir::PtrStrideOp>(loc, value.getType(),
+                                                         value, amt);
+          assert(!UnimplementedFeature::emitCheckedInBoundsGEP());
+        }
+      }
+    } else if (type->isVectorType()) {
+      llvm_unreachable("no vector inc/dec yet");
+    } else if (type->isRealFloatingType()) {
+      auto isFloatOrDouble = type->isSpecificBuiltinType(BuiltinType::Float) ||
+                             type->isSpecificBuiltinType(BuiltinType::Double);
+      assert(isFloatOrDouble && "Non-float/double NYI");
+
+      // Create the inc/dec operation.
+      auto kind =
+          (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec);
+      value = buildUnaryOp(E, kind, input);
+
+    } else if (type->isFixedPointType()) {
+      llvm_unreachable("no fixed point inc/dec yet");
+    } else {
+      assert(type->castAs<ObjCObjectPointerType>());
+      llvm_unreachable("no objc pointer type inc/dec yet");
+    }
+
+    if (atomicPHI) {
+      llvm_unreachable("NYI");
+    }
+
+    CIRGenFunction::SourceLocRAIIObject sourceloc{
+        CGF, CGF.getLoc(E->getSourceRange())};
+
+    // Store the updated result through the lvalue
+    if (LV.isBitField())
+      CGF.buildStoreThroughBitfieldLValue(RValue::get(value), LV, value);
+    else
+      CGF.buildStoreThroughLValue(RValue::get(value), LV);
+
+    // If this is a postinc, return the value read from memory, otherwise use
+    // the updated value.
+    return isPre ? value : input;
+  }
+
+  mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E,
+                                                  mlir::Value InVal,
+                                                  bool IsInc) {
+    // NOTE(CIR): The SignedOverflowBehavior is attached to the global ModuleOp
+    // and the nsw behavior is handled during lowering.
+    auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc
+                                   : mlir::cir::UnaryOpKind::Dec;
+    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
+    case LangOptions::SOB_Defined:
+      return buildUnaryOp(E, Kind, InVal);
+    case LangOptions::SOB_Undefined:
+      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+        return buildUnaryOp(E, Kind, InVal);
+      llvm_unreachable(
+          "inc/dec overflow behavior SOB_Undefined not implemented yet");
+      break;
+    case LangOptions::SOB_Trapping:
+      if (!E->canOverflow())
+        return buildUnaryOp(E, Kind, InVal);
+      llvm_unreachable(
+          "inc/dec overflow behavior SOB_Trapping not implemented yet");
+      break;
+    }
+  }
+
+  mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) {
+    if (llvm::isa<MemberPointerType>(E->getType()))
+      return CGF.CGM.buildMemberPointerConstant(E);
+
+    return CGF.buildLValue(E->getSubExpr()).getPointer();
+  }
+
+  mlir::Value VisitUnaryDeref(const UnaryOperator *E) {
+    if (E->getType()->isVoidType())
+      return Visit(E->getSubExpr()); // the actual value should be unused
+    return buildLoadOfLValue(E);
+  }
+  mlir::Value VisitUnaryPlus(const UnaryOperator *E) {
+    // NOTE(cir): The QualType function parameter is still not used, so don't
+    // replicate it here yet.
+    QualType promotionTy = getPromotionType(E->getSubExpr()->getType());
+    auto result = VisitPlus(E, promotionTy);
+    if (result && !promotionTy.isNull())
+      assert(0 && "not implemented yet");
+    return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, result);
+  }
+
+  mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType) {
+    // This differs from gcc, though, most likely due to a bug in gcc.
+    TestAndClearIgnoreResultAssign();
+    if (!PromotionType.isNull())
+      assert(0 && "scalar promotion not implemented yet");
+    return Visit(E->getSubExpr());
+  }
+
+  mlir::Value VisitUnaryMinus(const UnaryOperator *E) {
+    // NOTE(cir): The QualType function parameter is still not used, so don't
+    // replicate it here yet.
+    QualType promotionTy = getPromotionType(E->getSubExpr()->getType());
+    auto result = VisitMinus(E, promotionTy);
+    if (result && !promotionTy.isNull())
+      assert(0 && "not implemented yet");
+    return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, result);
+  }
+
+  mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) {
+    TestAndClearIgnoreResultAssign();
+    if (!PromotionType.isNull())
+      assert(0 && "scalar promotion not implemented yet");
+
+    // NOTE: LLVM codegen will lower this directly to either a FNeg
+    // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
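+    // Illustratively, `-x` for an `int x` stays a single high-level op here,
+    // roughly: %1 = cir.unary(minus, %0) : !s32i, !s32i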
+
+    return Visit(E->getSubExpr());
+  }
+
+  mlir::Value VisitUnaryNot(const UnaryOperator *E) {
+    TestAndClearIgnoreResultAssign();
+    mlir::Value op = Visit(E->getSubExpr());
+    return buildUnaryOp(E, mlir::cir::UnaryOpKind::Not, op);
+  }
+
+  mlir::Value VisitUnaryLNot(const UnaryOperator *E);
+  mlir::Value VisitUnaryReal(const UnaryOperator *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitUnaryImag(const UnaryOperator *E) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Value VisitUnaryExtension(const UnaryOperator *E) {
+    // __extension__ doesn't require any codegen;
+    // just forward the value.
+    return Visit(E->getSubExpr());
+  }
+
+  mlir::Value buildUnaryOp(const UnaryOperator *E, mlir::cir::UnaryOpKind kind,
+                           mlir::Value input) {
+    return Builder.create<mlir::cir::UnaryOp>(
+        CGF.getLoc(E->getSourceRange().getBegin()),
+        CGF.getCIRType(E->getType()), kind, input);
+  }
+
+  // C++
+  mlir::Value
+  VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitSourceLocExpr(SourceLocExpr *E) { llvm_unreachable("NYI"); }
+  mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    CIRGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
+    return Visit(DAE->getExpr());
+  }
+  mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+    CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
+    return Visit(DIE->getExpr());
+  }
+
+  mlir::Value VisitCXXThisExpr(CXXThisExpr *TE) { return CGF.LoadCXXThis(); }
+
+  mlir::Value VisitExprWithCleanups(ExprWithCleanups *E);
+  mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) {
+    return CGF.buildCXXNewExpr(E);
+  }
+  mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+    CGF.buildCXXDeleteExpr(E);
+    return {};
+  }
+  mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value
+  VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitRequiresExpr(const RequiresExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+    llvm_unreachable("NYI");
+  }
+  mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+    return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
+  }
+  mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) {
+    CGF.buildCXXThrowExpr(E);
+    return nullptr;
+  }
+  mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+    llvm_unreachable("NYI");
+  }
+
+  /// Perform a pointer to boolean conversion.
+  mlir::Value buildPointerToBoolConversion(mlir::Value V, QualType QT) {
+    // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
+    // We might want to have a separate pass for these types of conversions.
+    return CGF.getBuilder().createPtrToBoolCast(V);
+  }
+
+  // Comparisons.
+#define VISITCOMP(CODE) \
+  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); }
+  VISITCOMP(LT)
+  VISITCOMP(GT)
+  VISITCOMP(LE)
+  VISITCOMP(GE)
+  VISITCOMP(EQ)
+  VISITCOMP(NE)
+#undef VISITCOMP
+
+  mlir::Value VisitBinAssign(const BinaryOperator *E);
+  mlir::Value VisitBinLAnd(const BinaryOperator *B);
+  mlir::Value VisitBinLOr(const BinaryOperator *B);
+  mlir::Value VisitBinComma(const BinaryOperator *E) {
+    CGF.buildIgnoredExpr(E->getLHS());
+    // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
+ return Visit(E->getRHS()); + } + + mlir::Value VisitBinPtrMemD(const BinaryOperator *E) { + return buildLoadOfLValue(E); + } + + mlir::Value VisitBinPtrMemI(const BinaryOperator *E) { + return buildLoadOfLValue(E); + } + + mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { + llvm_unreachable("NYI"); + } + + // Other Operators. + mlir::Value VisitBlockExpr(const BlockExpr *E) { llvm_unreachable("NYI"); } + mlir::Value + VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); + mlir::Value VisitChooseExpr(ChooseExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitVAArgExpr(VAArgExpr *VE); + mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitAtomicExpr(AtomicExpr *E) { + return CGF.buildAtomicExpr(E).getScalarVal(); + } + + // Emit a conversion from the specified type to the specified destination + // type, both of which are CIR scalar types. + struct ScalarConversionOpts { + bool TreatBooleanAsSigned; + bool EmitImplicitIntegerTruncationChecks; + bool EmitImplicitIntegerSignChangeChecks; + + ScalarConversionOpts() + : TreatBooleanAsSigned(false), + EmitImplicitIntegerTruncationChecks(false), + EmitImplicitIntegerSignChangeChecks(false) {} + + ScalarConversionOpts(clang::SanitizerSet SanOpts) + : TreatBooleanAsSigned(false), + EmitImplicitIntegerTruncationChecks( + SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)), + EmitImplicitIntegerSignChangeChecks( + SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {} + }; + mlir::Value buildScalarCast(mlir::Value Src, QualType SrcType, + QualType DstType, mlir::Type SrcTy, + mlir::Type DstTy, ScalarConversionOpts Opts); + + BinOpInfo buildBinOps(const BinaryOperator *E) { + BinOpInfo Result; + Result.LHS = Visit(E->getLHS()); + Result.RHS = Visit(E->getRHS()); + Result.FullType = E->getType(); + Result.CompType = E->getType(); + if (auto VecType = dyn_cast_or_null(E->getType())) { + Result.CompType = VecType->getElementType(); + } + Result.Opcode = E->getOpcode(); + Result.Loc = E->getSourceRange(); + // TODO: Result.FPFeatures + Result.E = E; + return Result; + } + + mlir::Value buildMul(const BinOpInfo &Ops); + mlir::Value buildDiv(const BinOpInfo &Ops); + mlir::Value buildRem(const BinOpInfo &Ops); + mlir::Value buildAdd(const BinOpInfo &Ops); + mlir::Value buildSub(const BinOpInfo &Ops); + mlir::Value buildShl(const BinOpInfo &Ops); + mlir::Value buildShr(const BinOpInfo &Ops); + mlir::Value buildAnd(const BinOpInfo &Ops); + mlir::Value buildXor(const BinOpInfo &Ops); + mlir::Value buildOr(const BinOpInfo &Ops); + + LValue buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &), + mlir::Value &Result); + mlir::Value + buildCompoundAssign(const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); + + // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM + // codegen. 
+ QualType getPromotionType(QualType Ty) { + if (auto *CT = Ty->getAs()) { + llvm_unreachable("NYI"); + } + if (Ty.UseExcessPrecision(CGF.getContext())) + llvm_unreachable("NYI"); + return QualType(); + } + + // Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const BinaryOperator *E) { \ + return build##OP(buildBinOps(E)); \ + } \ + mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *E) { \ + return buildCompoundAssign(E, &ScalarExprEmitter::build##OP); \ + } + + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Rem) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) + HANDLEBINOP(Shl) + HANDLEBINOP(Shr) + HANDLEBINOP(And) + HANDLEBINOP(Xor) + HANDLEBINOP(Or) +#undef HANDLEBINOP + + mlir::Value buildCmp(const BinaryOperator *E) { + mlir::Value Result; + QualType LHSTy = E->getLHS()->getType(); + QualType RHSTy = E->getRHS()->getType(); + + auto ClangCmpToCIRCmp = [](auto ClangCmp) -> mlir::cir::CmpOpKind { + switch (ClangCmp) { + case BO_LT: + return mlir::cir::CmpOpKind::lt; + case BO_GT: + return mlir::cir::CmpOpKind::gt; + case BO_LE: + return mlir::cir::CmpOpKind::le; + case BO_GE: + return mlir::cir::CmpOpKind::ge; + case BO_EQ: + return mlir::cir::CmpOpKind::eq; + case BO_NE: + return mlir::cir::CmpOpKind::ne; + default: + llvm_unreachable("unsupported comparison kind"); + return mlir::cir::CmpOpKind(-1); + } + }; + + if (const MemberPointerType *MPT = LHSTy->getAs()) { + assert(0 && "not implemented"); + } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { + BinOpInfo BOInfo = buildBinOps(E); + mlir::Value LHS = BOInfo.LHS; + mlir::Value RHS = BOInfo.RHS; + + if (LHSTy->isVectorType()) { + if (!E->getType()->isVectorType()) { + // If AltiVec, the comparison results in a numeric type, so we use + // intrinsics comparing vectors and giving 0 or 1 as a result + llvm_unreachable("NYI: AltiVec comparison"); + } else { + // Other kinds of vectors. Element-wise comparison returning + // a vector. + mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); + return Builder.create( + CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.FullType), Kind, + BOInfo.LHS, BOInfo.RHS); + } + } + if (BOInfo.isFixedPointOp()) { + assert(0 && "not implemented"); + } else { + // FIXME(cir): handle another if above for CIR equivalent on + // LHSTy->hasSignedIntegerRepresentation() + + // Unsigned integers and pointers. + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && + LHS.getType().isa() && + RHS.getType().isa()) { + llvm_unreachable("NYI"); + } + + mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); + return Builder.create(CGF.getLoc(BOInfo.Loc), + CGF.getCIRType(BOInfo.FullType), + Kind, BOInfo.LHS, BOInfo.RHS); + } + } else { // Complex Comparison: can only be an equality comparison. + assert(0 && "not implemented"); + } + + return buildScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), + E->getExprLoc()); + } + + mlir::Value buildFloatToBoolConversion(mlir::Value src, mlir::Location loc) { + auto boolTy = Builder.getBoolTy(); + return Builder.create( + loc, boolTy, mlir::cir::CastKind::float_to_bool, src); + } + + mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { + // Because of the type rules of C, we often end up computing a + // logical value, then zero extending it to int, then wanting it + // as a logical value again. + // TODO: optimize this common case here or leave it for later + // CIR passes? 
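+    // Illustratively, for `if (x)` with `int x` this emits roughly:
+    //   %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool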
+ mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); + return Builder.create( + loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); + } + + /// Convert the specified expression value to a boolean (!cir.bool) truth + /// value. This is equivalent to "Val != 0". + mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, + mlir::Location loc) { + assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) + return buildFloatToBoolConversion(Src, loc); + + if (auto *MPT = llvm::dyn_cast(SrcType)) + assert(0 && "not implemented"); + + if (SrcType->isIntegerType()) + return buildIntToBoolConversion(Src, loc); + + assert(Src.getType().isa<::mlir::cir::PointerType>()); + return buildPointerToBoolConversion(Src, SrcType); + } + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + /// TODO: do we need ScalarConversionOpts here? Should be done in another + /// pass. + mlir::Value + buildScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType, + SourceLocation Loc, + ScalarConversionOpts Opts = ScalarConversionOpts()) { + // All conversions involving fixed point types should be handled by the + // buildFixedPoint family functions. This is done to prevent bloating up + // this function more, and although fixed point numbers are represented by + // integers, we do not want to follow any logic that assumes they should be + // treated as integers. + // TODO(leonardchan): When necessary, add another if statement checking for + // conversions to fixed point types from other types. + if (SrcType->isFixedPointType()) { + llvm_unreachable("not implemented"); + } else if (DstType->isFixedPointType()) { + llvm_unreachable("not implemented"); + } + + SrcType = CGF.getContext().getCanonicalType(SrcType); + DstType = CGF.getContext().getCanonicalType(DstType); + if (SrcType == DstType) + return Src; + + if (DstType->isVoidType()) + return nullptr; + + mlir::Type SrcTy = Src.getType(); + + // Handle conversions to bool first, they are special: comparisons against + // 0. + if (DstType->isBooleanType()) + return buildConversionToBool(Src, SrcType, CGF.getLoc(Loc)); + + mlir::Type DstTy = ConvertType(DstType); + + // Cast from half through float if half isn't a native type. + if (SrcType->isHalfType() && + !CGF.getContext().getLangOpts().NativeHalfType) { + llvm_unreachable("not implemented"); + } + + // TODO(cir): LLVM codegen ignore conversions like int -> uint, + // is there anything to be done for CIR here? 
+    if (SrcTy == DstTy) {
+      if (Opts.EmitImplicitIntegerSignChangeChecks)
+        llvm_unreachable("not implemented");
+      return Src;
+    }
+
+    assert(!SrcTy.isa<::mlir::cir::PointerType>() &&
+           !DstTy.isa<::mlir::cir::PointerType>() &&
+           "Internal error: pointer conversions are handled elsewhere");
+
+    // A scalar can be splatted to an extended vector of the same element type
+    if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+      // Sema should add casts to make sure that the source expression's type
+      // is the same as the vector's element type (sans qualifiers)
+      assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
+                 SrcType.getTypePtr() &&
+             "Splatted expr doesn't match with vector element type?");
+
+      llvm_unreachable("not implemented");
+    }
+
+    if (SrcType->isMatrixType() && DstType->isMatrixType())
+      llvm_unreachable("NYI: matrix type to matrix type conversion");
+    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
+           "Internal error: conversion between matrix type and scalar type");
+
+    // Finally, we have the arithmetic types or vectors of arithmetic types.
+    mlir::Value Res = nullptr;
+    mlir::Type ResTy = DstTy;
+
+    // An overflowing conversion has undefined behavior if either the source
+    // type or the destination type is a floating-point type. However, we
+    // consider the range of representable values for all floating-point types
+    // to be [-inf,+inf], so no overflow can ever happen when the destination
+    // type is a floating-point type.
+    if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow))
+      llvm_unreachable("NYI");
+
+    // Cast to half through float if half isn't a native type.
+    if (DstType->isHalfType() &&
+        !CGF.getContext().getLangOpts().NativeHalfType) {
+      llvm_unreachable("NYI");
+    }
+
+    Res = buildScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
+
+    if (DstTy != ResTy) {
+      llvm_unreachable("NYI");
+    }
+
+    if (Opts.EmitImplicitIntegerTruncationChecks)
+      llvm_unreachable("NYI");
+
+    if (Opts.EmitImplicitIntegerSignChangeChecks)
+      llvm_unreachable("NYI");
+
+    return Res;
+  }
+};
+
+} // namespace
+
+/// Emit the computation of the specified expression of scalar type,
+/// ignoring the result.
+mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) {
+  assert(E && hasScalarEvaluationKind(E->getType()) &&
+         "Invalid scalar expression to emit");
+
+  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(E));
+}
+
+[[maybe_unused]] static bool MustVisitNullValue(const Expr *E) {
+  // If a null pointer expression's type is the C++0x nullptr_t, then
+  // it's not necessarily a simple constant and it must be evaluated
+  // for its potential side effects.
+  return E->getType()->isNullPtrType();
+}
+
+/// If \p E is a widened promoted integer, get its base (unpromoted) type.
+static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
+                                                       const Expr *E) {
+  const Expr *Base = E->IgnoreImpCasts();
+  if (E == Base)
+    return std::nullopt;
+
+  QualType BaseTy = Base->getType();
+  if (!Ctx.isPromotableIntegerType(BaseTy) ||
+      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
+    return std::nullopt;
+
+  return BaseTy;
+}
+
+/// Check if \p E is a widened promoted integer.
+[[maybe_unused]] static bool IsWidenedIntegerOp(const ASTContext &Ctx,
+                                                const Expr *E) {
+  return getUnwidenedIntegerType(Ctx, E).has_value();
+}
+
+/// Check if we can skip the overflow check for \p Op.
+[[maybe_unused]] static bool CanElideOverflowCheck(const ASTContext &Ctx, + const BinOpInfo &Op) { + assert((isa(Op.E) || isa(Op.E)) && + "Expected a unary or binary operator"); + + // If the binop has constant inputs and we can prove there is no overflow, + // we can elide the overflow check. + if (!Op.mayHaveIntegerOverflow()) + return true; + + // If a unary op has a widened operand, the op cannot overflow. + if (const auto *UO = dyn_cast(Op.E)) + return !UO->canOverflow(); + + // We usually don't need overflow checks for binops with widened operands. + // Multiplication with promoted unsigned operands is a special case. + const auto *BO = cast(Op.E); + auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS()); + if (!OptionalLHSTy) + return false; + + auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS()); + if (!OptionalRHSTy) + return false; + + QualType LHSTy = *OptionalLHSTy; + QualType RHSTy = *OptionalRHSTy; + + // This is the simple case: binops without unsigned multiplication, and with + // widened operands. No overflow check is needed here. + if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) || + !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType()) + return true; + + // For unsigned multiplication the overflow check can be elided if either one + // of the unpromoted types are less than half the size of the promoted type. + unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType()); + return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize || + (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize; +} + +/// Emit pointer + index arithmetic. +static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, + const BinOpInfo &op, + bool isSubtraction) { + // Must have binary (not unary) expr here. Unary pointer + // increment/decrement doesn't use this path. + const BinaryOperator *expr = cast(op.E); + + mlir::Value pointer = op.LHS; + Expr *pointerOperand = expr->getLHS(); + mlir::Value index = op.RHS; + Expr *indexOperand = expr->getRHS(); + + // In a subtraction, the LHS is always the pointer. + if (!isSubtraction && !pointer.getType().isa()) { + std::swap(pointer, index); + std::swap(pointerOperand, indexOperand); + } + + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + + // Some versions of glibc and gcc use idioms (particularly in their malloc + // routines) that add a pointer-sized integer (known to be a pointer value) + // to a null pointer in order to cast the value back to an integer or as + // part of a pointer alignment algorithm. This is undefined behavior, but + // we'd like to be able to compile programs that use it. + // + // Normally, we'd generate a GEP with a null-pointer base here in response + // to that code, but it's also UB to dereference a pointer created that + // way. Instead (as an acknowledged hack to tolerate the idiom) we will + // generate a direct cast of the integer value to a pointer. + // + // The idiom (p = nullptr + N) is not met if any of the following are true: + // + // The operation is subtraction. + // The index is not pointer-sized. + // The pointer type is not byte-sized. + // + if (BinaryOperator::isNullPointerArithmeticExtension( + CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) + llvm_unreachable("null pointer arithmetic extension is NYI"); + + if (UnimplementedFeature::dataLayoutGetIndexTypeSizeInBits()) { + // TODO(cir): original codegen zero/sign-extends the index to the same width + // as the pointer. 
Since CIR's pointer stride doesn't care about that, it's
+    // skipped here.
+    llvm_unreachable("target-specific pointer width is NYI");
+  }
+
+  // If this is subtraction, negate the index.
+  if (isSubtraction)
+    index = CGF.getBuilder().createNeg(index);
+
+  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
+    llvm_unreachable("array bounds sanitizer is NYI");
+
+  const PointerType *pointerType =
+      pointerOperand->getType()->getAs<PointerType>();
+  if (!pointerType)
+    llvm_unreachable("ObjC is NYI");
+
+  QualType elementType = pointerType->getPointeeType();
+  if (const VariableArrayType *vla =
+          CGF.getContext().getAsVariableArrayType(elementType)) {
+
+    // The element count here is the total number of non-VLA elements.
+    mlir::Value numElements = CGF.getVLASize(vla).NumElts;
+
+    // GEP indexes are signed, and scaling an index isn't permitted to
+    // signed-overflow, so we use the same semantics for our explicit
+    // multiply. We suppress this if overflow is not undefined behavior.
+    mlir::Type elemTy = CGF.convertTypeForMem(vla->getElementType());
+
+    index = CGF.getBuilder().createCast(mlir::cir::CastKind::integral, index,
+                                        numElements.getType());
+    index = CGF.getBuilder().createMul(index, numElements);
+
+    if (CGF.getLangOpts().isSignedOverflowDefined()) {
+      pointer = CGF.getBuilder().create<mlir::cir::PtrStrideOp>(
+          CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index);
+    } else {
+      pointer = CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+                                            isSubtraction, op.E->getExprLoc());
+    }
+    return pointer;
+  }
+  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+  // future proof.
+  if (elementType->isVoidType() || elementType->isFunctionType())
+    llvm_unreachable("GNU void* and func ptr arithmetic extensions are NYI");
+
+  mlir::Type elemTy = CGF.convertTypeForMem(elementType);
+  if (CGF.getLangOpts().isSignedOverflowDefined())
+    return CGF.getBuilder().create<mlir::cir::PtrStrideOp>(
+        CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index);
+
+  return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+                                     isSubtraction, op.E->getExprLoc());
+}
+
+mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) {
+  return Builder.create<mlir::cir::BinOp>(
+      CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType),
+      mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS);
+}
+mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) {
+  return Builder.create<mlir::cir::BinOp>(
+      CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType),
+      mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS);
+}
+mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) {
+  return Builder.create<mlir::cir::BinOp>(
+      CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType),
+      mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS);
+}
+
+mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) {
+  if (Ops.LHS.getType().isa<mlir::cir::PointerType>() ||
+      Ops.RHS.getType().isa<mlir::cir::PointerType>())
+    return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false);
+
+  return Builder.create<mlir::cir::BinOp>(
+      CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType),
+      mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS);
+}
+
+mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) {
+  // The LHS is always a pointer if either side is.
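+  // A sketch of the integer path below (approximate CIR syntax): under
+  // -fwrapv (SOB_Defined) 'a - b' for 'int a, b' lowers to a plain wrapping
+  //   %d = cir.binop(sub, %a, %b) : !s32i
+  // while the default SOB_Undefined case goes through createNSWSub, the
+  // no-signed-wrap flavor of the same operation.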
+ if (!Ops.LHS.getType().isa()) { + if (Ops.CompType->isSignedIntegerOrEnumerationType()) { + switch (CGF.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: { + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createSub(Ops.LHS, Ops.RHS); + [[fallthrough]]; + } + case LangOptions::SOB_Undefined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createNSWSub(Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if (CanElideOverflowCheck(CGF.getContext(), Ops)) + return Builder.createNSWSub(Ops.LHS, Ops.RHS); + llvm_unreachable("NYI"); + } + } + + if (Ops.FullType->isConstantMatrixType()) { + llvm_unreachable("NYI"); + } + + if (Ops.CompType->isUnsignedIntegerType() && + CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !CanElideOverflowCheck(CGF.getContext(), Ops)) + llvm_unreachable("NYI"); + + if (Ops.CompType->isFloatingType()) { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); + return Builder.createFSub(Ops.LHS, Ops.RHS); + } + + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + } + + // If the RHS is not a pointer, then we have normal pointer + // arithmetic. + if (!Ops.RHS.getType().isa()) + return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); + + // Otherwise, this is a pointer subtraction + + // Do the raw subtraction part. + // + // TODO(cir): note for LLVM lowering out of this; when expanding this into + // LLVM we shall take VLA's, division by element size, etc. + // + // See more in `EmitSub` in CGExprScalar.cpp. + assert(!UnimplementedFeature::llvmLoweringPtrDiffConsidersPointee()); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.PtrDiffTy, Ops.LHS, Ops.RHS); +} + +mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { + // TODO: This misses out on the sanitizer check below. + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. + + bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && + Ops.CompType->hasSignedIntegerRepresentation() && + !CGF.getLangOpts().isSignedOverflowDefined() && + !CGF.getLangOpts().CPlusPlus20; + bool SanitizeUnsignedBase = + CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && + Ops.CompType->hasUnsignedIntegerRepresentation(); + bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; + bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); + + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + llvm_unreachable("NYI"); + else if ((SanitizeBase || SanitizeExponent) && + Ops.LHS.getType().isa()) { + llvm_unreachable("NYI"); + } + + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS, + CGF.getBuilder().getUnitAttr()); +} + +mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { + // TODO: This misses out on the sanitizer check below. + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. 
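+  // For instance (approximate CIR syntax), 'long x; int n; x >> n' can stay
+  // as
+  //   %r = cir.shift(right, %x : !s64i, %n : !s32i) -> !s64i
+  // with the operand-width mismatch only reconciled during LLVM lowering.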
+ + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + llvm_unreachable("NYI"); + else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && + Ops.LHS.getType().isa()) { + llvm_unreachable("NYI"); + } + + // Note that we don't need to distinguish unsigned treatment at this + // point since it will be handled later by LLVM lowering. + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS); +} + +mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); +} + +// Emit code for an explicit or implicit cast. Implicit +// casts have to handle a more broad range of conversions than explicit +// casts, as they handle things like function to ptr-to-function decay +// etc. +mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); + QualType DestTy = CE->getType(); + CastKind Kind = CE->getCastKind(); + + // These cases are generally not written to ignore the result of evaluating + // their sub-expressions, so we clear this now. + bool Ignored = TestAndClearIgnoreResultAssign(); + (void)Ignored; + + // Since almost all cast kinds apply to scalars, this switch doesn't have a + // default case, so the compiler will warn on a missing case. The cases are + // in the same order as in the CastKind enum. + switch (Kind) { + case clang::CK_Dependent: + llvm_unreachable("dependent cast kind in CIR gen!"); + case clang::CK_BuiltinFnToFnPtr: + llvm_unreachable("builtin functions are handled elsewhere"); + + case CK_LValueBitCast: + llvm_unreachable("NYI"); + case CK_ObjCObjectLValueCast: + llvm_unreachable("NYI"); + case CK_LValueToRValueBitCast: + llvm_unreachable("NYI"); + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_BitCast: { + auto Src = Visit(const_cast(E)); + mlir::Type DstTy = CGF.convertType(DestTy); + + assert(!UnimplementedFeature::addressSpace()); + if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { + llvm_unreachable("NYI"); + } + + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { + llvm_unreachable("NYI"); + } + + // Update heapallocsite metadata when there is an explicit pointer cast. + assert(!UnimplementedFeature::addHeapAllocSiteMetadata()); + + // If Src is a fixed vector and Dst is a scalable vector, and both have the + // same element type, use the llvm.vector.insert intrinsic to perform the + // bitcast. + assert(!UnimplementedFeature::scalableVectors()); + + // If Src is a scalable vector and Dst is a fixed vector, and both have the + // same element type, use the llvm.vector.extract intrinsic to perform the + // bitcast. + assert(!UnimplementedFeature::scalableVectors()); + + // Perform VLAT <-> VLST bitcast through memory. 
+ // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics + // require the element types of the vectors to be the same, we + // need to keep this around for bitcasts between VLAT <-> VLST where + // the element types of the vectors are not the same, until we figure + // out a better way of doing these casts. + assert(!UnimplementedFeature::scalableVectors()); + + return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, + DstTy); + } + case CK_AddressSpaceConversion: + llvm_unreachable("NYI"); + case CK_AtomicToNonAtomic: + llvm_unreachable("NYI"); + case CK_NonAtomicToAtomic: + llvm_unreachable("NYI"); + case CK_UserDefinedConversion: + return Visit(const_cast(E)); + case CK_NoOp: { + auto V = Visit(const_cast(E)); + if (V) { + // CK_NoOp can model a pointer qualification conversion, which can remove + // an array bound and change the IR type. + // FIXME: Once pointee types are removed from IR, remove this. + auto T = CGF.convertType(DestTy); + if (T != V.getType()) + assert(0 && "NYI"); + } + return V; + } + case CK_BaseToDerived: + llvm_unreachable("NYI"); + case CK_DerivedToBase: { + // The EmitPointerWithAlignment path does this fine; just discard + // the alignment. + return CGF.buildPointerWithAlignment(CE).getPointer(); + } + case CK_Dynamic: { + Address V = CGF.buildPointerWithAlignment(E); + const auto *DCE = cast(CE); + return CGF.buildDynamicCast(V, DCE); + } + case CK_ArrayToPointerDecay: + return CGF.buildArrayToPointerDecay(E).getPointer(); + case CK_FunctionToPointerDecay: + return buildLValue(E).getPointer(); + + case CK_NullToPointer: { + // FIXME: use MustVisitNullValue(E) and evaluate expr. + // Note that DestTy is used as the MLIR type instead of a custom + // nullptr type. + mlir::Type Ty = CGF.getCIRType(DestTy); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + mlir::cir::ConstPtrAttr::get(Builder.getContext(), Ty, 0)); + } + + case CK_NullToMemberPointer: { + if (MustVisitNullValue(E)) + CGF.buildIgnoredExpr(E); + + assert(!UnimplementedFeature::cxxABI()); + + const MemberPointerType *MPT = CE->getType()->getAs(); + assert(!MPT->isMemberFunctionPointerType() && "NYI"); + + auto Ty = CGF.getCIRType(DestTy).cast(); + return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); + } + case CK_ReinterpretMemberPointer: + llvm_unreachable("NYI"); + case CK_BaseToDerivedMemberPointer: + llvm_unreachable("NYI"); + case CK_DerivedToBaseMemberPointer: + llvm_unreachable("NYI"); + case CK_ARCProduceObject: + llvm_unreachable("NYI"); + case CK_ARCConsumeObject: + llvm_unreachable("NYI"); + case CK_ARCReclaimReturnedObject: + llvm_unreachable("NYI"); + case CK_ARCExtendBlockObject: + llvm_unreachable("NYI"); + case CK_CopyAndAutoreleaseBlockObject: + llvm_unreachable("NYI"); + case CK_FloatingRealToComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexCast: + llvm_unreachable("NYI"); + case CK_IntegralComplexToFloatingComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexToIntegralComplex: + llvm_unreachable("NYI"); + case CK_ConstructorConversion: + llvm_unreachable("NYI"); + case CK_ToUnion: + llvm_unreachable("NYI"); + + case CK_LValueToRValue: + assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + + case CK_IntegralToPointer: { + auto DestCIRTy = ConvertType(DestTy); + mlir::Value Src = Visit(const_cast(E)); + + // Properly resize by casting to an int of the same size as the pointer. 
+    // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
+    // 'bool' is not an integral type. So check the source type to get the
+    // correct CIR conversion.
+    auto MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestCIRTy);
+    auto MiddleVal = Builder.createCast(E->getType()->isBooleanType()
+                                            ? mlir::cir::CastKind::bool_to_int
+                                            : mlir::cir::CastKind::integral,
+                                        Src, MiddleTy);
+
+    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers)
+      llvm_unreachable("NYI");
+
+    return Builder.createIntToPtr(MiddleVal, DestCIRTy);
+  }
+  case CK_PointerToIntegral: {
+    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers)
+      llvm_unreachable("NYI");
+    return Builder.createPtrToInt(Visit(E), ConvertType(DestTy));
+  }
+  case CK_ToVoid: {
+    CGF.buildIgnoredExpr(E);
+    return nullptr;
+  }
+  case CK_MatrixCast:
+    llvm_unreachable("NYI");
+  case CK_VectorSplat: {
+    // Create a vector object and fill all elements with the same scalar value.
+    assert(DestTy->isVectorType() && "CK_VectorSplat to non-vector type");
+    return CGF.getBuilder().create<mlir::cir::VecSplatOp>(
+        CGF.getLoc(E->getSourceRange()), CGF.getCIRType(DestTy), Visit(E));
+  }
+  case CK_FixedPointCast:
+    llvm_unreachable("NYI");
+  case CK_FixedPointToBoolean:
+    llvm_unreachable("NYI");
+  case CK_FixedPointToIntegral:
+    llvm_unreachable("NYI");
+  case CK_IntegralToFixedPoint:
+    llvm_unreachable("NYI");
+
+  case CK_IntegralCast: {
+    ScalarConversionOpts Opts;
+    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+      if (!ICE->isPartOfExplicitCast())
+        Opts = ScalarConversionOpts(CGF.SanOpts);
+    }
+    return buildScalarConversion(Visit(E), E->getType(), DestTy,
+                                 CE->getExprLoc(), Opts);
+  }
+
+  case CK_IntegralToFloating:
+  case CK_FloatingToIntegral:
+  case CK_FloatingCast:
+  case CK_FixedPointToFloating:
+  case CK_FloatingToFixedPoint: {
+    if (Kind == CK_FixedPointToFloating || Kind == CK_FloatingToFixedPoint)
+      llvm_unreachable("Fixed point casts are NYI.");
+    CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE);
+    return buildScalarConversion(Visit(E), E->getType(), DestTy,
+                                 CE->getExprLoc());
+  }
+  case CK_BooleanToSignedIntegral:
+    llvm_unreachable("NYI");
+
+  case CK_IntegralToBoolean: {
+    return buildIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange()));
+  }
+
+  case CK_PointerToBoolean:
+    return buildPointerToBoolConversion(Visit(E), E->getType());
+  case CK_FloatingToBoolean:
+    return buildFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc()));
+  case CK_MemberPointerToBoolean:
+    llvm_unreachable("NYI");
+  case CK_FloatingComplexToReal:
+    llvm_unreachable("NYI");
+  case CK_IntegralComplexToReal:
+    llvm_unreachable("NYI");
+  case CK_FloatingComplexToBoolean:
+    llvm_unreachable("NYI");
+  case CK_IntegralComplexToBoolean:
+    llvm_unreachable("NYI");
+  case CK_ZeroToOCLOpaqueType:
+    llvm_unreachable("NYI");
+  case CK_IntToOCLSampler:
+    llvm_unreachable("NYI");
+
+  default:
+    emitError(CGF.getLoc(CE->getExprLoc()), "cast kind not implemented: '")
+        << CE->getCastKindName() << "'";
+    return nullptr;
+  } // end of switch
+
+  llvm_unreachable("unknown scalar cast");
+}
+
+mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) {
+  if (E->getCallReturnType(CGF.getContext())->isReferenceType())
+    return buildLoadOfLValue(E);
+
+  auto V = CGF.buildCallExpr(E).getScalarVal();
+  assert(!UnimplementedFeature::buildLValueAlignmentAssumption());
+  return V;
+}
+
+mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+  // TODO(cir): Folding all these constants sounds like work for MLIR
optimizers, + // keep assertion for now. + assert(!UnimplementedFeature::tryEmitAsConstant()); + Expr::EvalResult Result; + if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { + llvm::APSInt Value = Result.Val.getInt(); + CGF.buildIgnoredExpr(E->getBase()); + return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); + } + return buildLoadOfLValue(E); +} + +/// Emit a conversion from the specified type to the specified destination +/// type, both of which are CIR scalar types. +mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { + assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && + CIRGenFunction::hasScalarEvaluationKind(DstTy) && + "Invalid scalar expression to emit"); + return ScalarExprEmitter(*this, builder) + .buildScalarConversion(Src, SrcTy, DstTy, Loc); +} + +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the boolean result in Result. +bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, + bool &ResultBool, + bool AllowLabels) { + llvm::APSInt ResultInt; + if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) + return false; + + ResultBool = ResultInt.getBoolValue(); + return true; +} + +mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { + bool Ignore = TestAndClearIgnoreResultAssign(); + (void)Ignore; + assert(Ignore == false && "init list ignored"); + unsigned NumInitElements = E->getNumInits(); + + if (E->hadArrayRangeDesignator()) + llvm_unreachable("NYI"); + + if (E->getType()->isVectorType()) { + assert(!UnimplementedFeature::scalableVectors() && + "NYI: scalable vector init"); + assert(!UnimplementedFeature::vectorConstants() && "NYI: vector constants"); + auto VectorType = + CGF.getCIRType(E->getType()).dyn_cast(); + SmallVector Elements; + for (Expr *init : E->inits()) { + Elements.push_back(Visit(init)); + } + // Zero-initialize any remaining values. + if (NumInitElements < VectorType.getSize()) { + mlir::Value ZeroValue = CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), VectorType.getEltType(), + CGF.getBuilder().getZeroInitAttr(VectorType.getEltType())); + for (uint64_t i = NumInitElements; i < VectorType.getSize(); ++i) { + Elements.push_back(ZeroValue); + } + } + return CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), VectorType, Elements); + } + + if (NumInitElements == 0) { + // C++11 value-initialization for the scalar. + llvm_unreachable("NYI"); + } + + return Visit(E->getInit(0)); +} + +mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { + // Perform vector logical not on comparison with zero vector. + if (E->getType()->isVectorType() && + E->getType()->castAs()->getVectorKind() == + VectorKind::Generic) { + llvm_unreachable("NYI"); + } + + // Compare operand to zero. + mlir::Value boolVal = CGF.evaluateExprAsBool(E->getSubExpr()); + + // Invert value. + boolVal = Builder.createNot(boolVal); + + // ZExt result to the expr type. + auto dstTy = ConvertType(E->getType()); + if (dstTy.isa()) + return Builder.createBoolToInt(boolVal, dstTy); + if (dstTy.isa()) + return boolVal; + + llvm_unreachable("destination type for logical-not unary operator is NYI"); +} + +// Conversion from bool, integral, or floating-point to integral or +// floating-point. Conversions involving other types are handled elsewhere. 
+// Conversion to bool is handled elsewhere because that's a comparison against +// zero, not a simple cast. This handles both individual scalars and vectors. +mlir::Value ScalarExprEmitter::buildScalarCast( + mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, + mlir::Type DstTy, ScalarConversionOpts Opts) { + assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && + "Internal error: matrix types not handled by this function."); + if (SrcTy.isa() || DstTy.isa()) + llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); + + mlir::Type FullDstTy = DstTy; + if (SrcTy.isa() && + DstTy.isa()) { + // Use the element types of the vectors to figure out the CastKind. + SrcTy = SrcTy.dyn_cast().getEltType(); + DstTy = DstTy.dyn_cast().getEltType(); + } + assert(!SrcTy.isa() && + !DstTy.isa() && + "buildScalarCast given a vector type and a non-vector type"); + + std::optional CastKind; + + if (SrcTy.isa()) { + if (Opts.TreatBooleanAsSigned) + llvm_unreachable("NYI: signed bool"); + if (CGF.getBuilder().isInt(DstTy)) { + CastKind = mlir::cir::CastKind::bool_to_int; + } else if (DstTy.isa()) { + CastKind = mlir::cir::CastKind::bool_to_float; + } else { + llvm_unreachable("Internal error: Cast to unexpected type"); + } + } else if (CGF.getBuilder().isInt(SrcTy)) { + if (CGF.getBuilder().isInt(DstTy)) { + CastKind = mlir::cir::CastKind::integral; + } else if (DstTy.isa()) { + CastKind = mlir::cir::CastKind::int_to_float; + } else { + llvm_unreachable("Internal error: Cast to unexpected type"); + } + } else if (SrcTy.isa()) { + if (CGF.getBuilder().isInt(DstTy)) { + // If we can't recognize overflow as undefined behavior, assume that + // overflow saturates. This protects against normal optimizations if we + // are compiling with non-standard FP semantics. 
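+    // e.g. (approximate syntax) '(int)f' for 'float f' takes the branch
+    // below and becomes:
+    //   %i = cir.cast(float_to_int, %f : !cir.float), !s32i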
+ if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) + llvm_unreachable("NYI"); + if (Builder.getIsFPConstrained()) + llvm_unreachable("NYI"); + CastKind = mlir::cir::CastKind::float_to_int; + } else if (DstTy.isa()) { + // TODO: split this to createFPExt/createFPTrunc + return Builder.createFloatingCast(Src, FullDstTy); + } else { + llvm_unreachable("Internal error: Cast to unexpected type"); + } + } else { + llvm_unreachable("Internal error: Cast from unexpected type"); + } + + assert(CastKind.has_value() && "Internal error: CastKind not set."); + return Builder.create(Src.getLoc(), FullDstTy, *CastKind, + Src); +} + +LValue +CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { + ScalarExprEmitter Scalar(*this, builder); + mlir::Value Result; + switch (E->getOpcode()) { +#define COMPOUND_OP(Op) \ + case BO_##Op##Assign: \ + return Scalar.buildCompoundAssignLValue(E, &ScalarExprEmitter::build##Op, \ + Result) + COMPOUND_OP(Mul); + COMPOUND_OP(Div); + COMPOUND_OP(Rem); + COMPOUND_OP(Add); + COMPOUND_OP(Sub); + COMPOUND_OP(Shl); + COMPOUND_OP(Shr); + COMPOUND_OP(And); + COMPOUND_OP(Xor); + COMPOUND_OP(Or); +#undef COMPOUND_OP + + case BO_PtrMemD: + case BO_PtrMemI: + case BO_Mul: + case BO_Div: + case BO_Rem: + case BO_Add: + case BO_Sub: + case BO_Shl: + case BO_Shr: + case BO_LT: + case BO_GT: + case BO_LE: + case BO_GE: + case BO_EQ: + case BO_NE: + case BO_Cmp: + case BO_And: + case BO_Xor: + case BO_Or: + case BO_LAnd: + case BO_LOr: + case BO_Assign: + case BO_Comma: + llvm_unreachable("Not valid compound assignment operators"); + } + llvm_unreachable("Unhandled compound assignment operator"); +} + +LValue ScalarExprEmitter::buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &), + mlir::Value &Result) { + QualType LHSTy = E->getLHS()->getType(); + BinOpInfo OpInfo; + + if (E->getComputationResultType()->isAnyComplexType()) + assert(0 && "not implemented"); + + // Emit the RHS first. __block variables need to have the rhs evaluated + // first, plus this should improve codegen a little. + OpInfo.RHS = Visit(E->getRHS()); + OpInfo.FullType = E->getComputationResultType(); + OpInfo.CompType = OpInfo.FullType; + if (auto VecType = dyn_cast_or_null(OpInfo.FullType)) { + OpInfo.CompType = VecType->getElementType(); + } + OpInfo.Opcode = E->getOpcode(); + OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); + OpInfo.E = E; + OpInfo.Loc = E->getSourceRange(); + + // Load/convert the LHS + LValue LHSLV = CGF.buildLValue(E->getLHS()); + + if (const AtomicType *atomicTy = LHSTy->getAs()) { + assert(0 && "not implemented"); + } + + OpInfo.LHS = buildLoadOfLValue(LHSLV, E->getExprLoc()); + + CIRGenFunction::SourceLocRAIIObject sourceloc{ + CGF, CGF.getLoc(E->getSourceRange())}; + SourceLocation Loc = E->getExprLoc(); + OpInfo.LHS = + buildScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); + + // Expand the binary operator. + Result = (this->*Func)(OpInfo); + + // Convert the result back to the LHS type, + // potentially with Implicit Conversion sanitizer check. + Result = buildScalarConversion(Result, E->getComputationResultType(), LHSTy, + Loc, ScalarConversionOpts(CGF.SanOpts)); + + // Store the result value into the LHS lvalue. Bit-fields are handled + // specially because the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. 
+ if (LHSLV.isBitField()) + CGF.buildStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); + else + CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); + + if (CGF.getLangOpts().OpenMP) + CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, + E->getLHS()); + return LHSLV; +} + +mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { + return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); +} + +mlir::Value ScalarExprEmitter::buildCompoundAssign( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { + + bool Ignore = TestAndClearIgnoreResultAssign(); + mlir::Value RHS; + LValue LHS = buildCompoundAssignLValue(E, Func, RHS); + + // If the result is clearly ignored, return now. + if (Ignore) + return {}; + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LHS.isVolatileQualified()) + return RHS; + + // Otherwise, reload the value. + return buildLoadOfLValue(LHS, E->getExprLoc()); +} + +mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { + auto scopeLoc = CGF.getLoc(E->getSourceRange()); + auto &builder = CGF.builder; + + auto scope = builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{CGF, loc, + builder.getInsertionBlock()}; + auto scopeYieldVal = Visit(E->getSubExpr()); + if (scopeYieldVal) { + builder.create(loc, scopeYieldVal); + yieldTy = scopeYieldVal.getType(); + } + }); + + // Defend against dominance problems caused by jumps out of expression + // evaluation through the shared cleanup block. + // TODO(cir): Scope.ForceCleanup({&V}); + return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr; +} + +mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { + bool Ignore = TestAndClearIgnoreResultAssign(); + + mlir::Value RHS; + LValue LHS; + + switch (E->getLHS()->getType().getObjCLifetime()) { + case Qualifiers::OCL_Strong: + llvm_unreachable("NYI"); + case Qualifiers::OCL_Autoreleasing: + llvm_unreachable("NYI"); + case Qualifiers::OCL_ExplicitNone: + llvm_unreachable("NYI"); + case Qualifiers::OCL_Weak: + llvm_unreachable("NYI"); + case Qualifiers::OCL_None: + // __block variables need to have the rhs evaluated first, plus this should + // improve codegen just a little. + RHS = Visit(E->getRHS()); + LHS = buildCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); + + // Store the value into the LHS. Bit-fields are handled specially because + // the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. + if (LHS.isBitField()) { + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); + } else { + CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); + CIRGenFunction::SourceLocRAIIObject loc{CGF, + CGF.getLoc(E->getSourceRange())}; + CGF.buildStoreThroughLValue(RValue::get(RHS), LHS); + } + } + + // If the result is clearly ignored, return now. + if (Ignore) + return nullptr; + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LHS.isVolatileQualified()) + return RHS; + + // Otherwise, reload the value. 
+ return buildLoadOfLValue(LHS, E->getExprLoc()); +} + +/// Return true if the specified expression is cheap enough and side-effect-free +/// enough to evaluate unconditionally instead of conditionally. This is used +/// to convert control flow into selects in some cases. +/// TODO(cir): can be shared with LLVM codegen. +static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, + CIRGenFunction &CGF) { + // Anything that is an integer or floating point constant is fine. + return E->IgnoreParens()->isEvaluatable(CGF.getContext()); + + // Even non-volatile automatic variables can't be evaluated unconditionally. + // Referencing a thread_local may cause non-trivial initialization work to + // occur. If we're inside a lambda and one of the variables is from the scope + // outside the lambda, that function may have returned already. Reading its + // locals is a bad idea. Also, these reads may introduce races there didn't + // exist in the source-level program. +} + +mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( + const AbstractConditionalOperator *E) { + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); + TestAndClearIgnoreResultAssign(); + + // Bind the common expression if necessary. + CIRGenFunction::OpaqueValueMapping binding(CGF, E); + + Expr *condExpr = E->getCond(); + Expr *lhsExpr = E->getTrueExpr(); + Expr *rhsExpr = E->getFalseExpr(); + + // If the condition constant folds and can be elided, try to avoid emitting + // the condition and the dead arm. + bool CondExprBool; + if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { + Expr *live = lhsExpr, *dead = rhsExpr; + if (!CondExprBool) + std::swap(live, dead); + + // If the dead side doesn't have labels we need, just emit the Live part. + if (!CGF.ContainsLabel(dead)) { + if (CondExprBool) + assert(!UnimplementedFeature::incrementProfileCounter()); + auto Result = Visit(live); + + // If the live part is a throw expression, it acts like it has a void + // type, so evaluating it returns a null Value. However, a conditional + // with non-void type must return a non-null Value. + if (!Result && !E->getType()->isVoidType()) { + llvm_unreachable("NYI"); + } + + return Result; + } + } + + // OpenCL: If the condition is a vector, we can treat this condition like + // the select function. + if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) || + condExpr->getType()->isExtVectorType()) { + llvm_unreachable("NYI"); + } + + if (condExpr->getType()->isVectorType() || + condExpr->getType()->isSveVLSBuiltinType()) { + assert(condExpr->getType()->isVectorType() && "?: op for SVE vector NYI"); + mlir::Value condValue = Visit(condExpr); + mlir::Value lhsValue = Visit(lhsExpr); + mlir::Value rhsValue = Visit(rhsExpr); + return builder.create(loc, condValue, lhsValue, + rhsValue); + } + + // If this is a really simple expression (like x ? 4 : 5), emit this as a + // select instead of as control flow. We can only do this if it is cheap and + // safe to evaluate the LHS and RHS unconditionally. 
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && + isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { + bool lhsIsVoid = false; + auto condV = CGF.evaluateExprAsBool(condExpr); + assert(!UnimplementedFeature::incrementProfileCounter()); + + return builder + .create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }) + .getResult(); + } + + mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); + CIRGenFunction::ConditionalEvaluation eval(CGF); + SmallVector insertPoints{}; + mlir::Type yieldTy{}; + + auto patchVoidOrThrowSites = [&]() { + if (insertPoints.empty()) + return; + // If both arms are void, so be it. + if (!yieldTy) + yieldTy = CGF.VoidTy; + + // Insert required yields. + for (auto &toInsert : insertPoints) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(toInsert); + + // Block does not return: build empty yield. + if (yieldTy.isa()) { + builder.create(loc); + } else { // Block returns: set null yield value. + mlir::Value op0 = builder.getNullValue(yieldTy, loc); + builder.create(loc, op0); + } + } + }; + + return builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{CGF, loc, + b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{CGF, loc, + b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); +} + +mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { + return ScalarExprEmitter(*this, builder) + .buildScalarPrePostIncDec(E, LV, isInc, isPre); +} + +mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { + if (E->getType()->isVectorType()) { + llvm_unreachable("NYI"); + } + + bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); + mlir::Type ResTy = ConvertType(E->getType()); + mlir::Location Loc = CGF.getLoc(E->getExprLoc()); + + // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. + // If we have 1 && X, just emit X without inserting the control flow. + bool LHSCondVal; + if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { + if (LHSCondVal) { // If we have 1 && X, just emit X. 
+ + mlir::Value RHSCond = CGF.evaluateExprAsBool(E->getRHS()); + + if (InstrumentRegions) { + llvm_unreachable("NYI"); + } + // ZExt result to int or bool. + return Builder.createZExtOrBitCast(RHSCond.getLoc(), RHSCond, ResTy); + } + // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. + if (!CGF.ContainsLabel(E->getRHS())) + return Builder.getNullValue(ResTy, Loc); + } + + CIRGenFunction::ConditionalEvaluation eval(CGF); + + mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); + auto ResOp = Builder.create( + Loc, LHSCondV, /*trueBuilder=*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); + auto res = B.create( + Loc, RHSCondV, /*trueBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &b, mlir::Location Loc) { + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = b.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + false)); + b.create(Loc, res.getRes()); + }); + B.create(Loc, res.getResult()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), false)); + B.create(Loc, res.getRes()); + }); + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); +} + +mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { + if (E->getType()->isVectorType()) { + llvm_unreachable("NYI"); + } + + bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); + mlir::Type ResTy = ConvertType(E->getType()); + mlir::Location Loc = CGF.getLoc(E->getExprLoc()); + + // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. + // If we have 0 || X, just emit X without inserting the control flow. + bool LHSCondVal; + if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { + if (!LHSCondVal) { // If we have 0 || X, just emit X. + + mlir::Value RHSCond = CGF.evaluateExprAsBool(E->getRHS()); + + if (InstrumentRegions) { + llvm_unreachable("NYI"); + } + // ZExt result to int or bool. + return Builder.createZExtOrBitCast(RHSCond.getLoc(), RHSCond, ResTy); + } + // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. 
+ if (!CGF.ContainsLabel(E->getRHS())) { + if (auto intTy = ResTy.dyn_cast()) + return Builder.getConstInt(Loc, intTy, 1); + else + return Builder.getBool(true, Loc); + } + } + + CIRGenFunction::ConditionalEvaluation eval(CGF); + + mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); + auto ResOp = Builder.create( + Loc, LHSCondV, /*trueBuilder=*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); + auto res = B.create( + Loc, RHSCondV, /*trueBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &b, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + auto res = b.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + false)); + b.create(Loc, res.getRes()); + }); + B.create(Loc, res.getResult()); + }); + + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); +} + +mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { + QualType Ty = VE->getType(); + + if (Ty->isVariablyModifiedType()) + assert(!UnimplementedFeature::variablyModifiedTypeEmission() && "NYI"); + + Address ArgValue = Address::invalid(); + mlir::Value Val = CGF.buildVAArg(VE, ArgValue); + + return Val; +} + +/// Return the size or alignment of the type of argument of the sizeof +/// expression as an integer. +mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( + const UnaryExprOrTypeTraitExpr *E) { + QualType TypeToSize = E->getTypeOfArgument(); + if (E->getKind() == UETT_SizeOf) { + if (const VariableArrayType *VAT = + CGF.getContext().getAsVariableArrayType(TypeToSize)) { + + if (E->isArgumentType()) { + // sizeof(type) - make sure to emit the VLA size. + CGF.buildVariablyModifiedType(TypeToSize); + } else { + // C99 6.5.3.4p2: If the argument is an expression of type + // VLA, it is evaluated. + CGF.buildIgnoredExpr(E->getArgumentExpr()); + } + + auto VlaSize = CGF.getVLASize(VAT); + mlir::Value size = VlaSize.NumElts; + + // Scale the number of non-VLA elements by the non-VLA element size. 
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); + if (!eltSize.isOne()) + size = Builder.createMul(size, CGF.CGM.getSize(eltSize).getValue()); + + return size; + } + } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { + llvm_unreachable("NYI"); + } + + // If this isn't sizeof(vla), the result must be constant; use the constant + // folding logic so we don't have to duplicate it here. + return Builder.getConstInt(CGF.getLoc(E->getSourceRange()), + E->EvaluateKnownConstInt(CGF.getContext())); +} + +mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( + mlir::Type ElemTy, mlir::Value Ptr, ArrayRef IdxList, + bool SignedIndices, bool IsSubtraction, SourceLocation Loc) { + mlir::Type PtrTy = Ptr.getType(); + assert(IdxList.size() == 1 && "multi-index ptr arithmetic NYI"); + mlir::Value GEPVal = builder.create( + CGM.getLoc(Loc), PtrTy, Ptr, IdxList[0]); + + // If the pointer overflow sanitizer isn't enabled, do nothing. + if (!SanOpts.has(SanitizerKind::PointerOverflow)) + return GEPVal; + + // TODO(cir): the unreachable code below hides a substantial amount of code + // from the original codegen related with pointer overflow sanitizer. + assert(UnimplementedFeature::pointerOverflowSanitizer()); + llvm_unreachable("pointer overflow sanitizer NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp new file mode 100644 index 000000000000..8256bd6db71c --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -0,0 +1,1736 @@ +//===- CIRGenFunction.cpp - Emit CIR from ASTs for a Function -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This coordinates the per-function state used while generating code +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" +#include "CIRGenCXXABI.h" +#include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" +#include "UnimplementedFeatureGuarding.h" + +#include "clang/AST/ASTLambda.h" +#include "clang/AST/ExprObjC.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/DiagnosticCategories.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/FPEnv.h" +#include "clang/Frontend/FrontendDiagnostic.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Support/LogicalResult.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, + bool suppressNewContext) + : CIRGenTypeCache(CGM), CGM{CGM}, builder(builder), + SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()), + ShouldEmitLifetimeMarkers(false) { + if (!suppressNewContext) + CGM.getCXXABI().getMangleContext().startNewFunction(); + EHStack.setCGF(this); + + // TODO(CIR): SetFastMathFlags(CurFPFeatures); +} + +clang::ASTContext &CIRGenFunction::getContext() const { + return CGM.getASTContext(); +} + +mlir::Type CIRGenFunction::ConvertType(QualType T) { + return CGM.getTypes().ConvertType(T); +} + +TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { + type = type.getCanonicalType(); + while (true) { + switch (type->getTypeClass()) { +#define TYPE(name, parent) +#define ABSTRACT_TYPE(name, 
parent) +#define NON_CANONICAL_TYPE(name, parent) case Type::name: +#define DEPENDENT_TYPE(name, parent) case Type::name: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("non-canonical or dependent type in IR-generation"); + + case Type::ArrayParameter: + llvm_unreachable("NYI"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("undeduced type in IR-generation"); + + // Various scalar types. + case Type::Builtin: + case Type::Pointer: + case Type::BlockPointer: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::FunctionProto: + case Type::FunctionNoProto: + case Type::Enum: + case Type::ObjCObjectPointer: + case Type::Pipe: + case Type::BitInt: + return TEK_Scalar; + + // Complexes. + case Type::Complex: + return TEK_Complex; + + // Arrays, records, and Objective-C objects. + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::Record: + case Type::ObjCObject: + case Type::ObjCInterface: + return TEK_Aggregate; + + // We operate on atomic values according to their underlying type. + case Type::Atomic: + type = cast(type)->getValueType(); + continue; + } + llvm_unreachable("unknown type kind!"); + } +} + +mlir::Type CIRGenFunction::convertTypeForMem(QualType T) { + return CGM.getTypes().convertTypeForMem(T); +} + +mlir::Type CIRGenFunction::convertType(QualType T) { + return CGM.getTypes().ConvertType(T); +} + +mlir::Location CIRGenFunction::getLoc(SourceLocation SLoc) { + // Some AST nodes might contain invalid source locations (e.g. + // CXXDefaultArgExpr), workaround that to still get something out. + if (SLoc.isValid()) { + const SourceManager &SM = getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); + } else { + // Do our best... + assert(currSrcLoc && "expected to inherit some source location"); + return *currSrcLoc; + } +} + +mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { + // Some AST nodes might contain invalid source locations (e.g. + // CXXDefaultArgExpr), workaround that to still get something out. + if (SLoc.isValid()) { + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + } else if (currSrcLoc) { + return *currSrcLoc; + } + + // We're brave, but time to give up. + return builder.getUnknownLoc(); +} + +mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) { + SmallVector locs = {lhs, rhs}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); +} + +/// Return true if the statement contains a label in it. If +/// this statement is not executed normally, it not containing a label means +/// that we can just remove the code. +bool CIRGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { + // Null statement, not a label! + if (!S) + return false; + + // If this is a label, we have to emit the code, consider something like: + // if (0) { ... 
foo: bar(); } goto foo; + // + // TODO: If anyone cared, we could track __label__'s, since we know that you + // can't jump to one from outside their declared region. + if (isa(S)) + return true; + + // If this is a case/default statement, and we haven't seen a switch, we + // have to emit the code. + if (isa(S) && !IgnoreCaseStmts) + return true; + + // If this is a switch statement, we want to ignore cases below it. + if (isa(S)) + IgnoreCaseStmts = true; + + // Scan subexpressions for verboten labels. + for (const Stmt *SubStmt : S->children()) + if (ContainsLabel(SubStmt, IgnoreCaseStmts)) + return true; + + return false; +} + +bool CIRGenFunction::sanitizePerformTypeCheck() const { + return SanOpts.has(SanitizerKind::Null) || + SanOpts.has(SanitizerKind::Alignment) || + SanOpts.has(SanitizerKind::ObjectSize) || + SanOpts.has(SanitizerKind::Vptr); +} + +void CIRGenFunction::buildTypeCheck(TypeCheckKind TCK, + clang::SourceLocation Loc, mlir::Value V, + clang::QualType Type, + clang::CharUnits Alignment, + clang::SanitizerSet SkippedChecks, + std::optional ArraySize) { + if (!sanitizePerformTypeCheck()) + return; + + assert(false && "type check NYI"); +} + +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the folded value. +bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, + llvm::APSInt &ResultInt, + bool AllowLabels) { + // FIXME: Rename and handle conversion of other evaluatable things + // to bool. + Expr::EvalResult Result; + if (!Cond->EvaluateAsInt(Result, getContext())) + return false; // Not foldable, not integer or not fully evaluatable. + + llvm::APSInt Int = Result.Val.getInt(); + if (!AllowLabels && ContainsLabel(Cond)) + return false; // Contains a label. + + ResultInt = Int; + return true; +} + +mlir::Type CIRGenFunction::getCIRType(const QualType &type) { + return CGM.getCIRType(type); +} + +/// Determine whether the function F ends with a return stmt. +static bool endsWithReturn(const Decl *F) { + const Stmt *Body = nullptr; + if (auto *FD = dyn_cast_or_null(F)) + Body = FD->getBody(); + else if (auto *OMD = dyn_cast_or_null(F)) + llvm_unreachable("NYI"); + + if (auto *CS = dyn_cast_or_null(Body)) { + auto LastStmt = CS->body_rbegin(); + if (LastStmt != CS->body_rend()) + return isa(*LastStmt); + } + return false; +} + +void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, + CharUnits alignment) { + + if (ty->isVoidType()) { + // Void type; nothing to return. + ReturnValue = Address::invalid(); + + // Count the implicit return. + if (!endsWithReturn(CurFuncDecl)) + ++NumReturnExprs; + } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) { + // TODO(CIR): Consider this implementation in CIRtoLLVM + llvm_unreachable("NYI"); + // TODO(CIR): Consider this implementation in CIRtoLLVM + } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca) { + llvm_unreachable("NYI"); + } else { + auto addr = buildAlloca("__retval", ty, loc, alignment); + FnRetAlloca = addr; + ReturnValue = Address(addr, alignment); + + // Tell the epilog emitter to autorelease the result. 
We do this now so
+    // that various specialized functions can suppress it during their
+    // IR-generation.
+    if (getLangOpts().ObjCAutoRefCount)
+      llvm_unreachable("NYI");
+  }
+}
+
+mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty,
+                                            mlir::Location loc,
+                                            CharUnits alignment,
+                                            mlir::Value &addr, bool isParam) {
+  const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
+  assert(namedVar && "Needs a named decl");
+  assert(!symbolTable.count(var) && "not supposed to be available just yet");
+
+  addr = buildAlloca(namedVar->getName(), ty, loc, alignment);
+  if (isParam) {
+    auto allocaOp = cast<mlir::cir::AllocaOp>(addr.getDefiningOp());
+    allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext()));
+  }
+
+  symbolTable.insert(var, addr);
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var,
+                                            QualType ty, mlir::Location loc,
+                                            CharUnits alignment,
+                                            mlir::Value &addrVal,
+                                            bool isParam) {
+  const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
+  assert(namedVar && "Needs a named decl");
+  assert(!symbolTable.count(var) && "not supposed to be available just yet");
+
+  addrVal = addr.getPointer();
+  if (isParam) {
+    auto allocaOp = cast<mlir::cir::AllocaOp>(addrVal.getDefiningOp());
+    allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext()));
+  }
+
+  symbolTable.insert(var, addrVal);
+  return mlir::success();
+}
+
+/// All scope related cleanup needed:
+/// - Patching up unsolved goto's.
+/// - Build all cleanup code and insert yield/returns.
+void CIRGenFunction::LexicalScope::cleanup() {
+  auto &builder = CGF.builder;
+  auto *localScope = CGF.currLexScope;
+
+  // Handle pending gotos and the solved labels in this scope.
+  while (!localScope->PendingGotos.empty()) {
+    auto gotoInfo = localScope->PendingGotos.back();
+    // FIXME: Currently we only support resolving goto labels inside the
+    // same lexical scope.
+    assert(localScope->SolvedLabels.count(gotoInfo.second) &&
+           "goto across scopes not yet supported");
+
+    // The goto in this lexical context actually maps to a basic
+    // block.
+    auto g = cast<mlir::cir::BrOp>(gotoInfo.first);
+    g.setSuccessor(CGF.LabelMap[gotoInfo.second].getBlock());
+    localScope->PendingGotos.pop_back();
+  }
+  localScope->SolvedLabels.clear();
+
+  auto applyCleanup = [&]() {
+    if (PerformCleanup) {
+      // ApplyDebugLocation
+      assert(!UnimplementedFeature::generateDebugInfo());
+      ForceCleanup();
+    }
+  };
+
+  // Cleanups are done right before codegen resumes a scope. This is where
+  // objects are destroyed.
+  unsigned curLoc = 0;
+  for (auto *retBlock : localScope->getRetBlocks()) {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.setInsertionPointToEnd(retBlock);
+    mlir::Location retLoc = *localScope->getRetLocs()[curLoc];
+    curLoc++;
+    (void)buildReturn(retLoc);
+  }
+
+  auto insertCleanupAndLeave = [&](mlir::Block *InsPt) {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.setInsertionPointToEnd(InsPt);
+
+    // Leverage and defer to RunCleanupsScope's dtor and scope handling.
+    applyCleanup();
+
+    if (localScope->Depth == 0) {
+      buildImplicitReturn();
+      return;
+    }
+
+    // End of any local scope != function
+    // Ternary ops have to deal with matching arms for yielding types
+    // and do return a value, it must do its own cir.yield insertion.
+    if (!localScope->isTernary()) {
+      !retVal ? builder.create<mlir::cir::YieldOp>(localScope->EndLoc)
+              : builder.create<mlir::cir::YieldOp>(localScope->EndLoc, retVal);
+    }
+  };
+
+  // If a cleanup block has been created at some point, branch to it
+  // and set the insertion point to continue at the cleanup block.
+ // Terminators are then inserted either in the cleanup block or + // inline in this current block. + auto *cleanupBlock = localScope->getCleanupBlock(builder); + if (cleanupBlock) + insertCleanupAndLeave(cleanupBlock); + + // Now deal with any pending block wrap-up, like implicit end of + // scope. + + // If a terminator is already present in the current block, nothing + // else to do here. + auto *currBlock = builder.getBlock(); + if (currBlock->mightHaveTerminator() && currBlock->getTerminator()) + return; + + // An empty non-entry block has nothing to offer, and since this is + // synthetic, losing information does not affect anything. + bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); + if (!entryBlock && currBlock->empty()) { + currBlock->erase(); + // Remove unused cleanup blocks. + if (cleanupBlock && cleanupBlock->hasNoPredecessors()) + cleanupBlock->erase(); + // FIXME(cir): ideally we should call applyCleanup() before we + // get into this condition and emit the proper cleanup. This is + // needed to get nrvo to interop with dtor logic. + PerformCleanup = false; + return; + } + + // If there's a cleanup block, branch to it, nothing else to do. + if (cleanupBlock) { + builder.create(currBlock->back().getLoc(), cleanupBlock); + return; + } + + // No pre-existing cleanup block: emit cleanup code and yield/return. + insertCleanupAndLeave(currBlock); +} + +mlir::cir::ReturnOp +CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { + auto &builder = CGF.getBuilder(); + + // If we are on a coroutine, add the coro_end builtin call. + auto Fn = dyn_cast(CGF.CurFn); + assert(Fn && "other callables NYI"); + if (Fn.getCoroutine()) + CGF.buildCoroEndBuiltinCall( + loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); + + if (CGF.FnRetCIRTy.has_value()) { + // If there's anything to return, load it first. + auto val = builder.create(loc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); + return builder.create(loc, llvm::ArrayRef(val.getResult())); + } + return builder.create(loc); +} + +void CIRGenFunction::LexicalScope::buildImplicitReturn() { + auto &builder = CGF.getBuilder(); + auto *localScope = CGF.currLexScope; + + const auto *FD = cast(CGF.CurGD.getDecl()); + + // C++11 [stmt.return]p2: + // Flowing off the end of a function [...] results in undefined behavior + // in a value-returning function. + // C11 6.9.1p12: + // If the '}' that terminates a function is reached, and the value of the + // function call is used by the caller, the behavior is undefined. + if (CGF.getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && + !CGF.SawAsmBlock && !FD->getReturnType()->isVoidType() && + builder.getInsertionBlock()) { + bool shouldEmitUnreachable = CGF.CGM.getCodeGenOpts().StrictReturn || + !CGF.CGM.MayDropFunctionReturn( + FD->getASTContext(), FD->getReturnType()); + + if (CGF.SanOpts.has(SanitizerKind::Return)) { + assert(!UnimplementedFeature::sanitizerReturn()); + llvm_unreachable("NYI"); + } else if (shouldEmitUnreachable) { + if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { + builder.create(localScope->EndLoc); + builder.clearInsertionPoint(); + return; + } + } + + if (CGF.SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { + builder.create(localScope->EndLoc); + builder.clearInsertionPoint(); + return; + } + } + + (void)buildReturn(localScope->EndLoc); +} + +void CIRGenFunction::finishFunction(SourceLocation EndLoc) { + // CIRGen doesn't use a BreakContinueStack or evaluate OnlySimpleReturnStmts.
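To make the guard in buildImplicitReturn concrete, here is the source-level pattern it protects against; the function below is a hypothetical example, not code from this patch:

```cpp
// Flowing off the end of a value-returning function:
int pick(bool b) {
  if (b)
    return 1;
} // UB in C++ if reached ([stmt.return]p2); UB-if-used in C (C11 6.9.1p12).
  // When StrictReturn applies (or the return value can't be dropped), the
  // code above terminates the block with an unreachable op at -O0 rather
  // than fabricating a return value.
```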
+ + // Usually the return expression is evaluated before the cleanup + // code. If the function contains only a simple return statement, + // such as a constant, the location before the cleanup code becomes + // the last useful breakpoint in the function, because the simple + // return expression will be evaluated after the cleanup code. To be + // safe, set the debug location for cleanup code to the location of + // the return statement. Otherwise the cleanup code should be at the + // end of the function's lexical scope. + // + // If there are multiple branches to the return block, the branch + // instructions will get the location of the return statements and + // all will be fine. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + // Pop any cleanups that might have been associated with the + // parameters. Do this in whatever block we're currently in; it's + // important to do this before we enter the return block or return + // edges will be *really* confused. + bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth; + if (HasCleanups) { + // Make sure the line table doesn't jump back into the body for + // the ret after it's been at EndLoc. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + // FIXME(cir): vla.c test currently crashes here. + // PopCleanupBlocks(PrologueCleanupDepth); + } + + // Emit function epilog (to return). + + // Original LLVM codegen does EmitReturnBlock() here, CIRGen handles + // this as part of LexicalScope instead, given CIR might have multiple + // blocks with `cir.return`. + if (ShouldInstrumentFunction()) { + assert(!UnimplementedFeature::shouldInstrumentFunction() && "NYI"); + } + + // Emit debug descriptor for function end. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + // Reset the debug location to that of the simple 'return' expression, if any + // rather than that of the end of the function's scope '}'. + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + assert(!UnimplementedFeature::emitFunctionEpilog() && "NYI"); + assert(!UnimplementedFeature::emitEndEHSpec() && "NYI"); + + // FIXME(cir): vla.c test currently crashes here. + // assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); + + // If someone did an indirect goto, emit the indirect goto block at the end of + // the function. + assert(!UnimplementedFeature::indirectBranch() && "NYI"); + + // If some of our locals escaped, insert a call to llvm.localescape in the + // entry block. + assert(!UnimplementedFeature::escapedLocals() && "NYI"); + + // If someone took the address of a label but never did an indirect goto, we + // made a zero entry PHI node, which is illegal, zap it now. + assert(!UnimplementedFeature::indirectBranch() && "NYI"); + + // CIRGen doesn't need to emit EHResumeBlock, TerminateLandingPad, + // TerminateHandler, UnreachableBlock, TerminateFunclets, NormalCleanupDest + // here because the basic blocks aren't shared. + + assert(!UnimplementedFeature::emitDeclMetadata() && "NYI"); + assert(!UnimplementedFeature::deferredReplacements() && "NYI"); + + // Add the min-legal-vector-width attribute. This contains the max width from: + // 1. min-vector-width attribute used in the source program. + // 2. Any builtins used that have a vector width specified. + // 3. Values passed in and out of inline assembly. + // 4. Width of vector arguments and return types for this function. + // 5. 
Width of vector arguments and return types for functions called by + // this function. + assert(!UnimplementedFeature::minLegalVectorWidthAttr() && "NYI"); + + // Add vscale_range attribute if appropriate. + assert(!UnimplementedFeature::vscaleRangeAttr() && "NYI"); + + // In traditional LLVM codegen, if clang generated an unreachable return + // block, it'd be deleted now. Same for unused ret allocas from ReturnValue. +} + +mlir::cir::FuncOp +CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo) { + assert(Fn && "generating code for a null function"); + const auto FD = cast(GD.getDecl()); + CurGD = GD; + + FnRetQualTy = FD->getReturnType(); + if (!FnRetQualTy->isVoidType()) + FnRetCIRTy = getCIRType(FnRetQualTy); + + FunctionArgList Args; + QualType ResTy = buildFunctionArgList(GD, Args); + + if (FD->isInlineBuiltinDeclaration()) { + llvm_unreachable("NYI"); + } else { + // Detect the unusual situation where an inline version is shadowed by a + // non-inline version. In that case we should pick the external one + // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way + // to detect that situation before we reach codegen, so do some late + // replacement. + for (const auto *PD = FD->getPreviousDecl(); PD; + PD = PD->getPreviousDecl()) { + if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) { + llvm_unreachable("NYI"); + } + } + } + + // Check if we should generate debug info for this function. + if (FD->hasAttr()) { + llvm_unreachable("NYI"); + } + + // The function might not have a body if we're generating thunks for a + // function declaration. + SourceRange BodyRange; + if (Stmt *Body = FD->getBody()) + BodyRange = Body->getSourceRange(); + else + BodyRange = FD->getLocation(); + // TODO: CurEHLocation + + // Use the location of the start of the function to determine where the + // function definition is located. By default we use the location of the + // declaration as the location for the subprogram. A function may lack a + // declaration in the source code if it is created by code gen. (examples: + // _GLOBAL__I_a, __cxx_global_array_dtor, thunk). + SourceLocation Loc = FD->getLocation(); + + // If this is a function specialization, then use the pattern body as the + // location for the function. + if (const auto *SpecDecl = FD->getTemplateInstantiationPattern()) + if (SpecDecl->hasBody(SpecDecl)) + Loc = SpecDecl->getLocation(); + + Stmt *Body = FD->getBody(); + + if (Body) { + // LLVM codegen: Coroutines always emit lifetime markers. + // Hide this under request for lifetime emission so that we can write + // tests when the time comes, but CIR should be intrinsically scope + // accurate, so no need to tie coroutines to such markers. + if (isa(Body)) + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + + // Initialize helper which will detect jumps which can cause invalid + // lifetime markers. + if (ShouldEmitLifetimeMarkers) + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + } + + // Create a scope in the symbol table to hold variable declarations. + SymTableScopeTy varScope(symbolTable); + // Compiler-synthesized functions might have invalid slocs... + auto bSrcLoc = FD->getBody()->getBeginLoc(); + auto eSrcLoc = FD->getBody()->getEndLoc(); + auto unknownLoc = builder.getUnknownLoc(); + + auto FnBeginLoc = bSrcLoc.isValid() ? getLoc(bSrcLoc) : unknownLoc; + auto FnEndLoc = eSrcLoc.isValid() ?
getLoc(eSrcLoc) : unknownLoc; + const auto fusedLoc = + mlir::FusedLoc::get(builder.getContext(), {FnBeginLoc, FnEndLoc}); + SourceLocRAIIObject fnLoc{*this, Loc.isValid() ? getLoc(Loc) : unknownLoc}; + + assert(Fn.isDeclaration() && "Function already has body?"); + mlir::Block *EntryBB = Fn.addEntryBlock(); + builder.setInsertionPointToStart(EntryBB); + + { + LexicalScope lexScope{*this, fusedLoc, EntryBB}; + + // Emit the standard function prologue. + StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); + + // Initialize lexical scope information. + + // Save parameters for coroutine function. + if (Body && isa_and_nonnull(Body)) + llvm::append_range(FnArgs, FD->parameters()); + + // Generate the body of the function. + // TODO: PGO.assignRegionCounters + if (isa(FD)) + buildDestructorBody(Args); + else if (isa(FD)) + buildConstructorBody(Args); + else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && + FD->hasAttr()) + llvm_unreachable("NYI"); + else if (isa(FD) && + cast(FD)->isLambdaStaticInvoker()) { + // The lambda static invoker function is special, because it forwards or + // clones the body of the function call operator (but is actually + // static). + buildLambdaStaticInvokeBody(cast(FD)); + } else if (FD->isDefaulted() && isa(FD) && + (cast(FD)->isCopyAssignmentOperator() || + cast(FD)->isMoveAssignmentOperator())) { + // Implicit copy-assignment gets the same special treatment as implicit + // copy-constructors. + buildImplicitAssignmentOperatorBody(Args); + } else if (Body) { + if (mlir::failed(buildFunctionBody(Body))) { + Fn.erase(); + return nullptr; + } + } else + llvm_unreachable("no definition for emitted function"); + + assert(builder.getInsertionBlock() && "Should be valid"); + } + + if (mlir::failed(Fn.verifyBody())) + return nullptr; + + // Emit the standard function epilogue. + finishFunction(BodyRange.getEnd()); + + // If we haven't marked the function nothrow through other means, do a quick + // pass now to see if we can. + assert(!UnimplementedFeature::tryMarkNoThrow()); + + return Fn; +} + +mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { + auto addr = GetAddrOfLocalVar(VD); + return builder.create(getLoc(VD->getLocation()), + addr.getElementType(), addr.getPointer()); +} + +static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { + auto *CD = llvm::dyn_cast(D); + if (!(CD && CD->isCopyOrMoveConstructor()) && + !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) + return false; + + // We can emit a memcpy for a trivial copy or move constructor/assignment + if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) + return true; + + if (D->getParent()->isUnion() && D->isDefaulted()) + return true; + + return false; +} + +void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, + bool ForVirtualBase, + bool Delegating, + AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E) { + CallArgList Args; + Address This = ThisAVS.getAddress(); + LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); + QualType ThisType = D->getThisType(); + LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace(); + mlir::Value ThisPtr = This.getPointer(); + + assert(SlotAS == ThisAS && "This edge case NYI"); + + Args.add(RValue::get(ThisPtr), D->getThisType()); + + // In LLVM Codegen: If this is a trivial constructor, just emit what's needed. 
+ // If this is a union copy constructor, we must emit a memcpy, because the AST + // does not model that copy. + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + } + + const FunctionProtoType *FPT = D->getType()->castAs(); + EvaluationOrder Order = E->isListInitialization() + ? EvaluationOrder::ForceLeftToRight + : EvaluationOrder::Default; + + buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), + /*ParamsToSkip*/ 0, Order); + + buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, + ThisAVS.mayOverlap(), E->getExprLoc(), + ThisAVS.isSanitizerChecked()); +} + +void CIRGenFunction::buildCXXConstructorCall( + const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, SourceLocation Loc, + bool NewPointerIsChecked) { + + const auto *ClassDecl = D->getParent(); + + if (!NewPointerIsChecked) + buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + getContext().getRecordType(ClassDecl), CharUnits::Zero()); + + // If this is a call to a trivial default constructor: + // In LLVM: do nothing. + // In CIR: emit as a regular call, other later passes should lower the + // ctor call into trivial initialization. + assert(!UnimplementedFeature::isTrivialAndisDefaultConstructor()); + + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + } + + bool PassPrototypeArgs = true; + + assert(!D->getInheritedConstructor() && "inheritance NYI"); + + // Insert any ABI-specific implicit constructor arguments. + CIRGenCXXABI::AddedStructorArgCounts ExtraArgs = + CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase, + Delegating, Args); + + // Emit the call. + auto CalleePtr = CGM.getAddrOfCXXStructor(GlobalDecl(D, Type)); + const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( + Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); + CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); + mlir::cir::CIRCallOpInterface C; + buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); + + assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || + ClassDecl->isDynamicClass() || Type == Ctor_Base || + !CGM.getCodeGenOpts().StrictVTablePointers && + "vtable assumption loads NYI"); +} + +void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { + // TODO: EmitAsanPrologueOrEpilogue(true); + const auto *Ctor = cast(CurGD.getDecl()); + auto CtorType = CurGD.getCtorType(); + + assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || + CtorType == Ctor_Complete) && + "can only generate complete ctor for this ABI"); + + // Before we go any further, try the complete->base constructor delegation + // optimization. + if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && + CGM.getTarget().getCXXABI().hasConstructorVariants()) { + buildDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); + return; + } + + const FunctionDecl *Definition = nullptr; + Stmt *Body = Ctor->getBody(Definition); + assert(Definition == Ctor && "emitting wrong constructor body"); + + // Enter the function-try-block before the constructor prologue if + // applicable. 
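For readers unfamiliar with the complete->base delegation attempted above: the Itanium C++ ABI emits multiple symbols per constructor (Ctor_Complete and Ctor_Base correspond to the C1 and C2 manglings), and when no virtual bases are involved the complete variant can simply forward. A hand-written sketch of what the delegation amounts to, with illustrative function names standing in for the real mangled symbols:

```cpp
struct T { int x; };

// C2 / Ctor_Base (mangled _ZN1TC2Ev for `T::T()`): initializes members
// and non-virtual bases.
void T_base_ctor(T *thisp) { thisp->x = 0; }

// C1 / Ctor_Complete (_ZN1TC1Ev): with no virtual bases there is nothing
// extra to do, so it can just delegate to the base variant.
void T_complete_ctor(T *thisp) { T_base_ctor(thisp); }
```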
+ bool IsTryBody = (Body && isa(Body)); + if (IsTryBody) + llvm_unreachable("NYI"); + + // TODO: incrementProfileCounter + + // TODO: RunCleanupsScope RunCleanups(*this); + + // TODO: in restricted cases, we can emit the vbase initializers of a + // complete ctor and then delegate to the base ctor. + + // Emit the constructor prologue, i.e. the base and member initializers. + buildCtorPrologue(Ctor, CtorType, Args); + + // Emit the body of the statement. + if (IsTryBody) + llvm_unreachable("NYI"); + else { + // TODO: propagate this result via mlir::LogicalResult; unreachable is + // used for now just to have it handled. + if (mlir::failed(buildStmt(Body, true))) + llvm_unreachable("NYI"); + } + + // Emit any cleanup blocks associated with the member or base initializers, + // which includes (along the exceptional path) the destructors for those + // members and bases that were fully constructed. + /// TODO: RunCleanups.ForceCleanup(); + + if (IsTryBody) + llvm_unreachable("NYI"); +} + +/// Given a value of type T* that may not point to a complete object, construct +/// an l-value with the natural pointee alignment of T. +LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value V, + QualType T) { + // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps + // assert on the result type first. + LValueBaseInfo BaseInfo; + CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, + /* for PointeeType= */ true); + return makeAddrLValue(Address(V, Align), T, BaseInfo); +} + +LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value V, QualType T) { + LValueBaseInfo BaseInfo; + assert(!UnimplementedFeature::tbaa()); + CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo); + Address Addr(V, getTypes().convertTypeForMem(T), Alignment); + return LValue::makeAddr(Addr, T, getContext(), BaseInfo); +} + +// Map the LangOption for exception behavior into the corresponding enum in +// the IR. +cir::fp::ExceptionBehavior +ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) { + switch (Kind) { + case LangOptions::FPE_Ignore: + return cir::fp::ebIgnore; + case LangOptions::FPE_MayTrap: + return cir::fp::ebMayTrap; + case LangOptions::FPE_Strict: + return cir::fp::ebStrict; + default: + llvm_unreachable("Unsupported FP Exception Behavior"); + } +} + +void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, + mlir::cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo, + const FunctionArgList &Args, + SourceLocation Loc, + SourceLocation StartLoc) { + assert(!CurFn && + "Do not use a CIRGenFunction object for more than one function"); + + const auto *D = GD.getDecl(); + + DidCallStackSave = false; + CurCodeDecl = D; + const auto *FD = dyn_cast_or_null(D); + if (FD && FD->usesSEHTry()) + CurSEHParent = GD; + CurFuncDecl = (D ? D->getNonClosureContext() : nullptr); + FnRetTy = RetTy; + CurFn = Fn; + CurFnInfo = &FnInfo; + + // If this function is ignored for any of the enabled sanitizers, disable + // the sanitizer for the function.
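The do/while block that follows expands SANITIZER(...) once per known sanitizer via the Sanitizers.def x-macro. A self-contained sketch of the idiom, using a toy .def-style list instead of the real Sanitizers.def contents:

```cpp
#include <cstdio>

// Stand-in for a .def file: one macro invocation per entry.
#define FOR_EACH_CHECK(X) X(Null) X(Alignment) X(ObjectSize)

enum class Check {
#define CHECK_ENUM(Name) Name,
  FOR_EACH_CHECK(CHECK_ENUM)
#undef CHECK_ENUM
};

const char *checkName(Check c) {
  switch (c) {
#define CHECK_CASE(Name)                                                      \
  case Check::Name:                                                           \
    return #Name;
    FOR_EACH_CHECK(CHECK_CASE)
#undef CHECK_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", checkName(Check::Alignment)); }
```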
+ do { +#define SANITIZER(NAME, ID) \ + if (SanOpts.empty()) \ + break; \ + if (SanOpts.has(SanitizerKind::ID)) \ + if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \ + SanOpts.set(SanitizerKind::ID, false); + +#include "clang/Basic/Sanitizers.def" +#undef SANITIZER + } while (0); + + if (D) { + bool NoSanitizeCoverage = false; + (void)NoSanitizeCoverage; + + for (auto Attr : D->specific_attrs()) { + (void)Attr; + llvm_unreachable("NYI"); + } + + // SanitizeCoverage is not handled by SanOpts. + if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage()) + llvm_unreachable("NYI"); + } + + // Apply sanitizer attributes to the function. + if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress | + SanitizerKind::HWAddress | + SanitizerKind::KernelHWAddress | SanitizerKind::MemTag | + SanitizerKind::Thread | SanitizerKind::Memory | + SanitizerKind::KernelMemory | SanitizerKind::SafeStack | + SanitizerKind::ShadowCallStack | SanitizerKind::Fuzzer | + SanitizerKind::FuzzerNoLink | + SanitizerKind::CFIUnrelatedCast | SanitizerKind::Null)) + llvm_unreachable("NYI"); + + // TODO: XRay + // TODO: PGO + + unsigned Count, Offset; + if (const auto *Attr = + D ? D->getAttr() : nullptr) { + llvm_unreachable("NYI"); + } else { + Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount; + Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset; + } + if (Count && Offset <= Count) { + llvm_unreachable("NYI"); + } + + // Add no-jump-tables value. + if (CGM.getCodeGenOpts().NoUseJumpTables) + llvm_unreachable("NYI"); + + // Add no-inline-line-tables value. + if (CGM.getCodeGenOpts().NoInlineLineTables) + llvm_unreachable("NYI"); + + // Add profile-sample-accurate value. + if (CGM.getCodeGenOpts().ProfileSampleAccurate) + llvm_unreachable("NYI"); + + if (!CGM.getCodeGenOpts().SampleProfileFile.empty()) + llvm_unreachable("NYI"); + + if (D && D->hasAttr()) + llvm_unreachable("NYI"); + + if (D && D->hasAttr()) + llvm_unreachable("NYI"); + + if (FD && getLangOpts().OpenCL) { + llvm_unreachable("NYI"); + } + + // If we are checking function types, emit a function type signature as + // prologue data. + if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { + llvm_unreachable("NYI"); + } + + // If we're checking nullability, we need to know whether we can check the + // return value. Initialize the flag to 'true' and refine it in + // buildParmDecl. + if (SanOpts.has(SanitizerKind::NullabilityReturn)) { + llvm_unreachable("NYI"); + } + + // If we're in C++ mode and the function name is "main", it is guaranteed to + // be norecurse by the standard (3.6.1.3 "The function main shall not be + // used within a program"). + // + // OpenCL C 2.0 v2.2-11 s6.9.i: + // Recursion is not supported. + // + // SYCL v1.2.1 s3.10: + // kernels cannot include RTTI information, exception classes, recursive + // code, virtual functions or make use of C++ libraries that are not + // compiled for the device.
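Just after the norecurse handling below, StartFunction also seeds the builder's constrained floating-point defaults (getDefaultRoundingMode / getDefaultExceptionMode, routed through ToConstrainedExceptMD above). Roughly, ebStrict corresponds to code that is allowed to observe and depend on the FP environment. A hedged illustration using standard <cfenv>; clang's pragma support here has caveats, so treat this strictly as a sketch of the semantics, not as code from this patch:

```cpp
#include <cfenv>
#pragma STDC FENV_ACCESS ON // roughly what -ffp-exception-behavior=strict permits

bool multiplyOverflows(double a, double b) {
  std::feclearexcept(FE_OVERFLOW);
  double r = a * b; // under ebStrict this multiply may raise FP exceptions and
  (void)r;          // must not be reordered past the status-flag test below
  return std::fetestexcept(FE_OVERFLOW) != 0;
}
```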
+ if (FD && + ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL || + getLangOpts().SYCLIsDevice || + (getLangOpts().CUDA && FD->hasAttr()))) + ; // TODO: support norecurse attr + + llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode(); + cir::fp::ExceptionBehavior FPExceptionBehavior = + ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode()); + builder.setDefaultConstrainedRounding(RM); + builder.setDefaultConstrainedExcept(FPExceptionBehavior); + if ((FD && (FD->UsesFPIntrin() || FD->hasAttr())) || + (!FD && (FPExceptionBehavior != cir::fp::ebIgnore || + RM != llvm::RoundingMode::NearestTiesToEven))) { + llvm_unreachable("NYI"); + } + + // TODO: stackrealign attr + + mlir::Block *EntryBB = &Fn.getBlocks().front(); + + // TODO: alloca insertion point? probably don't need for CIR + + // TODO: return value checking + + if (getDebugInfo()) { + llvm_unreachable("NYI"); + } + + if (ShouldInstrumentFunction()) { + llvm_unreachable("NYI"); + } + + // Since emitting the mcount call here impacts optimizations such as + // function inlining, we just add an attribute to insert an mcount call in + // the backend. The attribute "counting-function" is set to the mcount + // function name, which is architecture-dependent. + if (CGM.getCodeGenOpts().InstrumentForProfiling) { + llvm_unreachable("NYI"); + } + + if (CGM.getCodeGenOpts().PackedStack) { + llvm_unreachable("NYI"); + } + + if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX) { + llvm_unreachable("NYI"); + } + + assert(!UnimplementedFeature::emitStartEHSpec() && "NYI"); + // FIXME(cir): vla.c test currently crashes here. + // PrologueCleanupDepth = EHStack.stable_begin(); + + if (getLangOpts().OpenMP && CurCodeDecl) + CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); + + // TODO: buildFunctionProlog + + { + // Set the insertion point in the builder to the beginning of the + // function body; it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(EntryBB); + + // TODO: this should live in `buildFunctionProlog`. + // Declare all the function arguments in the symbol table. + for (const auto nameValue : llvm::zip(Args, EntryBB->getArguments())) { + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + auto alignment = getContext().getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); + + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, + addr, true /*param*/))) + return; + + auto address = Address(addr, alignment); + setAddrOfLocalVar(paramVar, address); + + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addr); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + + // When the current function is not void, create an address to store the + // result value. + if (FnRetCIRTy.has_value()) + buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, + CGM.getNaturalTypeAlignment(FnRetQualTy)); + } + + if (D && isa(D) && cast(D)->isInstance()) { + CGM.getCXXABI().buildInstanceFunctionProlog(*this); + + const auto *MD = cast(D); + if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { + // We're in a lambda.
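Returning briefly to the argument-declaration loop above: it walks two parallel ranges (the clang ParmVarDecls and the entry block's MLIR arguments) in lockstep with llvm::zip. A minimal standalone sketch of that utility:

```cpp
#include "llvm/ADT/STLExtras.h"
#include <string>
#include <vector>

void zipDemo() {
  std::vector<std::string> names{"a", "b"};
  std::vector<int> values{1, 2};
  // llvm::zip yields a tuple per step and stops at the shorter range; the
  // prologue loop above unpacks each tuple with std::get<0> / std::get<1>.
  for (auto nameValue : llvm::zip(names, values)) {
    const std::string &name = std::get<0>(nameValue);
    int value = std::get<1>(nameValue);
    (void)name;
    (void)value; // visits ("a", 1), then ("b", 2)
  }
}
```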
+ auto Fn = dyn_cast(CurFn); + assert(Fn && "other callables NYI"); + Fn.setLambdaAttr(mlir::UnitAttr::get(builder.getContext())); + + // Figure out the captures. + MD->getParent()->getCaptureFields(LambdaCaptureFields, + LambdaThisCaptureField); + if (LambdaThisCaptureField) { + llvm_unreachable("NYI"); + } + for (auto *FD : MD->getParent()->fields()) { + if (FD->hasCapturedVLAType()) { + llvm_unreachable("NYI"); + } + } + + } else { + // Not in a lambda; just use 'this' from the method. + // FIXME: Should we generate a new load for each use of 'this'? The fast + // register allocator would be happier... + CXXThisValue = CXXABIThisValue; + } + + // Check the 'this' pointer once per function, if it's available. + if (CXXABIThisValue) { + SanitizerSet SkippedChecks; + SkippedChecks.set(SanitizerKind::ObjectSize, true); + QualType ThisTy = MD->getThisType(); + (void)ThisTy; + + // If this is the call operator of a lambda with no capture-default, it + // may have a static invoker function, which may call this operator with + // a null 'this' pointer. + if (isLambdaCallOperator(MD) && + MD->getParent()->getLambdaCaptureDefault() == LCD_None) + SkippedChecks.set(SanitizerKind::Null, true); + + assert(!UnimplementedFeature::buildTypeCheck() && "NYI"); + } + } + + // If any of the arguments have a variably modified type, make sure to emit + // the type size. + for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; + ++i) { + const VarDecl *VD = *i; + + // Dig out the type as written from ParmVarDecls; it's unclear whether the + // standard (C99 6.9.1p10) requires this, but we're following the + // precedent set by gcc. + QualType Ty; + if (const auto *PVD = dyn_cast(VD)) + Ty = PVD->getOriginalType(); + else + Ty = VD->getType(); + + if (Ty->isVariablyModifiedType()) + buildVariablyModifiedType(Ty); + } + // Emit a location at the end of the prologue. + if (getDebugInfo()) + llvm_unreachable("NYI"); + + // TODO: Do we need to handle this in two places like we do with + // target-features/target-cpu? + if (CurFuncDecl) + if (const auto *VecWidth = CurFuncDecl->getAttr()) + llvm_unreachable("NYI"); +} + +/// Return true if the current function should be instrumented with +/// __cyg_profile_func_* calls. +bool CIRGenFunction::ShouldInstrumentFunction() { + if (!CGM.getCodeGenOpts().InstrumentFunctions && + !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining && + !CGM.getCodeGenOpts().InstrumentFunctionEntryBare) + return false; + + llvm_unreachable("NYI"); +} + +mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { + // TODO: incrementProfileCounter(Body); + + // We start with a function-level scope for variables. + SymTableScopeTy varScope(symbolTable); + + auto result = mlir::LogicalResult::success(); + if (const CompoundStmt *S = dyn_cast(Body)) + buildCompoundStmtWithoutScope(*S); + else + result = buildStmt(Body, /*useCurrentScope*/ true); + + // This is checked after emitting the function body so we know if there are + // any permitted infinite loops.
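ShouldInstrumentFunction above gates -finstrument-functions support, which in traditional codegen injects calls to the __cyg_profile_func_* hooks at every function entry and exit. The hooks are supplied by user code; a sketch of the conventional definitions (marked no_instrument_function so they don't instrument themselves):

```cpp
#include <cstdio>

extern "C" __attribute__((no_instrument_function)) void
__cyg_profile_func_enter(void *fn, void *callSite) {
  std::fprintf(stderr, "enter %p (called from %p)\n", fn, callSite);
}

extern "C" __attribute__((no_instrument_function)) void
__cyg_profile_func_exit(void *fn, void *callSite) {
  std::fprintf(stderr, "exit  %p (called from %p)\n", fn, callSite);
}
```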
+ // TODO: if (checkIfFunctionMustProgress()) + // CurFn->addFnAttr(llvm::Attribute::MustProgress); + return result; +} + +clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, + FunctionArgList &Args) { + const auto *FD = cast(GD.getDecl()); + QualType ResTy = FD->getReturnType(); + + const auto *MD = dyn_cast(FD); + if (MD && MD->isInstance()) { + if (CGM.getCXXABI().HasThisReturn(GD)) + llvm_unreachable("NYI"); + else if (CGM.getCXXABI().hasMostDerivedReturn(GD)) + llvm_unreachable("NYI"); + CGM.getCXXABI().buildThisParam(*this, Args); + } + + // The base version of an inheriting constructor whose constructed base is a + // virtual base is not passed any arguments (because it doesn't actually + // call the inherited constructor). + bool PassedParams = true; + if (const auto *CD = dyn_cast(FD)) + if (auto Inherited = CD->getInheritedConstructor()) + PassedParams = + getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType()); + + if (PassedParams) { + for (auto *Param : FD->parameters()) { + Args.push_back(Param); + if (!Param->hasAttr()) + continue; + + auto *Implicit = ImplicitParamDecl::Create( + getContext(), Param->getDeclContext(), Param->getLocation(), + /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other); + SizeArguments[Param] = Implicit; + Args.push_back(Implicit); + } + } + + if (MD && (isa(MD) || isa(MD))) + CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args); + + return ResTy; +} + +static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) { + SmallString<256> Buffer; + llvm::raw_svector_ostream Out(Buffer); + Out << name << cnt; + return std::string(Out.str()); +} + +std::string CIRGenFunction::getCounterAggTmpAsString() { + return getVersionedTmpName("agg.tmp", CounterAggTmp++); +} + +std::string CIRGenFunction::getCounterRefTmpAsString() { + return getVersionedTmpName("ref.tmp", CounterRefTmp++); +} + +void CIRGenFunction::buildNullInitialization(mlir::Location loc, + Address DestPtr, QualType Ty) { + // Ignore empty classes in C++. + if (getLangOpts().CPlusPlus) { + if (const RecordType *RT = Ty->getAs()) { + if (cast(RT->getDecl())->isEmpty()) + return; + } + } + + // Cast the dest ptr to the appropriate i8 pointer type. + if (builder.isInt8Ty(DestPtr.getElementType())) { + llvm_unreachable("NYI"); + } + + // Get size and alignment info for this aggregate. + CharUnits size = getContext().getTypeSizeInChars(Ty); + [[maybe_unused]] mlir::Attribute SizeVal{}; + [[maybe_unused]] const VariableArrayType *vla = nullptr; + + // Don't bother emitting a zero-byte memset. + if (size.isZero()) { + // But note that getTypeInfo returns 0 for a VLA. + if (const VariableArrayType *vlaType = dyn_cast_or_null( + getContext().getAsArrayType(Ty))) { + llvm_unreachable("NYI"); + } else { + return; + } + } else { + SizeVal = CGM.getSize(size); + } + + // If the type contains a pointer to data member we can't memset it to zero. + // Instead, create a null constant and copy it to the destination. + // TODO: there are other patterns besides zero that we can usefully memset, + // like -1, which happens to be the pattern used by member-pointers. + if (!CGM.getTypes().isZeroInitializable(Ty)) { + llvm_unreachable("NYI"); + } + + // In LLVM Codegen: otherwise, just memset the whole thing to zero using + // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the + // respective address. 
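The isZeroInitializable guard referenced further down (in buildNullInitialization) exists because an all-zero byte pattern is not always the language-level null value. The classic case is a pointer to data member: under the Itanium C++ ABI its null representation is -1, since offset 0 is a valid member offset. An illustrative example with hypothetical types:

```cpp
struct Widget { int field; };

struct Holder {
  int Widget::*member = nullptr; // the Itanium ABI stores this null as -1
};

// memset(&h, 0, sizeof h) would set `member` to offset 0, i.e. make it
// point at Widget::field rather than be null; that is why zero-init for
// such types cannot take the memset-to-zero path.
void demo() { Holder h; (void)h; }
```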
+ // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); + builder.createStore(loc, builder.getZero(loc, getTypes().ConvertType(Ty)), + DestPtr); +} + +CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, + const clang::Expr *E) + : CGF(CGF) { + ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts())); +} + +CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, + FPOptions FPFeatures) + : CGF(CGF) { + ConstructorHelper(FPFeatures); +} + +void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( + FPOptions FPFeatures) { + OldFPFeatures = CGF.CurFPFeatures; + CGF.CurFPFeatures = FPFeatures; + + OldExcept = CGF.builder.getDefaultConstrainedExcept(); + OldRounding = CGF.builder.getDefaultConstrainedRounding(); + + if (OldFPFeatures == FPFeatures) + return; + + // TODO(cir): create guard to restore fast math configurations. + assert(!UnimplementedFeature::fastMathGuard()); + + llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode(); + // TODO(cir): override rounding behaviour once FM configs are guarded. + auto NewExceptionBehavior = + ToConstrainedExceptMD(static_cast( + FPFeatures.getExceptionMode())); + // TODO(cir): override exception behaviour once FM configs are guarded. + + // TODO(cir): override FP flags once FM configs are guarded. + assert(!UnimplementedFeature::fastMathFlags()); + + assert((CGF.CurFuncDecl == nullptr || CGF.builder.getIsFPConstrained() || + isa(CGF.CurFuncDecl) || + isa(CGF.CurFuncDecl) || + (NewExceptionBehavior == fp::ebIgnore && + NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) && + "FPConstrained should be enabled on entire function"); + + // TODO(cir): mark CIR function with fast math attributes. + assert(!UnimplementedFeature::fastMathFuncAttributes()); +} + +CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { + CGF.CurFPFeatures = OldFPFeatures; + CGF.builder.setDefaultConstrainedExcept(OldExcept); + CGF.builder.setDefaultConstrainedRounding(OldRounding); +} + +// TODO(cir): should be shared with LLVM codegen. +bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { + const Expr *E = CE->getSubExpr(); + + if (CE->getCastKind() == CK_UncheckedDerivedToBase) + return false; + + if (isa(E->IgnoreParens())) { + // We always assume that 'this' is never null. + return false; + } + + if (const ImplicitCastExpr *ICE = dyn_cast(CE)) { + // And that glvalue casts are never null. + if (ICE->isGLValue()) + return false; + } + + return true; +} + +void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, + const APValue &Init) { + assert(!UnimplementedFeature::generateDebugInfo()); +} + +Address CIRGenFunction::buildVAListRef(const Expr *E) { + if (getContext().getBuiltinVaListType()->isArrayType()) + return buildPointerWithAlignment(E); + return buildLValue(E).getAddress(); +} + +// Emits an error if we don't have a valid set of target features for the +// called function. +void CIRGenFunction::checkTargetFeatures(const CallExpr *E, + const FunctionDecl *TargetDecl) { + return checkTargetFeatures(E->getBeginLoc(), TargetDecl); +} + +// Emits an error if we don't have a valid set of target features for the +// called function. +void CIRGenFunction::checkTargetFeatures(SourceLocation Loc, + const FunctionDecl *TargetDecl) { + // Early exit if this is an indirect call. + if (!TargetDecl) + return; + + // Get the current enclosing function if it exists. If it doesn't + // we can't check the target features anyhow. 
+ const FunctionDecl *FD = dyn_cast_or_null(CurCodeDecl); + if (!FD) + return; + + // Grab the required features for the call. For a builtin this is listed in + // the td file with the default cpu, for an always_inline function this is any + // listed cpu and any listed features. + unsigned BuiltinID = TargetDecl->getBuiltinID(); + std::string MissingFeature; + llvm::StringMap CallerFeatureMap; + CGM.getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD); + if (BuiltinID) { + StringRef FeatureList( + getContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); + if (!Builtin::evaluateRequiredTargetFeatures(FeatureList, + CallerFeatureMap)) { + CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature) + << TargetDecl->getDeclName() << FeatureList; + } + } else if (!TargetDecl->isMultiVersion() && + TargetDecl->hasAttr()) { + // Get the required features for the callee. + + const TargetAttr *TD = TargetDecl->getAttr(); + ParsedTargetAttr ParsedAttr = getContext().filterFunctionTargetAttrs(TD); + + SmallVector ReqFeatures; + llvm::StringMap CalleeFeatureMap; + getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); + + for (const auto &F : ParsedAttr.Features) { + if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1))) + ReqFeatures.push_back(StringRef(F).substr(1)); + } + + for (const auto &F : CalleeFeatureMap) { + // Only positive features are "required". + if (F.getValue()) + ReqFeatures.push_back(F.getKey()); + } + if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) { + if (!CallerFeatureMap.lookup(Feature)) { + MissingFeature = Feature.str(); + return false; + } + return true; + })) + CGM.getDiags().Report(Loc, diag::err_function_needs_feature) + << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature; + } else if (!FD->isMultiVersion() && FD->hasAttr()) { + llvm::StringMap CalleeFeatureMap; + getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); + + for (const auto &F : CalleeFeatureMap) { + if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) || + !CallerFeatureMap.find(F.getKey())->getValue())) + CGM.getDiags().Report(Loc, diag::err_function_needs_feature) + << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey(); + } + } +} + +CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) { + const VariableArrayType *vla = + CGM.getASTContext().getAsVariableArrayType(type); + assert(vla && "type was not a variable array type!"); + return getVLASize(vla); +} + +CIRGenFunction::VlaSizePair +CIRGenFunction::getVLASize(const VariableArrayType *type) { + // The number of elements so far; always size_t. + mlir::Value numElements; + + QualType elementType; + do { + elementType = type->getElementType(); + mlir::Value vlaSize = VLASizeMap[type->getSizeExpr()]; + assert(vlaSize && "no size for VLA!"); + assert(vlaSize.getType() == SizeTy); + + if (!numElements) { + numElements = vlaSize; + } else { + // It's undefined behavior if this wraps around, so mark it that way. + // FIXME: Teach -fsanitize=undefined to trap this. 
+ + numElements = builder.createMul(numElements, vlaSize); + } + } while ((type = getContext().getAsVariableArrayType(elementType))); + + assert(numElements && "Undefined elements number"); + return {numElements, elementType}; +} + +// TODO(cir): most part of this function can be shared between CIRGen +// and traditional LLVM codegen +void CIRGenFunction::buildVariablyModifiedType(QualType type) { + assert(type->isVariablyModifiedType() && + "Must pass variably modified type to EmitVLASizes!"); + + // We're going to walk down into the type and look for VLA + // expressions. + do { + assert(type->isVariablyModifiedType()); + + const Type *ty = type.getTypePtr(); + switch (ty->getTypeClass()) { + case clang::Type::CountAttributed: + case clang::Type::PackIndexing: + case clang::Type::ArrayParameter: + llvm_unreachable("NYI"); + +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_TYPE(Class, Base) +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("unexpected dependent type!"); + + // These types are never variably-modified. + case Type::Builtin: + case Type::Complex: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Record: + case Type::Enum: + case Type::Using: + case Type::TemplateSpecialization: + case Type::ObjCTypeParam: + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + case Type::BitInt: + llvm_unreachable("type class is never variably-modified!"); + + case Type::Elaborated: + type = cast(ty)->getNamedType(); + break; + + case Type::Adjusted: + type = cast(ty)->getAdjustedType(); + break; + + case Type::Decayed: + type = cast(ty)->getPointeeType(); + break; + + case Type::Pointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::BlockPointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::LValueReference: + case Type::RValueReference: + type = cast(ty)->getPointeeType(); + break; + + case Type::MemberPointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + // Losing element qualification here is fine. + type = cast(ty)->getElementType(); + break; + + case Type::VariableArray: { + // Losing element qualification here is fine. + const VariableArrayType *vat = cast(ty); + + // Unknown size indication requires no size computation. + // Otherwise, evaluate and record it. + if (const Expr *sizeExpr = vat->getSizeExpr()) { + // It's possible that we might have emitted this already, + // e.g. with a typedef and a pointer to it. + mlir::Value &entry = VLASizeMap[sizeExpr]; + if (!entry) { + mlir::Value size = buildScalarExpr(sizeExpr); + assert(!UnimplementedFeature::sanitizeVLABound()); + + // Always zexting here would be wrong if it weren't + // undefined behavior to have a negative bound. + // FIXME: What about when size's type is larger than size_t? + entry = builder.createIntCast(size, SizeTy); + } + } + type = vat->getElementType(); + break; + } + + case Type::FunctionProto: + case Type::FunctionNoProto: + type = cast(ty)->getReturnType(); + break; + + case Type::Paren: + case Type::TypeOf: + case Type::UnaryTransform: + case Type::Attributed: + case Type::BTFTagAttributed: + case Type::SubstTemplateTypeParm: + case Type::MacroQualified: + // Keep walking after single level desugaring. 
+ type = type.getSingleStepDesugaredType(getContext()); + break; + + case Type::Typedef: + case Type::Decltype: + case Type::Auto: + case Type::DeducedTemplateSpecialization: + // Stop walking: nothing to do. + return; + + case Type::TypeOfExpr: + // Stop walking: emit typeof expression. + buildIgnoredExpr(cast(ty)->getUnderlyingExpr()); + return; + + case Type::Atomic: + type = cast(ty)->getValueType(); + break; + + case Type::Pipe: + type = cast(ty)->getElementType(); + break; + } + } while (type->isVariablyModifiedType()); +} + +/// Computes the length of an array in elements, as well as the base +/// element type and a properly-typed first element pointer. +mlir::Value +CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, + QualType &baseType, Address &addr) { + const auto *arrayType = origArrayType; + + // If it's a VLA, we have to load the stored size. Note that + // this is the size of the VLA in bytes, not its size in elements. + mlir::Value numVLAElements{}; + if (isa(arrayType)) { + llvm_unreachable("NYI"); + } + + uint64_t countFromCLAs = 1; + QualType eltType; + + // llvm::ArrayType *llvmArrayType = + // dyn_cast(addr.getElementType()); + auto cirArrayType = addr.getElementType().dyn_cast(); + + while (cirArrayType) { + assert(isa(arrayType)); + countFromCLAs *= cirArrayType.getSize(); + eltType = arrayType->getElementType(); + + cirArrayType = cirArrayType.getEltType().dyn_cast(); + + arrayType = getContext().getAsArrayType(arrayType->getElementType()); + assert((!cirArrayType || arrayType) && + "CIR and Clang types are out-of-synch"); + } + + if (arrayType) { + // From this point onwards, the Clang array type has been emitted + // as some other type (probably a packed struct). Compute the array + // size, and just emit the 'begin' expression as a bitcast. + llvm_unreachable("NYI"); + } + + baseType = eltType; + auto numElements = builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs); + + // If we had any VLA dimensions, factor them in. + if (numVLAElements) + llvm_unreachable("NYI"); + + return numElements; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h new file mode 100644 index 000000000000..22b863ba0d7c --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -0,0 +1,2148 @@ +//===-- CIRGenFunction.h - Per-Function state for CIR gen -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-function state used for CIR translation. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H +#define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H + +#include "CIRGenBuilder.h" +#include "CIRGenCall.h" +#include "CIRGenModule.h" +#include "CIRGenTypeCache.h" +#include "CIRGenValue.h" +#include "EHScopeStack.h" + +#include "clang/AST/BaseSubobject.h" +#include "clang/AST/CurrentSourceLocExprScope.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/Type.h" +#include "clang/Basic/ABI.h" +#include "clang/Basic/TargetInfo.h" + +#include "mlir/IR/TypeRange.h" +#include "mlir/IR/Value.h" + +namespace clang { +class Expr; +} // namespace clang + +namespace mlir { +namespace func { +class CallOp; +} +} // namespace mlir + +namespace { +class ScalarExprEmitter; +class AggExprEmitter; +} // namespace + +namespace cir { + +// FIXME: for now we are reusing this from lib/Clang/CIRGenFunction.h, which +// isn't available in the include dir. Same for getEvaluationKind below. +enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; +struct CGCoroData; + +class CIRGenFunction : public CIRGenTypeCache { +public: + CIRGenModule &CGM; + +private: + friend class ::ScalarExprEmitter; + friend class ::AggExprEmitter; + + /// The builder is a helper class to create IR inside a function. The + /// builder is stateful, in particular it keeps an "insertion point": this + /// is where the next operations will be introduced. + CIRGenBuilderTy &builder; + + /// ------- + /// Goto + /// ------- + + /// A jump destination is an abstract label, branching to which may + /// require a jump out through normal cleanups. + struct JumpDest { + JumpDest() = default; + JumpDest(mlir::Block *Block) : Block(Block) {} + + bool isValid() const { return Block != nullptr; } + mlir::Block *getBlock() const { return Block; } + mlir::Block *Block = nullptr; + }; + + /// Track mlir Blocks for each C/C++ label. + llvm::DenseMap LabelMap; + JumpDest &getJumpDestForLabel(const clang::LabelDecl *D); + + // --------------------- + // Opaque value handling + // --------------------- + + /// Keeps track of the current set of opaque value expressions. + llvm::DenseMap OpaqueLValues; + llvm::DenseMap OpaqueRValues; + + // This keeps track of the associated size for each VLA type. + // We track this by the size expression rather than the type itself because + // in certain situations, like a const qualifier applied to an VLA typedef, + // multiple VLA types can share the same size expression. + // FIXME: Maybe this could be a stack of maps that is pushed/popped as we + // enter/leave scopes. + llvm::DenseMap VLASizeMap; + +public: + /// A non-RAII class containing all the information about a bound + /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for + /// this which makes individual mappings very simple; using this + /// class directly is useful when you have a variable number of + /// opaque values or don't want the RAII functionality for some + /// reason. + class OpaqueValueMappingData { + const OpaqueValueExpr *OpaqueValue; + bool BoundLValue; + + OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue) + : OpaqueValue(ov), BoundLValue(boundLValue) {} + + public: + OpaqueValueMappingData() : OpaqueValue(nullptr) {} + + static bool shouldBindAsLValue(const Expr *expr) { + // gl-values should be bound as l-values for obvious reasons. + // Records should be bound as l-values because IR generation + // always keeps them in memory. 
Expressions of function type + // act exactly like l-values but are formally required to be + // r-values in C. + return expr->isGLValue() || expr->getType()->isFunctionType() || + hasAggregateEvaluationKind(expr->getType()); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) { + if (shouldBindAsLValue(ov)) + return bind(CGF, ov, CGF.buildLValue(e)); + return bind(CGF, ov, CGF.buildAnyExpr(e)); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) { + assert(shouldBindAsLValue(ov)); + CGF.OpaqueLValues.insert(std::make_pair(ov, lv)); + return OpaqueValueMappingData(ov, true); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) { + assert(!shouldBindAsLValue(ov)); + CGF.OpaqueRValues.insert(std::make_pair(ov, rv)); + + OpaqueValueMappingData data(ov, false); + + // Work around an extremely aggressive peephole optimization in + // EmitScalarConversion which assumes that all other uses of a + // value are extant. + assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + return data; + } + + bool isValid() const { return OpaqueValue != nullptr; } + void clear() { OpaqueValue = nullptr; } + + void unbind(CIRGenFunction &CGF) { + assert(OpaqueValue && "no data to unbind!"); + + if (BoundLValue) { + CGF.OpaqueLValues.erase(OpaqueValue); + } else { + CGF.OpaqueRValues.erase(OpaqueValue); + assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + } + } + }; + + /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr. + class OpaqueValueMapping { + CIRGenFunction &CGF; + OpaqueValueMappingData Data; + + public: + static bool shouldBindAsLValue(const Expr *expr) { + return OpaqueValueMappingData::shouldBindAsLValue(expr); + } + + /// Build the opaque value mapping for the given conditional + /// operator if it's the GNU ?: extension. This is a common + /// enough pattern that the convenience operator is really + /// helpful. + /// + OpaqueValueMapping(CIRGenFunction &CGF, + const AbstractConditionalOperator *op) + : CGF(CGF) { + if (isa(op)) + // Leave Data empty. + return; + + const BinaryConditionalOperator *e = cast(op); + Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(), + e->getCommon()); + } + + /// Build the opaque value mapping for an OpaqueValueExpr whose source + /// expression is set to the expression the OVE represents. + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *OV) + : CGF(CGF) { + if (OV) { + assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used " + "for OVE with no source expression"); + Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr()); + } + } + + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *opaqueValue, + LValue lvalue) + : CGF(CGF), + Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {} + + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *opaqueValue, + RValue rvalue) + : CGF(CGF), + Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {} + + void pop() { + Data.unbind(CGF); + Data.clear(); + } + + ~OpaqueValueMapping() { + if (Data.isValid()) + Data.unbind(CGF); + } + }; + +private: + /// Declare a variable in the current scope, return success if the variable + /// wasn't declared yet. 
+ mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::Value &addr, bool isParam = false); + + /// Declare a variable in the current scope but take an Address as input. + mlir::LogicalResult declare(Address addr, const clang::Decl *var, + clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment, mlir::Value &addrVal, + bool isParam = false); + +public: + // FIXME(cir): move this to CIRGenBuilder.h + mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize = nullptr); + +private: + void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); + + // Track current variable initialization (if there's one). + const clang::VarDecl *currVarDecl = nullptr; + class VarDeclContext { + CIRGenFunction &P; + const clang::VarDecl *OldVal = nullptr; + + public: + VarDeclContext(CIRGenFunction &p, const VarDecl *Value) : P(p) { + if (P.currVarDecl) + OldVal = P.currVarDecl; + P.currVarDecl = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currVarDecl = OldVal; } + ~VarDeclContext() { restore(); } + }; + + /// ------- + /// Source Location tracking + /// ------- + +public: + /// Use to track source locations across nested visitor traversals. + /// Always use a `SourceLocRAIIObject` to change currSrcLoc. + std::optional currSrcLoc; + class SourceLocRAIIObject { + CIRGenFunction &P; + std::optional OldVal; + + public: + SourceLocRAIIObject(CIRGenFunction &p, mlir::Location Value) : P(p) { + if (P.currSrcLoc) + OldVal = P.currSrcLoc; + P.currSrcLoc = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currSrcLoc = OldVal; } + ~SourceLocRAIIObject() { restore(); } + }; + + using SymTableScopeTy = + llvm::ScopedHashTableScope; + + /// Try/Catch: calls within try statements need to refer to local + /// allocas for the exception info. + struct CIRExceptionInfo { + mlir::Value addr{}; + mlir::cir::CatchOp catchOp{}; + }; + + enum class EvaluationOrder { + ///! No language constraints on evaluation order. + Default, + ///! Language semantics require left-to-right evaluation. + ForceLeftToRight, + ///! Language semantics require right-to-left evaluation. + ForceRightToLeft + }; + + /// Situations in which we might emit a check for the suitability of a pointer + /// or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in + /// compiler-rt. + enum TypeCheckKind { + /// Checking the operand of a load. Must be suitably sized and aligned. + TCK_Load, + /// Checking the destination of a store. Must be suitably sized and aligned. + TCK_Store, + /// Checking the bound value in a reference binding. Must be suitably sized + /// and aligned, but is not required to refer to an object (until the + /// reference is used), per core issue 453. + TCK_ReferenceBinding, + /// Checking the object expression in a non-static data member access. Must + /// be an object within its lifetime.
TCK_MemberAccess, + /// Checking the 'this' pointer for a call to a non-static member function. + /// Must be an object within its lifetime. + TCK_MemberCall, + /// Checking the 'this' pointer for a constructor call. + TCK_ConstructorCall, + /// Checking the operand of a dynamic_cast or a typeid expression. Must be + /// null or an object within its lifetime. + TCK_DynamicOperation + }; + + // Holds coroutine data if the current function is a coroutine. We use a + // wrapper to manage its lifetime, so that we don't have to define CGCoroData + // in this header. + struct CGCoroInfo { + std::unique_ptr Data; + CGCoroInfo(); + ~CGCoroInfo(); + }; + CGCoroInfo CurCoro; + + bool isCoroutine() const { return CurCoro.Data != nullptr; } + + /// The GlobalDecl for the current function being compiled. + clang::GlobalDecl CurGD; + + /// Unified return block. + /// Note that for LLVM codegen this is a member variable instead. + JumpDest ReturnBlock() { + return JumpDest(currLexScope->getOrCreateCleanupBlock(builder)); + } + + /// The temporary alloca to hold the return value. This is + /// invalid iff the function has no return value. + Address ReturnValue = Address::invalid(); + + /// Tracks function scope overall cleanup handling. + EHScopeStack EHStack; + llvm::SmallVector LifetimeExtendedCleanupStack; + + /// A mapping from NRVO variables to the flags used to indicate + /// when the NRVO has been applied to this variable. + llvm::DenseMap NRVOFlags; + + /// Counts the number of return expressions in the function. + unsigned NumReturnExprs = 0; + + clang::QualType FnRetQualTy; + std::optional FnRetCIRTy; + std::optional FnRetAlloca; + + llvm::DenseMap + LambdaCaptureFields; + clang::FieldDecl *LambdaThisCaptureField = nullptr; + + void buildForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, + CallArgList &CallArgs); + void buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); + void buildLambdaStaticInvokeBody(const CXXMethodDecl *MD); + + LValue buildPredefinedLValue(const PredefinedExpr *E); + + /// When generating code for a C++ member function, this will + /// hold the implicit 'this' declaration. + clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; + mlir::Value CXXABIThisValue = nullptr; + mlir::Value CXXThisValue = nullptr; + clang::CharUnits CXXABIThisAlignment; + clang::CharUnits CXXThisAlignment; + + /// When generating code for a constructor or destructor, this will hold the + /// implicit argument (e.g. VTT). + ImplicitParamDecl *CXXStructorImplicitParamDecl{}; + mlir::Value CXXStructorImplicitParamValue{}; + + /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this + /// expression. + Address CXXDefaultInitExprThis = Address::invalid(); + + // Holds the Decl for the current outermost non-closure context. + const clang::Decl *CurFuncDecl = nullptr; + /// This is the inner-most code context, which includes blocks. + const clang::Decl *CurCodeDecl; + const CIRGenFunctionInfo *CurFnInfo; + clang::QualType FnRetTy; + + /// This is the current function or global initializer that code is + /// currently being generated for. + mlir::Operation *CurFn = nullptr; + + /// Save Parameter Decl for coroutine. + llvm::SmallVector FnArgs; + + // The CallExpr within the current statement that the musttail attribute + // applies to. nullptr if there is no 'musttail' on the current statement.
+  const clang::CallExpr *MustTailCall = nullptr;
+
+  clang::ASTContext &getContext() const;
+
+  CIRGenBuilderTy &getBuilder() { return builder; }
+
+  CIRGenModule &getCIRGenModule() { return CGM; }
+  const CIRGenModule &getCIRGenModule() const { return CGM; }
+
+  mlir::Block *getCurFunctionEntryBlock() {
+    auto Fn = dyn_cast<mlir::cir::FuncOp>(CurFn);
+    assert(Fn && "other callables NYI");
+    return &Fn.getRegion().front();
+  }
+
+  /// Sanitizers enabled for this function.
+  clang::SanitizerSet SanOpts;
+
+  class CIRGenFPOptionsRAII {
+  public:
+    CIRGenFPOptionsRAII(CIRGenFunction &CGF, FPOptions FPFeatures);
+    CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E);
+    ~CIRGenFPOptionsRAII();
+
+  private:
+    void ConstructorHelper(clang::FPOptions FPFeatures);
+    CIRGenFunction &CGF;
+    clang::FPOptions OldFPFeatures;
+    fp::ExceptionBehavior OldExcept;
+    llvm::RoundingMode OldRounding;
+  };
+  clang::FPOptions CurFPFeatures;
+
+  /// The symbol table maps a variable name to a value in the current scope.
+  /// Entering a function creates a new scope, and the function arguments are
+  /// added to the mapping. When the processing of a function is terminated,
+  /// the scope is destroyed and the mappings created in this scope are
+  /// dropped.
+  using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
+  SymTableTy symbolTable;
+  /// True if we need to emit the life-time markers. This is initially set in
+  /// the constructor, but could be overwritten to true if this is a coroutine.
+  bool ShouldEmitLifetimeMarkers;
+
+  using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
+  /// This keeps track of the CIR allocas or globals for local C
+  /// decls.
+  DeclMapTy LocalDeclMap;
+
+  /// Whether llvm.stacksave has been called. Used to avoid
+  /// calling llvm.stacksave for multiple VLAs in the same scope.
+  /// TODO: Translate to MLIR
+  bool DidCallStackSave = false;
+
+  /// Whether we processed a Microsoft-style asm block during CIRGen. These can
+  /// potentially set the return value.
+  bool SawAsmBlock = false;
+
+  /// True if CodeGen currently emits code inside preserved access index
+  /// region.
+  bool IsInPreservedAIRegion = false;
+
+  /// In C++, whether we are code generating a thunk. This controls whether we
+  /// should emit cleanups.
+  bool CurFuncIsThunk = false;
+
+  /// Hold counters for incrementally naming temporaries
+  unsigned CounterRefTmp = 0;
+  unsigned CounterAggTmp = 0;
+  std::string getCounterRefTmpAsString();
+  std::string getCounterAggTmpAsString();
+
+  mlir::Type convertTypeForMem(QualType T);
+
+  mlir::Type ConvertType(clang::QualType T);
+  mlir::Type ConvertType(const TypeDecl *T) {
+    return ConvertType(getContext().getTypeDeclType(T));
+  }
+
+  /// Return the TypeEvaluationKind of QualType \c T.
+  static TypeEvaluationKind getEvaluationKind(clang::QualType T);
+
+  static bool hasScalarEvaluationKind(clang::QualType T) {
+    return getEvaluationKind(T) == TEK_Scalar;
+  }
+
+  static bool hasAggregateEvaluationKind(clang::QualType T) {
+    return getEvaluationKind(T) == TEK_Aggregate;
+  }
+
+  CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder,
+                 bool suppressNewContext = false);
+
+  CIRGenTypes &getTypes() const { return CGM.getTypes(); }
+
+  const TargetInfo &getTarget() const { return CGM.getTarget(); }
+
+  const TargetCIRGenInfo &getTargetHooks() const {
+    return CGM.getTargetCIRGenInfo();
+  }
+
+  /// Helpers to convert Clang's SourceLocation to an MLIR Location.
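+  /// A typical use from a visitor, assuming `E` is the expression currently
+  /// being emitted (illustrative sketch):
+  /// \code
+  ///   mlir::Location loc = getLoc(E->getSourceRange());
+  ///   // ... create CIR operations at `loc` through the builder.
+  /// \endcode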
+  mlir::Location getLoc(clang::SourceLocation SLoc);
+
+  mlir::Location getLoc(clang::SourceRange SLoc);
+
+  mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
+
+  const clang::LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
+
+  // TODO: This is currently just a dumb stub. But we want to be able to
+  // clearly assert where we aren't doing things that we know we should and
+  // will crash as soon as we add a DebugInfo type to this class.
+  std::nullptr_t *getDebugInfo() { return nullptr; }
+
+  void buildReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty);
+
+  /// Set the address of a local variable.
+  void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) {
+    assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
+    LocalDeclMap.insert({VD, Addr});
+    // Add to the symbol table if not there already.
+    if (symbolTable.count(VD))
+      return;
+    symbolTable.insert(VD, Addr.getPointer());
+  }
+
+  /// True if an insertion point is defined. If not, this indicates that the
+  /// current code being emitted is unreachable.
+  /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
+  /// since we don't yet force null insertion point to designate behavior (like
+  /// LLVM's codegen does) and we probably shouldn't.
+  bool HaveInsertPoint() const {
+    return builder.getInsertionBlock() != nullptr;
+  }
+
+  /// Whether any type-checking sanitizers are enabled. If \c false, calls to
+  /// buildTypeCheck can be skipped.
+  bool sanitizePerformTypeCheck() const;
+
+  void buildTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc,
+                      mlir::Value V, clang::QualType Type,
+                      clang::CharUnits Alignment = clang::CharUnits::Zero(),
+                      clang::SanitizerSet SkippedChecks = clang::SanitizerSet(),
+                      std::optional<mlir::Value> ArraySize = std::nullopt);
+
+  void buildAggExpr(const clang::Expr *E, AggValueSlot Slot);
+
+  /// Emits a reference binding to the passed in expression.
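+  /// For instance, when emitting `const int &r = x;`, the initializer `x`
+  /// is routed through this helper (sketch; `Init` names that expression):
+  /// \code
+  ///   RValue RV = buildReferenceBindingToExpr(Init);
+  /// \endcode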
+  RValue buildReferenceBindingToExpr(const Expr *E);
+
+  LValue buildCastLValue(const CastExpr *E);
+
+  void buildCXXConstructExpr(const clang::CXXConstructExpr *E,
+                             AggValueSlot Dest);
+
+  void buildCXXConstructorCall(const clang::CXXConstructorDecl *D,
+                               clang::CXXCtorType Type, bool ForVirtualBase,
+                               bool Delegating, AggValueSlot ThisAVS,
+                               const clang::CXXConstructExpr *E);
+
+  void buildCXXConstructorCall(const clang::CXXConstructorDecl *D,
+                               clang::CXXCtorType Type, bool ForVirtualBase,
+                               bool Delegating, Address This, CallArgList &Args,
+                               AggValueSlot::Overlap_t Overlap,
+                               clang::SourceLocation Loc,
+                               bool NewPointerIsChecked);
+
+  RValue buildCXXMemberOrOperatorCall(
+      const clang::CXXMethodDecl *Method, const CIRGenCallee &Callee,
+      ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam,
+      clang::QualType ImplicitParamTy, const clang::CallExpr *E,
+      CallArgList *RtlArgs);
+
+  RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E,
+                                ReturnValueSlot ReturnValue);
+  RValue buildCXXMemberOrOperatorMemberCallExpr(
+      const clang::CallExpr *CE, const clang::CXXMethodDecl *MD,
+      ReturnValueSlot ReturnValue, bool HasQualifier,
+      clang::NestedNameSpecifier *Qualifier, bool IsArrow,
+      const clang::Expr *Base);
+  RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+                                        const CXXMethodDecl *MD,
+                                        ReturnValueSlot ReturnValue);
+  void buildNullInitialization(mlir::Location loc, Address DestPtr,
+                               QualType Ty);
+  bool shouldNullCheckClassCastValue(const CastExpr *CE);
+
+  void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+                         Address Ptr);
+  mlir::Value buildCXXNewExpr(const CXXNewExpr *E);
+  void buildCXXDeleteExpr(const CXXDeleteExpr *E);
+
+  void buildCXXAggrConstructorCall(const CXXConstructorDecl *D,
+                                   const clang::ArrayType *ArrayTy,
+                                   Address ArrayPtr, const CXXConstructExpr *E,
+                                   bool NewPointerIsChecked,
+                                   bool ZeroInitialization = false);
+
+  void buildCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+                                   mlir::Value numElements, Address arrayBase,
+                                   const CXXConstructExpr *E,
+                                   bool NewPointerIsChecked,
+                                   bool zeroInitialize);
+
+  /// Compute the length of an array, even if it's a VLA, and drill down to the
+  /// base element type.
+  mlir::Value buildArrayLength(const clang::ArrayType *arrayType,
+                               QualType &baseType, Address &addr);
+
+  void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr,
+                       QualType DeleteTy, mlir::Value NumElements = nullptr,
+                       CharUnits CookieSize = CharUnits());
+
+  mlir::Value buildDynamicCast(Address ThisAddr,
+                               const CXXDynamicCastExpr *DCE);
+
+  mlir::Value createLoad(const clang::VarDecl *VD, const char *Name);
+
+  mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+                                       bool isInc, bool isPre);
+
+  // Wrapper for function prototype sources. Wraps either a FunctionProtoType
+  // or an ObjCMethodDecl.
+  struct PrototypeWrapper {
+    llvm::PointerUnion<const clang::FunctionProtoType *,
+                       const clang::ObjCMethodDecl *>
+        P;
+
+    PrototypeWrapper(const clang::FunctionProtoType *FT) : P(FT) {}
+    PrototypeWrapper(const clang::ObjCMethodDecl *MD) : P(MD) {}
+  };
+
+  bool LValueIsSuitableForInlineAtomic(LValue Src);
+
+  /// An abstract representation of regular/ObjC call/message targets.
+  class AbstractCallee {
+    /// The function declaration of the callee.
+    const clang::Decl *CalleeDecl;
+
+  public:
+    AbstractCallee() : CalleeDecl(nullptr) {}
+    AbstractCallee(const clang::FunctionDecl *FD) : CalleeDecl(FD) {}
+    AbstractCallee(const clang::ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
+    bool hasFunctionDecl() const {
+      return llvm::isa_and_nonnull<clang::FunctionDecl>(CalleeDecl);
+    }
+    const clang::Decl *getDecl() const { return CalleeDecl; }
+    unsigned getNumParams() const {
+      if (const auto *FD = llvm::dyn_cast<clang::FunctionDecl>(CalleeDecl))
+        return FD->getNumParams();
+      return llvm::cast<clang::ObjCMethodDecl>(CalleeDecl)->param_size();
+    }
+    const clang::ParmVarDecl *getParamDecl(unsigned I) const {
+      if (const auto *FD = llvm::dyn_cast<clang::FunctionDecl>(CalleeDecl))
+        return FD->getParamDecl(I);
+      return *(llvm::cast<clang::ObjCMethodDecl>(CalleeDecl)->param_begin() +
+               I);
+    }
+  };
+
+  RValue convertTempToRValue(Address addr, clang::QualType type,
+                             clang::SourceLocation Loc);
+
+  /// If a ParmVarDecl had the pass_object_size attribute, this
+  /// will contain a mapping from said ParmVarDecl to its implicit
+  /// "object_size" parameter.
+  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
+      SizeArguments;
+
+  // Build a "reference" to a va_list; this is either the address or the value
+  // of the expression, depending on how va_list is defined.
+  Address buildVAListRef(const Expr *E);
+
+  /// Emits a CIR variable-argument operation, either
+  /// \c cir.va.start or \c cir.va.end.
+  ///
+  /// \param ArgValue A reference to the \c va_list as emitted by either
+  /// \c buildVAListRef or \c buildMSVAListRef.
+  ///
+  /// \param IsStart If \c true, emits \c cir.va.start, otherwise \c cir.va.end.
+  void buildVAStartEnd(mlir::Value ArgValue, bool IsStart);
+
+  /// Generate code to get an argument from the passed in pointer
+  /// and update it accordingly.
+  ///
+  /// \param VE The \c VAArgExpr for which to generate code.
+  ///
+  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
+  /// either \c buildVAListRef or \c buildMSVAListRef.
+  ///
+  /// \returns SSA value with the argument.
+  mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr);
+
+  void buildVariablyModifiedType(QualType Ty);
+
+  struct VlaSizePair {
+    mlir::Value NumElts;
+    QualType Type;
+
+    VlaSizePair(mlir::Value NE, QualType T) : NumElts(NE), Type(T) {}
+  };
+
+  /// Returns an MLIR value that corresponds to the size,
+  /// in non-variably-sized elements, of a variable length array type,
+  /// plus that largest non-variably-sized element type. Assumes that
+  /// the type has already been emitted with buildVariablyModifiedType.
+  VlaSizePair getVLASize(const VariableArrayType *vla);
+  VlaSizePair getVLASize(QualType vla);
+
+  mlir::Value emitBuiltinObjectSize(const Expr *E, unsigned Type,
+                                    mlir::cir::IntType ResType,
+                                    mlir::Value EmittedE, bool IsDynamic);
+  mlir::Value evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
+                                              mlir::cir::IntType ResType,
+                                              mlir::Value EmittedE,
+                                              bool IsDynamic);
+
+  /// Given an expression that represents a value lvalue, this method emits
+  /// the address of the lvalue, then loads the result as an rvalue,
+  /// returning the rvalue.
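+  /// The usual two-step emission looks roughly like (sketch; `E` assumed to
+  /// be a scalar expression):
+  /// \code
+  ///   LValue LV = buildLValue(E);
+  ///   RValue RV = buildLoadOfLValue(LV, E->getExprLoc());
+  /// \endcode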
+  RValue buildLoadOfLValue(LValue LV, SourceLocation Loc);
+  mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty,
+                                clang::SourceLocation Loc,
+                                LValueBaseInfo BaseInfo,
+                                bool isNontemporal = false);
+  mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty,
+                                mlir::Location Loc, LValueBaseInfo BaseInfo,
+                                bool isNontemporal = false);
+
+  RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
+
+  /// Load a scalar value from an address, taking care to appropriately convert
+  /// from the memory representation to CIR value representation.
+  mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty,
+                                clang::SourceLocation Loc,
+                                AlignmentSource Source = AlignmentSource::Type,
+                                bool isNontemporal = false) {
+    return buildLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
+                             isNontemporal);
+  }
+
+  /// Load a scalar value from an address, taking care to appropriately convert
+  /// from the memory representation to the CIR value representation. The
+  /// l-value must be a simple l-value.
+  mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc);
+  mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc);
+
+  Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc,
+                               LValueBaseInfo *PointeeBaseInfo = nullptr);
+  LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc);
+  LValue
+  buildLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc,
+                             QualType RefTy,
+                             AlignmentSource Source = AlignmentSource::Type) {
+    LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source));
+    return buildLoadOfReferenceLValue(RefLVal, Loc);
+  }
+  void buildImplicitAssignmentOperatorBody(FunctionArgList &Args);
+
+  void buildAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile);
+
+  void buildCallArgs(
+      CallArgList &Args, PrototypeWrapper Prototype,
+      llvm::iterator_range<clang::CallExpr::const_arg_iterator> ArgRange,
+      AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0,
+      EvaluationOrder Order = EvaluationOrder::Default);
+
+  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
+  void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
+
+  LValue buildStmtExprLValue(const StmtExpr *E);
+
+  LValue buildPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+
+  /// TODO: Add TBAAAccessInfo
+  Address buildCXXMemberDataPointerAddress(
+      const Expr *E, Address base, mlir::Value memberPtr,
+      const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
+
+  /// Generate a call of the given function, expecting the given
+  /// result type, and using the given argument list which specifies both the
+  /// LLVM arguments and the types they were derived from.
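+  /// A rough sketch of the common sequence, assuming the callee `Callee`,
+  /// its CIRGenFunctionInfo `FnInfo` and prototype `FPT` were already
+  /// computed for the CallExpr `E`:
+  /// \code
+  ///   CallArgList Args;
+  ///   buildCallArgs(Args, PrototypeWrapper(FPT), E->arguments());
+  ///   RValue RV = buildCall(FnInfo, Callee, ReturnValueSlot(), Args);
+  /// \endcode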
+  RValue buildCall(const CIRGenFunctionInfo &CallInfo,
+                   const CIRGenCallee &Callee, ReturnValueSlot ReturnValue,
+                   const CallArgList &Args,
+                   mlir::cir::CIRCallOpInterface *callOrTryCall,
+                   bool IsMustTail, mlir::Location loc,
+                   std::optional E = std::nullopt);
+  RValue buildCall(const CIRGenFunctionInfo &CallInfo,
+                   const CIRGenCallee &Callee, ReturnValueSlot ReturnValue,
+                   const CallArgList &Args,
+                   mlir::cir::CIRCallOpInterface *callOrTryCall = nullptr,
+                   bool IsMustTail = false) {
+    assert(currSrcLoc && "source location must have been set");
+    return buildCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall,
+                     IsMustTail, *currSrcLoc, std::nullopt);
+  }
+  RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee,
+                   const clang::CallExpr *E, ReturnValueSlot returnValue,
+                   mlir::Value Chain = nullptr);
+
+  RValue buildCallExpr(const clang::CallExpr *E,
+                       ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+  mlir::Value buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee,
+                               ArrayRef<mlir::Value> args = {});
+
+  /// Create a check for a function parameter that may potentially be
+  /// declared as non-null.
+  void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
+                            AbstractCallee AC, unsigned ParmNum);
+
+  void buildCallArg(CallArgList &args, const clang::Expr *E,
+                    clang::QualType ArgType);
+
+  LValue buildCallExprLValue(const CallExpr *E);
+
+  /// Similarly to buildAnyExpr(), however, the result will always be
+  /// accessible even if no aggregate location is provided.
+  RValue buildAnyExprToTemp(const clang::Expr *E);
+
+  CIRGenCallee buildCallee(const clang::Expr *E);
+
+  void finishFunction(SourceLocation EndLoc);
+
+  /// Emit code to compute the specified expression which can have any type.
+  /// The result is returned as an RValue struct. If this is an aggregate
+  /// expression, the aggloc/agglocvolatile arguments indicate where the result
+  /// should be returned.
+  RValue buildAnyExpr(const clang::Expr *E,
+                      AggValueSlot aggSlot = AggValueSlot::ignored(),
+                      bool ignoreResult = false);
+
+  mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body);
+  mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S);
+  mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S);
+
+  mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc,
+                                           mlir::Value nullPtr);
+  mlir::cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc);
+  mlir::cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc,
+                                              mlir::Value coroframeAddr);
+  mlir::cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc,
+                                            mlir::Value nullPtr);
+
+  RValue buildCoawaitExpr(const CoawaitExpr &E,
+                          AggValueSlot aggSlot = AggValueSlot::ignored(),
+                          bool ignoreResult = false);
+  RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
+  RValue buildCoroutineFrame();
+
+  /// Build a debug stoppoint if we are emitting debug info.
+  void buildStopPoint(const Stmt *S);
+
+  // Build CIR for a statement. useCurrentScope should be true if no
+  // new scopes need be created when finding a compound statement.
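+  // For example, the direct children of a function body are emitted into the
+  // scope that already exists (sketch):
+  //   (void)buildStmt(S, /*useCurrentScope=*/true);
+  // whereas a nested CompoundStmt typically gets its own cir.scope.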
+  mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope,
+                                ArrayRef<const Attr *> Attrs = std::nullopt);
+
+  mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S,
+                                      bool useCurrentScope);
+
+  mlir::LogicalResult buildForStmt(const clang::ForStmt &S);
+  mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S);
+  mlir::LogicalResult buildDoStmt(const clang::DoStmt &S);
+  mlir::LogicalResult
+  buildCXXForRangeStmt(const CXXForRangeStmt &S,
+                       ArrayRef<const Attr *> Attrs = std::nullopt);
+  mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S);
+
+  mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S);
+  mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S);
+  void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::CatchOp catchOp,
+                       bool IsFnTryBlock = false);
+  void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+
+  Address buildCompoundStmt(const clang::CompoundStmt &S, bool getLast = false,
+                            AggValueSlot slot = AggValueSlot::ignored());
+
+  Address
+  buildCompoundStmtWithoutScope(const clang::CompoundStmt &S,
+                                bool getLast = false,
+                                AggValueSlot slot = AggValueSlot::ignored());
+  GlobalDecl CurSEHParent;
+  bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
+
+  /// Returns true inside SEH __try blocks.
+  bool isSEHTryScope() const { return UnimplementedFeature::isSEHTryScope(); }
+
+  mlir::Operation *CurrentFuncletPad = nullptr;
+
+  /// Returns true while emitting a cleanuppad.
+  bool isCleanupPadScope() const {
+    assert(!CurrentFuncletPad && "NYI");
+    return false;
+  }
+
+  /// Return a landing pad that just calls terminate.
+  mlir::Operation *getTerminateLandingPad();
+
+  /// Emit code to compute the specified expression,
+  /// ignoring the result.
+  void buildIgnoredExpr(const clang::Expr *E);
+
+  LValue buildArraySubscriptExpr(const clang::ArraySubscriptExpr *E,
+                                 bool Accessed = false);
+
+  mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S);
+
+  /// Determine whether a return value slot may overlap some other object.
+  AggValueSlot::Overlap_t getOverlapForReturnValue() {
+    // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
+    // class subobjects. These cases may need to be revisited depending on the
+    // resolution of the relevant core issue.
+    return AggValueSlot::DoesNotOverlap;
+  }
+
+  /// Determine whether a base class initialization may overlap some other
+  /// object.
+  AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
+                                                const CXXRecordDecl *BaseRD,
+                                                bool IsVirtual);
+
+  /// Get an appropriate 'undef' rvalue for the given type.
+  /// TODO: What's the equivalent for MLIR? Currently we're only using this for
+  /// void types so it just returns RValue::get(nullptr) but it'll need to be
+  /// addressed later.
+  RValue GetUndefRValue(clang::QualType Ty);
+
+  mlir::Value buildFromMemory(mlir::Value Value, clang::QualType Ty);
+
+  mlir::Type convertType(clang::QualType T);
+
+  mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S);
+
+  std::pair<mlir::Value, mlir::Type>
+  buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+                      QualType InputType, std::string &ConstraintStr,
+                      SourceLocation Loc);
+
+  std::pair<mlir::Value, mlir::Type>
+  buildAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+                std::string &ConstraintStr);
+
+  mlir::LogicalResult buildIfStmt(const clang::IfStmt &S);
+
+  mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S);
+
+  mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S);
+
+  mlir::LogicalResult buildLabel(const clang::LabelDecl *D);
+  mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S);
+
+  mlir::LogicalResult buildAttributedStmt(const AttributedStmt &S);
+
+  mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S);
+  mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S);
+
+  // OpenMP gen functions:
+  mlir::LogicalResult buildOMPParallelDirective(const OMPParallelDirective &S);
+
+  LValue buildOpaqueValueLValue(const OpaqueValueExpr *e);
+
+  /// Emit code to compute a designator that specifies the location
+  /// of the expression.
+  /// FIXME: document this function better.
+  LValue buildLValue(const clang::Expr *E);
+
+  void buildDecl(const clang::Decl &D);
+
+  /// If the specified expression does not fold to a constant, or if it does
+  /// but contains a label, return false. If it constant folds return true and
+  /// set the boolean result in Result.
+  bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, bool &ResultBool,
+                                    bool AllowLabels = false);
+  bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond,
+                                    llvm::APSInt &ResultInt,
+                                    bool AllowLabels = false);
+
+  /// Return true if the statement contains a label in it. If
+  /// this statement is not executed normally, it not containing a label means
+  /// that we can just remove the code.
+  bool ContainsLabel(const clang::Stmt *S, bool IgnoreCaseStmts = false);
+
+  /// Emit an if on a boolean condition to the specified blocks.
+  /// FIXME: Based on the condition, this might try to simplify the codegen of
+  /// the conditional based on the branch. TrueCount should be the number of
+  /// times we expect the condition to evaluate to true based on PGO data. We
+  /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr
+  /// for extra ideas).
+  mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond,
+                                        const clang::Stmt *thenS,
+                                        const clang::Stmt *elseS);
+  mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond,
+                                     mlir::Location loc,
+                                     const clang::Stmt *thenS,
+                                     const clang::Stmt *elseS);
+  mlir::Value buildOpOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
+                                const clang::Stmt *thenS,
+                                const clang::Stmt *elseS);
+
+  class ConstantEmission {
+    // Cannot use mlir::TypedAttr directly here because of bit availability.
+    llvm::PointerIntPair<mlir::Attribute, 1, bool> ValueAndIsReference;
+    ConstantEmission(mlir::TypedAttr C, bool isReference)
+        : ValueAndIsReference(C, isReference) {}
+
+  public:
+    ConstantEmission() {}
+    static ConstantEmission forReference(mlir::TypedAttr C) {
+      return ConstantEmission(C, true);
+    }
+    static ConstantEmission forValue(mlir::TypedAttr C) {
+      return ConstantEmission(C, false);
+    }
+
+    explicit operator bool() const {
+      return ValueAndIsReference.getOpaqueValue() != nullptr;
+    }
+
+    bool isReference() const { return ValueAndIsReference.getInt(); }
+    LValue getReferenceLValue(CIRGenFunction &CGF, Expr *refExpr) const {
+      assert(isReference());
+      // create(loc, ty, getZeroAttr(ty));
+      // CGF.getBuilder().const
+      // return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+      //                                       refExpr->getType());
+      llvm_unreachable("NYI");
+    }
+
+    mlir::TypedAttr getValue() const {
+      assert(!isReference());
+      return ValueAndIsReference.getPointer().cast<mlir::TypedAttr>();
+    }
+  };
+
+  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
+
+  /// Emit the computation of the specified expression of scalar type,
+  /// ignoring the result.
+  mlir::Value buildScalarExpr(const clang::Expr *E);
+  mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E);
+
+  mlir::Type getCIRType(const clang::QualType &type);
+
+  const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType,
+                               SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  template <typename T>
+  mlir::LogicalResult
+  buildCaseDefaultCascade(const T *stmt, mlir::Type condType,
+                          SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S,
+                                    mlir::Type condType,
+                                    SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  mlir::LogicalResult
+  buildDefaultStmt(const clang::DefaultStmt &S, mlir::Type condType,
+                   SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  mlir::LogicalResult
+  buildSwitchCase(const clang::SwitchCase &S, mlir::Type condType,
+                  SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  mlir::LogicalResult
+  buildSwitchBody(const clang::Stmt *S, mlir::Type condType,
+                  SmallVector<mlir::Attribute, 4> &caseAttrs);
+
+  mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn,
+                                 const CIRGenFunctionInfo &FnInfo);
+
+  clang::QualType buildFunctionArgList(clang::GlobalDecl GD,
+                                       FunctionArgList &Args);
+  struct AutoVarEmission {
+    const clang::VarDecl *Variable;
+    /// The address of the alloca for languages with explicit address space
+    /// (e.g. OpenCL) or alloca casted to generic pointer for address space
+    /// agnostic languages (e.g. C++). Invalid if the variable was emitted
+    /// as a global constant.
+    Address Addr;
+
+    /// True if the variable is of aggregate type and has a constant
+    /// initializer.
+    bool IsConstantAggregate = false;
+
+    /// True if the variable is a __block variable that is captured by an
+    /// escaping block.
+    bool IsEscapingByRef = false;
+
+    mlir::Value NRVOFlag{};
+
+    struct Invalid {};
+    AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
+
+    AutoVarEmission(const clang::VarDecl &variable)
+        : Variable(&variable), Addr(Address::invalid()) {}
+
+    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
+
+    bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
+
+    /// Returns the raw, allocated address, which is not necessarily
+    /// the address of the object itself. It is casted to default
+    /// address space for address space agnostic languages.
+    Address getAllocatedAddress() const { return Addr; }
+
+    /// Returns the address of the object within this declaration.
+    /// Note that this does not chase the forwarding pointer for
+    /// __block decls.
+    Address getObjectAddress(CIRGenFunction &CGF) const {
+      if (!IsEscapingByRef)
+        return Addr;
+
+      llvm_unreachable("NYI");
+    }
+  };
+
+  LValue buildMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+
+  /// Emit the alloca and debug information for a
+  /// local variable. Does not emit initialization or destruction.
+  AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D,
+                                     mlir::OpBuilder::InsertPoint = {});
+
+  void buildAutoVarInit(const AutoVarEmission &emission);
+  void buildAutoVarCleanups(const AutoVarEmission &emission);
+  void buildAutoVarTypeCleanup(const AutoVarEmission &emission,
+                               clang::QualType::DestructionKind dtorKind);
+
+  void buildStoreOfScalar(mlir::Value value, LValue lvalue);
+  void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile,
+                          clang::QualType Ty, LValueBaseInfo BaseInfo,
+                          bool isInit = false, bool isNontemporal = false);
+  void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
+
+  mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty);
+  void buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
+
+  /// Store the specified rvalue into the specified
+  /// lvalue, where both are guaranteed to have the same type, and that type
+  /// is 'Ty'.
+  void buildStoreThroughLValue(RValue Src, LValue Dst);
+
+  void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                       mlir::Value &Result);
+
+  mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest);
+
+  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
+  /// nonnull, if \p LHS is marked _Nonnull.
+  void buildNullabilityCheck(LValue LHS, mlir::Value RHS,
+                             clang::SourceLocation Loc);
+
+  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
+  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
+  /// \p SignedIndices indicates whether any of the GEP indices are signed.
+  /// \p IsSubtraction indicates whether the expression used to form the GEP
+  /// is a subtraction.
+  mlir::Value buildCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr,
+                                      ArrayRef<mlir::Value> IdxList,
+                                      bool SignedIndices, bool IsSubtraction,
+                                      SourceLocation Loc);
+
+  void buildScalarInit(const clang::Expr *init, mlir::Location loc,
+                       LValue lvalue, bool capturedByInit = false);
+
+  LValue buildDeclRefLValue(const clang::DeclRefExpr *E);
+  LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E);
+  LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E);
+  LValue buildUnaryOpLValue(const clang::UnaryOperator *E);
+  LValue buildStringLiteralLValue(const StringLiteral *E);
+  RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID,
+                          const clang::CallExpr *E,
+                          ReturnValueSlot ReturnValue);
+  mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID,
+                                     const clang::CallExpr *E,
+                                     ReturnValueSlot ReturnValue);
+
+  /// Given an expression with a pointer type, emit the value and compute our
+  /// best estimate of the alignment of the pointee.
+  ///
+  /// \param BaseInfo - If non-null, this will be initialized with
+  /// information about the source of the alignment and the may-alias
+  /// attribute. Note that this function will conservatively fall back on
+  /// the type when it doesn't recognize the expression and may-alias will
+  /// be set to false.
+  ///
+  /// One reasonable way to use this information is when there's a language
+  /// guarantee that the pointer must be aligned to some stricter value, and
+  /// we're simply trying to ensure that sufficiently obvious uses of under-
+  /// aligned objects don't get miscompiled; for example, a placement new
+  /// into the address of a local variable. In such a case, it's quite
+  /// reasonable to just ignore the returned alignment when it isn't from an
+  /// explicit source.
+  Address
+  buildPointerWithAlignment(const clang::Expr *E,
+                            LValueBaseInfo *BaseInfo = nullptr,
+                            KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+
+  LValue
+  buildConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+
+  /// Emit an expression as an initializer for an object (variable, field,
+  /// etc.) at the given location. The expression is not necessarily the
+  /// normal initializer for the object, and the address is not necessarily
+  /// its normal location.
+  ///
+  /// \param init the initializing expression
+  /// \param D the object to act as if we're initializing
+  /// \param lvalue the lvalue to initialize
+  /// \param capturedByInit true if \p D is a __block variable whose address is
+  /// potentially changed by the initializer
+  void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D,
+                       LValue lvalue, bool capturedByInit = false);
+
+  /// Emit code and set up symbol table for a variable declaration with auto,
+  /// register, or no storage class specifier. These turn into simple stack
+  /// objects, globals depending on target.
+  void buildAutoVarDecl(const clang::VarDecl &D);
+
+  /// This method handles emission of any variable declaration
+  /// inside a function, including static vars etc.
+  void buildVarDecl(const clang::VarDecl &D);
+
+  mlir::cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D,
+                                                    mlir::cir::GlobalOp GV);
+
+  void buildStaticVarDecl(const VarDecl &D,
+                          mlir::cir::GlobalLinkageKind Linkage);
+
+  /// Perform the usual unary conversions on the specified
+  /// expression and compare the result against zero, returning an Int1Ty
+  /// value.
+  mlir::Value evaluateExprAsBool(const clang::Expr *E);
+
+  void buildCtorPrologue(const clang::CXXConstructorDecl *CD,
+                         clang::CXXCtorType Type, FunctionArgList &Args);
+  void buildConstructorBody(FunctionArgList &Args);
+  void buildDestructorBody(FunctionArgList &Args);
+  void buildCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+                              bool ForVirtualBase, bool Delegating,
+                              Address This, QualType ThisTy);
+  RValue buildCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee,
+                                mlir::Value This, QualType ThisTy,
+                                mlir::Value ImplicitParam,
+                                QualType ImplicitParamTy, const CallExpr *E);
+
+  /// Enter the cleanups necessary to complete the given phase of destruction
+  /// for a destructor. The end result should call destructors on members and
+  /// base classes in reverse order of their construction.
+  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
+
+  /// Determines whether an EH cleanup is required to destroy a type
+  /// with the given destruction kind.
+  /// TODO(cir): could be shared with Clang LLVM codegen
+  bool needsEHCleanup(QualType::DestructionKind kind) {
+    switch (kind) {
+    case QualType::DK_none:
+      return false;
+    case QualType::DK_cxx_destructor:
+    case QualType::DK_objc_weak_lifetime:
+    case QualType::DK_nontrivial_c_struct:
+      return getLangOpts().Exceptions;
+    case QualType::DK_objc_strong_lifetime:
+      return getLangOpts().Exceptions &&
+             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+    }
+    llvm_unreachable("bad destruction kind");
+  }
+
+  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+  }
+
+  void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr,
+                     QualType type);
+
+  void pushStackRestore(CleanupKind kind, Address SPMem);
+
+  static bool
+  IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor);
+
+  struct VPtr {
+    clang::BaseSubobject Base;
+    const clang::CXXRecordDecl *NearestVBase;
+    clang::CharUnits OffsetFromNearestVBase;
+    const clang::CXXRecordDecl *VTableClass;
+  };
+
+  using VisitedVirtualBasesSetTy =
+      llvm::SmallPtrSet<const clang::CXXRecordDecl *, 4>;
+
+  using VPtrsVector = llvm::SmallVector<VPtr, 4>;
+  VPtrsVector getVTablePointers(const clang::CXXRecordDecl *VTableClass);
+  void getVTablePointers(clang::BaseSubobject Base,
+                         const clang::CXXRecordDecl *NearestVBase,
+                         clang::CharUnits OffsetFromNearestVBase,
+                         bool BaseIsNonVirtualPrimaryBase,
+                         const clang::CXXRecordDecl *VTableClass,
+                         VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
+  /// Return the Value of the vtable pointer member pointed to by This.
+  mlir::Value getVTablePtr(mlir::Location Loc, Address This,
+                           mlir::Type VTableTy,
+                           const CXXRecordDecl *VTableClass);
+
+  /// Returns whether we should perform a type checked load when loading a
+  /// virtual function for virtual calls to members of RD. This is generally
+  /// true when both vcall CFI and whole-program-vtables are enabled.
+  bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
+
+  /// If whole-program virtual table optimization is enabled, emit an
+  /// assumption that VTable is a member of RD's type identifier. Or, if vptr
+  /// CFI is enabled, emit a check that VTable is a member of RD's type
+  /// identifier.
+  void buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
+                                     mlir::Value VTable, SourceLocation Loc);
+
+  /// Return the VTT parameter that should be passed to a base
+  /// constructor/destructor with virtual bases.
+  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
+  /// to CIRGenItaniumCXXABI.cpp together with all the references to VTT.
+  mlir::Value GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
+                              bool Delegating);
+
+  /// Source location information about the default argument or member
+  /// initializer expression we're evaluating, if any.
+  clang::CurrentSourceLocExprScope CurSourceLocExprScope;
+  using SourceLocExprScopeGuard =
+      clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard;
+
+  /// A scope within which we are constructing the fields of an object which
+  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
+  /// if we need to evaluate the CXXDefaultInitExpr within the evaluation.
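+  /// Illustrative sketch: constructor emission stashes the object under
+  /// construction before emitting the member initializers:
+  /// \code
+  ///   FieldConstructionScope FCS(*this, LoadCXXThisAddress());
+  ///   // ... emit each field initializer ...
+  /// \endcode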
+  class FieldConstructionScope {
+  public:
+    FieldConstructionScope(CIRGenFunction &CGF, Address This)
+        : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
+      CGF.CXXDefaultInitExprThis = This;
+    }
+    ~FieldConstructionScope() {
+      CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
+    }
+
+  private:
+    CIRGenFunction &CGF;
+    Address OldCXXDefaultInitExprThis;
+  };
+
+  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
+  /// is overridden to be the object under construction.
+  class CXXDefaultInitExprScope {
+  public:
+    CXXDefaultInitExprScope(CIRGenFunction &CGF,
+                            const clang::CXXDefaultInitExpr *E)
+        : CGF{CGF}, OldCXXThisValue(CGF.CXXThisValue),
+          OldCXXThisAlignment(CGF.CXXThisAlignment),
+          SourceLocScope(E, CGF.CurSourceLocExprScope) {
+      CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+      CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
+    }
+    ~CXXDefaultInitExprScope() {
+      CGF.CXXThisValue = OldCXXThisValue;
+      CGF.CXXThisAlignment = OldCXXThisAlignment;
+    }
+
+  public:
+    CIRGenFunction &CGF;
+    mlir::Value OldCXXThisValue;
+    clang::CharUnits OldCXXThisAlignment;
+    SourceLocExprScopeGuard SourceLocScope;
+  };
+
+  struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+    CXXDefaultArgExprScope(CIRGenFunction &CGF, const CXXDefaultArgExpr *E)
+        : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
+  };
+
+  LValue MakeNaturalAlignPointeeAddrLValue(mlir::Value V, clang::QualType T);
+  LValue MakeNaturalAlignAddrLValue(mlir::Value V, QualType T);
+
+  /// Construct an address with the natural alignment of T. If a pointer to T
+  /// is expected to be signed, the pointer passed to this function must have
+  /// been signed, and the returned Address will have the pointer
+  /// authentication information needed to authenticate the signed pointer.
+  Address makeNaturalAddressForPointer(
+      mlir::Value Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
+      bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
+      KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
+    if (Alignment.isZero())
+      Alignment = CGM.getNaturalTypeAlignment(T, BaseInfo, ForPointeeType);
+    return Address(Ptr, convertTypeForMem(T), Alignment, IsKnownNonNull);
+  }
+
+  /// Load the value for 'this'. This function is only valid while generating
+  /// code for a C++ member function.
+  /// FIXME(cir): this should return a mlir::Value!
+  mlir::Value LoadCXXThis() {
+    assert(CXXThisValue && "no 'this' value for this function");
+    return CXXThisValue;
+  }
+  Address LoadCXXThisAddress();
+
+  /// Convert the given pointer to a complete class to the given direct base.
+  Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc,
+                                                Address Value,
+                                                const CXXRecordDecl *Derived,
+                                                const CXXRecordDecl *Base,
+                                                bool BaseIsVirtual);
+
+  Address getAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived,
+                                CastExpr::path_const_iterator PathBegin,
+                                CastExpr::path_const_iterator PathEnd,
+                                bool NullCheckValue, SourceLocation Loc);
+
+  /// Emit code for the start of a function.
+  /// \param Loc The location to be associated with the function.
+  /// \param StartLoc The location of the function body.
+  void StartFunction(clang::GlobalDecl GD, clang::QualType RetTy,
+                     mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo,
+                     const FunctionArgList &Args, clang::SourceLocation Loc,
+                     clang::SourceLocation StartLoc);
+
+  /// Emit a conversion from the specified type to the specified destination
+  /// type, both of which are CIR scalar types.
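+  /// For example, the implicit int-to-double conversion in `double d = i;`
+  /// is emitted roughly as (sketch; `Init` and `DestTy` assumed):
+  /// \code
+  ///   mlir::Value V = buildScalarExpr(Init);
+  ///   mlir::Value D = buildScalarConversion(V, Init->getType(), DestTy,
+  ///                                         Init->getExprLoc());
+  /// \endcode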
+  mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy,
+                                    clang::QualType DstTy,
+                                    clang::SourceLocation Loc);
+
+  LValue makeAddrLValue(Address Addr, clang::QualType T,
+                        LValueBaseInfo BaseInfo) {
+    return LValue::makeAddr(Addr, T, getContext(), BaseInfo);
+  }
+
+  LValue makeAddrLValue(Address Addr, clang::QualType T,
+                        AlignmentSource Source = AlignmentSource::Type) {
+    return LValue::makeAddr(Addr, T, getContext(), LValueBaseInfo(Source));
+  }
+
+  void initializeVTablePointers(mlir::Location loc,
+                                const clang::CXXRecordDecl *RD);
+  void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr);
+
+  AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
+  LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field);
+  LValue buildLValueForBitField(LValue base, const FieldDecl *field);
+
+  /// Like buildLValueForField, except that if the Field is a reference, this
+  /// will return the address of the reference and not the address of the
+  /// value stored in the reference.
+  LValue buildLValueForFieldInitialization(LValue Base,
+                                           const clang::FieldDecl *Field,
+                                           llvm::StringRef FieldName);
+
+  void buildInitializerForField(clang::FieldDecl *Field, LValue LHS,
+                                clang::Expr *Init);
+
+  /// Determine whether the given initializer is trivial in the sense
+  /// that it requires no code to be generated.
+  bool isTrivialInitializer(const clang::Expr *Init);
+
+  // TODO: this can also be abstracted into common AST helpers
+  bool hasBooleanRepresentation(clang::QualType Ty);
+
+  void buildCXXThrowExpr(const CXXThrowExpr *E);
+
+  RValue buildAtomicExpr(AtomicExpr *E);
+
+  /// Return the address of a local variable.
+  Address GetAddrOfLocalVar(const clang::VarDecl *VD) {
+    auto it = LocalDeclMap.find(VD);
+    assert(it != LocalDeclMap.end() &&
+           "Invalid argument to GetAddrOfLocalVar(), no decl!");
+    return it->second;
+  }
+
+  Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field,
+                                   mlir::Type fieldType, unsigned index);
+
+  /// Given an opaque value expression, return its LValue mapping if it
+  /// exists, otherwise create one.
+  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
+
+  /// Given an opaque value expression, return its RValue mapping if it
+  /// exists, otherwise create one.
+  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
+
+  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
+  static bool isWrappedCXXThis(const clang::Expr *E);
+
+  void buildDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor,
+                                       clang::CXXCtorType CtorType,
+                                       const FunctionArgList &Args,
+                                       clang::SourceLocation Loc);
+
+  /// We are performing a delegate call; that is, the current function is
+  /// delegating to another one. Produce a r-value suitable for passing the
+  /// given parameter.
+  void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
+                            clang::SourceLocation loc);
+
+  /// Return true if the current function should be instrumented with
+  /// __cyg_profile_func_* calls
+  bool ShouldInstrumentFunction();
+
+  /// TODO(cir): add TBAAAccessInfo
+  Address buildArrayToPointerDecay(const Expr *Array,
+                                   LValueBaseInfo *BaseInfo = nullptr);
+
+  /// Emits the code necessary to evaluate an arbitrary expression into the
+  /// given memory location.
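+  /// Sketch, assuming `Dest` was obtained from CreateMemTemp for E's type:
+  /// \code
+  ///   buildAnyExprToMem(E, Dest, E->getType().getQualifiers(),
+  ///                     /*IsInitializer=*/true);
+  /// \endcode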
+  void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
+                         bool IsInitializer);
+  void buildAnyExprToExn(const Expr *E, Address Addr);
+
+  LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK);
+  LValue buildMemberExpr(const MemberExpr *E);
+  LValue buildCompoundLiteralLValue(const CompoundLiteralExpr *E);
+
+  /// Specifies which type of sanitizer check to apply when handling a
+  /// particular builtin.
+  enum BuiltinCheckKind {
+    BCK_CTZPassedZero,
+    BCK_CLZPassedZero,
+  };
+
+  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
+  /// enabled, a runtime check specified by \p Kind is also emitted.
+  mlir::Value buildCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
+
+  /// Returns true if aggregate type has a volatile member.
+  /// TODO(cir): this could be a common AST helper between LLVM / CIR.
+  bool hasVolatileMember(QualType T) {
+    if (const RecordType *RT = T->getAs<RecordType>()) {
+      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+      return RD->hasVolatileMember();
+    }
+    return false;
+  }
+
+  /// Emit an aggregate assignment.
+  void buildAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
+    bool IsVolatile = hasVolatileMember(EltTy);
+    buildAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
+  }
+
+  /// Emit an aggregate copy.
+  ///
+  /// \param isVolatile \c true iff either the source or the destination is
+  /// volatile.
+  /// \param MayOverlap Whether the tail padding of the destination might be
+  /// occupied by some other object. More efficient code can often be
+  /// generated if not.
+  void buildAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
+                          AggValueSlot::Overlap_t MayOverlap,
+                          bool isVolatile = false);
+
+  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
+  /// checking is enabled. Otherwise, just emit an unreachable instruction.
+  void buildUnreachable(SourceLocation Loc);
+
+  ///
+  /// Cleanups
+  /// --------
+
+  /// Header for data within LifetimeExtendedCleanupStack.
+  struct LifetimeExtendedCleanupHeader {
+    /// The size of the following cleanup object.
+    unsigned Size;
+    /// The kind of cleanup to push: a value from the CleanupKind enumeration.
+    unsigned Kind : 31;
+    /// Whether this is a conditional cleanup.
+    unsigned IsConditional : 1;
+
+    size_t getSize() const { return Size; }
+    CleanupKind getKind() const { return (CleanupKind)Kind; }
+    bool isConditional() const { return IsConditional; }
+  };
+
+  /// Emits try/catch information for the current EH stack.
+  mlir::Operation *buildLandingPad();
+  mlir::Block *getEHResumeBlock(bool isCleanup);
+  mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
+
+  /// The cleanup depth enclosing all the cleanups associated with the
+  /// parameters.
+  EHScopeStack::stable_iterator PrologueCleanupDepth;
+
+  mlir::Operation *getInvokeDestImpl();
+  bool getInvokeDest() {
+    if (!EHStack.requiresLandingPad())
+      return false;
+    // cir.try_call does not require a block destination, but keep the
+    // overall traditional LLVM codegen names, and just ignore the result.
+    return (bool)getInvokeDestImpl();
+  }
+
+  /// Takes the old cleanup stack size and emits the cleanup blocks
+  /// that have been added.
+  void
+  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+                   std::initializer_list<mlir::Value **> ValuesToReload = {});
+
+  /// Takes the old cleanup stack size and emits the cleanup blocks
+  /// that have been added, then adds all lifetime-extended cleanups from
+  /// the given position to the stack.
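+  /// Most emission code brackets a region with a RunCleanupsScope (declared
+  /// below) rather than calling this directly; a sketch:
+  /// \code
+  ///   {
+  ///     RunCleanupsScope Scope(*this);
+  ///     // ... push cleanups and emit the region ...
+  ///   } // cleanups are emitted here
+  /// \endcode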
+  void
+  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+                   size_t OldLifetimeExtendedStackSize,
+                   std::initializer_list<mlir::Value **> ValuesToReload = {});
+
+  /// Will pop the cleanup entry on the stack and process all branch fixups.
+  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+
+  /// Deactivates the given cleanup block. The block cannot be reactivated.
+  /// Pops it if it's the top of the stack.
+  ///
+  /// \param DominatingIP - An instruction which is known to
+  /// dominate the current IP (if set) and which lies along
+  /// all paths of execution between the current IP and the
+  /// point at which the cleanup comes into scope.
+  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+                              mlir::Operation *DominatingIP);
+
+  typedef void Destroyer(CIRGenFunction &CGF, Address addr, QualType ty);
+
+  static Destroyer destroyCXXObject;
+
+  void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+                   QualType type);
+
+  void pushDestroy(CleanupKind kind, Address addr, QualType type,
+                   Destroyer *destroyer, bool useEHCleanupForArray);
+
+  Destroyer *getDestroyer(QualType::DestructionKind kind);
+
+  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
+                   bool useEHCleanupForArray);
+
+  /// An object to manage conditionally-evaluated expressions.
+  class ConditionalEvaluation {
+    // llvm::BasicBlock *StartBB;
+
+  public:
+    ConditionalEvaluation(CIRGenFunction &CGF)
+    /*: StartBB(CGF.Builder.GetInsertBlock())*/ {}
+
+    void begin(CIRGenFunction &CGF) {
+      assert(CGF.OutermostConditional != this);
+      if (!CGF.OutermostConditional)
+        CGF.OutermostConditional = this;
+    }
+
+    void end(CIRGenFunction &CGF) {
+      assert(CGF.OutermostConditional != nullptr);
+      if (CGF.OutermostConditional == this)
+        CGF.OutermostConditional = nullptr;
+    }
+
+    /// Returns a block which will be executed prior to each
+    /// evaluation of the conditional code.
+    // llvm::BasicBlock *getStartingBlock() const { return StartBB; }
+  };
+
+  struct ConditionalInfo {
+    std::optional<LValue> LHS{}, RHS{};
+    mlir::Value Result{};
+  };
+
+  template <typename FuncTy>
+  ConditionalInfo buildConditionalBlocks(const AbstractConditionalOperator *E,
+                                         const FuncTy &BranchGenFunc);
+
+  // Return true if we're currently emitting one branch or the other of a
+  // conditional expression.
+  bool isInConditionalBranch() const {
+    return OutermostConditional != nullptr;
+  }
+
+  void setBeforeOutermostConditional(mlir::Value value, Address addr) {
+    assert(isInConditionalBranch());
+    llvm_unreachable("NYI");
+  }
+
+  void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin,
+                                        Address arrayEndPointer,
+                                        QualType elementType,
+                                        CharUnits elementAlign,
+                                        Destroyer *destroyer);
+  void pushRegularPartialArrayCleanup(mlir::Value arrayBegin,
+                                      mlir::Value arrayEnd,
+                                      QualType elementType,
+                                      CharUnits elementAlign,
+                                      Destroyer *destroyer);
+  void buildArrayDestroy(mlir::Value begin, mlir::Value end,
+                         QualType elementType, CharUnits elementAlign,
+                         Destroyer *destroyer, bool checkZeroLength,
+                         bool useEHCleanup);
+
+  // Points to the outermost active conditional control. This is used so that
+  // we know if a temporary should be destroyed conditionally.
+  ConditionalEvaluation *OutermostConditional = nullptr;
+
+  /// Push a cleanup to be run at the end of the current full-expression. Safe
+  /// against the possibility that we're currently inside a
+  /// conditionally-evaluated expression.
+  template <class T, class... As>
+  void pushFullExprCleanup(CleanupKind kind, As... A) {
+    // If we're not in a conditional branch, or if none of the
+    // arguments requires saving, then use the unconditional cleanup.
+    if (!isInConditionalBranch())
+      return EHStack.pushCleanup<T>(kind, A...);
+
+    llvm_unreachable("NYI");
+    // Stash values in a tuple so we can guarantee the order of saves.
+    // typedef std::tuple<typename DominatingValue<As>::saved_type...>
+    //     SavedTuple;
+    // SavedTuple Saved{saveValueInCond(A)...};
+
+    // typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
+    // EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
+    // initFullExprCleanup();
+  }
+
+  /// Set up the last cleanup that was pushed as a conditional
+  /// full-expression cleanup.
+  void initFullExprCleanup() {
+    initFullExprCleanupWithFlag(createCleanupActiveFlag());
+  }
+
+  void initFullExprCleanupWithFlag(Address ActiveFlag);
+  Address createCleanupActiveFlag();
+
+  /// Enters a new scope for capturing cleanups, all of which
+  /// will be executed once the scope is exited.
+  class RunCleanupsScope {
+    EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
+    size_t LifetimeExtendedCleanupStackSize;
+    bool OldDidCallStackSave;
+
+  protected:
+    bool PerformCleanup;
+
+  private:
+    RunCleanupsScope(const RunCleanupsScope &) = delete;
+    void operator=(const RunCleanupsScope &) = delete;
+
+  protected:
+    CIRGenFunction &CGF;
+
+  public:
+    /// Enter a new cleanup scope.
+    explicit RunCleanupsScope(CIRGenFunction &CGF)
+        : PerformCleanup(true), CGF(CGF) {
+      CleanupStackDepth = CGF.EHStack.stable_begin();
+      LifetimeExtendedCleanupStackSize =
+          CGF.LifetimeExtendedCleanupStack.size();
+      OldDidCallStackSave = CGF.DidCallStackSave;
+      CGF.DidCallStackSave = false;
+      OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
+      CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
+    }
+
+    /// Exit this cleanup scope, emitting any accumulated cleanups.
+    ~RunCleanupsScope() {
+      if (PerformCleanup)
+        ForceCleanup();
+    }
+
+    /// Determine whether this scope requires any cleanups.
+    bool requiresCleanups() const {
+      return CGF.EHStack.stable_begin() != CleanupStackDepth;
+    }
+
+    /// Force the emission of cleanups now, instead of waiting
+    /// until this object is destroyed.
+    /// \param ValuesToReload - A list of values that need to be available at
+    /// the insertion point after cleanup emission. If cleanup emission created
+    /// a shared cleanup block, these value pointers will be rewritten.
+    /// Otherwise, they will not be modified.
+    void
+    ForceCleanup(std::initializer_list<mlir::Value **> ValuesToReload = {}) {
+      assert(PerformCleanup && "Already forced cleanup");
+      CGF.DidCallStackSave = OldDidCallStackSave;
+      CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
+                           ValuesToReload);
+      PerformCleanup = false;
+      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
+    }
+  };
+
+  // Cleanup stack depth of the RunCleanupsScope that was pushed most
+  // recently.
+  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
+      EHScopeStack::stable_end();
+
+  /// -------
+  /// Lexical Scope: to be read as in the meaning in CIR, a scope is always
+  /// related with initialization and destruction of objects.
+  /// -------
+
+public:
+  // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical
+  // scopes that require cleanups.
+  struct LexicalScope : public RunCleanupsScope {
+  private:
+    // Block containing cleanup code for things initialized in this
+    // lexical context (scope).
+    mlir::Block *CleanupBlock = nullptr;
+
+    // Points to scope entry block. This is useful, for instance, for
+    // helping to insert allocas before finalizing any recursive codegen
+    // from switches.
+    mlir::Block *EntryBlock;
+
+    // On a coroutine body, the OnFallthrough sub stmt holds the handler
+    // (CoreturnStmt) for control flow falling off the body. Keep track
+    // of emitted co_return in this scope and allow OnFallthrough to be
+    // skipped.
+    bool HasCoreturn = false;
+
+    LexicalScope *ParentScope = nullptr;
+
+    // If there's exception information for this scope, store it.
+    CIRExceptionInfo exInfo{};
+
+    // FIXME: perhaps we can use some info encoded in operations.
+    enum Kind {
+      Regular, // cir.if, cir.scope, if_regions
+      Ternary, // cir.ternary
+      Switch   // cir.switch
+    } ScopeKind = Regular;
+
+    // Track scope return value.
+    mlir::Value retVal = nullptr;
+
+  public:
+    unsigned Depth = 0;
+    bool HasReturn = false;
+
+    LexicalScope(CIRGenFunction &CGF, mlir::Location loc, mlir::Block *eb)
+        : RunCleanupsScope(CGF), EntryBlock(eb), ParentScope(CGF.currLexScope),
+          BeginLoc(loc), EndLoc(loc) {
+
+      CGF.currLexScope = this;
+      if (ParentScope)
+        Depth++;
+
+      // Has multiple locations: overwrite with separate start and end locs.
+      if (const auto fusedLoc = loc.dyn_cast<mlir::FusedLoc>()) {
+        assert(fusedLoc.getLocations().size() == 2 && "too many locations");
+        BeginLoc = fusedLoc.getLocations()[0];
+        EndLoc = fusedLoc.getLocations()[1];
+      }
+
+      assert(EntryBlock && "expected valid block");
+    }
+
+    void setRetVal(mlir::Value v) { retVal = v; }
+
+    void cleanup();
+    void restore() { CGF.currLexScope = ParentScope; }
+
+    ~LexicalScope() {
+      // EmitLexicalBlockEnd
+      assert(!UnimplementedFeature::generateDebugInfo());
+      // If we should perform a cleanup, force them now. Note that
+      // this ends the cleanup scope before rescoping any labels.
+      cleanup();
+      restore();
+    }
+
+    /// Force the emission of cleanups now, instead of waiting
+    /// until this object is destroyed.
+    void ForceCleanup() {
+      RunCleanupsScope::ForceCleanup();
+      // TODO(cir): something akin to rescopeLabels if it makes sense to CIR.
+    }
+
+    // ---
+    // Coroutine tracking
+    // ---
+    bool hasCoreturn() const { return HasCoreturn; }
+    void setCoreturn() { HasCoreturn = true; }
+
+    // ---
+    // Kind
+    // ---
+    bool isRegular() { return ScopeKind == Kind::Regular; }
+    bool isSwitch() { return ScopeKind == Kind::Switch; }
+    bool isTernary() { return ScopeKind == Kind::Ternary; }
+
+    void setAsSwitch() { ScopeKind = Kind::Switch; }
+    void setAsTernary() { ScopeKind = Kind::Ternary; }
+
+    // ---
+    // Goto handling
+    // ---
+
+    // Lazy create cleanup block or return what's available.
+    mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
+      if (CleanupBlock)
+        return getCleanupBlock(builder);
+      return createCleanupBlock(builder);
+    }
+
+    mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
+      return CleanupBlock;
+    }
+    mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
+      {
+        // Create the cleanup block but don't hook it up just yet.
+        mlir::OpBuilder::InsertionGuard guard(builder);
+        CleanupBlock = builder.createBlock(builder.getBlock()->getParent());
+      }
+      assert(builder.getInsertionBlock() && "Should be valid");
+      return CleanupBlock;
+    }
+
+    // Goto's introduced in this scope but didn't get fixed.
+    llvm::SmallVector<std::pair<mlir::Operation *, const clang::LabelDecl *>,
+                      4>
+        PendingGotos;
+
+    // Labels solved inside this scope.
+
+    // Gotos introduced in this scope but not yet fixed up.
+    llvm::SmallVector<std::pair<mlir::Operation *, const clang::LabelDecl *>,
+                      4>
+        PendingGotos;
+
+    // Labels solved inside this scope.
+    llvm::SmallPtrSet<const clang::LabelDecl *, 4> SolvedLabels;
+
+    // ---
+    // Exception handling
+    // ---
+    CIRExceptionInfo &getExceptionInfo() { return exInfo; }
+    void setExceptionInfo(const CIRExceptionInfo &info) { exInfo = info; }
+
+    // ---
+    // Return handling
+    // ---
+
+  private:
+    // On switches we need one return block per region, since cases don't
+    // have their own scopes but are distinct regions nonetheless.
+    llvm::SmallVector<mlir::Block *> RetBlocks;
+    llvm::SmallVector<std::optional<mlir::Location>> RetLocs;
+    llvm::SmallVector<std::unique_ptr<mlir::Region>> SwitchRegions;
+
+    // There's usually only one ret block per scope, but this needs to be
+    // get-or-create because of potential unreachable return statements; note
+    // that for those, all source locations map to the first one found.
+    mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) {
+      assert((isSwitch() || RetBlocks.size() == 0) &&
+             "only switches can hold more than one ret block");
+
+      // Create the return block but don't hook it up just yet.
+      mlir::OpBuilder::InsertionGuard guard(CGF.builder);
+      auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent());
+      RetBlocks.push_back(b);
+      RetLocs.push_back(loc);
+      return b;
+    }
+
+    mlir::cir::ReturnOp buildReturn(mlir::Location loc);
+    void buildImplicitReturn();
+
+  public:
+    llvm::ArrayRef<mlir::Block *> getRetBlocks() { return RetBlocks; }
+    llvm::ArrayRef<std::optional<mlir::Location>> getRetLocs() {
+      return RetLocs;
+    }
+    llvm::MutableArrayRef<std::unique_ptr<mlir::Region>> getSwitchRegions() {
+      assert(isSwitch() && "expected switch scope");
+      return SwitchRegions;
+    }
+
+    mlir::Region *createSwitchRegion() {
+      assert(isSwitch() && "expected switch scope");
+      SwitchRegions.push_back(std::make_unique<mlir::Region>());
+      return SwitchRegions.back().get();
+    }
+
+    mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) {
+      unsigned int regionIdx = 0;
+      if (isSwitch())
+        regionIdx = SwitchRegions.size() - 1;
+      if (regionIdx >= RetBlocks.size())
+        return createRetBlock(CGF, loc);
+      return &*RetBlocks.back();
+    }
+
+    // Scope entry block tracking
+    mlir::Block *getEntryBlock() { return EntryBlock; }
+
+    mlir::Location BeginLoc, EndLoc;
+  };
+
+  LexicalScope *currLexScope = nullptr;
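+
+  // Illustrative sketch of how a LexicalScope is meant to pair with a
+  // cir.scope operation during statement emission; the exact ScopeOp builder
+  // callback signature and the buildCompoundStmt call are assumptions here,
+  // not a verbatim call site from this patch.
+  //
+  // \code
+  //   builder.create<mlir::cir::ScopeOp>(
+  //       scopeLoc, [&](mlir::OpBuilder &b, mlir::Location loc) {
+  //         LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
+  //         (void)buildCompoundStmt(S);
+  //       });
+  // \endcode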
+
+  /// CIR build helpers
+  /// -----------------
+
+  /// This creates an alloca and inserts it into the entry block if \p
+  /// ArraySize is nullptr,
+  ///
+  /// TODO(cir): ... otherwise inserts it at the current insertion point of
+  /// the builder.
+  /// The caller is responsible for setting an appropriate alignment on
+  /// the alloca.
+  ///
+  /// \p ArraySize is the number of array elements to be allocated if it
+  /// is not nullptr.
+  ///
+  /// LangAS::Default is the address space of pointers to local variables and
+  /// temporaries, as exposed in the source language. In certain
+  /// configurations, this is not the same as the alloca address space, and a
+  /// cast is needed to lift the pointer from the alloca AS into
+  /// LangAS::Default. This can happen when the target uses a restricted
+  /// address space for the stack but the source language requires
+  /// LangAS::Default to be a generic address space. The latter condition is
+  /// common for most programming languages; OpenCL is an exception in that
+  /// LangAS::Default is the private address space, which naturally maps
+  /// to the stack.
+  ///
+  /// Because the address of a temporary is often exposed to the program in
+  /// various ways, this function will perform the cast. The original alloca
+  /// instruction is returned through \p Alloca if it is not nullptr.
+  ///
+  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
+  /// more efficient if the caller knows that the address will not be exposed.
+  mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc,
+                                       const Twine &Name = "tmp",
+                                       mlir::Value ArraySize = nullptr,
+                                       bool insertIntoFnEntryBlock = false);
+  mlir::cir::AllocaOp
+  CreateTempAllocaInFnEntryBlock(mlir::Type Ty, mlir::Location Loc,
+                                 const Twine &Name = "tmp",
+                                 mlir::Value ArraySize = nullptr);
+  mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc,
+                                       const Twine &Name = "tmp",
+                                       mlir::OpBuilder::InsertPoint ip = {},
+                                       mlir::Value ArraySize = nullptr);
+  Address CreateTempAlloca(mlir::Type Ty, CharUnits align, mlir::Location Loc,
+                           const Twine &Name = "tmp",
+                           mlir::Value ArraySize = nullptr,
+                           Address *Alloca = nullptr,
+                           mlir::OpBuilder::InsertPoint ip = {});
+  Address CreateTempAllocaWithoutCast(mlir::Type Ty, CharUnits align,
+                                      mlir::Location Loc,
+                                      const Twine &Name = "tmp",
+                                      mlir::Value ArraySize = nullptr,
+                                      mlir::OpBuilder::InsertPoint ip = {});
+
+  /// Create a temporary memory object of the given type, with
+  /// appropriate alignment, and cast it to the default address space. Returns
+  /// the original alloca instruction through \p Alloca if it is not nullptr.
+  Address CreateMemTemp(QualType T, mlir::Location Loc,
+                        const Twine &Name = "tmp", Address *Alloca = nullptr);
+  Address CreateMemTemp(QualType T, CharUnits Align, mlir::Location Loc,
+                        const Twine &Name = "tmp", Address *Alloca = nullptr);
+
+  /// Create a temporary memory object of the given type, with
+  /// appropriate alignment, without casting it to the default address space.
+  Address CreateMemTempWithoutCast(QualType T, mlir::Location Loc,
+                                   const Twine &Name = "tmp");
+  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
+                                   mlir::Location Loc,
+                                   const Twine &Name = "tmp");
+
+  /// Create a temporary memory object for the given
+  /// aggregate type.
+  AggValueSlot CreateAggTemp(QualType T, mlir::Location Loc,
+                             const Twine &Name = "tmp",
+                             Address *Alloca = nullptr) {
+    return AggValueSlot::forAddr(
+        CreateMemTemp(T, Loc, Name, Alloca), T.getQualifiers(),
+        AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+        AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
+  }
+
+private:
+  QualType getVarArgType(const Expr *Arg);
+};
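+
+// Illustrative only: how the temporary-allocation helpers above compose. A
+// hypothetical caller materializing an aggregate temporary for an expression
+// E might do (the names here are assumptions, not a call site from this
+// patch):
+//
+// \code
+//   Address origAlloca = Address::invalid();
+//   Address tmp = CGF.CreateMemTemp(E->getType(), CGF.getLoc(E->getExprLoc()),
+//                                   "agg.tmp", &origAlloca);
+//   // `tmp` is usable in LangAS::Default (cast if required); `origAlloca`
+//   // holds the uncasted alloca address.
+// \endcode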
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+  typedef RValue type;
+  class saved_type {
+    enum Kind {
+      ScalarLiteral,
+      ScalarAddress,
+      AggregateLiteral,
+      AggregateAddress,
+      ComplexAddress
+    };
+
+    llvm::Value *Value;
+    llvm::Type *ElementType;
+    unsigned K : 3;
+    unsigned Align : 29;
+    saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0)
+        : Value(v), ElementType(e), K(k), Align(a) {}
+
+  public:
+    static bool needsSaving(RValue value);
+    static saved_type save(CIRGenFunction &CGF, RValue value);
+    RValue restore(CIRGenFunction &CGF) { llvm_unreachable("NYI"); }
+
+    // implementations in CGCleanup.cpp
+  };
+
+  static bool needsSaving(type value) {
+    return saved_type::needsSaving(value);
+  }
+  static saved_type save(CIRGenFunction &CGF, type value) {
+    return saved_type::save(CGF, value);
+  }
+  static type restore(CIRGenFunction &CGF, saved_type value) {
+    return value.restore(CGF);
+  }
+};
+
+} // namespace cir
+
+#endif // LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h
new file mode 100644
index 000000000000..36425beb9fb5
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h
@@ -0,0 +1,476 @@
+//==-- CIRGenFunctionInfo.h - Representation of fn argument/return types ---==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines CIRGenFunctionInfo and associated types used in representing the
+// CIR source types and ABI-coerced types for function arguments and
+// return values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H
+#define LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H
+
+#include "clang/AST/CanonicalType.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/TrailingObjects.h"
+
+namespace cir {
+
+/// ABIArgInfo - Helper class to encapsulate information about how a specific C
+/// type should be passed to or returned from a function.
+class ABIArgInfo {
+public:
+  enum Kind : uint8_t {
+    /// Direct - Pass the argument directly using the normal converted CIR
+    /// type, or by coercing to another specified type stored in
+    /// 'CoerceToType'. If an offset is specified (in UIntData), then the
+    /// argument passed is offset by some number of bytes in the memory
+    /// representation. A dummy argument is emitted before the real argument
+    /// if the specified type stored in "PaddingType" is not zero.
+    Direct,
+
+    /// Extend - Valid only for integer argument types. Same as 'direct' but
+    /// also emit a zero/sign extension attribute.
+    Extend,
+
+    /// Indirect - Pass the argument indirectly via a hidden pointer with the
+    /// specified alignment (0 indicates default alignment) and address space.
+    Indirect,
+
+    /// IndirectAliased - Similar to Indirect, but the pointer may be to an
+    /// object that is otherwise referenced. The object is known to not be
+    /// modified through any other references for the duration of the call,
+    /// and the callee must not itself modify the object.
+    /// Because C allows parameter variables to be modified and guarantees
+    /// that they have unique addresses, the callee must defensively copy the
+    /// object into a local variable if it might be modified or its address
+    /// might be compared. Since those are uncommon, in principle this
+    /// convention allows programs to avoid copies in more situations.
+    /// However, it may introduce *extra* copies if the callee fails to prove
+    /// that a copy is unnecessary and the caller naturally produces an
+    /// unaliased object for the argument.
+    IndirectAliased,
+
+    /// Ignore - Ignore the argument (treat as void). Useful for void and
+    /// empty structs.
+    Ignore,
+
+    /// Expand - Only valid for aggregate argument types. The structure should
+    /// be expanded into consecutive arguments for its constituent fields.
+    /// Currently expand is only allowed on structures whose fields are all
+    /// scalar types or are themselves expandable types.
+    Expand,
+
+    /// CoerceAndExpand - Only valid for aggregate argument types. The
+    /// structure should be expanded into consecutive arguments corresponding
+    /// to the non-array elements of the type stored in CoerceToType.
+    /// Array elements in the type are assumed to be padding and skipped.
+    CoerceAndExpand,
+
+    // TODO: translate this idea to CIR! Define it for now just to ensure that
+    // we can assert it not being used
+    InAlloca,
+    KindFirst = Direct,
+    KindLast = InAlloca
+  };
+
+private:
+  mlir::Type TypeData; // canHaveCoerceToType()
+  union {
+    mlir::Type PaddingType;                 // canHavePaddingType()
+    mlir::Type UnpaddedCoerceAndExpandType; // isCoerceAndExpand()
+  };
+  struct DirectAttrInfo {
+    unsigned Offset;
+    unsigned Align;
+  };
+  struct IndirectAttrInfo {
+    unsigned Align;
+    unsigned AddrSpace;
+  };
+  union {
+    DirectAttrInfo DirectAttr;     // isDirect() || isExtend()
+    IndirectAttrInfo IndirectAttr; // isIndirect()
+    unsigned AllocaFieldIndex;     // isInAlloca()
+  };
+  Kind TheKind;
+  bool CanBeFlattened : 1; // isDirect()
+  bool SignExt : 1;        // isExtend()
+
+  bool canHavePaddingType() const {
+    return isDirect() || isExtend() || isIndirect() || isIndirectAliased() ||
+           isExpand();
+  }
+
+  void setPaddingType(mlir::Type T) {
+    assert(canHavePaddingType());
+    PaddingType = T;
+  }
+
+public:
+  ABIArgInfo(Kind K = Direct)
+      : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K),
+        CanBeFlattened(false) {}
+
+  static ABIArgInfo getDirect(mlir::Type T = nullptr, unsigned Offset = 0,
+                              mlir::Type Padding = nullptr,
+                              bool CanBeFlattened = true, unsigned Align = 0) {
+    auto AI = ABIArgInfo(Direct);
+    AI.setCoerceToType(T);
+    AI.setPaddingType(Padding);
+    AI.setDirectOffset(Offset);
+    AI.setDirectAlign(Align);
+    AI.setCanBeFlattened(CanBeFlattened);
+    return AI;
+  }
+
+  static ABIArgInfo getSignExtend(clang::QualType Ty, mlir::Type T = nullptr) {
+    assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType");
+    auto AI = ABIArgInfo(Extend);
+    AI.setCoerceToType(T);
+    AI.setPaddingType(nullptr);
+    AI.setDirectOffset(0);
+    AI.setDirectAlign(0);
+    AI.setSignExt(true);
+    return AI;
+  }
+
+  static ABIArgInfo getZeroExtend(clang::QualType Ty, mlir::Type T = nullptr) {
+    assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType");
+    auto AI = ABIArgInfo(Extend);
+    AI.setCoerceToType(T);
+    AI.setPaddingType(nullptr);
+    AI.setDirectOffset(0);
+    AI.setDirectAlign(0);
+    AI.setSignExt(false);
+    return AI;
+  }
+
+  // ABIArgInfo will record the argument as being extended based on the sign
+  // of its type.
+ static ABIArgInfo getExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + if (Ty->hasSignedIntegerRepresentation()) + return getSignExtend(Ty, T); + return getZeroExtend(Ty, T); + } + + static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } + + Kind getKind() const { return TheKind; } + bool isDirect() const { return TheKind == Direct; } + bool isInAlloca() const { return TheKind == InAlloca; } + bool isExtend() const { return TheKind == Extend; } + bool isIndirect() const { return TheKind == Indirect; } + bool isIndirectAliased() const { return TheKind == IndirectAliased; } + bool isExpand() const { return TheKind == Expand; } + bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } + + bool canHaveCoerceToType() const { + return isDirect() || isExtend() || isCoerceAndExpand(); + } + + // Direct/Extend accessors + unsigned getDirectOffset() const { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + return DirectAttr.Offset; + } + + void setDirectOffset(unsigned Offset) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Offset = Offset; + } + + void setDirectAlign(unsigned Align) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Align = Align; + } + + void setSignExt(bool SExt) { + assert(isExtend() && "Invalid kind!"); + SignExt = SExt; + } + + void setCanBeFlattened(bool Flatten) { + assert(isDirect() && "Invalid kind!"); + CanBeFlattened = Flatten; + } + + bool getCanBeFlattened() const { + assert(isDirect() && "Invalid kind!"); + return CanBeFlattened; + } + + mlir::Type getPaddingType() const { + return (canHavePaddingType() ? PaddingType : nullptr); + } + + mlir::Type getCoerceToType() const { + assert(canHaveCoerceToType() && "Invalid kind!"); + return TypeData; + } + + void setCoerceToType(mlir::Type T) { + assert(canHaveCoerceToType() && "Invalid kind!"); + TypeData = T; + } +}; + +struct CIRGenFunctionInfoArgInfo { + clang::CanQualType type; + ABIArgInfo info; +}; + +/// A class for recording the number of arguments that a function signature +/// requires. +class RequiredArgs { + /// The number of required arguments, or ~0 if the signature does not permit + /// optional arguments. + unsigned NumRequired; + +public: + enum All_t { All }; + + RequiredArgs(All_t _) : NumRequired(~0U) {} + explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + + unsigned getOpaqueData() const { return NumRequired; } + + bool allowsOptionalArgs() const { return NumRequired != ~0U; } + + /// Compute the arguments required by the given formal prototype, given that + /// there may be some additional, non-formal arguments in play. + /// + /// If FD is not null, this will consider pass_object_size params in FD. 
+  static RequiredArgs
+  forPrototypePlus(const clang::FunctionProtoType *prototype,
+                   unsigned additional) {
+    if (!prototype->isVariadic())
+      return All;
+
+    if (prototype->hasExtParameterInfos())
+      additional += llvm::count_if(
+          prototype->getExtParameterInfos(),
+          [](const clang::FunctionProtoType::ExtParameterInfo &ExtInfo) {
+            return ExtInfo.hasPassObjectSize();
+          });
+
+    return RequiredArgs(prototype->getNumParams() + additional);
+  }
+
+  static RequiredArgs
+  forPrototypePlus(clang::CanQual<clang::FunctionProtoType> prototype,
+                   unsigned additional) {
+    return forPrototypePlus(prototype.getTypePtr(), additional);
+  }
+
+  unsigned getNumRequiredArgs() const {
+    assert(allowsOptionalArgs());
+    return NumRequired;
+  }
+};
+
+class CIRGenFunctionInfo final
+    : public llvm::FoldingSetNode,
+      private llvm::TrailingObjects<
+          CIRGenFunctionInfo, CIRGenFunctionInfoArgInfo,
+          clang::FunctionProtoType::ExtParameterInfo> {
+
+  typedef CIRGenFunctionInfoArgInfo ArgInfo;
+  typedef clang::FunctionProtoType::ExtParameterInfo ExtParameterInfo;
+
+  /// The cir::CallingConv to use for this function (as specified by the
+  /// user).
+  unsigned CallingConvention : 8;
+
+  /// The cir::CallingConv to actually use for this function, which may depend
+  /// on the ABI.
+  unsigned EffectiveCallingConvention : 8;
+
+  /// The clang::CallingConv that this was originally created with.
+  unsigned ASTCallingConvention : 6;
+
+  /// Whether this is an instance method.
+  unsigned InstanceMethod : 1;
+
+  /// Whether this is a chain call.
+  unsigned ChainCall : 1;
+
+  /// Whether this function is a CMSE nonsecure call.
+  unsigned CmseNSCall : 1;
+
+  /// Whether this function is noreturn.
+  unsigned NoReturn : 1;
+
+  /// Whether this function is returns-retained.
+  unsigned ReturnsRetained : 1;
+
+  /// Whether this function has the no_caller_saved_registers attribute.
+  unsigned NoCallerSavedRegs : 1;
+
+  /// How many arguments to pass inreg.
+  unsigned HasRegParm : 1;
+  unsigned RegParm : 3;
+
+  /// Whether this function has the nocf_check attribute.
+  unsigned NoCfCheck : 1;
+
+  RequiredArgs Required;
+
+  /// The struct representing all arguments passed in memory. Only used when
+  /// passing non-trivial types with inalloca. Not part of the profile.
+  /// TODO: think about modeling this properly, this is just a dumb
+  /// substitution for now since we aren't supporting anything other than
+  /// arguments in registers atm
+  mlir::cir::StructType *ArgStruct;
+  unsigned ArgStructAlign : 31;
+  unsigned HasExtParameterInfos : 1;
+
+  unsigned NumArgs;
+
+  ArgInfo *getArgsBuffer() { return getTrailingObjects<ArgInfo>(); }
+
+  const ArgInfo *getArgsBuffer() const {
+    return getTrailingObjects<ArgInfo>();
+  }
+
+  ExtParameterInfo *getExtParameterInfosBuffer() {
+    return getTrailingObjects<ExtParameterInfo>();
+  }
+
+  const ExtParameterInfo *getExtParameterInfosBuffer() const {
+    return getTrailingObjects<ExtParameterInfo>();
+  }
+
+  CIRGenFunctionInfo() : Required(RequiredArgs::All) {}
+
+public:
+  static CIRGenFunctionInfo *
+  create(unsigned cirCC, bool instanceMethod, bool chainCall,
+         const clang::FunctionType::ExtInfo &extInfo,
+         llvm::ArrayRef<ExtParameterInfo> paramInfos,
+         clang::CanQualType resultType,
+         llvm::ArrayRef<clang::CanQualType> argTypes, RequiredArgs required);
+  void operator delete(void *p) { ::operator delete(p); }
+
+  // Friending class TrailingObjects is apparently not good enough for MSVC,
+  // so these have to be public.
+  friend class TrailingObjects;
+  size_t numTrailingObjects(OverloadToken<ArgInfo>) const {
+    return NumArgs + 1;
+  }
+  size_t numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
+    return (HasExtParameterInfos ? NumArgs : 0);
+  }
+
+  using const_arg_iterator = const ArgInfo *;
+  using arg_iterator = ArgInfo *;
+
+  static void Profile(llvm::FoldingSetNodeID &ID, bool InstanceMethod,
+                      bool ChainCall, const clang::FunctionType::ExtInfo &info,
+                      llvm::ArrayRef<ExtParameterInfo> paramInfos,
+                      RequiredArgs required, clang::CanQualType resultType,
+                      llvm::ArrayRef<clang::CanQualType> argTypes) {
+    ID.AddInteger(info.getCC());
+    ID.AddBoolean(InstanceMethod);
+    ID.AddBoolean(info.getNoReturn());
+    ID.AddBoolean(info.getProducesResult());
+    ID.AddBoolean(info.getNoCallerSavedRegs());
+    ID.AddBoolean(info.getHasRegParm());
+    ID.AddBoolean(info.getRegParm());
+    ID.AddBoolean(info.getNoCfCheck());
+    ID.AddBoolean(info.getCmseNSCall());
+    ID.AddBoolean(required.getOpaqueData());
+    ID.AddBoolean(!paramInfos.empty());
+    if (!paramInfos.empty()) {
+      for (auto paramInfo : paramInfos)
+        ID.AddInteger(paramInfo.getOpaqueValue());
+    }
+    resultType.Profile(ID);
+    for (auto i : argTypes)
+      i.Profile(ID);
+  }
+
+  /// getASTCallingConvention() - Return the AST-specified calling convention.
+  clang::CallingConv getASTCallingConvention() const {
+    return clang::CallingConv(ASTCallingConvention);
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) {
+    ID.AddInteger(getASTCallingConvention());
+    ID.AddBoolean(InstanceMethod);
+    ID.AddBoolean(ChainCall);
+    ID.AddBoolean(NoReturn);
+    ID.AddBoolean(ReturnsRetained);
+    ID.AddBoolean(NoCallerSavedRegs);
+    ID.AddBoolean(HasRegParm);
+    ID.AddBoolean(RegParm);
+    ID.AddBoolean(NoCfCheck);
+    ID.AddBoolean(CmseNSCall);
+    ID.AddInteger(Required.getOpaqueData());
+    ID.AddBoolean(HasExtParameterInfos);
+    if (HasExtParameterInfos) {
+      for (auto paramInfo : getExtParameterInfos())
+        ID.AddInteger(paramInfo.getOpaqueValue());
+    }
+    getReturnType().Profile(ID);
+    for (const auto &I : arguments())
+      I.type.Profile(ID);
+  }
+
+  llvm::MutableArrayRef<ArgInfo> arguments() {
+    return llvm::MutableArrayRef<ArgInfo>(arg_begin(), NumArgs);
+  }
+  llvm::ArrayRef<ArgInfo> arguments() const {
+    return llvm::ArrayRef<ArgInfo>(arg_begin(), NumArgs);
+  }
+
+  const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
+  const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
+  arg_iterator arg_begin() { return getArgsBuffer() + 1; }
+  arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
+
+  unsigned arg_size() const { return NumArgs; }
+
+  llvm::ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
+    if (!HasExtParameterInfos)
+      return {};
+    return llvm::ArrayRef<ExtParameterInfo>(getExtParameterInfosBuffer(),
+                                            NumArgs);
+  }
+  ExtParameterInfo getExtParameterInfo(unsigned argIndex) const {
+    assert(argIndex <= NumArgs);
+    if (!HasExtParameterInfos)
+      return ExtParameterInfo();
+    return getExtParameterInfos()[argIndex];
+  }
+
+  /// getCallingConvention - Return the user-specified calling convention,
+  /// which has been translated into a CIR CC.
+  unsigned getCallingConvention() const { return CallingConvention; }
+
+  clang::CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
+
+  ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
+  const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
+
+  bool isChainCall() const { return ChainCall; }
+
+  bool isVariadic() const { return Required.allowsOptionalArgs(); }
+  RequiredArgs getRequiredArgs() const { return Required; }
+  unsigned getNumRequiredArgs() const {
+    return isVariadic() ? getRequiredArgs().getNumRequiredArgs() : arg_size();
+  }
+
+  mlir::cir::StructType *getArgStruct() const { return ArgStruct; }
+
+  /// Return true if this function uses inalloca arguments.
+ bool usesInAlloca() const { return ArgStruct; } +}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp new file mode 100644 index 000000000000..28cbc2a0ab01 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -0,0 +1,2363 @@ +//===----- CIRGenItaniumCXXABI.cpp - Emit CIR from ASTs for a Module ------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides C++ code generation targeting the Itanium C++ ABI. The class +// in this file generates structures that follow the Itanium C++ ABI, which is +// documented at: +// https://itanium-cxx-abi.github.io/cxx-abi/abi.html +// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html +// +// It also supports the closely-related ARM ABI, documented at: +// https://developer.arm.com/documentation/ihi0041/g/ +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenCleanup.h" +#include "CIRGenFunctionInfo.h" +#include "ConstantInitBuilder.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/VTableBuilder.h" +#include "clang/Basic/Linkage.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace cir; +using namespace clang; + +namespace { +class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { + /// All the vtables which have been defined. + llvm::DenseMap VTables; + +protected: + bool UseARMMethodPtrABI; + bool UseARMGuardVarABI; + bool Use32BitVTableOffsetABI; + + ItaniumMangleContext &getMangleContext() { + return cast(cir::CIRGenCXXABI::getMangleContext()); + } + + bool isVTableHidden(const CXXRecordDecl *RD) const { + const auto &VtableLayout = + CGM.getItaniumVTableContext().getVTableLayout(RD); + + for (const auto &VtableComponent : VtableLayout.vtable_components()) { + if (VtableComponent.isRTTIKind()) { + const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); + if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) + return true; + } else if (VtableComponent.isUsedFunctionPointerKind()) { + const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); + if (Method->getVisibility() == Visibility::HiddenVisibility && + !Method->isDefined()) + return true; + } + } + return false; + } + + bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { + const auto &VtableLayout = + CGM.getItaniumVTableContext().getVTableLayout(RD); + + for (const auto &VtableComponent : VtableLayout.vtable_components()) { + // Skip empty slot. + if (!VtableComponent.isUsedFunctionPointerKind()) + continue; + + const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); + if (!Method->getCanonicalDecl()->isInlined()) + continue; + + StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); + auto *op = CGM.getGlobalValue(Name); + if (auto globalOp = dyn_cast_or_null(op)) + llvm_unreachable("NYI"); + + if (auto funcOp = dyn_cast_or_null(op)) { + // This checks if virtual inline function has already been emitted. + // Note that it is possible that this inline function would be emitted + // after trying to emit vtable speculatively. 
Because of this we do + // an extra pass after emitting all deferred vtables to find and emit + // these vtables opportunistically. + if (!funcOp || funcOp.isDeclaration()) + return true; + } + } + return false; + } + +public: + CIRGenItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, + bool UseARMGuardVarABI = false) + : CIRGenCXXABI(CGM), UseARMMethodPtrABI{UseARMMethodPtrABI}, + UseARMGuardVarABI{UseARMGuardVarABI}, Use32BitVTableOffsetABI{false} { + assert(!UseARMMethodPtrABI && "NYI"); + assert(!UseARMGuardVarABI && "NYI"); + } + AddedStructorArgs getImplicitConstructorArgs(CIRGenFunction &CGF, + const CXXConstructorDecl *D, + CXXCtorType Type, + bool ForVirtualBase, + bool Delegating) override; + + bool NeedsVTTParameter(GlobalDecl GD) override; + + RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const override { + // If C++ prohibits us from making a copy, pass by address. + if (!RD->canPassInRegisters()) + return RecordArgABI::Indirect; + else + return RecordArgABI::Default; + } + + bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + + AddedStructorArgCounts + buildStructorSignature(GlobalDecl GD, + llvm::SmallVectorImpl &ArgTys) override; + + bool isThisCompleteObject(GlobalDecl GD) const override { + // The Itanium ABI has separate complete-object vs. base-object variants of + // both constructors and destructors. + if (isa(GD.getDecl())) { + llvm_unreachable("NYI"); + } + if (isa(GD.getDecl())) { + switch (GD.getCtorType()) { + case Ctor_Complete: + return true; + + case Ctor_Base: + return false; + + case Ctor_CopyingClosure: + case Ctor_DefaultClosure: + llvm_unreachable("closure ctors in Itanium ABI?"); + + case Ctor_Comdat: + llvm_unreachable("emitting ctor comdat as function?"); + } + llvm_unreachable("bad dtor kind"); + } + + // No other kinds. 
+ return false; + } + + void buildInstanceFunctionProlog(CIRGenFunction &CGF) override; + + void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, + FunctionArgList &Params) override; + + mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, + bool Delegating) override; + void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; + void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; + void buildCXXStructor(clang::GlobalDecl GD) override; + void buildDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD, + CXXDtorType Type, bool ForVirtualBase, + bool Delegating, Address This, + QualType ThisTy) override; + virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; + virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; + CatchTypeInfo + getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, + QualType CatchHandlerType) override { + auto rtti = + dyn_cast(getAddrOfRTTIDescriptor(loc, Ty)); + assert(rtti && "expected GlobalViewAttr"); + return CatchTypeInfo{rtti, 0}; + } + + void emitBeginCatch(CIRGenFunction &CGF, const CXXCatchStmt *C) override; + + bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; + mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) override; + CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF, GlobalDecl GD, + Address This, mlir::Type Ty, + SourceLocation Loc) override; + mlir::Value getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) override; + bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, + CIRGenFunction::VPtr Vptr) override; + bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; + mlir::Value getVTableAddressPointInStructor( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase) override; + void emitVTableDefinitions(CIRGenVTables &CGVT, + const CXXRecordDecl *RD) override; + void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; + mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty) override; + bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, + CXXDtorType DT) const override { + // Itanium does not emit any destructor variant as an inline thunk. + // Delegating may occur as an optimization, but all variants are either + // emitted with external linkage or as linkonce if they are inline and used. + return false; + } + + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. + bool mayNeedDestruction(const VarDecl *VD) const { + if (VD->needsDestruction(getContext())) + return true; + + // If the variable has an incomplete class type (or array thereof), it + // might need destruction. + const Type *T = VD->getType()->getBaseElementTypeUnsafe(); + if (T->getAs() && T->isIncompleteType()) + return true; + + return false; + } + + /// Determine whether we will definitely emit this variable with a constant + /// initializer, either because the language semantics demand it or because + /// we know that the initializer is a constant. + /// For weak definitions, any initializer available in the current translation + /// is not necessarily reflective of the initializer used; such initializers + /// are ignored unless if InspectInitForWeakDef is true. + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. 
+ bool + isEmittedWithConstantInitializer(const VarDecl *VD, + bool InspectInitForWeakDef = false) const { + VD = VD->getMostRecentDecl(); + if (VD->hasAttr()) + return true; + + // All later checks examine the initializer specified on the variable. If + // the variable is weak, such examination would not be correct. + if (!InspectInitForWeakDef && + (VD->isWeak() || VD->hasAttr())) + return false; + + const VarDecl *InitDecl = VD->getInitializingDeclaration(); + if (!InitDecl) + return false; + + // If there's no initializer to run, this is constant initialization. + if (!InitDecl->hasInit()) + return true; + + // If we have the only definition, we don't need a thread wrapper if we + // will emit the value as a constant. + if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD))) + return !mayNeedDestruction(VD) && InitDecl->evaluateValue(); + + // Otherwise, we need a thread wrapper unless we know that every + // translation unit will emit the value as a constant. We rely on the + // variable being constant-initialized in every translation unit if it's + // constant-initialized in any translation unit, which isn't actually + // guaranteed by the standard but is necessary for sanity. + return InitDecl->hasConstantInitialization(); + } + + // TODO(cir): seems like could be shared between LLVM IR and CIR codegen. + bool usesThreadWrapperFunction(const VarDecl *VD) const override { + return !isEmittedWithConstantInitializer(VD) || mayNeedDestruction(VD); + } + + bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { + return true; + } + + size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, + FunctionArgList &Args) const override { + assert(!Args.empty() && "expected the arglist to not be empty!"); + return Args.size() - 1; + } + + void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; + + // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into + // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast` + // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime + // functions. So during CIRGen we don't need the `emitDynamicCastCall` + // function that clang CodeGen has. + + mlir::cir::DynamicCastInfoAttr + buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy) override; + + mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, + Address Value, + QualType SrcRecordTy) override; + + /**************************** RTTI Uniqueness ******************************/ +protected: + /// Returns true if the ABI requires RTTI type_info objects to be unique + /// across a program. + virtual bool shouldRTTIBeUnique() const { return true; } + +public: + /// What sort of unique-RTTI behavior should we use? + enum RTTIUniquenessKind { + /// We are guaranteeing, or need to guarantee, that the RTTI string + /// is unique. + RUK_Unique, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// can demote to hidden visibility but must use string comparisons. + RUK_NonUniqueHidden, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// have to use string comparisons, but we also have to emit it with + /// non-hidden visibility. + RUK_NonUniqueVisible + }; + + /// Return the required visibility status for the given type and linkage in + /// the current ABI. 
+  RTTIUniquenessKind
+  classifyRTTIUniqueness(QualType CanTy,
+                         mlir::cir::GlobalLinkageKind Linkage) const;
+  friend class CIRGenItaniumRTTIBuilder;
+};
+} // namespace
+
+CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs(
+    CIRGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+    bool ForVirtualBase, bool Delegating) {
+  assert(!NeedsVTTParameter(GlobalDecl(D, Type)) && "VTT NYI");
+
+  return {};
+}
+
+/// Return whether the given global decl needs a VTT parameter, which it does
+/// if it's a base constructor or destructor with virtual bases.
+bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
+  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // We don't have any virtual bases, just return early.
+  if (!MD->getParent()->getNumVBases())
+    return false;
+
+  // Check if we have a base constructor.
+  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+    return true;
+
+  // Check if we have a base destructor.
+  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+    llvm_unreachable("NYI");
+
+  return false;
+}
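+
+// Illustrative example of when NeedsVTTParameter fires. For the hypothetical
+// hierarchy below, B's base-object constructor (the Itanium C2 variant)
+// takes a VTT parameter because B has a virtual base; the complete-object
+// constructor (C1) does not.
+//
+// \code
+//   struct A { int a; };
+//   struct B : virtual A { B() {} };
+//   // _ZN1BC1Ev: complete-object ctor, no VTT parameter.
+//   // _ZN1BC2Ev: base-object ctor, takes an extra 'void **' VTT argument.
+// \endcode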
+
+CIRGenCXXABI *cir::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) {
+  switch (CGM.getASTContext().getCXXABIKind()) {
+  case TargetCXXABI::GenericItanium:
+    assert(CGM.getASTContext().getTargetInfo().getTriple().getArch() !=
+               llvm::Triple::le32 &&
+           "le32 NYI");
+    LLVM_FALLTHROUGH;
+  case TargetCXXABI::GenericAArch64:
+  case TargetCXXABI::AppleARM64:
+    // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits
+    // from ARMCXXABI. We'll have to follow suit.
+    return new CIRGenItaniumCXXABI(CGM);
+
+  default:
+    llvm_unreachable("bad or NYI ABI kind");
+  }
+}
+
+bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const {
+  auto *RD = FI.getReturnType()->getAsCXXRecordDecl();
+  assert(!RD && "RecordDecl return types NYI");
+  return false;
+}
+
+CIRGenCXXABI::AddedStructorArgCounts
+CIRGenItaniumCXXABI::buildStructorSignature(
+    GlobalDecl GD, llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+  auto &Context = getContext();
+
+  // All parameters are already in place except VTT, which goes after 'this'.
+  // These are clang types, so we don't need to worry about sret yet.
+
+  // Check if we need to add a VTT parameter (which has type void **).
+  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
+                                             : GD.getDtorType() == Dtor_Base) &&
+      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
+    llvm_unreachable("NYI");
+    (void)Context;
+  }
+
+  return AddedStructorArgCounts{};
+}
+
+// Find out how to cirgen the complete destructor and constructor
+namespace {
+enum class StructorCIRGen { Emit, RAUW, Alias, COMDAT };
+}
+
+static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM,
+                                     const CXXMethodDecl *MD) {
+  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
+    return StructorCIRGen::Emit;
+
+  // The complete and base structors are not equivalent if there are any
+  // virtual bases, so emit separate functions.
+  if (MD->getParent()->getNumVBases())
+    return StructorCIRGen::Emit;
+
+  GlobalDecl AliasDecl;
+  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+    AliasDecl = GlobalDecl(DD, Dtor_Complete);
+  } else {
+    const auto *CD = cast<CXXConstructorDecl>(MD);
+    AliasDecl = GlobalDecl(CD, Ctor_Complete);
+  }
+  auto Linkage = CGM.getFunctionLinkage(AliasDecl);
+  (void)Linkage;
+
+  if (mlir::cir::isDiscardableIfUnused(Linkage))
+    return StructorCIRGen::RAUW;
+
+  // FIXME: Should we allow available_externally aliases?
+  if (!mlir::cir::isValidLinkage(Linkage))
+    return StructorCIRGen::RAUW;
+
+  if (mlir::cir::isWeakForLinker(Linkage)) {
+    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
+    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
+        CGM.getTarget().getTriple().isOSBinFormatWasm())
+      return StructorCIRGen::COMDAT;
+    return StructorCIRGen::Emit;
+  }
+
+  return StructorCIRGen::Alias;
+}
+
+static void emitConstructorDestructorAlias(CIRGenModule &CGM,
+                                           GlobalDecl AliasDecl,
+                                           GlobalDecl TargetDecl) {
+  auto Linkage = CGM.getFunctionLinkage(AliasDecl);
+
+  // Does this function alias already exist?
+  StringRef MangledName = CGM.getMangledName(AliasDecl);
+  auto Entry =
+      dyn_cast_or_null<mlir::cir::FuncOp>(CGM.getGlobalValue(MangledName));
+  if (Entry && !Entry.isDeclaration())
+    return;
+
+  // Retrieve aliasee info.
+  auto Aliasee =
+      dyn_cast_or_null<mlir::cir::FuncOp>(CGM.GetAddrOfGlobal(TargetDecl));
+  assert(Aliasee && "expected cir.func");
+
+  // Populate actual alias.
+  CGM.buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage);
+}
+
+void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) {
+  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
+
+  StructorCIRGen CIRGenType = getCIRGenToUse(CGM, MD);
+
+  if (CD ? GD.getCtorType() == Ctor_Complete
+         : GD.getDtorType() == Dtor_Complete) {
+    GlobalDecl BaseDecl;
+    if (CD)
+      BaseDecl = GD.getWithCtorType(Ctor_Base);
+    else
+      BaseDecl = GD.getWithDtorType(Dtor_Base);
+
+    if (CIRGenType == StructorCIRGen::Alias ||
+        CIRGenType == StructorCIRGen::COMDAT) {
+      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
+      return;
+    }
+
+    if (CIRGenType == StructorCIRGen::RAUW) {
+      StringRef MangledName = CGM.getMangledName(GD);
+      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
+      CGM.addReplacement(MangledName, Aliasee);
+      return;
+    }
+  }
+
+  // The base destructor is equivalent to the base destructor of its base
+  // class if there is exactly one non-virtual base class with a non-trivial
+  // destructor, there are no fields with a non-trivial destructor, and the
+  // body of the destructor is trivial.
+  if (DD && GD.getDtorType() == Dtor_Base &&
+      CIRGenType != StructorCIRGen::COMDAT &&
+      !CGM.tryEmitBaseDestructorAsAlias(DD))
+    return;
+
+  // FIXME: The deleting destructor is equivalent to the selected operator
+  // delete if:
+  //  * either the delete is a destroying operator delete or the destructor
+  //    would be trivial if it weren't virtual.
+  //  * the conversion from the 'this' parameter to the first parameter of the
+  //    destructor is equivalent to a bitcast,
+  //  * the destructor does not have an implicit "this" return, and
+  //  * the operator delete has the same calling convention and CIR function
+  //    type as the destructor.
+  // In such cases we should try to emit the deleting dtor as an alias to the
+  // selected 'operator delete'.
+
+  auto Fn = CGM.codegenCXXStructor(GD);
+
+  if (CIRGenType == StructorCIRGen::COMDAT) {
+    llvm_unreachable("NYI");
+  } else {
+    CGM.maybeSetTrivialComdat(*MD, Fn);
+  }
+}
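+
+// Illustrative example of the aliasing decision above: for a class with no
+// virtual bases, the complete-object (C1) and base-object (C2) constructors
+// are identical, so when ctor/dtor aliases are enabled one can be emitted as
+// an alias of the other rather than as a duplicated body.
+//
+// \code
+//   struct S { S(); };
+//   S::S() {}
+//   // _ZN1SC1Ev (complete) may be emitted as an alias of _ZN1SC2Ev (base).
+// \endcode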
+
+void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF,
+                                                    QualType &ResTy,
+                                                    FunctionArgList &Params) {
+  const auto *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
+
+  // Check if we need a VTT parameter as well.
+  if (NeedsVTTParameter(CGF.CurGD)) {
+    llvm_unreachable("NYI");
+  }
+}
+
+mlir::Value CIRGenCXXABI::loadIncomingCXXThis(CIRGenFunction &CGF) {
+  return CGF.createLoad(getThisDecl(CGF), "this");
+}
+
+void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF,
+                                      mlir::Value ThisPtr) {
+  /// Initialize the 'this' slot.
+  assert(getThisDecl(CGF) && "no 'this' variable for function");
+  CGF.CXXABIThisValue = ThisPtr;
+}
+
+void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(CIRGenFunction &CGF) {
+  // Naked functions have no prolog.
+  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
+    llvm_unreachable("NYI");
+
+  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
+  /// adjustments are required, because they are all handled by thunks.
+  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
+
+  /// Initialize the 'vtt' slot if needed.
+  if (getStructorImplicitParamDecl(CGF)) {
+    llvm_unreachable("NYI");
+  }
+
+  /// If this is a function that the ABI specifies returns 'this', initialize
+  /// the return slot to 'this' at the start of the function.
+  ///
+  /// Unlike the setting of return types, this is done within the ABI
+  /// implementation instead of by clients of CIRGenCXXABI because:
+  /// 1) getThisValue is currently protected
+  /// 2) in theory, an ABI could implement 'this' returns some other way;
+  ///    HasThisReturn only specifies a contract, not the implementation
+  if (HasThisReturn(CGF.CurGD))
+    llvm_unreachable("NYI");
+}
+
+void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) {
+  // Just make sure we're in sync with TargetCXXABI.
+  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
+
+  // The constructor used for constructing this as a base class;
+  // ignores virtual bases.
+  CGM.buildGlobal(GlobalDecl(D, Ctor_Base));
+
+  // The constructor used for constructing this as a complete class;
+  // constructs the virtual bases, then calls the base constructor.
+  if (!D->getParent()->isAbstract()) {
+    // We don't need to emit the complete ctor if the class is abstract.
+    CGM.buildGlobal(GlobalDecl(D, Ctor_Complete));
+  }
+}
+
+void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) {
+  // The destructor used for destructing this as a base class; ignores
+  // virtual bases.
+  CGM.buildGlobal(GlobalDecl(D, Dtor_Base));
+
+  // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destructs any virtual bases.
+  CGM.buildGlobal(GlobalDecl(D, Dtor_Complete));
+
+  // The destructor in a virtual table is always a 'deleting'
+  // destructor, which calls the complete destructor and then uses the
+  // appropriate operator delete.
+  if (D->isVirtual())
+    CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting));
+}
+
+namespace {
+/// From traditional LLVM, useful info for LLVM lowering support:
+/// A cleanup to call __cxa_end_catch. In many cases, the caught
+/// exception type lets us state definitively that the thrown exception
+/// type does not have a destructor. In particular:
+/// - Catch-alls tell us nothing, so we have to conservatively
+///   assume that the thrown exception might have a destructor.
+/// - Catches by reference behave according to their base types.
+/// - Catches of non-record types will only trigger for exceptions
+///   of non-record types, which never have destructors.
+/// - Catches of record types can trigger for arbitrary subclasses +/// of the caught type, so we have to assume the actual thrown +/// exception type might have a throwing destructor, even if the +/// caught type's destructor is trivial or nothrow. +struct CallEndCatch final : EHScopeStack::Cleanup { + CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} + bool MightThrow; + + void Emit(CIRGenFunction &CGF, Flags flags) override { + if (!MightThrow) { + // Traditional LLVM codegen would emit a call to __cxa_end_catch + // here. For CIR, just let it pass since the cleanup is going + // to be emitted on a later pass when lowering the catch region. + // CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); + CGF.getBuilder().create(*CGF.currSrcLoc); + return; + } + + // Traditional LLVM codegen would emit a call to __cxa_end_catch + // here. For CIR, just let it pass since the cleanup is going + // to be emitted on a later pass when lowering the catch region. + // CGF.EmitRuntimeCallOrTryCall(getEndCatchFn(CGF.CGM)); + CGF.getBuilder().create(*CGF.currSrcLoc); + } +}; +} // namespace + +/// From traditional LLVM codegen, useful info for LLVM lowering support: +/// Emits a call to __cxa_begin_catch and enters a cleanup to call +/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume +/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch +/// call can be marked as nounwind even if EndMightThrow is true. +/// +/// \param EndMightThrow - true if __cxa_end_catch might throw +static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Value Exn, + mlir::Type ParamTy, bool EndMightThrow) { + // llvm::CallInst *call = + // CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); + auto catchParam = CGF.getBuilder().create( + Exn.getLoc(), ParamTy, Exn); + + CGF.EHStack.pushCleanup( + NormalAndEHCleanup, + EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor); + + return catchParam; +} + +/// A "special initializer" callback for initializing a catch +/// parameter during catch initialization. +static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, + Address ParamAddr, SourceLocation Loc) { + // Load the exception from where the landing pad saved it. + auto Exn = CGF.currLexScope->getExceptionInfo().addr; + + CanQualType CatchType = + CGF.CGM.getASTContext().getCanonicalType(CatchParam.getType()); + auto CIRCatchTy = CGF.convertTypeForMem(CatchType); + + // If we're catching by reference, we can just cast the object + // pointer to the appropriate pointer. + if (isa(CatchType)) { + llvm_unreachable("NYI"); + return; + } + + // Scalars and complexes. + TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); + if (TEK != TEK_Aggregate) { + // Notes for LLVM lowering: + // If the catch type is a pointer type, __cxa_begin_catch returns + // the pointer by value. 
+ if (CatchType->hasPointerRepresentation()) { + auto catchParam = CallBeginCatch(CGF, Exn, CIRCatchTy, false); + + switch (CatchType.getQualifiers().getObjCLifetime()) { + case Qualifiers::OCL_Strong: + llvm_unreachable("NYI"); + // arc retain non block: + assert(!UnimplementedFeature::ARC()); + [[fallthrough]]; + + case Qualifiers::OCL_None: + case Qualifiers::OCL_ExplicitNone: + case Qualifiers::OCL_Autoreleasing: + CGF.getBuilder().createStore(Exn.getLoc(), catchParam, ParamAddr); + return; + + case Qualifiers::OCL_Weak: + llvm_unreachable("NYI"); + // arc init weak: + assert(!UnimplementedFeature::ARC()); + return; + } + llvm_unreachable("bad ownership qualifier!"); + } + + // Otherwise, it returns a pointer into the exception object. + auto catchParam = CallBeginCatch( + CGF, Exn, CGF.getBuilder().getPointerTo(CIRCatchTy), false); + LValue srcLV = CGF.MakeNaturalAlignAddrLValue(catchParam, CatchType); + LValue destLV = CGF.makeAddrLValue(ParamAddr, CatchType); + switch (TEK) { + case TEK_Complex: + llvm_unreachable("NYI"); + return; + case TEK_Scalar: { + auto exnLoad = CGF.buildLoadOfScalar(srcLV, catchParam.getLoc()); + CGF.buildStoreOfScalar(exnLoad, destLV, /*init*/ true); + return; + } + case TEK_Aggregate: + llvm_unreachable("evaluation kind filtered out!"); + } + llvm_unreachable("bad evaluation kind"); + } + + // Check for a copy expression. If we don't have a copy expression, + // that means a trivial copy is okay. + const Expr *copyExpr = CatchParam.getInit(); + if (!copyExpr) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +/// Begins a catch statement by initializing the catch variable and +/// calling __cxa_begin_catch. +void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, + const CXXCatchStmt *S) { + // Notes for LLVM lowering: + // We have to be very careful with the ordering of cleanups here: + // C++ [except.throw]p4: + // The destruction [of the exception temporary] occurs + // immediately after the destruction of the object declared in + // the exception-declaration in the handler. + // + // So the precise ordering is: + // 1. Construct catch variable. + // 2. __cxa_begin_catch + // 3. Enter __cxa_end_catch cleanup + // 4. Enter dtor cleanup + // + // We do this by using a slightly abnormal initialization process. 
+  // Delegation sequence:
+  // - ExitCXXTryStmt opens a RunCleanupsScope
+  // - EmitAutoVarAlloca creates the variable and debug info
+  // - InitCatchParam initializes the variable from the exception
+  // - CallBeginCatch calls __cxa_begin_catch
+  // - CallBeginCatch enters the __cxa_end_catch cleanup
+  // - EmitAutoVarCleanups enters the variable destructor cleanup
+  // - EmitCXXTryStmt emits the code for the catch body
+  // - EmitCXXTryStmt closes the RunCleanupsScope
+
+  VarDecl *CatchParam = S->getExceptionDecl();
+  if (!CatchParam) {
+    auto Exn = CGF.currLexScope->getExceptionInfo().addr;
+    CallBeginCatch(CGF, Exn, CGF.getBuilder().getVoidPtrTy(), true);
+    return;
+  }
+
+  auto getCatchParamAllocaIP = [&]() {
+    auto currIns = CGF.getBuilder().saveInsertionPoint();
+    auto currParent = currIns.getBlock()->getParentOp();
+    mlir::Operation *scopeLikeOp =
+        currParent->getParentOfType<mlir::cir::ScopeOp>();
+    if (!scopeLikeOp)
+      scopeLikeOp = currParent->getParentOfType<mlir::cir::FuncOp>();
+    assert(scopeLikeOp && "unknown outermost scope-like parent");
+    assert(scopeLikeOp->getNumRegions() == 1 && "expected single region");
+
+    auto *insertBlock = &scopeLikeOp->getRegion(0).getBlocks().back();
+    return CGF.getBuilder().getBestAllocaInsertPoint(insertBlock);
+  };
+
+  // Emit the local. Make sure the allocas supersede the current scope, since
+  // these are going to be consumed by `cir.catch`, which is not within the
+  // current scope.
+  auto var = CGF.buildAutoVarAlloca(*CatchParam, getCatchParamAllocaIP());
+  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
+  // FIXME(cir): double check cleanups here are happening in the right blocks.
+  CGF.buildAutoVarCleanups(var);
+}
+
+mlir::cir::GlobalOp
+CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
+                                     CharUnits VPtrOffset) {
+  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
+  mlir::cir::GlobalOp &vtable = VTables[RD];
+  if (vtable)
+    return vtable;
+
+  // Queue up this vtable for possible deferred emission.
+  CGM.addDeferredVTable(RD);
+
+  SmallString<256> Name;
+  llvm::raw_svector_ostream Out(Name);
+  getMangleContext().mangleCXXVTable(RD, Out);
+
+  const VTableLayout &VTLayout =
+      CGM.getItaniumVTableContext().getVTableLayout(RD);
+  auto VTableType = CGM.getVTables().getVTableType(VTLayout);
+
+  // Use pointer alignment for the vtable. Otherwise we would align them based
+  // on the size of the initializer which doesn't make sense as only single
+  // values are read.
+  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
+                        ? 32
+                        : CGM.getTarget().getPointerAlign(LangAS::Default);
+
+  vtable = CGM.createOrReplaceCXXRuntimeVariable(
+      CGM.getLoc(RD->getSourceRange()), Name, VTableType,
+      mlir::cir::GlobalLinkageKind::ExternalLinkage,
+      getContext().toCharUnitsFromBits(PAlign));
+  // LLVM codegen handles unnamedAddr
+  assert(!UnimplementedFeature::unnamedAddr());
+
+  // In MS C++ if you have a class with virtual functions in which you are
+  // using selective member import/export, then all virtual functions must be
+  // exported unless they are inline, otherwise a link error will result. To
+  // match this behavior, for such classes, we dllimport the vtable if it is
+  // defined externally and all the non-inline virtual methods are marked
+  // dllimport, and we dllexport the vtable if it is defined in this TU and
+  // all the non-inline virtual methods are marked dllexport.
+ if (CGM.getTarget().hasPS4DLLImportExport()) + llvm_unreachable("NYI"); + + CGM.setGVProperties(vtable, RD); + return vtable; +} + +CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( + CIRGenFunction &CGF, GlobalDecl GD, Address This, mlir::Type Ty, + SourceLocation Loc) { + auto loc = CGF.getLoc(Loc); + auto TyPtr = CGF.getBuilder().getPointerTo(Ty); + auto *MethodDecl = cast(GD.getDecl()); + auto VTable = CGF.getVTablePtr( + loc, This, CGF.getBuilder().getPointerTo(TyPtr), MethodDecl->getParent()); + + uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); + mlir::Value VFunc{}; + if (CGF.shouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { + llvm_unreachable("NYI"); + } else { + CGF.buildTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); + + mlir::Value VFuncLoad; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + llvm_unreachable("NYI"); + } else { + VTable = CGF.getBuilder().createBitcast( + loc, VTable, CGF.getBuilder().getPointerTo(TyPtr)); + auto VTableSlotPtr = + CGF.getBuilder().create( + loc, CGF.getBuilder().getPointerTo(TyPtr), + ::mlir::FlatSymbolRefAttr{}, VTable, + /*vtable_index=*/0, VTableIndex); + VFuncLoad = CGF.getBuilder().createAlignedLoad(loc, TyPtr, VTableSlotPtr, + CGF.getPointerAlign()); + } + + // Add !invariant.load md to virtual function load to indicate that + // function didn't change inside vtable. + // It's safe to add it without -fstrict-vtable-pointers, but it would not + // help in devirtualization because it will only matter if we will have 2 + // the same virtual function loads from the same vtable load, which won't + // happen without enabled devirtualization with -fstrict-vtable-pointers. + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCodeGenOpts().StrictVTablePointers) { + llvm_unreachable("NYI"); + } + VFunc = VFuncLoad; + } + + CIRGenCallee Callee(GD, VFunc.getDefiningOp()); + return Callee; +} + +mlir::Value +CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) { + auto vtable = getAddrOfVTable(VTableClass, CharUnits()); + + // Find the appropriate vtable within the vtable group, and the address point + // within that vtable. + VTableLayout::AddressPointLocation AddressPoint = + CGM.getItaniumVTableContext() + .getVTableLayout(VTableClass) + .getAddressPoint(Base); + + auto &builder = CGM.getBuilder(); + auto vtablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/false); + + return builder.create( + CGM.getLoc(VTableClass->getSourceRange()), vtablePtrTy, + mlir::FlatSymbolRefAttr::get(vtable.getSymNameAttr()), mlir::Value{}, + AddressPoint.VTableIndex, AddressPoint.AddressPointIndex); +} + +mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase) { + + if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && + NeedsVTTParameter(CGF.CurGD)) { + llvm_unreachable("NYI"); + } + return getVTableAddressPoint(Base, VTableClass); +} + +bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( + CIRGenFunction &CGF, CIRGenFunction::VPtr Vptr) { + if (Vptr.NearestVBase == nullptr) + return false; + return NeedsVTTParameter(CGF.CurGD); +} + +bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass( + const CXXRecordDecl *RD) const { + // We don't emit available_externally vtables if we are in -fapple-kext mode + // because kext mode does not permit devirtualization. 
+  if (CGM.getLangOpts().AppleKext)
+    return false;
+
+  // If the vtable is hidden then it is not safe to emit an available_externally
+  // copy of the vtable.
+  if (isVTableHidden(RD))
+    return false;
+
+  if (CGM.getCodeGenOpts().ForceEmitVTables)
+    return true;
+
+  // If there is no inline virtual function that has not yet been emitted, we
+  // are safe to emit an available_externally copy of the vtable.
+  // FIXME: we can still emit a copy of the vtable if we can emit definitions
+  // of the inline functions.
+  if (hasAnyUnusedVirtualInlineFunction(RD))
+    return false;
+
+  // For a class with virtual bases, we must also be able to speculatively
+  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
+  // the vtable" and "can emit the VTT". For a base subobject, this means we
+  // need to be able to emit non-virtual base vtables.
+  if (RD->getNumVBases()) {
+    for (const auto &B : RD->bases()) {
+      auto *BRD = B.getType()->getAsCXXRecordDecl();
+      assert(BRD && "no class for base specifier");
+      if (B.isVirtual() || !BRD->isDynamicClass())
+        continue;
+      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTable(
+    const CXXRecordDecl *RD) const {
+  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
+    return false;
+
+  // For a complete-object vtable (or more specifically, for the VTT), we need
+  // to be able to speculatively emit the vtables of all dynamic virtual bases.
+  for (const auto &B : RD->vbases()) {
+    auto *BRD = B.getType()->getAsCXXRecordDecl();
+    assert(BRD && "no class for base specifier");
+    if (!BRD->isDynamicClass())
+      continue;
+    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+      return false;
+  }
+
+  return true;
+}
+
+namespace {
+class CIRGenItaniumRTTIBuilder {
+  CIRGenModule &CGM;                 // Per-module state.
+  const CIRGenItaniumCXXABI &CXXABI; // Per-module state.
+
+  /// The fields of the RTTI descriptor currently being built.
+  SmallVector<mlir::Attribute, 16> Fields;
+
+  // Returns the mangled type name of the given type.
+  mlir::cir::GlobalOp GetAddrOfTypeName(mlir::Location loc, QualType Ty,
+                                        mlir::cir::GlobalLinkageKind Linkage);
+
+  /// Returns the constant for the RTTI descriptor of the given type.
+  mlir::Attribute GetAddrOfExternalRTTIDescriptor(mlir::Location loc,
+                                                  QualType Ty);
+
+  /// Build the vtable pointer for the given type.
+  void BuildVTablePointer(mlir::Location loc, const Type *Ty);
+
+  /// Build an abi::__si_class_type_info, used for single inheritance, according
+  /// to the Itanium C++ ABI, 2.9.5p6b.
+  void BuildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD);
+
+  /// Build an abi::__vmi_class_type_info, used for
+  /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+  void BuildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD);
+
+  // /// Build an abi::__pointer_type_info struct, used
+  // /// for pointer types.
+  // void BuildPointerTypeInfo(QualType PointeeTy);
+
+  // /// Build the appropriate kind of
+  // /// type_info for an object type.
+  // void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
+
+  // /// Build an abi::__pointer_to_member_type_info
+  // /// struct, used for member pointer types.
+  // void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+  CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &ABI, CIRGenModule &_CGM)
+      : CGM(_CGM), CXXABI(ABI) {}
+
+  // Pointer type info flags.
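+  // These mirror the __flags field of abi::__pointer_type_info. For
+  // illustration, the descriptor for `const volatile int *` carries
+  // __flags = PTI_Const | PTI_Volatile (0x3).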
+  enum {
+    /// PTI_Const - Type has const qualifier.
+    PTI_Const = 0x1,
+
+    /// PTI_Volatile - Type has volatile qualifier.
+    PTI_Volatile = 0x2,
+
+    /// PTI_Restrict - Type has restrict qualifier.
+    PTI_Restrict = 0x4,
+
+    /// PTI_Incomplete - Type is incomplete.
+    PTI_Incomplete = 0x8,
+
+    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+    /// (in pointer to member).
+    PTI_ContainingClassIncomplete = 0x10,
+
+    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
+    // PTI_TransactionSafe = 0x20,
+
+    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
+    PTI_Noexcept = 0x40,
+  };
+
+  // VMI type info flags.
+  enum {
+    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+    VMI_NonDiamondRepeat = 0x1,
+
+    /// VMI_DiamondShaped - Class is diamond shaped.
+    VMI_DiamondShaped = 0x2
+  };
+
+  // Base class type info flags.
+  enum {
+    /// BCTI_Virtual - Base class is virtual.
+    BCTI_Virtual = 0x1,
+
+    /// BCTI_Public - Base class is public.
+    BCTI_Public = 0x2
+  };
+
+  /// Build the RTTI type info struct for the given type, or
+  /// link to an existing RTTI descriptor if one already exists.
+  mlir::Attribute BuildTypeInfo(mlir::Location loc, QualType Ty);
+
+  /// Build the RTTI type info struct for the given type.
+  mlir::Attribute BuildTypeInfo(mlir::Location loc, QualType Ty,
+                                mlir::cir::GlobalLinkageKind Linkage,
+                                mlir::SymbolTable::Visibility Visibility);
+};
+} // namespace
+
+/// Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+  // Itanium C++ ABI 2.9.2:
+  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
+  //   the run-time support library. Specifically, the run-time support
+  //   library should contain type_info objects for the types X, X* and
+  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
+  //   long, unsigned long, long long, unsigned long long, float, double,
+  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
+  //   half-precision floating point types.
+  //
+  // GCC also emits RTTI for __int128.
+  // FIXME: We do not emit RTTI information for decimal types here.
+
+  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
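+  // For illustration: `typeid(int)` therefore binds to the runtime's
+  // pre-defined descriptor (mangled `_ZTIi`) instead of one emitted here:
+  //
+  //   const std::type_info &ti = typeid(int); // references _ZTIi from the
+  //                                           // C++ runtime library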
+  switch (Ty->getKind()) {
+  case BuiltinType::WasmExternRef:
+    llvm_unreachable("NYI");
+  case BuiltinType::Void:
+  case BuiltinType::NullPtr:
+  case BuiltinType::Bool:
+  case BuiltinType::WChar_S:
+  case BuiltinType::WChar_U:
+  case BuiltinType::Char_U:
+  case BuiltinType::Char_S:
+  case BuiltinType::UChar:
+  case BuiltinType::SChar:
+  case BuiltinType::Short:
+  case BuiltinType::UShort:
+  case BuiltinType::Int:
+  case BuiltinType::UInt:
+  case BuiltinType::Long:
+  case BuiltinType::ULong:
+  case BuiltinType::LongLong:
+  case BuiltinType::ULongLong:
+  case BuiltinType::Half:
+  case BuiltinType::Float:
+  case BuiltinType::Double:
+  case BuiltinType::LongDouble:
+  case BuiltinType::Float16:
+  case BuiltinType::Float128:
+  case BuiltinType::Ibm128:
+  case BuiltinType::Char8:
+  case BuiltinType::Char16:
+  case BuiltinType::Char32:
+  case BuiltinType::Int128:
+  case BuiltinType::UInt128:
+    return true;
+
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
+  case BuiltinType::Id:
+#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
+  case BuiltinType::OCLSampler:
+  case BuiltinType::OCLEvent:
+  case BuiltinType::OCLClkEvent:
+  case BuiltinType::OCLQueue:
+  case BuiltinType::OCLReserveID:
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AArch64SVEACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+  case BuiltinType::ShortAccum:
+  case BuiltinType::Accum:
+  case BuiltinType::LongAccum:
+  case BuiltinType::UShortAccum:
+  case BuiltinType::UAccum:
+  case BuiltinType::ULongAccum:
+  case BuiltinType::ShortFract:
+  case BuiltinType::Fract:
+  case BuiltinType::LongFract:
+  case BuiltinType::UShortFract:
+  case BuiltinType::UFract:
+  case BuiltinType::ULongFract:
+  case BuiltinType::SatShortAccum:
+  case BuiltinType::SatAccum:
+  case BuiltinType::SatLongAccum:
+  case BuiltinType::SatUShortAccum:
+  case BuiltinType::SatUAccum:
+  case BuiltinType::SatULongAccum:
+  case BuiltinType::SatShortFract:
+  case BuiltinType::SatFract:
+  case BuiltinType::SatLongFract:
+  case BuiltinType::SatUShortFract:
+  case BuiltinType::SatUFract:
+  case BuiltinType::SatULongFract:
+  case BuiltinType::BFloat16:
+    return false;
+
+  case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+    llvm_unreachable("asking for RTTI for a placeholder type!");
+
+  case BuiltinType::ObjCId:
+  case BuiltinType::ObjCClass:
+  case BuiltinType::ObjCSel:
+    llvm_unreachable("FIXME: Objective-C types are unsupported!");
+  }
+
+  llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+  QualType PointeeTy = PointerTy->getPointeeType();
+  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+  if (!BuiltinTy)
+    return false;
+
+  // Check the qualifiers.
+  Qualifiers Quals = PointeeTy.getQualifiers();
+  Quals.removeConst();
+
+  if (!Quals.empty())
+    return false;
+
+  return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// Returns whether the type information for the given type exists in the
+/// standard library.
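+/// For illustration: `int`, `int *`, and `const int *` are covered by the
+/// runtime, while `int **` or `volatile int *` are not and must be emitted.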
+/// TODO(cir): this can be unified with LLVM codegen
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+  // Type info for builtin types is defined in the standard library.
+  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+    return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+  // Type info for some pointer types to builtin types is defined in the
+  // standard library.
+  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+    return TypeInfoIsInStandardLibrary(PointerTy);
+
+  return false;
+}
+
+/// Returns whether the type information for
+/// the given type exists somewhere else, and therefore we should not emit the
+/// type information in this translation unit. Assumes that it is not a
+/// standard-library type.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool ShouldUseExternalRTTIDescriptor(CIRGenModule &CGM, QualType Ty) {
+  ASTContext &Context = CGM.getASTContext();
+
+  // If RTTI is disabled, assume it might be disabled in the
+  // translation unit that defines any potential key function, too.
+  if (!Context.getLangOpts().RTTI)
+    return false;
+
+  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+    if (!RD->hasDefinition())
+      return false;
+
+    if (!RD->isDynamicClass())
+      return false;
+
+    // FIXME: this may need to be reconsidered if the key function
+    // changes.
+    // N.B. We must always emit the RTTI data ourselves if there exists a key
+    // function.
+    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
+
+    // Don't import the RTTI but emit it locally.
+    if (CGM.getTriple().isWindowsGNUEnvironment())
+      return false;
+
+    if (CGM.getVTables().isVTableExternal(RD)) {
+      if (CGM.getTarget().hasPS4DLLImportExport())
+        return true;
+
+      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
+                 ? false
+                 : true;
+    }
+    if (IsDLLImport)
+      return true;
+  }
+
+  return false;
+}
+
+/// Returns whether the given record type is incomplete.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+  return !RecordTy->getDecl()->isCompleteDefinition();
+}
+
+/// Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+///   * The given type is an incomplete class type.
+///   * The given type is a pointer type whose pointee type contains an
+///     incomplete class type.
+///   * The given type is a member pointer type whose class is an incomplete
+///     class type.
+///   * The given type is a member pointer type whose pointee type contains an
+///     incomplete class type.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool ContainsIncompleteClassType(QualType Ty) {
+  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+    if (IsIncompleteClassType(RecordTy))
+      return true;
+  }
+
+  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+    return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+  if (const MemberPointerType *MemberPointerTy =
+          dyn_cast<MemberPointerType>(Ty)) {
+    // Check if the class type is incomplete.
+    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+    if (IsIncompleteClassType(ClassType))
+      return true;
+
+    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+  }
+
+  return false;
+}
+
+// Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
+// TODO(cir): this can be unified with LLVM codegen
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+  // Check the number of bases.
+  if (RD->getNumBases() != 1)
+    return false;
+
+  // Get the base.
+  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+  // Check that the base is not virtual.
+  if (Base->isVirtual())
+    return false;
+
+  // Check that the base is public.
+  if (Base->getAccessSpecifier() != AS_public)
+    return false;
+
+  // Check that the class is dynamic iff the base is.
+  auto *BaseDecl =
+      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+  if (!BaseDecl->isEmpty() &&
+      BaseDecl->isDynamicClass() != RD->isDynamicClass())
+    return false;
+
+  return true;
+}
+
+/// Return the linkage that the type info and type info name constants
+/// should have for the given type.
+static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM,
+                                                       QualType Ty) {
+  // Itanium C++ ABI 2.9.5p7:
+  //   In addition, it and all of the intermediate abi::__pointer_type_info
+  //   structs in the chain down to the abi::__class_type_info for the
+  //   incomplete class type must be prevented from resolving to the
+  //   corresponding type_info structs for the complete class type, possibly
+  //   by making them local static objects. Finally, a dummy class RTTI is
+  //   generated for the incomplete type that will not resolve to the final
+  //   complete class RTTI (because the latter need not exist), possibly by
+  //   making it a local static object.
+  if (ContainsIncompleteClassType(Ty))
+    return mlir::cir::GlobalLinkageKind::InternalLinkage;
+
+  switch (Ty->getLinkage()) {
+  case Linkage::None:
+  case Linkage::Internal:
+  case Linkage::UniqueExternal:
+    return mlir::cir::GlobalLinkageKind::InternalLinkage;
+
+  case Linkage::VisibleNone:
+  case Linkage::Module:
+  case Linkage::External:
+    // RTTI is not enabled, which means that this type info struct is going
+    // to be used for exception handling. Give it linkonce_odr linkage.
+    if (!CGM.getLangOpts().RTTI)
+      return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage;
+
+    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+      if (RD->hasAttr<WeakAttr>())
+        return mlir::cir::GlobalLinkageKind::WeakODRLinkage;
+      if (CGM.getTriple().isWindowsItaniumEnvironment())
+        if (RD->hasAttr<DLLImportAttr>() &&
+            ShouldUseExternalRTTIDescriptor(CGM, Ty))
+          return mlir::cir::GlobalLinkageKind::ExternalLinkage;
+      // MinGW always uses LinkOnceODRLinkage for type info.
+      if (RD->isDynamicClass() && !CGM.getASTContext()
+                                       .getTargetInfo()
+                                       .getTriple()
+                                       .isWindowsGNUEnvironment())
+        return CGM.getVTableLinkage(RD);
+    }
+
+    return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage;
+  case Linkage::Invalid:
+    llvm_unreachable("Invalid linkage!");
+  }
+
+  llvm_unreachable("Invalid linkage!");
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc,
+                                                        QualType Ty) {
+  // We want to operate on the canonical type.
+  Ty = Ty.getCanonicalType();
+
+  // Check if we've already emitted an RTTI descriptor for this type.
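+  // For illustration, for `struct S` the mangled descriptor name computed
+  // below is `_ZTI1S` (and the matching type name string global is `_ZTS1S`).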
+ SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); + + auto OldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); + + if (OldGV && !OldGV.isDeclaration()) { + assert(!OldGV.hasAvailableExternallyLinkage() && + "available_externally typeinfos not yet implemented"); + return CGM.getBuilder().getGlobalViewAttr(CGM.getBuilder().getUInt8PtrTy(), + OldGV); + } + + // Check if there is already an external RTTI descriptor for this type. + if (IsStandardLibraryRTTIDescriptor(Ty) || + ShouldUseExternalRTTIDescriptor(CGM, Ty)) + return GetAddrOfExternalRTTIDescriptor(loc, Ty); + + // Emit the standard library with external linkage. + auto Linkage = getTypeInfoLinkage(CGM, Ty); + + // Give the type_info object and name the formal visibility of the + // type itself. + assert(!UnimplementedFeature::hiddenVisibility()); + assert(!UnimplementedFeature::protectedVisibility()); + mlir::SymbolTable::Visibility symVisibility; + if (mlir::cir::isLocalLinkage(Linkage)) + // If the linkage is local, only default visibility makes sense. + symVisibility = mlir::SymbolTable::Visibility::Public; + else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) == + CIRGenItaniumCXXABI::RUK_NonUniqueHidden) + llvm_unreachable("NYI"); + else + symVisibility = CIRGenModule::getCIRVisibility(Ty->getVisibility()); + + assert(!UnimplementedFeature::setDLLStorageClass()); + return BuildTypeInfo(loc, Ty, Linkage, symVisibility); +} + +void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, + const Type *Ty) { + auto &builder = CGM.getBuilder(); + + // abi::__class_type_info. + static const char *const ClassTypeInfo = + "_ZTVN10__cxxabiv117__class_type_infoE"; + // abi::__si_class_type_info. + static const char *const SIClassTypeInfo = + "_ZTVN10__cxxabiv120__si_class_type_infoE"; + // abi::__vmi_class_type_info. + static const char *const VMIClassTypeInfo = + "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; + + const char *VTableName = nullptr; + + switch (Ty->getTypeClass()) { + case Type::ArrayParameter: + llvm_unreachable("NYI"); +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + llvm_unreachable("Pipe types shouldn't get here"); + + case Type::Builtin: + case Type::BitInt: + // GCC treats vector and complex types as fundamental types. + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::Atomic: + // FIXME: GCC treats block pointers as fundamental types?! + case Type::BlockPointer: + // abi::__fundamental_type_info. + VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + // abi::__array_type_info. + VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // abi::__function_type_info. 
+    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+    break;
+
+  case Type::Enum:
+    // abi::__enum_type_info.
+    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+    break;
+
+  case Type::Record: {
+    const CXXRecordDecl *RD =
+        cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+    if (!RD->hasDefinition() || !RD->getNumBases()) {
+      VTableName = ClassTypeInfo;
+    } else if (CanUseSingleInheritance(RD)) {
+      VTableName = SIClassTypeInfo;
+    } else {
+      VTableName = VMIClassTypeInfo;
+    }
+
+    break;
+  }
+
+  case Type::ObjCObject:
+    // Ignore protocol qualifiers.
+    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+    // Handle id and Class.
+    if (isa<BuiltinType>(Ty)) {
+      VTableName = ClassTypeInfo;
+      break;
+    }
+
+    assert(isa<ObjCInterfaceType>(Ty));
+    [[fallthrough]];
+
+  case Type::ObjCInterface:
+    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+      VTableName = SIClassTypeInfo;
+    } else {
+      VTableName = ClassTypeInfo;
+    }
+    break;
+
+  case Type::ObjCObjectPointer:
+  case Type::Pointer:
+    // abi::__pointer_type_info.
+    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+    break;
+
+  case Type::MemberPointer:
+    // abi::__pointer_to_member_type_info.
+    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+    break;
+  }
+
+  mlir::cir::GlobalOp VTable{};
+
+  // Check if the alias exists. If it doesn't, then get or create the global.
+  if (CGM.getItaniumVTableContext().isRelativeLayout())
+    llvm_unreachable("NYI");
+  if (!VTable) {
+    VTable = CGM.getOrInsertGlobal(loc, VTableName,
+                                   CGM.getBuilder().getUInt8PtrTy());
+  }
+
+  if (UnimplementedFeature::setDSOLocal())
+    llvm_unreachable("NYI");
+
+  // The vtable address point is 2.
+  mlir::Attribute field{};
+  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+    llvm_unreachable("NYI");
+  } else {
+    SmallVector<mlir::Attribute> offsets{
+        CGM.getBuilder().getI32IntegerAttr(2)};
+    auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets);
+    field = CGM.getBuilder().getGlobalViewAttr(CGM.getBuilder().getUInt8PtrTy(),
+                                               VTable, indices);
+  }
+
+  assert(field && "expected attribute");
+  Fields.push_back(field);
+}
+
+mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName(
+    mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage) {
+  auto &builder = CGM.getBuilder();
+  SmallString<256> Name;
+  llvm::raw_svector_ostream Out(Name);
+  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+
+  // We know that the mangled name of the type starts at index 4 of the
+  // mangled name of the typename, so we can just index into it in order to
+  // get the mangled name of the type.
+  auto Init = builder.getString(
+      Name.substr(4), CGM.getTypes().ConvertType(CGM.getASTContext().CharTy));
+  auto Align =
+      CGM.getASTContext().getTypeAlignInChars(CGM.getASTContext().CharTy);
+
+  // builder.getString can return a #cir.zero if the string given to it only
+  // contains null bytes. However, type names cannot be full of null bytes.
+  // So casting Init to ConstArrayAttr should be safe.
+  auto InitStr = cast<mlir::cir::ConstArrayAttr>(Init);
+
+  auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, InitStr.getType(),
+                                                  Linkage, Align);
+  CIRGenModule::setInitializer(GV, Init);
+  return GV;
+}
+
+/// Build an abi::__si_class_type_info, used for single inheritance, according
+/// to the Itanium C++ ABI, 2.9.5p6b.
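+/// For illustration, for `struct D : B {};` the descriptor is laid out roughly
+/// as:
+///
+///   { vptr of __si_class_type_info, "1D" (type name), &_ZTI1B (__base_type) }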
+void CIRGenItaniumRTTIBuilder::BuildSIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *RD) { + // Itanium C++ ABI 2.9.5p6b: + // It adds to abi::__class_type_info a single member pointing to the + // type_info structure for the base type, + auto BaseTypeInfo = CIRGenItaniumRTTIBuilder(CXXABI, CGM) + .BuildTypeInfo(loc, RD->bases_begin()->getType()); + Fields.push_back(BaseTypeInfo); +} + +namespace { +/// Contains virtual and non-virtual bases seen when traversing a class +/// hierarchy. +struct SeenBases { + llvm::SmallPtrSet NonVirtualBases; + llvm::SmallPtrSet VirtualBases; +}; +} // namespace + +/// Compute the value of the flags member in abi::__vmi_class_type_info. +/// +static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, + SeenBases &Bases) { + + unsigned Flags = 0; + + auto *BaseDecl = + cast(Base->getType()->castAs()->getDecl()); + + if (Base->isVirtual()) { + // Mark the virtual base as seen. + if (!Bases.VirtualBases.insert(BaseDecl).second) { + // If this virtual base has been seen before, then the class is diamond + // shaped. + Flags |= CIRGenItaniumRTTIBuilder::VMI_DiamondShaped; + } else { + if (Bases.NonVirtualBases.count(BaseDecl)) + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } + } else { + // Mark the non-virtual base as seen. + if (!Bases.NonVirtualBases.insert(BaseDecl).second) { + // If this non-virtual base has been seen before, then the class has non- + // diamond shaped repeated inheritance. + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } else { + if (Bases.VirtualBases.count(BaseDecl)) + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } + } + + // Walk all bases. + for (const auto &I : BaseDecl->bases()) + Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); + + return Flags; +} + +static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { + unsigned Flags = 0; + SeenBases Bases; + + // Walk all bases. + for (const auto &I : RD->bases()) + Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); + + return Flags; +} + +/// Build an abi::__vmi_class_type_info, used for +/// classes with bases that do not satisfy the abi::__si_class_type_info +/// constraints, according to the Itanium C++ ABI, 2.9.5p5c. +void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *RD) { + auto UnsignedIntLTy = + CGM.getTypes().ConvertType(CGM.getASTContext().UnsignedIntTy); + // Itanium C++ ABI 2.9.5p6c: + // __flags is a word with flags describing details about the class + // structure, which may be referenced by using the __flags_masks + // enumeration. These flags refer to both direct and indirect bases. + unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); + Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, Flags)); + + // Itanium C++ ABI 2.9.5p6c: + // __base_count is a word with the number of direct proper base class + // descriptions that follow. + Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, RD->getNumBases())); + + if (!RD->getNumBases()) + return; + + // Now add the base class descriptions. + + // Itanium C++ ABI 2.9.5p6c: + // __base_info[] is an array of base class descriptions -- one for every + // direct proper base. 
Each description is of the type:
+  //
+  //   struct abi::__base_class_type_info {
+  //   public:
+  //     const __class_type_info *__base_type;
+  //     long __offset_flags;
+  //
+  //     enum __offset_flags_masks {
+  //       __virtual_mask = 0x1,
+  //       __public_mask = 0x2,
+  //       __offset_shift = 8
+  //     };
+  //   };
+
+  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
+  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
+  // LLP64 platforms.
+  // FIXME: Consider updating libc++abi to match, and extend this logic to all
+  // LLP64 platforms.
+  QualType OffsetFlagsTy = CGM.getASTContext().LongTy;
+  const TargetInfo &TI = CGM.getASTContext().getTargetInfo();
+  if (TI.getTriple().isOSCygMing() &&
+      TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
+    OffsetFlagsTy = CGM.getASTContext().LongLongTy;
+  auto OffsetFlagsLTy = CGM.getTypes().ConvertType(OffsetFlagsTy);
+
+  for (const auto &Base : RD->bases()) {
+    // The __base_type member points to the RTTI for the base type.
+    Fields.push_back(CIRGenItaniumRTTIBuilder(CXXABI, CGM)
+                         .BuildTypeInfo(loc, Base.getType()));
+
+    auto *BaseDecl =
+        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+
+    int64_t OffsetFlags = 0;
+
+    // All but the lower 8 bits of __offset_flags are a signed offset.
+    // For a non-virtual base, this is the offset in the object of the base
+    // subobject. For a virtual base, this is the offset in the virtual table of
+    // the virtual base offset for the virtual base referenced (negative).
+    CharUnits Offset;
+    if (Base.isVirtual())
+      Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(
+          RD, BaseDecl);
+    else
+      llvm_unreachable("Multi-inheritance NYI");
+
+    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
+
+    // The low-order byte of __offset_flags contains flags, as given by the
+    // masks from the enumeration __offset_flags_masks.
+    if (Base.isVirtual())
+      OffsetFlags |= BCTI_Virtual;
+    if (Base.getAccessSpecifier() == AS_public)
+      OffsetFlags |= BCTI_Public;
+
+    Fields.push_back(mlir::cir::IntAttr::get(OffsetFlagsLTy, OffsetFlags));
+  }
+}
+
+mlir::Attribute
+CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc,
+                                                          QualType Ty) {
+  // Mangle the RTTI name.
+  SmallString<256> Name;
+  llvm::raw_svector_ostream Out(Name);
+  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+  auto &builder = CGM.getBuilder();
+
+  // Look for an existing global.
+  auto GV = dyn_cast_or_null<mlir::cir::GlobalOp>(
+      mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name));
+
+  if (!GV) {
+    // Create a new global variable.
+    // From LLVM codegen => Note for the future: If we would ever like to do
+    // deferred emission of RTTI, check if emitting vtables opportunistically
+    // needs any adjustment.
+    GV = CIRGenModule::createGlobalOp(CGM, loc, Name, builder.getUInt8PtrTy(),
+                                      /*isConstant=*/true);
+    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+    CGM.setGVProperties(GV, RD);
+
+    // Import the typeinfo symbol when all non-inline virtual methods are
+    // imported.
+    if (CGM.getTarget().hasPS4DLLImportExport())
+      llvm_unreachable("NYI");
+  }
+
+  return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(
+    mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage,
+    mlir::SymbolTable::Visibility Visibility) {
+  auto &builder = CGM.getBuilder();
+  assert(!UnimplementedFeature::setDLLStorageClass());
+
+  // Add the vtable pointer.
+  BuildVTablePointer(loc, cast<Type>(Ty));
+
+  // And the name.
+ auto TypeName = GetAddrOfTypeName(loc, Ty, Linkage); + mlir::Attribute TypeNameField; + + // If we're supposed to demote the visibility, be sure to set a flag + // to use a string comparison for type_info comparisons. + CIRGenItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = + CXXABI.classifyRTTIUniqueness(Ty, Linkage); + if (RTTIUniqueness != CIRGenItaniumCXXABI::RUK_Unique) { + // The flag is the sign bit, which on ARM64 is defined to be clear + // for global pointers. This is very ARM64-specific. + llvm_unreachable("NYI"); + } else { + TypeNameField = + builder.getGlobalViewAttr(builder.getUInt8PtrTy(), TypeName); + } + Fields.push_back(TypeNameField); + + switch (Ty->getTypeClass()) { + case Type::ArrayParameter: + llvm_unreachable("NYI"); +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + // GCC treats vector types as fundamental types. + case Type::Builtin: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::BlockPointer: + // Itanium C++ ABI 2.9.5p4: + // abi::__fundamental_type_info adds no data members to std::type_info. + break; + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + break; + + case Type::BitInt: + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + // Itanium C++ ABI 2.9.5p5: + // abi::__array_type_info adds no data members to std::type_info. + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // Itanium C++ ABI 2.9.5p5: + // abi::__function_type_info adds no data members to std::type_info. + break; + + case Type::Enum: + // Itanium C++ ABI 2.9.5p5: + // abi::__enum_type_info adds no data members to std::type_info. + break; + + case Type::Record: { + const CXXRecordDecl *RD = + cast(cast(Ty)->getDecl()); + if (!RD->hasDefinition() || !RD->getNumBases()) { + // We don't need to emit any fields. + break; + } + + if (CanUseSingleInheritance(RD)) { + BuildSIClassTypeInfo(loc, RD); + } else { + BuildVMIClassTypeInfo(loc, RD); + } + + break; + } + + case Type::ObjCObject: + case Type::ObjCInterface: + llvm_unreachable("NYI"); + break; + + case Type::ObjCObjectPointer: + llvm_unreachable("NYI"); + break; + + case Type::Pointer: + llvm_unreachable("NYI"); + break; + + case Type::MemberPointer: + llvm_unreachable("NYI"); + break; + + case Type::Atomic: + // No fields, at least for the moment. + break; + } + + assert(!UnimplementedFeature::setDLLImportDLLExport()); + auto init = builder.getTypeInfo(builder.getArrayAttr(Fields)); + + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); + + // Create new global and search for an existing global. + auto OldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); + mlir::cir::GlobalOp GV = + CIRGenModule::createGlobalOp(CGM, loc, Name, init.getType(), + /*isConstant=*/true); + + // Export the typeinfo in the same circumstances as the vtable is + // exported. 
+  if (CGM.getTarget().hasPS4DLLImportExport())
+    llvm_unreachable("NYI");
+
+  // If there's already an old global variable, replace it with the new one.
+  if (OldGV) {
+    // Replace occurrences of the old variable if needed.
+    GV.setName(OldGV.getName());
+    if (!OldGV->use_empty()) {
+      // TODO: replaceAllUsesWith
+      llvm_unreachable("NYI");
+    }
+    OldGV->erase();
+  }
+
+  if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(GV.getLinkage())) {
+    assert(!UnimplementedFeature::setComdat());
+    llvm_unreachable("NYI");
+  }
+
+  CharUnits Align = CGM.getASTContext().toCharUnitsFromBits(
+      CGM.getTarget().getPointerAlign(LangAS::Default));
+  GV.setAlignmentAttr(CGM.getSize(Align));
+
+  // The Itanium ABI specifies that type_info objects must be globally
+  // unique, with one exception: if the type is an incomplete class
+  // type or a (possibly indirect) pointer to one. That exception
+  // affects the general case of comparing type_info objects produced
+  // by the typeid operator, which is why the comparison operators on
+  // std::type_info generally use the type_info name pointers instead
+  // of the object addresses. However, the language's built-in uses
+  // of RTTI generally require class types to be complete, even when
+  // manipulating pointers to those class types. This allows the
+  // implementation of dynamic_cast to rely on address equality tests,
+  // which is much faster.
+  //
+  // All of this is to say that it's important that both the type_info
+  // object and the type_info name be uniqued when weakly emitted.
+
+  // TODO(cir): setup other bits for TypeName
+  assert(!UnimplementedFeature::setDLLStorageClass());
+  assert(!UnimplementedFeature::setPartition());
+  assert(!UnimplementedFeature::setDSOLocal());
+  mlir::SymbolTable::setSymbolVisibility(
+      TypeName, CIRGenModule::getMLIRVisibility(TypeName));
+
+  // TODO(cir): setup other bits for GV
+  assert(!UnimplementedFeature::setDLLStorageClass());
+  assert(!UnimplementedFeature::setPartition());
+  assert(!UnimplementedFeature::setDSOLocal());
+  CIRGenModule::setInitializer(GV, init);
+
+  return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);
+}
+
+mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc,
+                                                             QualType Ty) {
+  return CIRGenItaniumRTTIBuilder(*this, CGM).BuildTypeInfo(loc, Ty);
+}
+
+void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT,
+                                                const CXXRecordDecl *RD) {
+  auto VTable = getAddrOfVTable(RD, CharUnits());
+  if (VTable.hasInitializer())
+    return;
+
+  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
+  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
+  auto Linkage = CGM.getVTableLinkage(RD);
+  auto RTTI = CGM.getAddrOfRTTIDescriptor(
+      CGM.getLoc(RD->getBeginLoc()), CGM.getASTContext().getTagDeclType(RD));
+
+  // Create and set the initializer.
+  ConstantInitBuilder builder(CGM);
+  auto components = builder.beginStruct();
+
+  CGVT.createVTableInitializer(components, VTLayout, RTTI,
+                               mlir::cir::isLocalLinkage(Linkage));
+  components.finishAndSetAsInitializer(VTable, /*forVtable=*/true);
+
+  // Set the correct linkage.
+  VTable.setLinkage(Linkage);
+
+  if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage)) {
+    assert(!UnimplementedFeature::setComdat());
+  }
+
+  // Set the right visibility.
+  CGM.setGVProperties(VTable, RD);
+
+  // If this is the magic class __cxxabiv1::__fundamental_type_info,
+  // we will emit the typeinfo for the fundamental types. This is the
+  // same behaviour as GCC.
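+  // That magic class is the C++ runtime's own definition, roughly:
+  //
+  //   namespace __cxxabiv1 {
+  //     class __fundamental_type_info : public std::type_info { ... };
+  //   }
+  //
+  // so defining it (as libc++abi and libsupc++ do) is what triggers emission
+  // of the fundamental descriptors.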
+ const DeclContext *DC = RD->getDeclContext(); + if (RD->getIdentifier() && + RD->getIdentifier()->isStr("__fundamental_type_info") && + isa(DC) && cast(DC)->getIdentifier() && + cast(DC)->getIdentifier()->isStr("__cxxabiv1") && + DC->getParent()->isTranslationUnit()) { + llvm_unreachable("NYI"); + // EmitFundamentalRTTIDescriptors(RD); + } + + // Always emit type metadata on non-available_externally definitions, and on + // available_externally definitions if we are performing whole program + // devirtualization. For WPD we need the type metadata on all vtable + // definitions to ensure we associate derived classes with base classes + // defined in headers but with a strong definition only in a shared + // library. + if (!VTable.isDeclarationForLinker() || + CGM.getCodeGenOpts().WholeProgramVTables) { + CGM.buildVTableTypeMetadata(RD, VTable, VTLayout); + // For available_externally definitions, add the vtable to + // @llvm.compiler.used so that it isn't deleted before whole program + // analysis. + if (VTable.isDeclarationForLinker()) { + llvm_unreachable("NYI"); + assert(CGM.getCodeGenOpts().WholeProgramVTables); + assert(!UnimplementedFeature::addCompilerUsedGlobal()); + } + } + + if (VTContext.isRelativeLayout()) + llvm_unreachable("NYI"); +} + +void CIRGenItaniumCXXABI::emitVirtualInheritanceTables( + const CXXRecordDecl *RD) { + CIRGenVTables &VTables = CGM.getVTables(); + auto VTT = VTables.getAddrOfVTT(RD); + VTables.buildVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); +} + +/// What sort of uniqueness rules should we use for the RTTI for the +/// given type? +CIRGenItaniumCXXABI::RTTIUniquenessKind +CIRGenItaniumCXXABI::classifyRTTIUniqueness( + QualType CanTy, mlir::cir::GlobalLinkageKind Linkage) const { + if (shouldRTTIBeUnique()) + return RUK_Unique; + + // It's only necessary for linkonce_odr or weak_odr linkage. + if (Linkage != mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage && + Linkage != mlir::cir::GlobalLinkageKind::WeakODRLinkage) + return RUK_Unique; + + // It's only necessary with default visibility. + if (CanTy->getVisibility() != DefaultVisibility) + return RUK_Unique; + + // If we're not required to publish this symbol, hide it. + if (Linkage == mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage) + return RUK_NonUniqueHidden; + + // If we're required to publish this symbol, as we might be under an + // explicit instantiation, leave it with default visibility but + // enable string-comparisons. 
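+  // In this mode std::type_info equality cannot rely on address identity;
+  // the runtime falls back to comparing mangled name strings, roughly:
+  //
+  //   bool type_info::operator==(const type_info &rhs) const {
+  //     return this == &rhs || strcmp(name(), rhs.name()) == 0;
+  //   }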
+  assert(Linkage == mlir::cir::GlobalLinkageKind::WeakODRLinkage);
+  return RUK_NonUniqueVisible;
+}
+
+void CIRGenItaniumCXXABI::buildDestructorCall(
+    CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+    bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) {
+  GlobalDecl GD(DD, Type);
+  auto VTT =
+      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
+  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+  CIRGenCallee Callee;
+  if (getContext().getLangOpts().AppleKext && Type != Dtor_Base &&
+      DD->isVirtual())
+    llvm_unreachable("NYI");
+  else
+    Callee = CIRGenCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
+
+  CGF.buildCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
+                             nullptr);
+}
+
+mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam(
+    CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+    bool ForVirtualBase, bool Delegating) {
+  GlobalDecl GD(DD, Type);
+  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
+}
+
+void CIRGenItaniumCXXABI::buildRethrow(CIRGenFunction &CGF, bool isNoReturn) {
+  // void __cxa_rethrow();
+  llvm_unreachable("NYI");
+}
+
+void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF,
+                                     const CXXThrowExpr *E) {
+  // This differs a bit from LLVM codegen: CIR has native operations for some
+  // cxa functions, defers allocation size computation, always passes the dtor
+  // symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn.
+
+  // Now allocate the exception object.
+  auto &builder = CGF.getBuilder();
+  QualType clangThrowType = E->getSubExpr()->getType();
+  auto throwTy = CGF.ConvertType(clangThrowType);
+  auto subExprLoc = CGF.getLoc(E->getSubExpr()->getSourceRange());
+  // Defer computing allocation size to some later lowering pass.
+  auto exceptionPtr =
+      builder
+          .create<mlir::cir::AllocExceptionOp>(
+              subExprLoc, builder.getPointerTo(throwTy), throwTy)
+          .getAddr();
+
+  // Build expression and store its result into exceptionPtr.
+  CharUnits exnAlign = CGF.getContext().getExnObjectAlignment();
+  CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign));
+
+  // Get the RTTI symbol address.
+  auto typeInfo = CGM.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType,
+                                              /*ForEH=*/true)
+                      .dyn_cast_or_null<mlir::cir::GlobalViewAttr>();
+  assert(typeInfo && "expected GlobalViewAttr typeinfo");
+  assert(!typeInfo.getIndices() && "expected no indirection");
+
+  // The address of the destructor.
+  //
+  // Note: LLVM codegen already optimizes out the dtor if the
+  // type is a record with trivial dtor (by passing down a
+  // null dtor). In CIR, we forward this info and allow for
+  // LoweringPrepare or some other pass to skip passing the
+  // trivial function.
+  //
+  // TODO(cir): alternatively, dtor could be ignored here and
+  // the type used to gather the relevant dtor during
+  // LoweringPrepare.
+  mlir::FlatSymbolRefAttr dtor{};
+  if (const RecordType *recordTy = clangThrowType->getAs<RecordType>()) {
+    CXXRecordDecl *rec = cast<CXXRecordDecl>(recordTy->getDecl());
+    CXXDestructorDecl *dtorD = rec->getDestructor();
+    dtor = mlir::FlatSymbolRefAttr::get(
+        CGM.getAddrOfCXXStructor(GlobalDecl(dtorD, Dtor_Complete))
+            .getSymNameAttr());
+  }
+
+  assert(!CGF.getInvokeDest() && "landing pad like logic NYI");
+
+  // Now throw the exception.
+ builder.create(CGF.getLoc(E->getSourceRange()), + exceptionPtr, typeInfo.getSymbol(), dtor); +} + +static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { + // Prototype: void __cxa_bad_cast(); + + // TODO(cir): set the calling convention of the runtime function. + assert(!UnimplementedFeature::setCallingConv()); + + mlir::cir::FuncType FTy = + CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); + return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast"); +} + +void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, + mlir::Location loc) { + // TODO(cir): set the calling convention to the runtime function. + assert(!UnimplementedFeature::setCallingConv()); + + CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); + CGF.getBuilder().create(loc); + CGF.getBuilder().clearInsertionPoint(); +} + +static CharUnits computeOffsetHint(ASTContext &Context, + const CXXRecordDecl *Src, + const CXXRecordDecl *Dst) { + CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + + // If Dst is not derived from Src we can skip the whole computation below and + // return that Src is not a public base of Dst. Record all inheritance paths. + if (!Dst->isDerivedFrom(Src, Paths)) + return CharUnits::fromQuantity(-2ULL); + + unsigned NumPublicPaths = 0; + CharUnits Offset; + + // Now walk all possible inheritance paths. + for (const CXXBasePath &Path : Paths) { + if (Path.Access != AS_public) // Ignore non-public inheritance. + continue; + + ++NumPublicPaths; + + for (const CXXBasePathElement &PathElement : Path) { + // If the path contains a virtual base class we can't give any hint. + // -1: no hint. + if (PathElement.Base->isVirtual()) + return CharUnits::fromQuantity(-1ULL); + + if (NumPublicPaths > 1) // Won't use offsets, skip computation. + continue; + + // Accumulate the base class offsets. + const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class); + Offset += L.getBaseClassOffset( + PathElement.Base->getType()->getAsCXXRecordDecl()); + } + } + + // -2: Src is not a public base of Dst. + if (NumPublicPaths == 0) + return CharUnits::fromQuantity(-2ULL); + + // -3: Src is a multiple public base type but never a virtual base type. + if (NumPublicPaths > 1) + return CharUnits::fromQuantity(-3ULL); + + // Otherwise, the Src type is a unique public nonvirtual base type of Dst. + // Return the offset of Src from the origin of Dst. + return Offset; +} + +static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { + // Prototype: + // void *__dynamic_cast(const void *sub, + // global_as const abi::__class_type_info *src, + // global_as const abi::__class_type_info *dst, + // std::ptrdiff_t src2dst_offset); + + mlir::Type VoidPtrTy = CGF.VoidPtrTy; + mlir::Type RTTIPtrTy = CGF.getBuilder().getUInt8PtrTy(); + mlir::Type PtrDiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + + // TODO(cir): mark the function as nowind readonly. + + // TODO(cir): set the calling convention of the runtime function. 
+ assert(!UnimplementedFeature::setCallingConv()); + + mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( + {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); + return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); +} + +mlir::cir::DynamicCastInfoAttr CIRGenItaniumCXXABI::buildDynamicCastInfo( + CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, + QualType DestRecordTy) { + auto srcRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy) + .cast(); + auto destRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy) + .cast(); + + auto runtimeFuncOp = getItaniumDynamicCastFn(CGF); + auto badCastFuncOp = getBadCastFn(CGF); + auto runtimeFuncRef = mlir::FlatSymbolRefAttr::get(runtimeFuncOp); + auto badCastFuncRef = mlir::FlatSymbolRefAttr::get(badCastFuncOp); + + const CXXRecordDecl *srcDecl = SrcRecordTy->getAsCXXRecordDecl(); + const CXXRecordDecl *destDecl = DestRecordTy->getAsCXXRecordDecl(); + auto offsetHint = computeOffsetHint(CGF.getContext(), srcDecl, destDecl); + + mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + auto offsetHintAttr = + mlir::cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); + + return mlir::cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, + badCastFuncRef, offsetHintAttr); +} + +mlir::Value CIRGenItaniumCXXABI::buildDynamicCastToVoid(CIRGenFunction &CGF, + mlir::Location Loc, + Address Value, + QualType SrcRecordTy) { + auto *clsDecl = + cast(SrcRecordTy->castAs()->getDecl()); + + // TODO(cir): consider address space in this function. + assert(!UnimplementedFeature::addressSpace()); + + auto loadOffsetToTopFromVTable = + [&](mlir::Type vtableElemTy, CharUnits vtableElemAlign) -> mlir::Value { + mlir::Type vtablePtrTy = CGF.getBuilder().getPointerTo(vtableElemTy); + mlir::Value vtablePtr = CGF.getVTablePtr(Loc, Value, vtablePtrTy, clsDecl); + + // Get the address point in the vtable that contains offset-to-top. + mlir::Value offsetToTopSlotPtr = + CGF.getBuilder().create( + Loc, vtablePtrTy, mlir::FlatSymbolRefAttr{}, vtablePtr, + /*vtable_index=*/0, -2ULL); + return CGF.getBuilder().createAlignedLoad( + Loc, vtableElemTy, offsetToTopSlotPtr, vtableElemAlign); + }; + + // Calculate the offset from the given object to its containing complete + // object. + mlir::Value offsetToTop; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + offsetToTop = loadOffsetToTopFromVTable(CGF.getBuilder().getSInt32Ty(), + CharUnits::fromQuantity(4)); + } else { + offsetToTop = loadOffsetToTopFromVTable( + CGF.convertType(CGF.getContext().getPointerDiffType()), + CGF.getPointerAlign()); + } + + // Finally, add the offset to the given pointer. + // Cast the input pointer to a uint8_t* to allow pointer arithmetic. + auto u8PtrTy = CGF.getBuilder().getUInt8PtrTy(); + mlir::Value srcBytePtr = + CGF.getBuilder().createBitcast(Value.getPointer(), u8PtrTy); + // Do the pointer arithmetic. + mlir::Value dstBytePtr = CGF.getBuilder().create( + Loc, u8PtrTy, srcBytePtr, offsetToTop); + // Cast the result to a void*. + return CGF.getBuilder().createBitcast(dstBytePtr, + CGF.getBuilder().getVoidPtrTy()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp new file mode 100644 index 000000000000..5c22c18dc551 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -0,0 +1,2892 @@ +//===- CIRGenModule.cpp - Per-Module state for CIR generation -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-translation-unit state used for CIR translation. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenModule.h" + +#include "CIRGenCXXABI.h" +#include "CIRGenCstEmitter.h" +#include "CIRGenFunction.h" +#include "CIRGenOpenMPRuntime.h" +#include "CIRGenTypes.h" +#include "CIRGenValue.h" +#include "TargetInfo.h" + +#include "UnimplementedFeatureGuarding.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/OperationSupport.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/IR/Verifier.h" + +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclGroup.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/EvaluatedExprVisitor.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/ParentMap.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/StmtCXX.h" +#include "clang/AST/StmtObjC.h" +#include "clang/AST/Type.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LangStandard.h" +#include "clang/Basic/NoSanitizeList.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/LowerToLLVM.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Lex/Preprocessor.h" + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/ScopedHashTable.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/raw_ostream.h" + +#include +#include + +using namespace mlir::cir; +using namespace cir; +using namespace clang; + +using llvm::cast; +using llvm::dyn_cast; +using llvm::isa; +using llvm::SmallVector; +using llvm::StringRef; + +static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { + switch (CGM.getASTContext().getCXXABIKind()) { + case TargetCXXABI::GenericItanium: + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::AppleARM64: + return CreateCIRGenItaniumCXXABI(CGM); + default: + llvm_unreachable("invalid C++ ABI kind"); + } +} + +CIRGenModule::CIRGenModule(mlir::MLIRContext &context, + clang::ASTContext &astctx, + const clang::CodeGenOptions &CGO, + DiagnosticsEngine &Diags) + : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, + VTables{*this}, openMPRuntime(new CIRGenOpenMPRuntime(*this)) { + + // Initialize CIR signed integer types cache. 
+ SInt8Ty = + ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/true); + SInt16Ty = + ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/true); + SInt32Ty = + ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/true); + SInt64Ty = + ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/true); + + // Initialize CIR unsigned integer types cache. + UInt8Ty = + ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/false); + UInt16Ty = + ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/false); + UInt32Ty = + ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/false); + UInt64Ty = + ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/false); + + VoidTy = ::mlir::cir::VoidType::get(builder.getContext()); + + // Initialize CIR pointer types cache. + VoidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), VoidTy); + + // TODO: HalfTy + // TODO: BFloatTy + FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); + DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); + FP80Ty = ::mlir::cir::FP80Type::get(builder.getContext()); + // TODO(cir): perhaps we should abstract long double variations into a custom + // cir.long_double type. Said type would also hold the semantics for lowering. + + // TODO: PointerWidthInBits + PointerAlignInBytes = + astctx + .toCharUnitsFromBits( + astctx.getTargetInfo().getPointerAlign(LangAS::Default)) + .getQuantity(); + // TODO: SizeSizeInBytes + // TODO: IntAlignInBytes + UCharTy = ::mlir::cir::IntType::get(builder.getContext(), + astCtx.getTargetInfo().getCharWidth(), + /*isSigned=*/false); + UIntTy = ::mlir::cir::IntType::get(builder.getContext(), + astCtx.getTargetInfo().getIntWidth(), + /*isSigned=*/false); + UIntPtrTy = ::mlir::cir::IntType::get( + builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/false); + UInt8PtrTy = builder.getPointerTo(UInt8Ty); + UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); + AllocaInt8PtrTy = UInt8PtrTy; + // TODO: GlobalsInt8PtrTy + // TODO: ConstGlobalsPtrTy + ASTAllocaAddressSpace = getTargetCIRGenInfo().getASTAllocaAddressSpace(); + + PtrDiffTy = ::mlir::cir::IntType::get( + builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/true); + + mlir::cir::sob::SignedOverflowBehavior sob; + switch (langOpts.getSignedOverflowBehavior()) { + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: + sob = sob::SignedOverflowBehavior::defined; + break; + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Undefined: + sob = sob::SignedOverflowBehavior::undefined; + break; + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Trapping: + sob = sob::SignedOverflowBehavior::trapping; + break; + } + theModule->setAttr("cir.sob", + mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); + theModule->setAttr( + "cir.lang", mlir::cir::LangAttr::get(&context, getCIRSourceLanguage())); + // Set the module name to be the name of the main file. TranslationUnitDecl + // often contains invalid source locations and isn't a reliable source for the + // module location. 
+ auto MainFileID = astctx.getSourceManager().getMainFileID(); + const FileEntry &MainFile = + *astctx.getSourceManager().getFileEntryForID(MainFileID); + auto Path = MainFile.tryGetRealPathName(); + if (!Path.empty()) { + theModule.setSymName(Path); + theModule->setLoc(mlir::FileLineColLoc::get(&context, Path, + /*line=*/0, + /*col=*/0)); + } +} + +CIRGenModule::~CIRGenModule() {} + +bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor, + bool ExcludeDtor) { + if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) + return false; + + if (astCtx.getLangOpts().CPlusPlus) { + if (const CXXRecordDecl *Record = + astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) + return ExcludeCtor && !Record->hasMutableFields() && + (Record->hasTrivialDestructor() || ExcludeDtor); + } + + return true; +} + +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// Return the best known alignment for an unknown pointer to a +/// particular class. +CharUnits CIRGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) { + if (!RD->hasDefinition()) + return CharUnits::One(); // Hopefully won't be used anywhere. + + auto &layout = astCtx.getASTRecordLayout(RD); + + // If the class is final, then we know that the pointer points to an + // object of that type and can use the full alignment. + if (RD->isEffectivelyFinal()) + return layout.getAlignment(); + + // Otherwise, we have to assume it could be a subclass. + return layout.getNonVirtualAlignment(); +} + +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// TODO: Add TBAAAccessInfo +CharUnits +CIRGenModule::getNaturalPointeeTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo) { + return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, + /* forPointeeType= */ true); +} + +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// TODO: Add TBAAAccessInfo +CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo, + bool forPointeeType) { + // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But + // that doesn't return the information we need to compute BaseInfo. + + // Honor alignment typedef attributes even on incomplete types. + // We also honor them straight for C++ class types, even as pointees; + // there's an expressivity gap here. + if (auto TT = T->getAs()) { + if (auto Align = TT->getDecl()->getMaxAlignment()) { + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); + return astCtx.toCharUnitsFromBits(Align); + } + } + + bool AlignForArray = T->isArrayType(); + + // Analyze the base element type, so we don't get confused by incomplete + // array types. + T = astCtx.getBaseElementType(T); + + if (T->isIncompleteType()) { + // We could try to replicate the logic from + // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the + // type is incomplete, so it's impossible to test. We could try to reuse + // getTypeAlignIfKnown, but that doesn't return the information we need + // to set BaseInfo. So just ignore the possibility that the alignment is + // greater than one. 
+
+/// FIXME: this could likely be a common helper and not necessarily related
+/// to codegen.
+/// TODO: Add TBAAAccessInfo
+CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T,
+                                                LValueBaseInfo *BaseInfo,
+                                                bool forPointeeType) {
+  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
+  // that doesn't return the information we need to compute BaseInfo.
+
+  // Honor alignment typedef attributes even on incomplete types.
+  // We also honor them straight for C++ class types, even as pointees;
+  // there's an expressivity gap here.
+  if (auto TT = T->getAs<TypedefType>()) {
+    if (auto Align = TT->getDecl()->getMaxAlignment()) {
+      if (BaseInfo)
+        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
+      return astCtx.toCharUnitsFromBits(Align);
+    }
+  }
+
+  bool AlignForArray = T->isArrayType();
+
+  // Analyze the base element type, so we don't get confused by incomplete
+  // array types.
+  T = astCtx.getBaseElementType(T);
+
+  if (T->isIncompleteType()) {
+    // We could try to replicate the logic from
+    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
+    // type is incomplete, so it's impossible to test. We could try to reuse
+    // getTypeAlignIfKnown, but that doesn't return the information we need
+    // to set BaseInfo. So just ignore the possibility that the alignment is
+    // greater than one.
+    if (BaseInfo)
+      *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+    return CharUnits::One();
+  }
+
+  if (BaseInfo)
+    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+
+  CharUnits Alignment;
+  const CXXRecordDecl *RD;
+  if (T.getQualifiers().hasUnaligned()) {
+    Alignment = CharUnits::One();
+  } else if (forPointeeType && !AlignForArray &&
+             (RD = T->getAsCXXRecordDecl())) {
+    // For C++ class pointees, we don't know whether we're pointing at a
+    // base or a complete object, so we generally need to use the
+    // non-virtual alignment.
+    Alignment = getClassPointerAlignment(RD);
+  } else {
+    Alignment = astCtx.getTypeAlignInChars(T);
+  }
+
+  // Cap to the global maximum type alignment unless the alignment
+  // was somehow explicit on the type.
+  if (unsigned MaxAlign = astCtx.getLangOpts().MaxTypeAlign) {
+    if (Alignment.getQuantity() > MaxAlign && !astCtx.isAlignmentRequired(T))
+      Alignment = CharUnits::fromQuantity(MaxAlign);
+  }
+  return Alignment;
+}
+
+bool CIRGenModule::MustBeEmitted(const ValueDecl *Global) {
+  // Never defer when EmitAllDecls is specified.
+  assert(!langOpts.EmitAllDecls && "EmitAllDecls NYI");
+  assert(!codeGenOpts.KeepStaticConsts && "KeepStaticConsts NYI");
+
+  return getASTContext().DeclMustBeEmitted(Global);
+}
+
+bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
+  // In OpenMP 5.0 variables and functions may be marked as
+  // device_type(host/nohost) and we should not emit them eagerly unless we
+  // are sure that they must be emitted on the host/device. To be sure we
+  // need to have seen a declare target with an explicit mention of the
+  // function; we know we have one if the level of the declare target
+  // attribute is -1. Note that we check somewhere else if we should emit
+  // this at all.
+  if (langOpts.OpenMP >= 50 && !langOpts.OpenMPSimd) {
+    std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+        OMPDeclareTargetDeclAttr::getActiveAttr(Global);
+    if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
+      return false;
+  }
+
+  const auto *FD = dyn_cast<FunctionDecl>(Global);
+  if (FD) {
+    // Implicit template instantiations may change linkage if they are later
+    // explicitly instantiated, so they should not be emitted eagerly.
+    // TODO(cir): do we care?
+    assert(FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation &&
+           "not implemented");
+    assert(!FD->isTemplated() && "Templates NYI");
+  }
+  const auto *VD = dyn_cast<VarDecl>(Global);
+  if (VD)
+    // A definition of an inline constexpr static data member may change
+    // linkage later if it's redeclared outside the class.
+    // TODO(cir): do we care?
+    assert(astCtx.getInlineVariableDefinitionKind(VD) !=
+               ASTContext::InlineVariableDefinitionKind::WeakUnknown &&
+           "not implemented");
+
+  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
+  // codegen for global variables, because they may be marked as
+  // threadprivate.
+  if (langOpts.OpenMP && langOpts.OpenMPUseTLS &&
+      getASTContext().getTargetInfo().isTLSSupported() &&
+      isa<VarDecl>(Global) &&
+      !Global->getType().isConstantStorage(getASTContext(), false, false) &&
+      !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
+    return false;
+
+  assert((FD || VD) &&
+         "Only FunctionDecl and VarDecl should hit this path so far.");
+  return true;
+}
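+
+// Illustrative C++ input for the eager/deferred split above (not from the
+// original patch):
+//
+//   inline int square(int x) { return x * x; }  // deferred until first use
+//   int main() { return square(4); }            // must be emitted eagerly
+//
+// MustBeEmitted follows DeclMustBeEmitted; MayBeEmittedEagerly then filters
+// out cases (OpenMP device globals, implicit instantiations) where linkage
+// could still change later.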
+
+void CIRGenModule::buildGlobal(GlobalDecl GD) {
+  const auto *Global = cast<ValueDecl>(GD.getDecl());
+
+  assert(!Global->hasAttr<IFuncAttr>() && "NYI");
+  assert(!Global->hasAttr<CPUDispatchAttr>() && "NYI");
+  assert(!langOpts.CUDA && "NYI");
+
+  if (langOpts.OpenMP) {
+    // If this is OpenMP, check if it is legal to emit this global normally.
+    if (openMPRuntime && openMPRuntime->emitTargetGlobal(GD)) {
+      assert(!UnimplementedFeature::openMPRuntime());
+      return;
+    }
+    if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
+      assert(!UnimplementedFeature::openMP());
+      return;
+    }
+    if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
+      assert(!UnimplementedFeature::openMP());
+      return;
+    }
+  }
+
+  // Ignore declarations, they will be emitted on their first use.
+  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
+    // Forward declarations are emitted lazily on first use.
+    if (!FD->doesThisDeclarationHaveABody()) {
+      if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+        return;
+
+      llvm::StringRef MangledName = getMangledName(GD);
+
+      // Compute the function info and CIR type.
+      const auto &FI = getTypes().arrangeGlobalDeclaration(GD);
+      mlir::Type Ty = getTypes().GetFunctionType(FI);
+
+      GetOrCreateCIRFunction(MangledName, Ty, GD, /*ForVTable=*/false,
+                             /*DontDefer=*/false);
+      return;
+    }
+  } else {
+    const auto *VD = cast<VarDecl>(Global);
+    assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+    if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
+        !astCtx.isMSStaticDataMemberInlineDefinition(VD)) {
+      if (langOpts.OpenMP) {
+        // Emit declaration of the must-be-emitted declare target variable.
+        if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+          assert(0 && "OMPDeclareTargetDeclAttr NYI");
+        }
+      }
+      // If this declaration may have caused an inline variable definition
+      // to change linkage, make sure that it's emitted.
+      // TODO(cir): probably use GetAddrOfGlobalVar(VD) below?
+      assert((astCtx.getInlineVariableDefinitionKind(VD) !=
+              ASTContext::InlineVariableDefinitionKind::Strong) &&
+             "not implemented");
+      return;
+    }
+  }
+
+  // Defer code generation to first use when possible, e.g. if this is an
+  // inline function. If the global must always be emitted, do it eagerly if
+  // possible to benefit from cache locality.
+  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
+    // Emit the definition if it can't be deferred.
+    buildGlobalDefinition(GD);
+    return;
+  }
+
+  // If we're deferring emission of a C++ variable with an initializer,
+  // remember the order in which it appeared in the file.
+  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
+      cast<VarDecl>(Global)->hasInit()) {
+    DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+    CXXGlobalInits.push_back(nullptr);
+  }
+
+  llvm::StringRef MangledName = getMangledName(GD);
+  if (getGlobalValue(MangledName) != nullptr) {
+    // The value has already been used and should therefore be emitted.
+    addDeferredDeclToEmit(GD);
+  } else if (MustBeEmitted(Global)) {
+    // The value must be emitted, but cannot be emitted eagerly.
+    assert(!MayBeEmittedEagerly(Global));
+    addDeferredDeclToEmit(GD);
+  } else {
+    // Otherwise, remember that we saw a deferred decl with this name. The
+    // first use of the mangled name will cause it to move into
+    // DeferredDeclsToEmit.
+    DeferredDecls[MangledName] = GD;
+  }
+}
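+
+// Sketch of the bookkeeping above (assumed flow, not from the original
+// patch); the mangled name is only an example:
+//
+//   buildGlobal(square)                  // declaration, no use yet
+//     -> DeferredDecls["_Z6squarei"] = GD
+//   ... first reference to _Z6squarei from emitted code ...
+//     -> addDeferredDeclToEmit(GD)       // moved to DeferredDeclsToEmit
+//
+// Definitions that must exist regardless (e.g. main) bypass the map via
+// buildGlobalDefinition.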
+
+void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD,
+                                                 mlir::Operation *Op) {
+  auto const *D = cast<FunctionDecl>(GD.getDecl());
+
+  // Compute the function info and CIR type.
+  const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+  auto Ty = getTypes().GetFunctionType(FI);
+
+  // Get or create the prototype for the function.
+  // if (!V || (V.getValueType() != Ty))
+  // TODO(cir): Figure out what to do here? llvm uses a GlobalValue for the
+  // FuncOp in mlir.
+  Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true,
+                         ForDefinition);
+
+  auto Fn = cast<mlir::cir::FuncOp>(Op);
+  // Already emitted.
+  if (!Fn.isDeclaration())
+    return;
+
+  setFunctionLinkage(GD, Fn);
+  setGVProperties(Op, D);
+  // TODO(cir): MaybeHandleStaticInExternC
+  // TODO(cir): maybeSetTrivialComdat
+  // TODO(cir): setLLVMFunctionFEnvAttributes
+
+  CIRGenFunction CGF{*this, builder};
+  CurCGF = &CGF;
+  {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    CGF.generateCode(GD, Fn, FI);
+  }
+  CurCGF = nullptr;
+
+  // TODO: setNonAliasAttributes
+  // TODO: SetLLVMFunctionAttributesForDeclaration
+
+  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+    AddGlobalCtor(Fn, CA->getPriority());
+  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+    AddGlobalDtor(Fn, DA->getPriority(), true);
+
+  assert(!D->getAttr<AnnotateAttr>() && "NYI");
+}
+
+/// Track functions to be called before main() runs.
+void CIRGenModule::AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority) {
+  // FIXME(cir): handle LexOrder and Associated data upon testcases.
+  //
+  // Traditional LLVM codegen directly adds the function to the list of
+  // global ctors. In CIR we just add a global_ctor attribute to the
+  // function. The global list is created in LoweringPrepare.
+  //
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  Ctor->setAttr(Ctor.getGlobalCtorAttrName(),
+                mlir::cir::GlobalCtorAttr::get(builder.getContext(),
+                                               Ctor.getName(), Priority));
+}
+
+/// Add a function to the list that will be called when the module is
+/// unloaded.
+void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority,
+                                 bool IsDtorAttrFunc) {
+  assert(IsDtorAttrFunc && "NYI");
+  if (codeGenOpts.RegisterGlobalDtorsWithAtExit &&
+      (!getASTContext().getTargetInfo().getTriple().isOSAIX() ||
+       IsDtorAttrFunc)) {
+    llvm_unreachable("NYI");
+  }
+
+  // FIXME(from traditional LLVM): Type coercion of void()* types.
+  Dtor->setAttr(Dtor.getGlobalDtorAttrName(),
+                mlir::cir::GlobalDtorAttr::get(builder.getContext(),
+                                               Dtor.getName(), Priority));
+}
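+
+// Illustrative input/output for AddGlobalCtor above; the CIR spelling is
+// approximate and not confirmed by this patch:
+//
+//   __attribute__((constructor(200))) void setup(void) {}
+//
+// becomes roughly
+//
+//   cir.func @setup() global_ctor(200) { ... }
+//
+// with LoweringPrepare, not CIRGen, later materializing the global ctor/dtor
+// lists from these attributes.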
+
+mlir::Operation *CIRGenModule::getGlobalValue(StringRef Name) {
+  auto global = mlir::SymbolTable::lookupSymbolIn(theModule, Name);
+  if (!global)
+    return {};
+  return global;
+}
+
+mlir::Value CIRGenModule::getGlobalValue(const Decl *D) {
+  assert(CurCGF);
+  return CurCGF->symbolTable.lookup(D);
+}
+
+mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM,
+                                                 mlir::Location loc,
+                                                 StringRef name, mlir::Type t,
+                                                 bool isCst,
+                                                 mlir::Operation *insertPoint) {
+  mlir::cir::GlobalOp g;
+  auto &builder = CGM.getBuilder();
+  {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+
+    // Some global emissions are triggered while emitting a function, e.g.
+    //   void s() { const char *s = "yolo"; ... }
+    //
+    // Be sure to insert the global before the current function.
+    auto *curCGF = CGM.getCurrCIRGenFun();
+    if (curCGF)
+      builder.setInsertionPoint(curCGF->CurFn);
+
+    g = builder.create<mlir::cir::GlobalOp>(loc, name, t, isCst);
+    if (!curCGF) {
+      if (insertPoint)
+        CGM.getModule().insert(insertPoint, g);
+      else
+        CGM.getModule().push_back(g);
+    }
+
+    // Default to private until we can judge based on the initializer,
+    // since MLIR doesn't allow public declarations.
+    mlir::SymbolTable::setSymbolVisibility(
+        g, mlir::SymbolTable::Visibility::Private);
+  }
+  return g;
+}
+
+void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) {
+  assert(!UnimplementedFeature::setCommonAttributes());
+}
+
+void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old,
+                                 mlir::cir::GlobalOp New) {
+  assert(Old.getSymName() == New.getSymName() && "symbol names must match");
+
+  // If the types do not match, update all references to Old to the new type.
+  auto OldTy = Old.getSymType();
+  auto NewTy = New.getSymType();
+  if (OldTy != NewTy) {
+    auto OldSymUses = Old.getSymbolUses(theModule.getOperation());
+    if (OldSymUses.has_value()) {
+      for (auto Use : *OldSymUses) {
+        auto *UserOp = Use.getUser();
+        assert((isa<mlir::cir::GetGlobalOp>(UserOp) ||
+                isa<mlir::cir::GlobalOp>(UserOp)) &&
+               "GlobalOp symbol user is neither a GetGlobalOp nor a GlobalOp");
+
+        if (auto GGO = dyn_cast<mlir::cir::GetGlobalOp>(Use.getUser())) {
+          auto UseOpResultValue = GGO.getAddr();
+          UseOpResultValue.setType(
+              mlir::cir::PointerType::get(builder.getContext(), NewTy));
+        }
+      }
+    }
+  }
+
+  // Remove the old global from the module.
+  Old.erase();
+}
+
+mlir::cir::TLS_Model CIRGenModule::GetDefaultCIRTLSModel() const {
+  switch (getCodeGenOpts().getDefaultTLSModel()) {
+  case CodeGenOptions::GeneralDynamicTLSModel:
+    return mlir::cir::TLS_Model::GeneralDynamic;
+  case CodeGenOptions::LocalDynamicTLSModel:
+    return mlir::cir::TLS_Model::LocalDynamic;
+  case CodeGenOptions::InitialExecTLSModel:
+    return mlir::cir::TLS_Model::InitialExec;
+  case CodeGenOptions::LocalExecTLSModel:
+    return mlir::cir::TLS_Model::LocalExec;
+  }
+  llvm_unreachable("Invalid TLS model!");
+}
+
+void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const {
+  assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
+
+  auto TLM = GetDefaultCIRTLSModel();
+
+  // Override the TLS model if it is explicitly specified.
+  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
+    llvm_unreachable("NYI");
+  }
+
+  auto global = dyn_cast<mlir::cir::GlobalOp>(Op);
+  assert(global && "NYI for other operations");
+  global.setTlsModel(TLM);
+}
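+
+// Illustrative example for setTLSMode above (hypothetical): compiling
+//
+//   thread_local int counter;
+//
+// with -ftls-model=initial-exec makes GetDefaultCIRTLSModel return
+// InitialExec, which ends up as a tls model attribute on the cir.global via
+// setTlsModel.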
+
+/// If the specified mangled name is not in the module,
+/// create and return an mlir GlobalOp with the specified type (TODO(cir):
+/// address space).
+///
+/// TODO(cir):
+/// 1. If there is something in the module with the specified name, return
+/// it potentially bitcasted to the right type.
+///
+/// 2. If D is non-null, it specifies a decl that corresponds to this. This
+/// is used to set the attributes on the global when it is first created.
+///
+/// 3. If IsForDefinition is true, it is guaranteed that an actual global
+/// with type Ty will be returned, not conversion of a variable with the same
+/// mangled name but some other type.
+mlir::cir::GlobalOp
+CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty,
+                                   LangAS AddrSpace, const VarDecl *D,
+                                   ForDefinition_t IsForDefinition) {
+  // Lookup the entry, lazily creating it if necessary.
+  mlir::cir::GlobalOp Entry;
+  if (auto *V = getGlobalValue(MangledName)) {
+    assert(isa<mlir::cir::GlobalOp>(V) && "only supports GlobalOp for now");
+    Entry = dyn_cast_or_null<mlir::cir::GlobalOp>(V);
+  }
+
+  // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace);
+  if (Entry) {
+    if (WeakRefReferences.erase(Entry)) {
+      if (D && !D->hasAttr<WeakAttr>()) {
+        auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage;
+        Entry.setLinkageAttr(
+            mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), LT));
+        mlir::SymbolTable::setSymbolVisibility(Entry,
+                                               getMLIRVisibility(Entry));
+      }
+    }
+
+    // Handle dropped DLL attributes.
+    if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+      assert(!UnimplementedFeature::setDLLStorageClass() && "NYI");
+
+    if (langOpts.OpenMP && !langOpts.OpenMPSimd && D)
+      getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
+
+    // TODO(cir): check TargetAS matches Entry address space
+    if (Entry.getSymType() == Ty &&
+        !UnimplementedFeature::addressSpaceInGlobalVar())
+      return Entry;
+
+    // If there are two attempts to define the same mangled name, issue an
+    // error.
+    //
+    // TODO(cir): look at mlir::GlobalValue::isDeclaration for all aspects of
+    // recognizing the global as a declaration, for now only check if
+    // initializer is present.
+    if (IsForDefinition && !Entry.isDeclaration()) {
+      GlobalDecl OtherGD;
+      const VarDecl *OtherD;
+
+      // Checking that D is not yet in DiagnosedConflictingDefinitions is
+      // required to make sure that we issue an error only once.
+      if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
+          (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
+          (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
+          OtherD->hasInit() &&
+          DiagnosedConflictingDefinitions.insert(D).second) {
+        getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
+            << MangledName;
+        getDiags().Report(OtherGD.getDecl()->getLocation(),
+                          diag::note_previous_definition);
+      }
+    }
+
+    // TODO(cir): LLVM codegen makes sure the result is of the correct type
+    // by issuing an address space cast.
+
+    // (If global is requested for a definition, we always need to create a
+    // new global, not just return a bitcast.)
+    if (!IsForDefinition)
+      return Entry;
+  }
+
+  // TODO(cir): auto DAddrSpace = GetGlobalVarAddressSpace(D);
+  // TODO(cir): do we need to strip pointer casts for Entry?
+
+  auto loc = getLoc(D->getSourceRange());
+
+  // mlir::SymbolTable::Visibility::Public is the default, no need to
+  // explicitly mark it as such.
+  auto GV = CIRGenModule::createGlobalOp(*this, loc, MangledName, Ty,
+                                         /*isConstant=*/false,
+                                         /*insertPoint=*/Entry.getOperation());
+
+  // If we already created a global with the same mangled name (but different
+  // type) before, replace it with the new global.
+  if (Entry) {
+    replaceGlobal(Entry, GV);
+  }
+
+  // This is the first use or definition of a mangled name. If there is a
+  // deferred decl with this name, remember that we need to emit it at the
+  // end of the file.
+  auto DDI = DeferredDecls.find(MangledName);
+  if (DDI != DeferredDecls.end()) {
+    // Move the potentially referenced deferred decl to the
+    // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
+    // don't need it anymore).
+    addDeferredDeclToEmit(DDI->second);
+    DeferredDecls.erase(DDI);
+  }
+
+  // Handle things which are present even on external declarations.
+  if (D) {
+    if (langOpts.OpenMP && !langOpts.OpenMPSimd && D)
+      getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
+
+    // FIXME: This code is overly simple and should be merged with other
+    // global handling.
+
+    // TODO(cir):
+    // GV->setConstant(isTypeConstant(D->getType(), false));
+    // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
+    // setLinkageForGV(GV, D);
+
+    if (D->getTLSKind()) {
+      if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+        llvm_unreachable("NYI");
+      setTLSMode(GV, *D);
+    }
+
+    setGVProperties(GV, D);
+
+    // If required by the ABI, treat declarations of static data members with
+    // inline initializers as definitions.
+    if (astCtx.isMSStaticDataMemberInlineDefinition(D)) {
+      assert(0 && "not implemented");
+    }
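+
+    // Illustrative case for the replaceGlobal path above (not from the
+    // original patch):
+    //
+    //   extern int x[];  // first seen: incomplete array type
+    //   int x[10];       // definition: different type, same mangled name
+    //
+    // The second decl requests a definition with a new type, so a fresh
+    // GlobalOp is created and replaceGlobal rewrites users of the old one.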
+
+    // Emit section information for extern variables.
+    if (D->hasExternalStorage()) {
+      if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+        GV.setSectionAttr(builder.getStringAttr(SA->getName()));
+    }
+
+    // Handle XCore specific ABI requirements.
+    if (getTriple().getArch() == llvm::Triple::xcore)
+      assert(0 && "not implemented");
+
+    // Check if we have a const declaration with an initializer; we may be
+    // able to emit it as available_externally to expose its value to the
+    // optimizer.
+    if (getLangOpts().CPlusPlus && GV.isPublic() &&
+        D->getType().isConstQualified() && GV.isDeclaration() &&
+        !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
+      assert(0 && "not implemented");
+    }
+  }
+
+  // TODO(cir): if this method is used to handle functions we must have
+  // something closer to GlobalValue::isDeclaration instead of checking for
+  // initializer.
+  if (GV.isDeclaration()) {
+    // TODO(cir): set target attributes
+
+    // External HIP managed variables need to be recorded for transformation
+    // in both device and host compilations.
+    if (getLangOpts().CUDA)
+      assert(0 && "not implemented");
+  }
+
+  // TODO(cir): address space cast when needed for DAddrSpace.
+  return GV;
+}
+
+mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, mlir::Type Ty,
+                                              ForDefinition_t IsForDefinition) {
+  assert(D->hasGlobalStorage() && "Not a global variable");
+  QualType ASTTy = D->getType();
+  if (!Ty)
+    Ty = getTypes().convertTypeForMem(ASTTy);
+
+  StringRef MangledName = getMangledName(D);
+  return getOrCreateCIRGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
+                              IsForDefinition);
+}
+
+/// Return the mlir::Value for the address of the given global variable. If
+/// Ty is non-null and if the global doesn't exist, then it will be created
+/// with the specified type instead of whatever the normal requested type
+/// would be. If IsForDefinition is true, it is guaranteed that an actual
+/// global with type Ty will be returned, not conversion of a variable with
+/// the same mangled name but some other type.
+mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty,
+                                             ForDefinition_t IsForDefinition) {
+  assert(D->hasGlobalStorage() && "Not a global variable");
+  QualType ASTTy = D->getType();
+  if (!Ty)
+    Ty = getTypes().convertTypeForMem(ASTTy);
+
+  bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None;
+  auto g = buildGlobal(D, Ty, IsForDefinition);
+  auto ptrTy =
+      mlir::cir::PointerType::get(builder.getContext(), g.getSymType());
+  return builder.create<mlir::cir::GetGlobalOp>(
+      getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess);
+}
+
+mlir::cir::GlobalViewAttr
+CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty,
+                                     ForDefinition_t IsForDefinition) {
+  assert(D->hasGlobalStorage() && "Not a global variable");
+  QualType ASTTy = D->getType();
+  if (!Ty)
+    Ty = getTypes().convertTypeForMem(ASTTy);
+
+  auto globalOp = buildGlobal(D, Ty, IsForDefinition);
+  return builder.getGlobalViewAttr(builder.getPointerTo(Ty), globalOp);
+}
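+
+// Illustrative use of getAddrOfGlobalVar above (hypothetical CIR spelling):
+//
+//   int g;
+//   int read_g(void) { return g; }
+//
+// each use of 'g' materializes roughly as
+//
+//   %0 = cir.get_global @g : cir.ptr<!s32i>
+//
+// rather than referencing the global directly as LLVM IR would.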
+
+mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) {
+  const AliasAttr *AA = VD->getAttr<AliasAttr>();
+  assert(AA && "No alias?");
+
+  // See if there is already something with the target's name in the module.
+  mlir::Operation *Entry = getGlobalValue(AA->getAliasee());
+  if (Entry) {
+    assert((isa<mlir::cir::GlobalOp>(Entry) ||
+            isa<mlir::cir::FuncOp>(Entry)) &&
+           "weak ref should be against a global variable or function");
+    return Entry;
+  }
+
+  mlir::Type DeclTy = getTypes().convertTypeForMem(VD->getType());
+  if (DeclTy.isa<mlir::cir::FuncType>()) {
+    auto F = GetOrCreateCIRFunction(AA->getAliasee(), DeclTy,
+                                    GlobalDecl(cast<FunctionDecl>(VD)),
+                                    /*ForVtable=*/false);
+    F.setLinkage(mlir::cir::GlobalLinkageKind::ExternalWeakLinkage);
+    WeakRefReferences.insert(F);
+    return F;
+  }
+
+  llvm_unreachable("GlobalOp NYI");
+}
+
+/// TODO(cir): looks like part of this code can be part of a common AST
+/// helper between CIR and LLVM codegen.
+template <typename SomeDecl>
+void CIRGenModule::maybeHandleStaticInExternC(const SomeDecl *D,
+                                              mlir::cir::GlobalOp GV) {
+  if (!getLangOpts().CPlusPlus)
+    return;
+
+  // Must have 'used' attribute, or else inline assembly can't rely on
+  // the name existing.
+  if (!D->template hasAttr<UsedAttr>())
+    return;
+
+  // Must have internal linkage and an ordinary name.
+  if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal)
+    return;
+
+  // Must be in an extern "C" context. Entities declared directly within
+  // a record are not extern "C" even if the record is in such a context.
+  const SomeDecl *First = D->getFirstDecl();
+  if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
+    return;
+
+  // TODO(cir):
+  // OK, this is an internal linkage entity inside an extern "C" linkage
+  // specification. Make a note of that so we can give it the "expected"
+  // mangled name if nothing else is using that name.
+  //
+  // If we have multiple internal linkage entities with the same name
+  // in extern "C" regions, none of them gets that name.
+  assert(0 && "not implemented");
+}
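+
+// Illustrative trigger for the unimplemented path above (hypothetical, not
+// from the original patch):
+//
+//   extern "C" {
+//   __attribute__((used)) static int internal_counter;
+//   }
+//
+// an internal-linkage entity in an extern "C" context that inline assembly
+// may want to reference by its unmangled name.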
+
+void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D,
+                                            bool IsTentative) {
+  // TODO(cir):
+  // OpenCL global variables of sampler type are translated to function
+  // calls, therefore no need to be translated.
+  // If this is OpenMP device, check if it is legal to emit this global
+  // normally.
+  QualType ASTTy = D->getType();
+  if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice)
+    llvm_unreachable("not implemented");
+
+  // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that
+  // necessary here for CIR gen?
+  mlir::Attribute Init;
+  bool NeedsGlobalCtor = false;
+  // Whether the definition of the variable is available externally.
+  // If yes, we shouldn't emit the GlobalCtor and GlobalDtor for the variable
+  // since this is the job for its original source.
+  bool IsDefinitionAvailableExternally =
+      astCtx.GetGVALinkageForVariable(D) == GVA_AvailableExternally;
+  bool NeedsGlobalDtor =
+      !IsDefinitionAvailableExternally &&
+      D->needsDestruction(astCtx) == QualType::DK_cxx_destructor;
+
+  const VarDecl *InitDecl;
+  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
+
+  std::optional<ConstantEmitter> emitter;
+
+  // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
+  // as part of their declaration." Sema has already checked for
+  // error cases, so we just need to set Init to UndefValue.
+  bool IsCUDASharedVar =
+      getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
+  // Shadows of initialized device-side global variables are also left
+  // undefined.
+  // Managed variables should be initialized on both host side and device
+  // side.
+  bool IsCUDAShadowVar =
+      !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
+      (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
+       D->hasAttr<CUDASharedAttr>());
+  bool IsCUDADeviceShadowVar =
+      getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
+      (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+       D->getType()->isCUDADeviceBuiltinTextureType());
+  if (getLangOpts().CUDA &&
+      (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
+    assert(0 && "not implemented");
+  else if (D->hasAttr<LoaderUninitializedAttr>())
+    assert(0 && "not implemented");
+  else if (!InitExpr) {
+    // This is a tentative definition; tentative definitions are
+    // implicitly initialized with { 0 }.
+    //
+    // Note that tentative definitions are only emitted at the end of
+    // a translation unit, so they should never have incomplete
+    // type. In addition, EmitTentativeDefinition makes sure that we
+    // never attempt to emit a tentative definition if a real one
+    // exists. A use may still exist, however, so we still may need
+    // to do a RAUW.
+    assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+    Init = builder.getZeroInitAttr(getCIRType(D->getType()));
+  } else {
+    initializedGlobalDecl = GlobalDecl(D);
+    emitter.emplace(*this);
+    auto Initializer = emitter->tryEmitForInitializer(*InitDecl);
+    if (!Initializer) {
+      QualType T = InitExpr->getType();
+      if (D->getType()->isReferenceType())
+        T = D->getType();
+
+      if (getLangOpts().CPlusPlus) {
+        if (InitDecl->hasFlexibleArrayInit(astCtx))
+          ErrorUnsupported(D, "flexible array initializer");
+        Init = builder.getZeroInitAttr(getCIRType(T));
+        if (!IsDefinitionAvailableExternally)
+          NeedsGlobalCtor = true;
+      } else {
+        ErrorUnsupported(D, "static initializer");
+      }
+    } else {
+      Init = Initializer;
+      // We don't need an initializer, so remove the entry for the delayed
+      // initializer position (just in case this entry was delayed) if we
+      // also don't need to register a destructor.
+      if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
+        DelayedCXXInitPosition.erase(D);
+    }
+  }
+
+  mlir::Type InitType;
+  // If the initializer attribute is a SymbolRefAttr it means we are
+  // initializing the global based on a global constant.
+  //
+  // TODO(cir): create another attribute to contain the final type and
+  // abstract away SymbolRefAttr.
+  if (auto symAttr = Init.dyn_cast<mlir::SymbolRefAttr>()) {
+    auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(theModule, symAttr);
+    assert(isa<mlir::cir::GlobalOp>(cstGlobal) &&
+           "unaware of other symbol providers");
+    auto g = cast<mlir::cir::GlobalOp>(cstGlobal);
+    auto arrayTy = g.getSymType().dyn_cast<mlir::cir::ArrayType>();
+    // TODO(cir): pointer to array decay. Should this be modeled explicitly
+    // in CIR?
+    if (arrayTy)
+      InitType = mlir::cir::PointerType::get(builder.getContext(),
+                                             arrayTy.getEltType());
+  } else {
+    assert(Init.isa<mlir::TypedAttr>() && "This should have a type");
+    auto TypedInitAttr = Init.cast<mlir::TypedAttr>();
+    InitType = TypedInitAttr.getType();
+  }
+  assert(!InitType.isa<mlir::NoneType>() && "Should have a type by now");
+
+  auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative));
+  // TODO(cir): Strip off pointer casts from Entry if we get them?
+
+  // TODO(cir): LLVM codegen used GlobalValue to handle both Function or
+  // GlobalVariable here. We currently only support GlobalOp, should this be
+  // used for FuncOp?
+  assert(dyn_cast<mlir::cir::GlobalOp>(&Entry) && "FuncOp not supported here");
+  auto GV = Entry;
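+
+  // Illustrative C input for the !InitExpr branch above (not from the
+  // original patch):
+  //
+  //   int counter;      // tentative definition: zero-initialized at EOF
+  //   int counter = 7;  // if this appears later, it wins instead
+  //
+  // hence the zero-init attribute rather than an "undef" initializer.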
+
+  // We have a definition after a declaration with the wrong type.
+  // We must make a new GlobalVariable* and update everything that used
+  // OldGV (a declaration or tentative definition) with the new
+  // GlobalVariable* (which will be a definition).
+  //
+  // This happens if there is a prototype for a global (e.g.
+  // "extern int x[];") and then a definition of a different type (e.g.
+  // "int x[10];"). This also happens when an initializer has a different
+  // type from the type of the global (this happens with unions).
+  if (!GV || GV.getSymType() != InitType) {
+    // TODO(cir): this should include an address space check as well.
+    assert(0 && "not implemented");
+  }
+
+  maybeHandleStaticInExternC(D, GV);
+
+  if (D->hasAttr<AnnotateAttr>())
+    assert(0 && "not implemented");
+
+  // Set CIR's linkage type as appropriate.
+  mlir::cir::GlobalLinkageKind Linkage =
+      getCIRLinkageVarDefinition(D, /*IsConstant=*/false);
+
+  // TODO(cir):
+  // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
+  // the device. [...]"
+  // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
+  // __device__, declares a variable that: [...]
+  if (GV && getLangOpts().CUDA) {
+    assert(0 && "not implemented");
+  }
+
+  // Set initializer and finalize emission.
+  CIRGenModule::setInitializer(GV, Init);
+  if (emitter)
+    emitter->finalize(GV);
+
+  // TODO(cir): If it is safe to mark the global 'constant', do so now.
+  // GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+  //                 isTypeConstant(D->getType(), true));
+
+  // If it is in a read-only section, mark it 'constant'.
+  if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+    GV.setSectionAttr(builder.getStringAttr(SA->getName()));
+
+  // TODO(cir):
+  // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
+
+  // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
+  // function is only defined alongside the variable, not also alongside
+  // callers. Normally, all accesses to a thread_local go through the
+  // thread-wrapper in order to ensure initialization has occurred, so the
+  // underlying variable will never be used other than the thread-wrapper,
+  // and it can be converted to internal linkage.
+  //
+  // However, if the variable has the 'constinit' attribute, it _can_ be
+  // referenced directly, without calling the thread-wrapper, so the linkage
+  // must not be changed.
+  //
+  // Additionally, if the variable isn't plain external linkage, e.g. if it's
+  // weak or linkonce, the de-duplication semantics are important to
+  // preserve, so we don't change the linkage.
+  if (D->getTLSKind() == VarDecl::TLS_Dynamic && GV.isPublic() &&
+      astCtx.getTargetInfo().getTriple().isOSDarwin() &&
+      !D->hasAttr<ConstInitAttr>()) {
+    // TODO(cir): set to mlir::SymbolTable::Visibility::Private once we have
+    // testcases.
+    assert(0 && "not implemented");
+  }
+
+  // Set CIR linkage and DLL storage class.
+  GV.setLinkage(Linkage);
+  // FIXME(cir): setLinkage should likely set MLIR's visibility
+  // automatically.
+  GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage));
+  // TODO(cir): handle DLL storage classes in CIR?
+  if (D->hasAttr<DLLImportAttr>())
+    assert(!UnimplementedFeature::setDLLStorageClass());
+  else if (D->hasAttr<DLLExportAttr>())
+    assert(!UnimplementedFeature::setDLLStorageClass());
+  else
+    assert(!UnimplementedFeature::setDLLStorageClass());
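+
+  // Illustrative inputs for the common-linkage handling below (assumed):
+  //
+  //   /* -fcommon */ int tentative;   // common linkage, zero initializer
+  //   static int internal_var;        // internal linkage, never common
+  //
+  // A target whose null pointer is non-zero forces weak_any instead, since
+  // common symbols must be all-zero.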
+
+  if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) {
+    // Common vars aren't constant even if declared 'const'.
+    GV.setConstant(false);
+    // Tentative definitions of global variables may be initialized with
+    // non-zero null pointers. In this case they should have weak linkage
+    // since common linkage must have a zero initializer and must not have
+    // an explicit section, and therefore cannot have a non-zero initial
+    // value.
+    auto Initializer = GV.getInitialValue();
+    if (Initializer && !getBuilder().isNullValue(*Initializer))
+      GV.setLinkage(mlir::cir::GlobalLinkageKind::WeakAnyLinkage);
+  }
+
+  // TODO(cir): setNonAliasAttributes(D, GV);
+
+  if (D->getTLSKind() && !GV.getTlsModelAttr()) {
+    if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+      llvm_unreachable("NYI");
+    setTLSMode(GV, *D);
+  }
+
+  // TODO(cir): maybeSetTrivialComdat(*D, *GV);
+
+  // TODO(cir):
+  // Emit the initializer function if necessary.
+  if (NeedsGlobalCtor || NeedsGlobalDtor)
+    buildGlobalVarDeclInit(D, GV, NeedsGlobalCtor);
+
+  // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug
+  // information.
+}
+
+void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) {
+  const auto *D = cast<ValueDecl>(GD.getDecl());
+
+  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+    // At -O0, don't generate CIR for functions with available_externally
+    // linkage.
+    if (!shouldEmitFunction(GD))
+      return;
+
+    if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
+      // Make sure to emit the definition(s) before we emit the thunks. This
+      // is necessary for the generation of certain thunks.
+      if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+        ABI->buildCXXStructor(GD);
+      else if (FD->isMultiVersion())
+        llvm_unreachable("NYI");
+      else
+        buildGlobalFunctionDefinition(GD, Op);
+
+      if (Method->isVirtual())
+        getVTables().buildThunks(GD);
+
+      return;
+    }
+
+    if (FD->isMultiVersion())
+      llvm_unreachable("NYI");
+    buildGlobalFunctionDefinition(GD, Op);
+    return;
+  }
+
+  if (const auto *VD = dyn_cast<VarDecl>(D))
+    return buildGlobalVarDefinition(VD, !VD->hasDefinition());
+
+  llvm_unreachable("Invalid argument to buildGlobalDefinition()");
+}
+
+mlir::Attribute
+CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) {
+  assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+  // Don't emit it as the address of the string, emit the string data itself
+  // as an inline array.
+  if (E->getCharByteWidth() == 1) {
+    SmallString<64> Str(E->getString());
+
+    // Resize the string to the right size, which is indicated by its type.
+    const ConstantArrayType *CAT = astCtx.getAsConstantArrayType(E->getType());
+    auto finalSize = CAT->getSize().getZExtValue();
+    Str.resize(finalSize);
+
+    auto eltTy = getTypes().ConvertType(CAT->getElementType());
+    return builder.getString(Str, eltTy, finalSize);
+  }
+
+  auto arrayTy =
+      getTypes().ConvertType(E->getType()).dyn_cast<mlir::cir::ArrayType>();
+  assert(arrayTy && "string literals must be emitted as an array type");
+
+  auto arrayEltTy = arrayTy.getEltType().dyn_cast<mlir::cir::IntType>();
+  assert(arrayEltTy &&
+         "string literal elements must be emitted as integral type");
+
+  auto arraySize = arrayTy.getSize();
+  auto literalSize = E->getLength();
+
+  // Collect the code units.
+  SmallVector<uint32_t, 32> elementValues;
+  elementValues.reserve(arraySize);
+  for (unsigned i = 0; i < literalSize; ++i)
+    elementValues.push_back(E->getCodeUnit(i));
+  elementValues.resize(arraySize);
+
+  // If the string is full of null bytes, emit a #cir.zero instead.
+  if (std::all_of(elementValues.begin(), elementValues.end(),
+                  [](uint32_t x) { return x == 0; }))
+    return builder.getZeroAttr(arrayTy);
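+
+  // Illustrative results (hypothetical attribute spelling): a literal that
+  // is all zeros collapses to #cir.zero, anything else becomes a constant
+  // array padded out to the declared size, e.g.
+  //
+  //   wchar_t w[4] = L"";    // -> #cir.zero on the whole array
+  //   wchar_t v[4] = L"hi";  // -> constant array ['h', 'i', 0, 0]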
+
+  // Otherwise emit a constant array holding the characters.
+  SmallVector<mlir::Attribute, 32> elements;
+  elements.reserve(arraySize);
+  for (uint64_t i = 0; i < arraySize; ++i)
+    elements.push_back(mlir::cir::IntAttr::get(arrayEltTy, elementValues[i]));
+
+  auto elementsAttr = mlir::ArrayAttr::get(builder.getContext(), elements);
+  return builder.getConstArray(elementsAttr, arrayTy);
+}
+
+// TODO(cir): this could be a common AST helper for both CIR and LLVM
+// codegen.
+LangAS CIRGenModule::getGlobalConstantAddressSpace() const {
+  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
+  if (getLangOpts().OpenCL)
+    return LangAS::opencl_constant;
+  if (getLangOpts().SYCLIsDevice)
+    return LangAS::sycl_global;
+  if (auto AS = getTarget().getConstantAddressSpace())
+    return AS.value();
+  return LangAS::Default;
+}
+
+static mlir::cir::GlobalOp
+generateStringLiteral(mlir::Location loc, mlir::TypedAttr C,
+                      mlir::cir::GlobalLinkageKind LT, CIRGenModule &CGM,
+                      StringRef GlobalName, CharUnits Alignment) {
+  unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace(
+      CGM.getGlobalConstantAddressSpace());
+  assert((AddrSpace == 0 &&
+          !cir::UnimplementedFeature::addressSpaceInGlobalVar()) &&
+         "NYI");
+
+  // Create a global variable for this string.
+  // FIXME(cir): check for insertion point in module level.
+  auto GV = CIRGenModule::createGlobalOp(CGM, loc, GlobalName, C.getType(),
+                                         !CGM.getLangOpts().WritableStrings);
+
+  // Set up extra information and add to the module.
+  GV.setAlignmentAttr(CGM.getSize(Alignment));
+  GV.setLinkageAttr(
+      mlir::cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(),
+                                            LT));
+  CIRGenModule::setInitializer(GV, C);
+
+  // TODO(cir)
+  assert(!cir::UnimplementedFeature::threadLocal() && "NYI");
+  assert(!cir::UnimplementedFeature::unnamedAddr() && "NYI");
+  assert(!mlir::cir::isWeakForLinker(LT) && "NYI");
+  assert(!cir::UnimplementedFeature::setDSOLocal() && "NYI");
+  return GV;
+}
+
+/// Return a pointer to a constant array for the given string literal.
+mlir::cir::GlobalViewAttr
+CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S,
+                                                 StringRef Name) {
+  CharUnits Alignment =
+      astCtx.getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr);
+
+  mlir::Attribute C = getConstantArrayFromStringLiteral(S);
+
+  mlir::cir::GlobalOp GV;
+  if (!getLangOpts().WritableStrings && ConstantStringMap.count(C)) {
+    GV = ConstantStringMap[C];
+    // The bigger alignment always wins.
+    if (!GV.getAlignment() ||
+        uint64_t(Alignment.getQuantity()) > *GV.getAlignment())
+      GV.setAlignmentAttr(getSize(Alignment));
+  } else {
+    SmallString<256> StringNameBuffer = Name;
+    llvm::raw_svector_ostream Out(StringNameBuffer);
+    if (StringLiteralCnt)
+      Out << StringLiteralCnt;
+    Name = Out.str();
+    StringLiteralCnt++;
+
+    SmallString<256> MangledNameBuffer;
+    StringRef GlobalVariableName;
+    auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage;
+
+    // Mangle the string literal if that's how the ABI merges duplicate
+    // strings. Don't do it if they are writable, since we don't want writes
+    // in one TU to affect strings in another.
+    if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
+        !getLangOpts().WritableStrings) {
+      assert(0 && "not implemented");
+    } else {
+      LT = mlir::cir::GlobalLinkageKind::InternalLinkage;
+      GlobalVariableName = Name;
+    }
+
+    auto loc = getLoc(S->getSourceRange());
+    auto typedC = llvm::dyn_cast<mlir::TypedAttr>(C);
+    if (!typedC)
+      llvm_unreachable("this should never be untyped at this point");
+    GV = generateStringLiteral(loc, typedC, LT, *this, GlobalVariableName,
+                               Alignment);
+    ConstantStringMap[C] = GV;
+
+    assert(!cir::UnimplementedFeature::reportGlobalToASan() && "NYI");
+  }
+
+  auto ArrayTy = GV.getSymType().dyn_cast<mlir::cir::ArrayType>();
+  assert(ArrayTy && "String literal must be array");
+  auto PtrTy =
+      mlir::cir::PointerType::get(builder.getContext(), ArrayTy.getEltType());
+
+  return builder.getGlobalViewAttr(PtrTy, GV);
+}
+
+void CIRGenModule::buildDeclContext(const DeclContext *DC) {
+  for (auto *I : DC->decls()) {
+    // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
+    // are themselves considered "top-level", so EmitTopLevelDecl on an
+    // ObjCImplDecl does not recursively visit them. We need to do that in
+    // case they're nested inside another construct (LinkageSpecDecl /
+    // ExportDecl) that does stop them from being considered "top-level".
+    if (auto *OID = dyn_cast<ObjCImplDecl>(I))
+      llvm_unreachable("NYI");
+
+    buildTopLevelDecl(I);
+  }
+}
+
+void CIRGenModule::buildLinkageSpec(const LinkageSpecDecl *LSD) {
+  if (LSD->getLanguage() != LinkageSpecLanguageIDs::C &&
+      LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) {
+    llvm_unreachable("unsupported linkage spec");
+    return;
+  }
+  buildDeclContext(LSD);
+}
+
+// Emit code for a single top level declaration.
+void CIRGenModule::buildTopLevelDecl(Decl *decl) {
+  // Ignore dependent declarations.
+  if (decl->isTemplated())
+    return;
+
+  // Consteval functions shouldn't be emitted.
+  if (auto *FD = dyn_cast<FunctionDecl>(decl))
+    if (FD->isConsteval())
+      return;
+
+  switch (decl->getKind()) {
+  default:
+    llvm::errs() << "buildTopLevelDecl codegen for decl kind '"
+                 << decl->getDeclKindName() << "' not implemented\n";
+    assert(false && "Not yet implemented");
+
+  case Decl::TranslationUnit: {
+    // This path is CIR only - CIRGen handles TUDecls because
+    // of clang-tidy checks, that operate on TU granularity.
+    TranslationUnitDecl *TU = cast<TranslationUnitDecl>(decl);
+    for (DeclContext::decl_iterator D = TU->decls_begin(),
+                                    DEnd = TU->decls_end();
+         D != DEnd; ++D)
+      buildTopLevelDecl(*D);
+    return;
+  }
+  case Decl::Var:
+  case Decl::Decomposition:
+  case Decl::VarTemplateSpecialization:
+    buildGlobal(cast<VarDecl>(decl));
+    assert(!isa<DecompositionDecl>(decl) && "not implemented");
+    // if (auto *DD = dyn_cast<DecompositionDecl>(decl))
+    //   for (auto *B : DD->bindings())
+    //     if (auto *HD = B->getHoldingVar())
+    //       EmitGlobal(HD);
+    break;
+
+  case Decl::CXXConversion:
+  case Decl::CXXMethod:
+  case Decl::Function:
+    buildGlobal(cast<FunctionDecl>(decl));
+    assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI");
+    break;
+  // C++ Decls
+  case Decl::Namespace:
+    buildDeclContext(cast<NamespaceDecl>(decl));
+    break;
+  case Decl::ClassTemplateSpecialization: {
+    // const auto *Spec = cast<ClassTemplateSpecializationDecl>(decl);
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+  }
+    [[fallthrough]];
+  case Decl::CXXRecord: {
+    CXXRecordDecl *crd = cast<CXXRecordDecl>(decl);
+    // TODO: Handle debug info as CodeGenModule.cpp does
+    for (auto *childDecl : crd->decls())
+      if (isa<VarDecl>(childDecl) || isa<CXXRecordDecl>(childDecl))
+        buildTopLevelDecl(childDecl);
+    break;
+  }
+  // No code generation needed.
+  case Decl::UsingShadow:
+  case Decl::ClassTemplate:
+  case Decl::VarTemplate:
+  case Decl::Concept:
+  case Decl::VarTemplatePartialSpecialization:
+  case Decl::FunctionTemplate:
+  case Decl::TypeAliasTemplate:
+  case Decl::Block:
+  case Decl::Empty:
+  case Decl::Binding:
+    break;
+  case Decl::Using:          // using X; [C++]
+  case Decl::UsingEnum:      // using enum X; [C++]
+  case Decl::NamespaceAlias:
+  case Decl::UsingDirective: // using namespace X; [C++]
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+    break;
+  case Decl::CXXConstructor:
+    getCXXABI().buildCXXConstructors(cast<CXXConstructorDecl>(decl));
+    break;
+  case Decl::CXXDestructor:
+    getCXXABI().buildCXXDestructors(cast<CXXDestructorDecl>(decl));
+    break;
+
+  case Decl::StaticAssert:
+    // Nothing to do.
+    break;
+
+  case Decl::LinkageSpec:
+    buildLinkageSpec(cast<LinkageSpecDecl>(decl));
+    break;
+
+  case Decl::Typedef:
+  case Decl::TypeAlias: // using foo = bar; [C++11]
+  case Decl::Record:
+  case Decl::Enum:
+    assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+    break;
+  }
+}
+
+static bool shouldBeInCOMDAT(CIRGenModule &CGM, const Decl &D) {
+  if (!CGM.supportsCOMDAT())
+    return false;
+
+  if (D.hasAttr<SelectAnyAttr>())
+    return true;
+
+  GVALinkage Linkage;
+  if (auto *VD = dyn_cast<VarDecl>(&D))
+    Linkage = CGM.getASTContext().GetGVALinkageForVariable(VD);
+  else
+    Linkage =
+        CGM.getASTContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
+
+  switch (Linkage) {
+  case clang::GVA_Internal:
+  case clang::GVA_AvailableExternally:
+  case clang::GVA_StrongExternal:
+    return false;
+  case clang::GVA_DiscardableODR:
+  case clang::GVA_StrongODR:
+    return true;
+  }
+  llvm_unreachable("No such linkage");
+}
+
+// TODO(cir): this could be a common method shared with LLVM codegen.
+static bool isVarDeclStrongDefinition(const ASTContext &Context,
+                                      CIRGenModule &CGM, const VarDecl *D,
+                                      bool NoCommon) {
+  // Don't give variables common linkage if -fno-common was specified unless
+  // it was overridden by a NoCommon attribute.
+  if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
+    return true;
+
+  // C11 6.9.2/2:
+  //   A declaration of an identifier for an object that has file scope
+  //   without an initializer, and without a storage-class specifier or with
+  //   the storage-class specifier static, constitutes a tentative
+  //   definition.
+  if (D->getInit() || D->hasExternalStorage())
+    return true;
+
+  // A variable cannot be both common and exist in a section.
+  if (D->hasAttr<SectionAttr>())
+    return true;
+
+  // A variable cannot be both common and exist in a section.
+  // We don't try to determine which is the right section in the front-end.
+  // If no specialized section name is applicable, it will resort to default.
+  if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
+      D->hasAttr<PragmaClangDataSectionAttr>() ||
+      D->hasAttr<PragmaClangRelroSectionAttr>() ||
+      D->hasAttr<PragmaClangRodataSectionAttr>())
+    return true;
+
+  // Thread local vars aren't considered common linkage.
+  if (D->getTLSKind())
+    return true;
+
+  // Tentative definitions marked with WeakImportAttr are true definitions.
+  if (D->hasAttr<WeakImportAttr>())
+    return true;
+
+  // A variable cannot be both common and exist in a comdat.
+  if (shouldBeInCOMDAT(CGM, *D))
+    return true;
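+
+  // Illustrative C inputs for the checks above (not from the original
+  // patch):
+  //
+  //   int a;                                 // tentative: may be common
+  //   int b = 1;                             // initializer: strong
+  //   __attribute__((section("s"))) int c;   // section: strong
+  //   _Thread_local int d;                   // TLS: strong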
+
+  // Declarations with a required alignment do not have common linkage in
+  // MSVC mode.
+  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+    if (D->hasAttr<AlignedAttr>())
+      return true;
+    QualType VarType = D->getType();
+    if (Context.isAlignmentRequired(VarType))
+      return true;
+
+    if (const auto *RT = VarType->getAs<RecordType>()) {
+      const RecordDecl *RD = RT->getDecl();
+      for (const FieldDecl *FD : RD->fields()) {
+        if (FD->isBitField())
+          continue;
+        if (FD->hasAttr<AlignedAttr>())
+          return true;
+        if (Context.isAlignmentRequired(FD->getType()))
+          return true;
+      }
+    }
+  }
+
+  // Microsoft's link.exe doesn't support alignments greater than 32 bytes
+  // for common symbols, so symbols with greater alignment requirements
+  // cannot be common. Other COFF linkers (ld.bfd and LLD) support arbitrary
+  // power-of-two alignments for common symbols via the aligncomm directive,
+  // so this restriction only applies to MSVC environments.
+  if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
+      Context.getTypeAlignIfKnown(D->getType()) >
+          Context.toBits(CharUnits::fromQuantity(32)))
+    return true;
+
+  return false;
+}
+
+void CIRGenModule::setInitializer(mlir::cir::GlobalOp &global,
+                                  mlir::Attribute value) {
+  // Recompute visibility when updating initializer.
+  global.setInitialValueAttr(value);
+  mlir::SymbolTable::setSymbolVisibility(
+      global, CIRGenModule::getMLIRVisibility(global));
+}
+
+mlir::SymbolTable::Visibility
+CIRGenModule::getMLIRVisibility(mlir::cir::GlobalOp op) {
+  // MLIR doesn't accept public symbol declarations (only definitions).
+  if (op.isDeclaration())
+    return mlir::SymbolTable::Visibility::Private;
+  return getMLIRVisibilityFromCIRLinkage(op.getLinkage());
+}
+
+mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage(
+    mlir::cir::GlobalLinkageKind GLK) {
+  switch (GLK) {
+  case mlir::cir::GlobalLinkageKind::InternalLinkage:
+  case mlir::cir::GlobalLinkageKind::PrivateLinkage:
+    return mlir::SymbolTable::Visibility::Private;
+  case mlir::cir::GlobalLinkageKind::ExternalLinkage:
+  case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage:
+  case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage:
+  case mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage:
+  case mlir::cir::GlobalLinkageKind::CommonLinkage:
+    return mlir::SymbolTable::Visibility::Public;
+  default: {
+    llvm::errs() << "visibility not implemented for '"
+                 << stringifyGlobalLinkageKind(GLK) << "'\n";
+    assert(0 && "not implemented");
+  }
+  }
+  llvm_unreachable("linkage should be handled above!");
+}
+
+mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
+    const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
+  if (Linkage == GVA_Internal)
+    return mlir::cir::GlobalLinkageKind::InternalLinkage;
+
+  if (D->hasAttr<WeakAttr>()) {
+    if (IsConstantVariable)
+      return mlir::cir::GlobalLinkageKind::WeakODRLinkage;
+    else
+      return mlir::cir::GlobalLinkageKind::WeakAnyLinkage;
+  }
+
+  if (const auto *FD = D->getAsFunction())
+    if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
+      return mlir::cir::GlobalLinkageKind::LinkOnceAnyLinkage;
+
+  // We are guaranteed to have a strong definition somewhere else,
+  // so we can use available_externally linkage.
+  if (Linkage == GVA_AvailableExternally)
+    return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage;
+
+  // Note that Apple's kernel linker doesn't support symbol
+  // coalescing, so we need to avoid linkonce and weak linkages there.
+  // Normally, this means we just map to internal, but for explicit
+  // instantiations we'll map to external.
+
+  // In C++, the compiler has to emit a definition in every translation unit
+  // that references the function. We should use linkonce_odr because
+  // a) if all references in this translation unit are optimized away, we
+  // don't need to codegen it. b) if the function persists, it needs to be
+  // merged with other definitions. c) C++ has the ODR, so we know the
+  // definition is dependable.
+  if (Linkage == GVA_DiscardableODR)
+    return !astCtx.getLangOpts().AppleKext
+               ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage
+               : mlir::cir::GlobalLinkageKind::InternalLinkage;
+
+  // An explicit instantiation of a template has weak linkage, since
+  // explicit instantiations can occur in multiple translation units
+  // and must all be equivalent. However, we are not allowed to
+  // throw away these explicit instantiations.
+  //
+  // CUDA/HIP: For the -fno-gpu-rdc case, device code is limited to one TU,
+  // so say that CUDA templates are either external (for kernels) or
+  // internal. This lets llvm perform aggressive inter-procedural
+  // optimizations. For the -fgpu-rdc case, device function calls across
+  // multiple TUs are allowed, therefore we need to follow the normal
+  // linkage paradigm.
+  if (Linkage == GVA_StrongODR) {
+    if (getLangOpts().AppleKext)
+      return mlir::cir::GlobalLinkageKind::ExternalLinkage;
+    if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
+        !getLangOpts().GPURelocatableDeviceCode)
+      return D->hasAttr<CUDAGlobalAttr>()
+                 ? mlir::cir::GlobalLinkageKind::ExternalLinkage
+                 : mlir::cir::GlobalLinkageKind::InternalLinkage;
+    return mlir::cir::GlobalLinkageKind::WeakODRLinkage;
+  }
+
+  // C++ doesn't have tentative definitions and thus cannot have common
+  // linkage.
+  if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
+      !isVarDeclStrongDefinition(astCtx, *this, cast<VarDecl>(D),
+                                 getCodeGenOpts().NoCommon))
+    return mlir::cir::GlobalLinkageKind::CommonLinkage;
+
+  // selectany symbols are externally visible, so use weak instead of
+  // linkonce. MSVC optimizes away references to const selectany globals, so
+  // all definitions should be the same and ODR linkage should be used.
+  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
+  if (D->hasAttr<SelectAnyAttr>())
+    return mlir::cir::GlobalLinkageKind::WeakODRLinkage;
+
+  // Otherwise, we have strong external linkage.
+  assert(Linkage == GVA_StrongExternal);
+  return mlir::cir::GlobalLinkageKind::ExternalLinkage;
+}
+
+/// This function is called when we implement a function with no prototype,
+/// e.g. "int foo() {}". If there are existing call uses of the old function
+/// in the module, this adjusts them to call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls
+/// to functions to be able to inline them. If there is a bitcast in the
+/// way, it won't inline them. Instcombine normally deletes these calls, but
+/// it isn't run at -O0.
+void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction(
+    mlir::Operation *Old, mlir::cir::FuncOp NewFn) {
+
+  // If we're redefining a global as a function, don't transform it.
+  auto OldFn = dyn_cast<mlir::cir::FuncOp>(Old);
+  if (!OldFn)
+    return;
+
+  // TODO(cir): this RAUW ignores the features below.
+  assert(!UnimplementedFeature::exceptions() && "Call vs Invoke NYI");
+  assert(!UnimplementedFeature::parameterAttributes());
+  assert(!UnimplementedFeature::operandBundles());
+  assert(OldFn->getAttrs().size() > 1 && "Attribute forwarding NYI");
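+
+  // Illustrative C input for this rewrite (not from the original patch):
+  //
+  //   int foo();                       // no prototype
+  //   int bar(void) { return foo(); }  // call through the no-proto type
+  //   int foo(int x) { return x; }     // definition with a real prototype
+  //
+  // The loop below re-creates each such call against the now-typed symbol.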
+
+  // Mark the new function as originating from a no-proto declaration.
+  NewFn.setNoProtoAttr(OldFn.getNoProtoAttr());
+
+  // Iterate through all calls of the no-proto function.
+  auto SymUses = OldFn.getSymbolUses(OldFn->getParentOp());
+  for (auto Use : SymUses.value()) {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+
+    if (auto noProtoCallOp = dyn_cast<mlir::cir::CallOp>(Use.getUser())) {
+      builder.setInsertionPoint(noProtoCallOp);
+
+      // Patch the call type with the real function type.
+      auto realCallOp = builder.create<mlir::cir::CallOp>(
+          noProtoCallOp.getLoc(), NewFn, noProtoCallOp.getOperands());
+
+      // Replace the old no-proto call with the fixed call.
+      noProtoCallOp.replaceAllUsesWith(realCallOp);
+      noProtoCallOp.erase();
+    } else if (auto getGlobalOp =
+                   dyn_cast<mlir::cir::GetGlobalOp>(Use.getUser())) {
+      // Replace the type.
+      getGlobalOp.getAddr().setType(mlir::cir::PointerType::get(
+          builder.getContext(), NewFn.getFunctionType()));
+    } else {
+      llvm_unreachable("NYI");
+    }
+  }
+}
+
+mlir::cir::GlobalLinkageKind
+CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *VD, bool IsConstant) {
+  assert(!IsConstant && "constant variables NYI");
+  GVALinkage Linkage = astCtx.GetGVALinkageForVariable(VD);
+  return getCIRLinkageForDeclarator(VD, Linkage, IsConstant);
+}
+
+mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) {
+  const auto *D = cast<FunctionDecl>(GD.getDecl());
+
+  GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D);
+
+  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
+    return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor,
+                                               GD.getDtorType());
+
+  if (isa<CXXConstructorDecl>(D) &&
+      cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
+      astCtx.getTargetInfo().getCXXABI().isMicrosoft()) {
+    // Just like in LLVM codegen:
+    // Our approach to inheriting constructors is fundamentally different
+    // from that used by the MS ABI, so keep our inheriting constructor
+    // thunks internal rather than trying to pick an unambiguous mangling
+    // for them.
+    return mlir::cir::GlobalLinkageKind::InternalLinkage;
+  }
+
+  return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
+}
+
+void CIRGenModule::buildAliasForGlobal(StringRef mangledName,
+                                       mlir::Operation *op, GlobalDecl aliasGD,
+                                       mlir::cir::FuncOp aliasee,
+                                       mlir::cir::GlobalLinkageKind linkage) {
+  auto *aliasFD = dyn_cast<FunctionDecl>(aliasGD.getDecl());
+  assert(aliasFD && "expected FunctionDecl");
+  auto alias =
+      createCIRFunction(getLoc(aliasGD.getDecl()->getSourceRange()),
+                        mangledName, aliasee.getFunctionType(), aliasFD);
+  alias.setAliasee(aliasee.getName());
+  alias.setLinkage(linkage);
+  mlir::SymbolTable::setSymbolVisibility(
+      alias, getMLIRVisibilityFromCIRLinkage(linkage));
+
+  // Alias constructors and destructors are always unnamed_addr.
+  assert(!UnimplementedFeature::unnamedAddr());
+
+  // Switch any previous uses to the alias.
+  if (op) {
+    llvm_unreachable("NYI");
+  } else {
+    // Name already set by createCIRFunction.
+  }
+
+  // Finally, set up the alias with its proper name and attributes.
+  setCommonAttributes(aliasGD, alias);
+}
+
+mlir::Type CIRGenModule::getCIRType(const QualType &type) {
+  return genTypes.ConvertType(type);
+}
+
+bool CIRGenModule::verifyModule() {
+  // Verify the module after we have finished constructing it; this will
+  // check the structural properties of the IR and invoke any specific
+  // verifiers we have on the CIR operations.
+  return mlir::verify(theModule).succeeded();
+}
+
+std::pair<mlir::cir::FuncType, mlir::cir::FuncOp>
+CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD,
+                                          const CIRGenFunctionInfo *FnInfo,
+                                          mlir::cir::FuncType FnType,
+                                          bool Dontdefer,
+                                          ForDefinition_t IsForDefinition) {
+  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  if (isa<CXXDestructorDecl>(MD)) {
+    // Always alias equivalent complete destructors to base destructors in
+    // the MS ABI.
+    if (getTarget().getCXXABI().isMicrosoft() &&
+        GD.getDtorType() == Dtor_Complete &&
+        MD->getParent()->getNumVBases() == 0)
+      llvm_unreachable("NYI");
+  }
+
+  if (!FnType) {
+    if (!FnInfo)
+      FnInfo = &getTypes().arrangeCXXStructorDeclaration(GD);
+    FnType = getTypes().GetFunctionType(*FnInfo);
+  }
+
+  auto Fn = GetOrCreateCIRFunction(getMangledName(GD), FnType, GD,
+                                   /*ForVtable=*/false, Dontdefer,
+                                   /*IsThunk=*/false, IsForDefinition);
+
+  return {FnType, Fn};
+}
+
+mlir::cir::FuncOp
+CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty,
+                                bool ForVTable, bool DontDefer,
+                                ForDefinition_t IsForDefinition) {
+  assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
+         "consteval function should never be emitted");
+
+  if (!Ty) {
+    const auto *FD = cast<FunctionDecl>(GD.getDecl());
+    Ty = getTypes().ConvertType(FD->getType());
+  }
+
+  // Devirtualized destructor calls may come through here instead of via
+  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor
+  // instead of the complete destructor when necessary.
+  if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
+    if (getTarget().getCXXABI().isMicrosoft() &&
+        GD.getDtorType() == Dtor_Complete &&
+        DD->getParent()->getNumVBases() == 0)
+      llvm_unreachable("NYI");
+  }
+
+  StringRef MangledName = getMangledName(GD);
+  auto F = GetOrCreateCIRFunction(MangledName, Ty, GD, ForVTable, DontDefer,
+                                  /*IsThunk=*/false, IsForDefinition);
+
+  assert(!langOpts.CUDA && "NYI");
+
+  return F;
+}
+
+// Returns true if GD is a function decl with internal linkage and needs a
+// unique suffix after the mangled name.
+static bool isUniqueInternalLinkageDecl(GlobalDecl GD, CIRGenModule &CGM) {
+  assert(CGM.getModuleNameHash().empty() &&
+         "Unique internal linkage names NYI");
+
+  return false;
+}
+
+static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD,
+                                      const NamedDecl *ND,
+                                      bool OmitMultiVersionMangling = false) {
+  assert(!OmitMultiVersionMangling && "NYI");
+
+  SmallString<256> Buffer;
+
+  llvm::raw_svector_ostream Out(Buffer);
+  MangleContext &MC = CGM.getCXXABI().getMangleContext();
+
+  assert(CGM.getModuleNameHash().empty() && "NYI");
+  auto ShouldMangle = MC.shouldMangleDeclName(ND);
+
+  if (ShouldMangle) {
+    MC.mangleName(GD.getWithDecl(ND), Out);
+  } else {
+    auto *II = ND->getIdentifier();
+    assert(II && "Attempt to mangle unnamed decl.");
+
+    const auto *FD = dyn_cast<FunctionDecl>(ND);
+
+    if (FD &&
+        FD->getType()->castAs<FunctionType>()->getCallConv() ==
+            CC_X86RegCall) {
+      assert(0 && "NYI");
+    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
+               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
+      assert(0 && "NYI");
+    } else {
+      Out << II->getName();
+    }
+  }
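+
+  // Illustrative outcomes of the branch above (Itanium ABI): for
+  //
+  //   namespace n { int f(int); }  // mangled, e.g. _ZN1n1fEi
+  //   extern "C" int g(int);       // not mangled: plain 'g'
+  //
+  // only the first goes through mangleName; the second takes the
+  // identifier branch.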
+  assert(!isUniqueInternalLinkageDecl(GD, CGM) && "NYI");
+
+  if (const auto *FD = dyn_cast(ND)) {
+    assert(!FD->isMultiVersion() && "NYI");
+  }
+  assert(!CGM.getLangOpts().GPURelocatableDeviceCode && "NYI");
+
+  return std::string(Out.str());
+}
+
+StringRef CIRGenModule::getMangledName(GlobalDecl GD) {
+  auto CanonicalGD = GD.getCanonicalDecl();
+
+  // Some ABIs don't have constructor variants. Make sure that base and complete
+  // constructors get mangled the same.
+  if (const auto *CD = dyn_cast(CanonicalGD.getDecl())) {
+    if (!getTarget().getCXXABI().hasConstructorVariants()) {
+      assert(false && "NYI");
+    }
+  }
+
+  assert(!langOpts.CUDAIsDevice && "NYI");
+
+  // Keep the first result in the case of a mangling collision.
+  const auto *ND = cast(GD.getDecl());
+  std::string MangledName = getMangledNameImpl(*this, GD, ND);
+
+  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
+  return MangledDeclNames[CanonicalGD] = Result.first->first();
+}
+
+void CIRGenModule::buildTentativeDefinition(const VarDecl *D) {
+  assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+  StringRef MangledName = getMangledName(D);
+  auto *GV = getGlobalValue(MangledName);
+
+  // TODO(cir): can a tentative definition come from something other than a
+  // global op? If not, the assertion below is wrong and should be removed. If
+  // so, getGlobalValue might be better off returning a global value interface
+  // that allows us to manage different global value types transparently.
+  if (GV)
+    assert(isa(GV) &&
+           "tentative definition can only be built from a cir.global_op");
+
+  // We already have a definition, not a declaration, with the same mangled
+  // name. Emitting the declaration is not required (and would actually
+  // overwrite the emitted definition).
+  if (GV && !dyn_cast(GV).isDeclaration())
+    return;
+
+  // If we have not seen a reference to this variable yet, place it into the
+  // deferred declarations table to be emitted if needed later.
+  if (!MustBeEmitted(D) && !GV) {
+    DeferredDecls[MangledName] = D;
+    return;
+  }
+
+  // The tentative definition is the only definition.
+  buildGlobalVarDefinition(D);
+}
+
+void CIRGenModule::setGlobalVisibility(mlir::Operation *GV,
+                                       const NamedDecl *D) const {
+  assert(!UnimplementedFeature::setGlobalVisibility());
+}
+
+void CIRGenModule::setDSOLocal(mlir::Operation *Op) const {
+  assert(!UnimplementedFeature::setDSOLocal());
+}
+
+void CIRGenModule::setGVProperties(mlir::Operation *Op,
+                                   const NamedDecl *D) const {
+  assert(!UnimplementedFeature::setDLLImportDLLExport());
+  setGVPropertiesAux(Op, D);
+}
+
+void CIRGenModule::setGVPropertiesAux(mlir::Operation *Op,
+                                      const NamedDecl *D) const {
+  setGlobalVisibility(Op, D);
+  setDSOLocal(Op);
+  assert(!UnimplementedFeature::setPartition());
+}
+
+bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName,
+                                            GlobalDecl &Result) const {
+  auto Res = Manglings.find(MangledName);
+  if (Res == Manglings.end())
+    return false;
+  Result = Res->getValue();
+  return true;
+}
+
+mlir::cir::FuncOp
+CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
+                                mlir::cir::FuncType Ty,
+                                const clang::FunctionDecl *FD) {
+  // At the point we need to create the function, the insertion point
+  // could be anywhere (e.g. a callsite). Do not rely on whatever it might
+  // be: properly save it, find the appropriate place, and restore it.
+  FuncOp f;
+  {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+
+    // Some global emissions are triggered while emitting a function, e.g.
+    //   void s() { x.method() }
+    //
+    // Be sure to insert a new function before a current one.
+    auto *curCGF = getCurrCIRGenFun();
+    if (curCGF)
+      builder.setInsertionPoint(curCGF->CurFn);
+
+    f = builder.create(loc, name, Ty);
+
+    if (FD)
+      f.setAstAttr(makeFuncDeclAttr(FD, builder.getContext()));
+
+    if (FD && !FD->hasPrototype())
+      f.setNoProtoAttr(builder.getUnitAttr());
+
+    assert(f.isDeclaration() && "expected empty body");
+
+    // A declaration gets private visibility by default, but external linkage
+    // as the default linkage.
+    f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get(
+        builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage));
+    mlir::SymbolTable::setSymbolVisibility(
+        f, mlir::SymbolTable::Visibility::Private);
+
+    setExtraAttributesForFunc(f, FD);
+
+    if (!curCGF)
+      theModule.push_back(f);
+  }
+  return f;
+}
+
+mlir::cir::FuncOp CIRGenModule::createRuntimeFunction(
+    mlir::cir::FuncType Ty, StringRef Name, mlir::ArrayAttr,
+    [[maybe_unused]] bool Local, bool AssumeConvergent) {
+  if (AssumeConvergent) {
+    llvm_unreachable("NYI");
+  }
+
+  auto entry = GetOrCreateCIRFunction(Name, Ty, GlobalDecl(),
+                                      /*ForVtable=*/false);
+
+  // Traditional codegen checks for a valid dyn_cast to llvm::Function for
+  // `entry`; no testcase covers this path just yet, though.
+  if (!entry) {
+    // Set up the runtime CC and DLL support for Windows, and set dso_local.
+    llvm_unreachable("NYI");
+  }
+
+  return entry;
+}
+
+bool isDefaultedMethod(const clang::FunctionDecl *FD) {
+  if (FD->isDefaulted() && isa(FD) &&
+      (cast(FD)->isCopyAssignmentOperator() ||
+       cast(FD)->isMoveAssignmentOperator()))
+    return true;
+  return false;
+}
+
+mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) {
+  bool invalidLoc = !FD || (FD->getSourceRange().getBegin().isInvalid() ||
+                            FD->getSourceRange().getEnd().isInvalid());
+  if (!invalidLoc)
+    return getLoc(FD->getSourceRange());
+
+  // Use the module location.
+  return theModule->getLoc();
+}
+
+/// Determines whether the language options require us to model
+/// unwind exceptions. We treat -fexceptions as mandating this
+/// except under the fragile ObjC ABI with only ObjC exceptions
+/// enabled. This means, for example, that C with -fexceptions
+/// enables this.
+/// TODO(cir): can be shared with traditional LLVM codegen.
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
+  // If exceptions are completely disabled, obviously this is false.
+  if (!LangOpts.Exceptions)
+    return false;
+
+  // If C++ exceptions are enabled, this is true.
+  if (LangOpts.CXXExceptions)
+    return true;
+
+  // If ObjC exceptions are enabled, this depends on the ABI.
+  if (LangOpts.ObjCExceptions) {
+    return LangOpts.ObjCRuntime.hasUnwindExceptions();
+  }
+
+  return true;
+}
+
+void CIRGenModule::setExtraAttributesForFunc(FuncOp f,
+                                             const clang::FunctionDecl *FD) {
+  mlir::NamedAttrList attrs;
+
+  if (!hasUnwindExceptions(getLangOpts())) {
+    auto attr = mlir::cir::NoThrowAttr::get(builder.getContext());
+    attrs.set(attr.getMnemonic(), attr);
+  }
+
+  if (!FD) {
+    // If we don't have a declaration to control inlining, the function isn't
+    // explicitly marked as alwaysinline for semantic reasons, and inlining is
+    // disabled, mark the function as noinline.
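The cascade that follows maps source-level hints onto cir.inline attributes. As a rough guide, these C++ inputs would land in the different branches (the attributes are standard Clang/GCC spellings, not something this patch adds):

```cpp
// Sketch of inputs for the inlining cascade below.
[[gnu::always_inline]] inline int fast(int x) { return x + 1; } // AlwaysInline
[[gnu::noinline]] int slow(int x) { return x - 1; }             // NoInline
inline int hinted(int x) { return x * 2; } // InlineHint via isInlineSpecified()
int plain(int x) { return x; } // depends on the -O level and inlining mode
```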
+ if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { + auto attr = mlir::cir::InlineAttr::get( + builder.getContext(), mlir::cir::InlineKind::AlwaysInline); + attrs.set(attr.getMnemonic(), attr); + } + } else if (FD->hasAttr()) { + // Add noinline if the function isn't always_inline. + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } else if (FD->hasAttr()) { + // (noinline wins over always_inline, and we can't specify both in IR) + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::AlwaysInline); + attrs.set(attr.getMnemonic(), attr); + } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { + // If we're not inlining, then force everything that isn't always_inline + // to carry an explicit noinline attribute. + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } else { + // Otherwise, propagate the inline hint attribute and potentially use its + // absence to mark things as noinline. + // Search function and template pattern redeclarations for inline. + auto CheckForInline = [](const FunctionDecl *FD) { + auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { + return Redecl->isInlineSpecified(); + }; + if (any_of(FD->redecls(), CheckRedeclForInline)) + return true; + const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); + if (!Pattern) + return false; + return any_of(Pattern->redecls(), CheckRedeclForInline); + }; + if (CheckForInline(FD)) { + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::InlineHint); + attrs.set(attr.getMnemonic(), attr); + } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining) { + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } + } + + // Track whether we need to add the optnone attribute, + // starting with the default for this optimization level. + bool ShouldAddOptNone = + !codeGenOpts.DisableO0ImplyOptNone && codeGenOpts.OptimizationLevel == 0; + if (FD) { + ShouldAddOptNone &= !FD->hasAttr(); + ShouldAddOptNone &= !FD->hasAttr(); + ShouldAddOptNone |= FD->hasAttr(); + } + + if (ShouldAddOptNone) { + auto optNoneAttr = mlir::cir::OptNoneAttr::get(builder.getContext()); + attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); + + // OptimizeNone implies noinline; we should not be inlining such functions. + auto noInlineAttr = mlir::cir::InlineAttr::get( + builder.getContext(), mlir::cir::InlineKind::NoInline); + attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); + } + + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); +} + +void CIRGenModule::setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, + bool IsIncompleteFunction, + bool IsThunk) { + assert(!UnimplementedFeature::setFunctionAttributes()); +} + +/// If the specified mangled name is not in the module, +/// create and return a CIR Function with the specified type. If there is +/// something in the module with the specified name, return it potentially +/// bitcasted to the right type. +/// +/// If D is non-null, it specifies a decl that corresponded to this. This is +/// used to set the attributes on the function when it is first created. 
+mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction(
+    StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable,
+    bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition,
+    mlir::ArrayAttr ExtraAttrs) {
+  assert(!IsThunk && "NYI");
+
+  const auto *D = GD.getDecl();
+
+  // Any attempts to use a MultiVersion function should result in retrieving
+  // the iFunc instead. Name mangling will handle the rest of the changes.
+  if (const auto *FD = cast_or_null(D)) {
+    // For the device mark the function as one that should be emitted.
+    if (getLangOpts().OpenMPIsTargetDevice && FD->isDefined() && !DontDefer &&
+        !IsForDefinition) {
+      assert(0 && "OpenMP target functions NYI");
+    }
+    if (FD->isMultiVersion())
+      llvm_unreachable("NYI");
+  }
+
+  // Lookup the entry, lazily creating it if necessary.
+  mlir::Operation *Entry = getGlobalValue(MangledName);
+  if (Entry) {
+    assert(isa(Entry) &&
+           "not implemented, only supports FuncOp for now");
+
+    if (WeakRefReferences.erase(Entry)) {
+      llvm_unreachable("NYI");
+    }
+
+    // Handle dropped DLL attributes.
+    if (D && !D->hasAttr() && !D->hasAttr()) {
+      // TODO(CIR): Entry->setDLLStorageClass
+      setDSOLocal(Entry);
+    }
+
+    // If there are two attempts to define the same mangled name, issue an
+    // error.
+    auto Fn = cast(Entry);
+    if (IsForDefinition && Fn && !Fn.isDeclaration()) {
+      GlobalDecl OtherGD;
+      // Checking that GD is not yet in DiagnosedConflictingDefinitions is
+      // required to make sure that we issue an error only once.
+      if (lookupRepresentativeDecl(MangledName, OtherGD) &&
+          (GD.getCanonicalDecl().getDecl()) &&
+          DiagnosedConflictingDefinitions.insert(GD).second) {
+        getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
+            << MangledName;
+        getDiags().Report(OtherGD.getDecl()->getLocation(),
+                          diag::note_previous_definition);
+      }
+    }
+
+    if (Fn && Fn.getFunctionType() == Ty) {
+      return Fn;
+    }
+
+    if (!IsForDefinition) {
+      return Fn;
+    }
+
+    // TODO: clang checks here if this is a llvm::GlobalAlias... how will we
+    // support this?
+  }
+
+  // This function doesn't have a complete type (for example, the return type is
+  // an incomplete struct). Use a fake type instead, and make sure not to try to
+  // set attributes.
+  bool IsIncompleteFunction = false;
+
+  mlir::cir::FuncType FTy;
+  if (Ty.isa()) {
+    FTy = Ty.cast();
+  } else {
+    assert(false && "NYI");
+    // FTy = mlir::FunctionType::get(VoidTy, false);
+    IsIncompleteFunction = true;
+  }
+
+  auto *FD = llvm::cast_or_null(D);
+
+  // TODO: CodeGen includes the linkage (ExternalLinkage) and only passes the
+  // mangled name if Entry is nullptr.
+  auto F = createCIRFunction(getLocForFunction(FD), MangledName, FTy, FD);
+
+  // If we already created a function with the same mangled name (but different
+  // type) before, take its name and add it to the list of functions to be
+  // replaced with F at the end of CodeGen.
+  //
+  // This happens if there is a prototype for a function (e.g. "int f()") and
+  // then a definition of a different type (e.g. "int f(int x)").
+  if (Entry) {
+
+    // Fetch a generic symbol-defining operation and its uses.
+    auto SymbolOp = dyn_cast(Entry);
+    assert(SymbolOp && "Expected a symbol-defining operation");
+
+    // TODO(cir): When can this symbol be something other than a function?
+    assert(isa(Entry) && "NYI");
+
+    // This might be an implementation of a function without a prototype, in
+    // which case, try to do special replacement of calls which match the new
+    // prototype. The key point is that we also potentially drop
+    // arguments from the call site so as to make a direct call, which makes
+    // the inliner happier and suppresses a number of optimizer warnings (!)
+    // about dropping arguments.
+    if (SymbolOp.getSymbolUses(SymbolOp->getParentOp())) {
+      ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
+    }
+
+    // Obliterate the no-proto declaration.
+    Entry->erase();
+  }
+
+  if (D)
+    setFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
+  if (ExtraAttrs) {
+    llvm_unreachable("NYI");
+  }
+
+  if (!DontDefer) {
+    // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
+    // each other, bottoming out with the base dtor. Therefore we emit non-base
+    // dtors on usage, even if there is no dtor definition in the TU.
+    if (isa_and_nonnull(D) &&
+        getCXXABI().useThunkForDtorVariant(cast(D),
+                                           GD.getDtorType())) {
+      llvm_unreachable("NYI"); // addDeferredDeclToEmit(GD);
+    }
+
+    // This is the first use or definition of a mangled name. If there is a
+    // deferred decl with this name, remember that we need to emit it at the
+    // end of the file.
+    auto DDI = DeferredDecls.find(MangledName);
+    if (DDI != DeferredDecls.end()) {
+      // Move the potentially referenced deferred decl to the
+      // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
+      // don't need it anymore).
+      addDeferredDeclToEmit(DDI->second);
+      DeferredDecls.erase(DDI);
+
+      // Otherwise, there are cases we have to worry about where we're using a
+      // declaration for which we must emit a definition but where we might not
+      // find a top-level definition.
+      //   - member functions defined inline in their classes
+      //   - friend functions defined inline in some class
+      //   - special member functions with implicit definitions
+      // If we ever change our AST traversal to walk into class methods, this
+      // will be unnecessary.
+      //
+      // We also don't emit a definition for a function if it's going to be an
+      // entry in a vtable, unless it's already marked as used.
+    } else if (getLangOpts().CPlusPlus && D) {
+      // Look for a declaration that's lexically in a record.
+      for (const auto *FD = cast(D)->getMostRecentDecl(); FD;
+           FD = FD->getPreviousDecl()) {
+        if (isa(FD->getLexicalDeclContext())) {
+          if (FD->doesThisDeclarationHaveABody()) {
+            if (isDefaultedMethod(FD))
+              addDefaultMethodsToEmit(GD.getWithDecl(FD));
+            else
+              addDeferredDeclToEmit(GD.getWithDecl(FD));
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  if (!IsIncompleteFunction) {
+    assert(F.getFunctionType() == Ty);
+    return F;
+  }
+
+  // TODO(cir): Might need bitcast to different address space.
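The lexically-in-record walk above is what keeps inline method bodies lazy; a minimal C++ input showing the difference (illustrative only):

```cpp
struct S {
  int used() { return 1; }   // first reference below schedules this body
                             // via addDeferredDeclToEmit
  int unused() { return 2; } // never referenced, so never emitted
};
int consume() { return S{}.used(); }
```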
+  assert(!UnimplementedFeature::addressSpace());
+  return F;
+}
+
+mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) {
+  assert(SLoc.isValid() && "expected valid source location");
+  const SourceManager &SM = astCtx.getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(SLoc);
+  StringRef Filename = PLoc.getFilename();
+  return mlir::FileLineColLoc::get(builder.getStringAttr(Filename),
+                                   PLoc.getLine(), PLoc.getColumn());
+}
+
+mlir::Location CIRGenModule::getLoc(SourceRange SLoc) {
+  assert(SLoc.isValid() && "expected valid source location");
+  mlir::Location B = getLoc(SLoc.getBegin());
+  mlir::Location E = getLoc(SLoc.getEnd());
+  SmallVector locs = {B, E};
+  mlir::Attribute metadata;
+  return mlir::FusedLoc::get(locs, metadata, builder.getContext());
+}
+
+mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) {
+  SmallVector locs = {lhs, rhs};
+  mlir::Attribute metadata;
+  return mlir::FusedLoc::get(locs, metadata, builder.getContext());
+}
+
+void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) {
+  // We should call GetAddrOfGlobal with IsForDefinition set to true in order
+  // to get a Value with exactly the type we need, not something that might
+  // have been created for another decl with the same mangled name but
+  // different type.
+  auto *Op = GetAddrOfGlobal(D, ForDefinition);
+
+  // In case of different address spaces, we may still get a cast, even with
+  // IsForDefinition equal to true. Query the mangled names table to get the
+  // GlobalValue.
+  if (!Op) {
+    Op = getGlobalValue(getMangledName(D));
+  }
+  if (!Op)
+    llvm_unreachable("Address spaces NYI");
+
+  // Make sure getGlobalValue returned non-null.
+  assert(Op);
+
+  // Check to see if we've already emitted this. This is necessary for a
+  // couple of reasons: first, decls can end up in the deferred-decls queue
+  // multiple times, and second, decls can end up with definitions in unusual
+  // ways (e.g. by an extern inline function acquiring a strong function
+  // redefinition). Just ignore those cases.
+  // TODO: Not sure what to map this to for MLIR
+  if (auto Fn = dyn_cast(Op))
+    if (!Fn.isDeclaration())
+      return;
+
+  // TODO(cir): create a global value trait that allows us to uniformly handle
+  // global variables and functions.
+  if (auto Gv = dyn_cast(Op)) {
+    auto *result =
+        mlir::SymbolTable::lookupSymbolIn(getModule(), Gv.getNameAttr());
+    if (auto globalOp = dyn_cast(result))
+      if (!globalOp.isDeclaration())
+        return;
+  }
+
+  // If this is OpenMP, check if it is legal to emit this global normally.
+  if (getLangOpts().OpenMP && openMPRuntime &&
+      openMPRuntime->emitTargetGlobal(D))
+    return;
+
+  // Otherwise, emit the definition and move on to the next one.
+  buildGlobalDefinition(D, Op);
+}
+
+void CIRGenModule::buildDeferred(unsigned recursionLimit) {
+  // Emit deferred declare target declarations.
+  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
+    getOpenMPRuntime().emitDeferredTargetDecls();
+
+  // Emit code for any potentially referenced deferred decls. Since a previously
+  // unused static decl may become used during the generation of code for a
+  // static function, iterate until no changes are made.
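A generic sketch of the swap-and-recurse discipline buildDeferred uses (hypothetical names; only the shape of the loop matches the code that follows):

```cpp
#include <vector>

// Drain a worklist that may grow while items are processed. Swapping the
// batch out first keeps iteration valid; the depth bound mirrors the
// recursionLimit threshold used by buildDeferred.
template <typename T, typename Fn>
void drain(std::vector<T> &pending, Fn emit, unsigned depth) {
  if (depth == 0 || pending.empty())
    return;
  std::vector<T> batch;
  batch.swap(pending); // newly scheduled work lands in the emptied 'pending'
  for (auto &item : batch) {
    emit(item); // may append to 'pending'
    if (!pending.empty())
      drain(pending, emit, depth - 1); // DFS keeps related items together
  }
}
```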
+
+  if (!DeferredVTables.empty()) {
+    buildDeferredVTables();
+
+    // Emitting a vtable doesn't directly cause more vtables to
+    // become deferred, although it can cause functions to be
+    // emitted that then need those vtables.
+    assert(DeferredVTables.empty());
+  }
+
+  // Emit CUDA/HIP static device variables referenced by host code only. Note we
+  // should not clear CUDADeviceVarODRUsedByHost since it is still needed for
+  // further handling.
+  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
+    llvm_unreachable("NYI");
+  }
+
+  // Stop if we're out of both deferred vtables and deferred declarations.
+  if (DeferredDeclsToEmit.empty())
+    return;
+
+  // Grab the list of decls to emit. If buildGlobalDefinition schedules more
+  // work, it will not interfere with this.
+  std::vector CurDeclsToEmit;
+  CurDeclsToEmit.swap(DeferredDeclsToEmit);
+  if (recursionLimit == 0)
+    return;
+  recursionLimit--;
+
+  for (auto &D : CurDeclsToEmit) {
+    if (getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders) {
+      auto *decl = D.getDecl();
+      assert(decl && "expected decl");
+      if (astCtx.getSourceManager().isInSystemHeader(decl->getLocation()))
+        continue;
+    }
+
+    buildGlobalDecl(D);
+
+    // If we found out that we need to emit more decls, do that recursively.
+    // This has the advantage that the decls are emitted in a DFS and related
+    // ones are close together, which is convenient for testing.
+    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
+      buildDeferred(recursionLimit);
+      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
+    }
+  }
+}
+
+void CIRGenModule::buildDefaultMethods() {
+  // Unlike DeferredDeclsToEmit, DefaultMethodsToEmit has no recurrent use,
+  // so use it directly for emission.
+  for (auto &D : DefaultMethodsToEmit)
+    buildGlobalDecl(D);
+}
+
+mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) {
+  return builder.getSizeFromCharUnits(builder.getContext(), size);
+}
+
+mlir::Operation *
+CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
+  const Decl *D = GD.getDecl();
+
+  if (isa(D) || isa(D))
+    return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+                                /*DontDefer=*/false, IsForDefinition);
+
+  if (isa(D)) {
+    auto FInfo =
+        &getTypes().arrangeCXXMethodDeclaration(cast(D));
+    auto Ty = getTypes().GetFunctionType(*FInfo);
+    return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+                             IsForDefinition);
+  }
+
+  if (isa(D)) {
+    const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+    auto Ty = getTypes().GetFunctionType(FI);
+    return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+                             IsForDefinition);
+  }
+
+  return getAddrOfGlobalVar(cast(D), /*Ty=*/nullptr, IsForDefinition)
+      .getDefiningOp();
+}
+
+void CIRGenModule::Release() {
+  buildDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold);
+  // TODO: buildVTablesOpportunistically();
+  // TODO: applyGlobalValReplacements();
+  applyReplacements();
+  // TODO: checkAliases();
+  // TODO: buildMultiVersionFunctions();
+  buildCXXGlobalInitFunc();
+  // TODO: buildCXXGlobalCleanUpFunc();
+  // TODO: registerGlobalDtorsWithAtExit();
+  // TODO: buildCXXThreadLocalInitFunc();
+  // TODO: ObjCRuntime
+  if (astCtx.getLangOpts().CUDA) {
+    llvm_unreachable("NYI");
+  }
+  // TODO: OpenMPRuntime
+  // TODO: PGOReader
+  // TODO: buildCtorList(GlobalCtors);
+  // TODO: buildCtorList(GlobalDtors);
+  // TODO: buildGlobalAnnotations();
+  // TODO: buildDeferredUnusedCoverageMappings();
+  // TODO: CIRGenPGO
+  //
TODO: CoverageMapping + if (getCodeGenOpts().SanitizeCfiCrossDso) { + llvm_unreachable("NYI"); + } + // TODO: buildAtAvailableLinkGuard(); + if (astCtx.getTargetInfo().getTriple().isWasm() && + !astCtx.getTargetInfo().getTriple().isOSEmscripten()) { + llvm_unreachable("NYI"); + } + + // Emit reference of __amdgpu_device_library_preserve_asan_functions to + // preserve ASAN functions in bitcode libraries. + if (getLangOpts().Sanitize.has(SanitizerKind::Address)) { + llvm_unreachable("NYI"); + } + + // TODO: buildLLVMUsed(); + // TODO: SanStats + + if (getCodeGenOpts().Autolink) { + // TODO: buildModuleLinkOptions + } + + // TODO: FINISH THE REST OF THIS +} + +bool CIRGenModule::shouldEmitFunction(GlobalDecl GD) { + // TODO: implement this -- requires defining linkage for CIR + return true; +} + +bool CIRGenModule::supportsCOMDAT() const { + return getTriple().supportsCOMDAT(); +} + +void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { + if (!shouldBeInCOMDAT(*this, D)) + return; + + // TODO: Op.setComdat + assert(!UnimplementedFeature::setComdat() && "NYI"); +} + +bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::cir::FuncOp Fn, + SourceLocation Loc) const { + const auto &NoSanitizeL = getASTContext().getNoSanitizeList(); + // NoSanitize by function name. + if (NoSanitizeL.containsFunction(Kind, Fn.getName())) + llvm_unreachable("NYI"); + // NoSanitize by location. + if (Loc.isValid()) + return NoSanitizeL.containsLocation(Kind, Loc); + // If location is unknown, this may be a compiler-generated function. Assume + // it's located in the main file. + auto &SM = getASTContext().getSourceManager(); + FileEntryRef MainFile = *SM.getFileEntryRefForID(SM.getMainFileID()); + if (NoSanitizeL.containsFile(Kind, MainFile.getName())) + return true; + + // Check "src" prefix. + if (Loc.isValid()) + return NoSanitizeL.containsLocation(Kind, Loc); + // If location is unknown, this may be a compiler-generated function. Assume + // it's located in the main file. + return NoSanitizeL.containsFile(Kind, MainFile.getName()); +} + +void CIRGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { + // Do we need to generate coverage mapping? + if (!codeGenOpts.CoverageMapping) + return; + + llvm_unreachable("NYI"); +} + +void CIRGenModule::UpdateCompletedType(const TagDecl *TD) { + // Make sure that this type is translated. + genTypes.UpdateCompletedType(TD); +} + +void CIRGenModule::addReplacement(StringRef Name, mlir::Operation *Op) { + Replacements[Name] = Op; +} + +void CIRGenModule::applyReplacements() { + for (auto &I : Replacements) { + StringRef MangledName = I.first(); + mlir::Operation *Replacement = I.second; + auto *Entry = getGlobalValue(MangledName); + if (!Entry) + continue; + assert(isa(Entry) && "expected function"); + auto OldF = cast(Entry); + auto NewF = dyn_cast(Replacement); + assert(NewF && "not implemented"); + + // Replace old with new, but keep the old order. + if (OldF.replaceAllSymbolUses(NewF.getSymNameAttr(), theModule).failed()) + llvm_unreachable("internal error, cannot RAUW symbol"); + if (NewF) { + NewF->moveBefore(OldF); + OldF->erase(); + } + } +} + +void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF) { + // Bind VLAs in the cast type. 
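A minimal sketch (plain MLIR, hypothetical helper name) of the symbol-level replace-all-uses that applyReplacements performs above:

```cpp
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"

// Rewrite every reference to oldOp's symbol inside 'module' to 'newName',
// then erase the now-unreferenced definition.
mlir::LogicalResult replaceSymbol(mlir::ModuleOp module,
                                  mlir::Operation *oldOp,
                                  mlir::StringAttr newName) {
  if (mlir::failed(
          mlir::SymbolTable::replaceAllSymbolUses(oldOp, newName, module)))
    return mlir::failure(); // some use could not be rewritten
  oldOp->erase();
  return mlir::success();
}
```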
+  if (CGF && E->getType()->isVariablyModifiedType())
+    llvm_unreachable("NYI");
+
+  assert(!UnimplementedFeature::generateDebugInfo() && "NYI");
+}
+
+void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+  auto DK = VD->isThisDeclarationADefinition();
+  if (DK == VarDecl::Definition && VD->hasAttr())
+    return;
+
+  TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+  // If we have a definition, this might be a deferred decl. If the
+  // instantiation is explicit, make sure we emit it at the end.
+  if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) {
+    llvm_unreachable("NYI");
+  }
+
+  buildTopLevelDecl(VD);
+}
+
+mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable(
+    mlir::Location loc, StringRef Name, mlir::Type Ty,
+    mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment) {
+  mlir::cir::GlobalOp OldGV{};
+  auto GV = dyn_cast_or_null(
+      mlir::SymbolTable::lookupSymbolIn(getModule(), Name));
+
+  if (GV) {
+    // Check if the variable has the right type.
+    if (GV.getSymType() == Ty)
+      return GV;
+
+    // Because of C++ name mangling, the only way we can end up with an
+    // already existing global with the same name is if it has been declared
+    // extern "C".
+    assert(GV.isDeclaration() && "Declaration has wrong type!");
+    OldGV = GV;
+  }
+
+  // Create a new variable.
+  GV = CIRGenModule::createGlobalOp(*this, loc, Name, Ty);
+
+  // Set up extra information and add to the module.
+  GV.setLinkageAttr(
+      mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), Linkage));
+  mlir::SymbolTable::setSymbolVisibility(GV,
+                                         CIRGenModule::getMLIRVisibility(GV));
+
+  if (OldGV) {
+    // Replace occurrences of the old variable if needed.
+    GV.setName(OldGV.getName());
+    if (!OldGV->use_empty()) {
+      llvm_unreachable("NYI");
+    }
+    OldGV->erase();
+  }
+
+  assert(!UnimplementedFeature::setComdat());
+  if (supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage) &&
+      !GV.hasAvailableExternallyLinkage())
+    assert(!UnimplementedFeature::setComdat());
+
+  GV.setAlignmentAttr(getSize(Alignment));
+  return GV;
+}
+
+bool CIRGenModule::shouldOpportunisticallyEmitVTables() {
+  if (codeGenOpts.OptimizationLevel != 0)
+    llvm_unreachable("NYI");
+  return codeGenOpts.OptimizationLevel > 0;
+}
+
+void CIRGenModule::buildVTableTypeMetadata(const CXXRecordDecl *RD,
+                                           mlir::cir::GlobalOp VTable,
+                                           const VTableLayout &VTLayout) {
+  if (!getCodeGenOpts().LTOUnit)
+    return;
+  llvm_unreachable("NYI");
+}
+
+mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc,
+                                                      QualType Ty,
+                                                      bool ForEH) {
+  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+  // FIXME: should we even be calling this method if RTTI is disabled
+  // and it's not for EH?
+  if (!shouldEmitRTTI(ForEH))
+    return getBuilder().getConstNullPtrAttr(builder.getUInt8PtrTy());
+
+  if (ForEH && Ty->isObjCObjectPointerType() &&
+      getLangOpts().ObjCRuntime.isGNUFamily()) {
+    llvm_unreachable("NYI");
+  }
+
+  return getCXXABI().getAddrOfRTTIDescriptor(loc, Ty);
+}
+
+/// TODO(cir): once we have cir.module, add this as a convenience method there.
+///
+/// Look up the specified global in the module symbol table.
+///   1. If it does not exist, add a declaration of the global and return it.
+///   2. Else, the global exists but has the wrong type: return the function
+///      with a constantexpr cast to the right type.
+///   3. Finally, if the existing global is the correct declaration, return the
+///      existing global.
+mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( + mlir::Location loc, StringRef Name, mlir::Type Ty, + llvm::function_ref CreateGlobalCallback) { + // See if we have a definition for the specified global already. + auto GV = dyn_cast_or_null(getGlobalValue(Name)); + if (!GV) { + GV = CreateGlobalCallback(); + } + assert(GV && "The CreateGlobalCallback is expected to create a global"); + + // If the variable exists but has the wrong type, return a bitcast to the + // right type. + auto GVTy = GV.getSymType(); + assert(!UnimplementedFeature::addressSpace()); + auto PTy = builder.getPointerTo(Ty); + + if (GVTy != PTy) + llvm_unreachable("NYI"); + + // Otherwise, we just found the existing function or a prototype. + return GV; +} + +// Overload to construct a global variable using its constructor's defaults. +mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc, + StringRef Name, + mlir::Type Ty) { + return getOrInsertGlobal(loc, Name, Ty, [&] { + return CIRGenModule::createGlobalOp(*this, loc, Name, + builder.getPointerTo(Ty)); + }); +} + +// TODO(cir): this can be shared with LLVM codegen. +CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( + const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, + CastExpr::path_const_iterator End) { + CharUnits Offset = CharUnits::Zero(); + + const ASTContext &Context = getASTContext(); + const CXXRecordDecl *RD = DerivedClass; + + for (CastExpr::path_const_iterator I = Start; I != End; ++I) { + const CXXBaseSpecifier *Base = *I; + assert(!Base->isVirtual() && "Should not see virtual bases here!"); + + // Get the layout. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + const auto *BaseDecl = + cast(Base->getType()->castAs()->getDecl()); + + // Add the offset. + Offset += Layout.getBaseClassOffset(BaseDecl); + + RD = BaseDecl; + } + + return Offset; +} + +void CIRGenModule::Error(SourceLocation loc, StringRef message) { + unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0"); + getDiags().Report(astCtx.getFullLoc(loc), diagID) << message; +} + +/// Print out an error that codegen doesn't support the specified stmt yet. +void CIRGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot compile this %0 yet"); + std::string Msg = Type; + getDiags().Report(astCtx.getFullLoc(S->getBeginLoc()), DiagID) + << Msg << S->getSourceRange(); +} + +/// Print out an error that codegen doesn't support the specified decl yet. +void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot compile this %0 yet"); + std::string Msg = Type; + getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; +} + +mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { + using ClangStd = clang::LangStandard; + using CIRLang = mlir::cir::SourceLanguage; + auto opts = getLangOpts(); + + if (opts.CPlusPlus || opts.CPlusPlus11 || opts.CPlusPlus14 || + opts.CPlusPlus17 || opts.CPlusPlus20 || opts.CPlusPlus23 || + opts.CPlusPlus26) + return CIRLang::CXX; + if (opts.C99 || opts.C11 || opts.C17 || opts.C23 || + opts.LangStd == ClangStd::lang_c89 || + opts.LangStd == ClangStd::lang_gnu89) + return CIRLang::C; + + // TODO(cir): support remaining source languages. 
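Earlier in this hunk, computeNonVirtualBaseClassOffset accumulates Layout.getBaseClassOffset for each step of the cast path; a concrete illustration (offsets assume a typical Itanium layout with 4-byte int):

```cpp
struct A { int a; }; // at offset 0 of D
struct B { int b; }; // at offset 4 of D
struct D : A, B { int d; };
// A derived-to-base cast D* -> B* has path {B}: the loop adds
// getBaseClassOffset(B) == 4, so the pointer is adjusted by 4 bytes.
// D* -> A* accumulates 0, so no adjustment is needed.
static_assert(sizeof(D) == 12, "layout assumed above");
```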
+  llvm_unreachable("CIR does not yet support the given source language");
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
new file mode 100644
index 000000000000..946d3d2e5a7b
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -0,0 +1,698 @@
+//===--- CIRGenModule.h - Per-Module state for CIR gen ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for CIR translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H
+#define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H
+
+#include "CIRDataLayout.h"
+#include "CIRGenBuilder.h"
+#include "CIRGenCall.h"
+#include "CIRGenTypeCache.h"
+#include "CIRGenTypes.h"
+#include "CIRGenVTables.h"
+#include "CIRGenValue.h"
+#include "UnimplementedFeatureGuarding.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/ADT/ScopedHashTable.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/IR/Value.h"
+
+using namespace clang;
+namespace cir {
+
+class CIRGenFunction;
+class CIRGenCXXABI;
+class TargetCIRGenInfo;
+class CIRGenOpenMPRuntime;
+
+enum ForDefinition_t : bool { NotForDefinition = false, ForDefinition = true };
+
+/// Implementation of CIR/MLIR emission from the Clang AST.
+///
+/// This will emit operations that are specific to the C(++)/ObjC(++)
+/// languages, preserving the semantics of the language and (hopefully)
+/// allowing accurate analysis and transformation based on these high-level
+/// semantics.
+class CIRGenModule : public CIRGenTypeCache {
+  CIRGenModule(CIRGenModule &) = delete;
+  CIRGenModule &operator=(CIRGenModule &) = delete;
+
+public:
+  CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx,
+               const clang::CodeGenOptions &CGO,
+               clang::DiagnosticsEngine &Diags);
+
+  ~CIRGenModule();
+
+  const std::string &getModuleNameHash() const { return ModuleNameHash; }
+
+private:
+  mutable std::unique_ptr TheTargetCIRGenInfo;
+
+  /// The builder is a helper class to create IR inside a function. The
+  /// builder is stateful, in particular it keeps an "insertion point": this
+  /// is where the next operations will be introduced.
+  CIRGenBuilderTy builder;
+
+  /// Holds Clang AST information.
+  clang::ASTContext &astCtx;
+
+  const clang::LangOptions &langOpts;
+
+  const clang::CodeGenOptions &codeGenOpts;
+
+  /// A "module" matches a C/C++ source file: it contains a list of functions.
+  mlir::ModuleOp theModule;
+
+  clang::DiagnosticsEngine &Diags;
+
+  const clang::TargetInfo &target;
+
+  std::unique_ptr ABI;
+
+  /// Used for the `UniqueInternalLinkageNames` option.
+  std::string ModuleNameHash = "";
+
+  /// Per-module type mapping from clang AST to CIR.
+  CIRGenTypes genTypes;
+
+  /// Holds information about C++ vtables.
+  CIRGenVTables VTables;
+
+  /// Holds the OpenMP runtime.
+  std::unique_ptr openMPRuntime;
+
+  /// Per-function codegen information. Updated every time buildCIR is called
+  /// for FunctionDecls.
+  CIRGenFunction *CurCGF = nullptr;
+
+  // A set of references that have only been seen via a weakref so far. This is
+  // used to remove the weakref if we ever see a direct reference or a
+  // definition.
+  llvm::SmallPtrSet WeakRefReferences;
+
+  /// -------
+  /// Declaring variables
+  /// -------
+
+  /// Set of global decls for which we already diagnosed a mangled name
+  /// conflict. Required to not issue a warning (on a mangling conflict)
+  /// multiple times for the same decl.
+  llvm::DenseSet DiagnosedConflictingDefinitions;
+
+public:
+  mlir::ModuleOp getModule() const { return theModule; }
+  CIRGenBuilderTy &getBuilder() { return builder; }
+  clang::ASTContext &getASTContext() const { return astCtx; }
+  const clang::TargetInfo &getTarget() const { return target; }
+  const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; }
+  clang::DiagnosticsEngine &getDiags() const { return Diags; }
+  CIRGenTypes &getTypes() { return genTypes; }
+  const clang::LangOptions &getLangOpts() const { return langOpts; }
+  CIRGenFunction *getCurrCIRGenFun() const { return CurCGF; }
+  const CIRDataLayout getDataLayout() const {
+    // FIXME(cir): instead of creating a CIRDataLayout every time, set it as an
+    // attribute for the CIRModule class.
+    return {theModule};
+  }
+
+  CIRGenCXXABI &getCXXABI() const { return *ABI; }
+
+  /// -------
+  /// Handling globals
+  /// -------
+
+  // TODO(cir): does this really need to be a state for CIR emission?
+  GlobalDecl initializedGlobalDecl;
+
+  /// Global variables with initializers that need to run before main.
+  /// TODO(cir): for now track a generation operation; this is so far only
+  /// used to sync with DelayedCXXInitPosition. Improve it when we actually
+  /// use function calls for initialization.
+  std::vector CXXGlobalInits;
+
+  /// Emit the function that initializes C++ globals.
+  void buildCXXGlobalInitFunc();
+
+  /// When a C++ decl with an initializer is deferred, null is
+  /// appended to CXXGlobalInits, and the index of that null is placed
+  /// here so that the initializer will be performed in the correct
+  /// order. Once the decl is emitted, the index is replaced with ~0U to ensure
+  /// that we don't re-emit the initializer.
+  llvm::DenseMap DelayedCXXInitPosition;
+
+  /// Keep track of a map between lambda fields and names; this needs to be
+  /// per-module since lambdas might get generated later as part of deferred
+  /// work, and since the pointers are supposed to be uniqued, it should be
+  /// fine. Revisit this if it ends up taking too much memory.
+  llvm::DenseMap LambdaFieldToName;
+
+  /// If the declaration has internal linkage but is inside an
+  /// extern "C" linkage specification, prepare to emit an alias for it
+  /// to the expected name.
+  template
+  void maybeHandleStaticInExternC(const SomeDecl *D, mlir::cir::GlobalOp GV);
+
+  /// Tell the consumer that this variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD); + + llvm::DenseMap StaticLocalDeclMap; + llvm::DenseMap Globals; + mlir::Operation *getGlobalValue(StringRef Ref); + mlir::Value getGlobalValue(const clang::Decl *D); + + /// If the specified mangled name is not in the module, create and return an + /// mlir::GlobalOp value + mlir::cir::GlobalOp + getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, LangAS AddrSpace, + const VarDecl *D, + ForDefinition_t IsForDefinition = NotForDefinition); + + mlir::cir::GlobalOp getStaticLocalDeclAddress(const VarDecl *D) { + return StaticLocalDeclMap[D]; + } + + void setStaticLocalDeclAddress(const VarDecl *D, mlir::cir::GlobalOp C) { + StaticLocalDeclMap[D] = C; + } + + mlir::cir::GlobalOp + getOrCreateStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalLinkageKind Linkage); + + mlir::cir::GlobalOp buildGlobal(const VarDecl *D, mlir::Type Ty, + ForDefinition_t IsForDefinition); + + /// TODO(cir): once we have cir.module, add this as a convenience method + /// there instead of here. + /// + /// Look up the specified global in the module symbol table. + /// 1. If it does not exist, add a declaration of the global and return it. + /// 2. Else, the global exists but has the wrong type: return the function + /// with a constantexpr cast to the right type. + /// 3. Finally, if the existing global is the correct declaration, return + /// the existing global. + mlir::cir::GlobalOp getOrInsertGlobal( + mlir::Location loc, StringRef Name, mlir::Type Ty, + llvm::function_ref CreateGlobalCallback); + + // Overload to construct a global variable using its constructor's defaults. + mlir::cir::GlobalOp getOrInsertGlobal(mlir::Location loc, StringRef Name, + mlir::Type Ty); + + static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, + mlir::Location loc, StringRef name, + mlir::Type t, bool isCst = false, + mlir::Operation *insertPoint = nullptr); + + // FIXME: Hardcoding priority here is gross. + void AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority = 65535); + void AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority = 65535, + bool IsDtorAttrFunc = false); + + /// Return the mlir::Value for the address of the given global variable. + /// If Ty is non-null and if the global doesn't exist, then it will be created + /// with the specified type instead of whatever the normal requested type + /// would be. If IsForDefinition is true, it is guaranteed that an actual + /// global with type Ty will be returned, not conversion of a variable with + /// the same mangled name but some other type. + mlir::Value + getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty = {}, + ForDefinition_t IsForDefinition = NotForDefinition); + + /// Return the mlir::GlobalViewAttr for the address of the given global. + mlir::cir::GlobalViewAttr + getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty = {}, + ForDefinition_t IsForDefinition = NotForDefinition); + + /// Get a reference to the target of VD. + mlir::Operation *getWeakRefReference(const ValueDecl *VD); + + CharUnits + computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass, + CastExpr::path_const_iterator Start, + CastExpr::path_const_iterator End); + + /// Get the CIR attributes and calling convention to use for a particular + /// function type. + /// + /// \param Name - The function name. + /// \param Info - The function type information. + /// \param CalleeInfo - The callee information these attributes are being + /// constructed for. 
If valid, the attributes applied to this decl may
+  /// contribute to the function attributes and calling convention.
+  /// \param Attrs [out] - On return, the attribute list to use.
+  void ConstructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info,
+                              CIRGenCalleeInfo CalleeInfo,
+                              mlir::DictionaryAttr &Attrs, bool AttrOnCallSite,
+                              bool IsThunk);
+
+  /// Will return a global variable of the given type. If a variable with a
+  /// different type already exists then a new variable with the right type
+  /// will be created and all uses of the old variable will be replaced with a
+  /// bitcast to the new variable.
+  mlir::cir::GlobalOp createOrReplaceCXXRuntimeVariable(
+      mlir::Location loc, StringRef Name, mlir::Type Ty,
+      mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment);
+
+  /// Emit any vtables which we deferred and still have a use for.
+  void buildDeferredVTables();
+  bool shouldOpportunisticallyEmitVTables();
+
+  /// Return the appropriate linkage for the vtable, VTT, and type information
+  /// of the given class.
+  mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD);
+
+  /// Emit type metadata for the given vtable using the given layout.
+  void buildVTableTypeMetadata(const CXXRecordDecl *RD,
+                               mlir::cir::GlobalOp VTable,
+                               const VTableLayout &VTLayout);
+
+  /// Get the address of the RTTI descriptor for the given type.
+  mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty,
+                                          bool ForEH = false);
+
+  /// TODO(cir): add CIR visibility bits.
+  static mlir::SymbolTable::Visibility getCIRVisibility(Visibility V) {
+    switch (V) {
+    case DefaultVisibility:
+      return mlir::SymbolTable::Visibility::Public;
+    case HiddenVisibility:
+      return mlir::SymbolTable::Visibility::Private;
+    case ProtectedVisibility:
+      llvm_unreachable("NYI");
+    }
+    llvm_unreachable("unknown visibility!");
+  }
+
+  llvm::DenseMap ConstantStringMap;
+
+  /// Return a constant array for the given string.
+  mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *E);
+
+  /// Return a global symbol reference to a constant array for the given string
+  /// literal.
+  mlir::cir::GlobalViewAttr
+  getAddrOfConstantStringFromLiteral(const StringLiteral *S,
+                                     StringRef Name = ".str");
+  unsigned StringLiteralCnt = 0;
+
+  unsigned CompoundLiteralCnt = 0;
+  /// Return a unique name for a global compound literal.
+  std::string createGlobalCompoundLiteralName() {
+    return (Twine(".compoundLiteral.") + Twine(CompoundLiteralCnt++)).str();
+  }
+
+  /// Return the AST address space of a constant literal, which is used to emit
+  /// the constant literal as a global variable in LLVM IR.
+  /// Note: This is not necessarily the address space of the constant literal
+  /// in the AST. For an address-space-agnostic language, e.g. C++, a constant
+  /// literal in the AST is always in the default address space.
+  LangAS getGlobalConstantAddressSpace() const;
+
+  /// Set attributes which are common to any form of a global definition (alias,
+  /// Objective-C method, function, global variable).
+  ///
+  /// NOTE: This should only be called for definitions.
+  void setCommonAttributes(GlobalDecl GD, mlir::Operation *GV);
+
+  // TODO: this obviously overlaps with
+  const TargetCIRGenInfo &getTargetCIRGenInfo();
+
+  /// Helpers to convert Clang's SourceLocation to an MLIR Location.
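A standalone sketch of what the two helpers declared below produce, mirroring their definitions earlier in the patch (plain MLIR, hypothetical wrapper names):

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"

// A clang SourceLocation becomes a file:line:col location...
mlir::Location makeLoc(mlir::OpBuilder &b, llvm::StringRef file, unsigned line,
                       unsigned col) {
  return mlir::FileLineColLoc::get(b.getStringAttr(file), line, col);
}

// ...and a SourceRange becomes a FusedLoc of its begin/end locations.
mlir::Location fuse(mlir::OpBuilder &b, mlir::Location begin,
                    mlir::Location end) {
  return mlir::FusedLoc::get({begin, end}, /*metadata=*/mlir::Attribute(),
                             b.getContext());
}
```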
+  mlir::Location getLoc(clang::SourceLocation SLoc);
+  mlir::Location getLoc(clang::SourceRange SLoc);
+  mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
+
+  /// Helper to convert Clang's alignment to CIR alignment.
+  mlir::IntegerAttr getSize(CharUnits size);
+
+  /// Returns whether the given record has public LTO visibility (regardless of
+  /// -lto-whole-program-visibility) and therefore may not participate in
+  /// (single-module) CFI and whole-program vtable optimization.
+  bool AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD);
+
+  /// Returns whether the given record has hidden LTO visibility and therefore
+  /// may participate in (single-module) CFI and whole-program vtable
+  /// optimization.
+  bool HasHiddenLTOVisibility(const CXXRecordDecl *RD);
+
+  /// Determine whether an object of this type can be emitted
+  /// as a constant.
+  ///
+  /// If ExcludeCtor is true, the duration when the object's constructor runs
+  /// will not be considered. The caller will need to verify that the object is
+  /// not written to during its construction.
+  /// FIXME: in the LLVM codegen path this is part of CGM, which doesn't seem
+  /// necessary, since (1) it doesn't use CGM at all and (2) it is an AST-type
+  /// specific query.
+  bool isTypeConstant(clang::QualType Ty, bool ExcludeCtor, bool ExcludeDtor);
+
+  /// FIXME: this could likely be a common helper and not necessarily related
+  /// to codegen.
+  /// Return the best known alignment for an unknown pointer to a
+  /// particular class.
+  clang::CharUnits getClassPointerAlignment(const clang::CXXRecordDecl *RD);
+
+  /// FIXME: this could likely be a common helper and not necessarily related
+  /// to codegen.
+  /// TODO: Add TBAAAccessInfo
+  clang::CharUnits getNaturalPointeeTypeAlignment(clang::QualType T,
+                                                  LValueBaseInfo *BaseInfo);
+
+  /// FIXME: this could likely be a common helper and not necessarily related
+  /// to codegen.
+  /// TODO: Add TBAAAccessInfo
+  clang::CharUnits getNaturalTypeAlignment(clang::QualType T,
+                                           LValueBaseInfo *BaseInfo = nullptr,
+                                           bool forPointeeType = false);
+
+  /// TODO: Add TBAAAccessInfo
+  clang::CharUnits
+  getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign,
+                            const clang::CXXRecordDecl *baseDecl,
+                            clang::CharUnits expectedTargetAlign);
+
+  mlir::cir::FuncOp getAddrOfCXXStructor(
+      clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr,
+      mlir::cir::FuncType FnType = nullptr, bool DontDefer = false,
+      ForDefinition_t IsForDefinition = NotForDefinition) {
+
+    return getAddrAndTypeOfCXXStructor(GD, FnInfo, FnType, DontDefer,
+                                       IsForDefinition)
+        .second;
+  }
+
+  /// A queue of (optional) vtables to consider emitting.
+  std::vector DeferredVTables;
+
+  mlir::Type getVTableComponentType();
+  CIRGenVTables &getVTables() { return VTables; }
+
+  ItaniumVTableContext &getItaniumVTableContext() {
+    return VTables.getItaniumVTableContext();
+  }
+  const ItaniumVTableContext &getItaniumVTableContext() const {
+    return VTables.getItaniumVTableContext();
+  }
+
+  /// This contains all the decls which have definitions but which are deferred
+  /// for emission and therefore should only be output if they are actually
+  /// used. If a decl is in this, then it is known to have not been referenced
+  /// yet.
+  std::map DeferredDecls;
+
+  // This is a list of deferred decls which we have seen that *are* actually
+  // referenced. These get code generated when the module is done.
+  std::vector DeferredDeclsToEmit;
+  void addDeferredDeclToEmit(clang::GlobalDecl GD) {
+    DeferredDeclsToEmit.emplace_back(GD);
+  }
+
+  // Unlike DeferredDeclsToEmit, DefaultMethodsToEmit is only processed after
+  // HandleTranslationUnit finishes and a set of CIR passes have run. See
+  // addDefaultMethodsToEmit usage for examples.
+  std::vector DefaultMethodsToEmit;
+  void addDefaultMethodsToEmit(clang::GlobalDecl GD) {
+    DefaultMethodsToEmit.emplace_back(GD);
+  }
+
+  std::pair getAddrAndTypeOfCXXStructor(
+      clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr,
+      mlir::cir::FuncType FnType = nullptr, bool Dontdefer = false,
+      ForDefinition_t IsForDefinition = NotForDefinition);
+
+  void buildTopLevelDecl(clang::Decl *decl);
+  void buildLinkageSpec(const LinkageSpecDecl *D);
+
+  /// Emit code for a single global function or var decl. Forward declarations
+  /// are emitted lazily.
+  void buildGlobal(clang::GlobalDecl D);
+
+  bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+  void buildAliasForGlobal(StringRef mangledName, mlir::Operation *op,
+                           GlobalDecl aliasGD, mlir::cir::FuncOp aliasee,
+                           mlir::cir::GlobalLinkageKind linkage);
+
+  mlir::Type getCIRType(const clang::QualType &type);
+
+  /// Set the visibility for the given global.
+  void setGlobalVisibility(mlir::Operation *Op, const NamedDecl *D) const;
+  void setDSOLocal(mlir::Operation *Op) const;
+  /// Set visibility, dllimport/dllexport and dso_local.
+  /// This must be called after dllimport/dllexport is set.
+  void setGVProperties(mlir::Operation *Op, const NamedDecl *D) const;
+  void setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const;
+
+  /// Set the TLS mode for the given global Op for the thread-local
+  /// variable declaration D.
+  void setTLSMode(mlir::Operation *Op, const VarDecl &D) const;
+
+  /// Get TLS mode from CodeGenOptions.
+  mlir::cir::TLS_Model GetDefaultCIRTLSModel() const;
+
+  /// Replace the present global `Old` with the given global `New`. Their symbol
+  /// names must match; their types can be different. Usages of the old global
+  /// will be automatically updated if their types mismatch.
+  ///
+  /// This function will erase the old global. This function will NOT insert the
+  /// new global into the module.
+  void replaceGlobal(mlir::cir::GlobalOp Old, mlir::cir::GlobalOp New);
+
+  /// Determine whether the definition must be emitted; if this returns \c
+  /// false, the definition can be emitted lazily if it's used.
+  bool MustBeEmitted(const clang::ValueDecl *D);
+
+  /// Whether this function's return type has no side effects, and thus may be
+  /// trivially discarded if it is unused.
+  bool MayDropFunctionReturn(const clang::ASTContext &Context,
+                             clang::QualType ReturnType);
+
+  bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::cir::FuncOp Fn,
+                          clang::SourceLocation) const;
+
+  /// Determine whether the definition can be emitted eagerly, or should be
+  /// delayed until the end of the translation unit. This is relevant for
+  /// definitions whose linkage can change, e.g. implicit function
+  /// instantiations which may later be explicitly instantiated.
+  bool MayBeEmittedEagerly(const clang::ValueDecl *D);
+
+  bool verifyModule();
+
+  /// Return the address of the given function. If Ty is non-null, then this
+  /// function will use the specified type if it has to create it.
+  // TODO: this is a bit weird as `GetAddr`, given we give back a FuncOp?
+  mlir::cir::FuncOp
+  GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty = nullptr,
+                    bool ForVTable = false, bool Dontdefer = false,
+                    ForDefinition_t IsForDefinition = NotForDefinition);
+
+  mlir::Operation *
+  GetAddrOfGlobal(clang::GlobalDecl GD,
+                  ForDefinition_t IsForDefinition = NotForDefinition);
+
+  // Return whether RTTI information should be emitted for this target.
+  bool shouldEmitRTTI(bool ForEH = false) {
+    return (ForEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
+           !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
+             getTriple().isNVPTX());
+  }
+
+  // C++ related functions.
+  void buildDeclContext(const DeclContext *DC);
+
+  /// Return the result of value-initializing the given type, i.e. a null
+  /// expression of the given type. This is usually, but not always, an LLVM
+  /// null constant.
+  mlir::Value buildNullConstant(QualType T, mlir::Location loc);
+
+  mlir::Value buildMemberPointerConstant(const UnaryOperator *E);
+
+  llvm::StringRef getMangledName(clang::GlobalDecl GD);
+
+  void buildTentativeDefinition(const VarDecl *D);
+
+  // Make sure that this type is translated.
+  void UpdateCompletedType(const clang::TagDecl *TD);
+
+  /// Set function attributes for a function declaration.
+  void setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F,
+                             bool IsIncompleteFunction, bool IsThunk);
+
+  void buildGlobalDefinition(clang::GlobalDecl D,
+                             mlir::Operation *Op = nullptr);
+  void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op);
+  void buildGlobalVarDefinition(const clang::VarDecl *D,
+                                bool IsTentative = false);
+
+  /// Emit the function that initializes the specified global.
+  void buildGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr,
+                              bool PerformInit);
+
+  void addDeferredVTable(const CXXRecordDecl *RD) {
+    DeferredVTables.push_back(RD);
+  }
+
+  /// Store a deferred empty coverage mapping for an unused and thus
+  /// uninstrumented top-level declaration.
+  void AddDeferredUnusedCoverageMapping(clang::Decl *D);
+
+  std::nullptr_t getModuleDebugInfo() { return nullptr; }
+
+  /// Emit any needed decls for which code generation was deferred.
+  void buildDeferred(unsigned recursionLimit);
+
+  /// Helper for `buildDeferred` to apply actual codegen.
+  void buildGlobalDecl(clang::GlobalDecl &D);
+
+  /// Build default methods not emitted before this point.
+  void buildDefaultMethods();
+
+  const llvm::Triple &getTriple() const { return target.getTriple(); }
+
+  // Finalize CIR code generation.
+  void Release();
+
+  bool shouldEmitFunction(clang::GlobalDecl GD);
+
+  // Produce code for this constructor/destructor. This method doesn't try to
+  // apply any ABI rules about which other constructors/destructors are needed
+  // or if they are aliases of each other.
+  mlir::cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD);
+
+  // Produce code for this constructor/destructor for global initialization.
+ void codegenGlobalInitCxxStructor(const clang::VarDecl *D, + mlir::cir::GlobalOp Addr, bool NeedsCtor, + bool NeedsDtor); + + bool lookupRepresentativeDecl(llvm::StringRef MangledName, + clang::GlobalDecl &Result) const; + + bool supportsCOMDAT() const; + void maybeSetTrivialComdat(const clang::Decl &D, mlir::Operation *Op); + + void emitError(const llvm::Twine &message) { theModule.emitError(message); } + + /// ------- + /// Visibility and Linkage + /// ------- + + static void setInitializer(mlir::cir::GlobalOp &op, mlir::Attribute value); + static mlir::SymbolTable::Visibility + getMLIRVisibilityFromCIRLinkage(mlir::cir::GlobalLinkageKind GLK); + static mlir::SymbolTable::Visibility + getMLIRVisibility(mlir::cir::GlobalOp op); + mlir::cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); + mlir::cir::GlobalLinkageKind + getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, + bool IsConstantVariable); + void setFunctionLinkage(GlobalDecl GD, mlir::cir::FuncOp f) { + auto L = getFunctionLinkage(GD); + f.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), L)); + mlir::SymbolTable::setSymbolVisibility(f, + getMLIRVisibilityFromCIRLinkage(L)); + } + + mlir::cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *VD, + bool IsConstant); + + void addReplacement(StringRef Name, mlir::Operation *Op); + + mlir::Location getLocForFunction(const clang::FunctionDecl *FD); + + void ReplaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *Old, + mlir::cir::FuncOp NewFn); + + void setExtraAttributesForFunc(mlir::cir::FuncOp f, + const clang::FunctionDecl *FD); + + // TODO: CodeGen also passes an AttributeList here. We'll have to match that + // in CIR + mlir::cir::FuncOp + GetOrCreateCIRFunction(llvm::StringRef MangledName, mlir::Type Ty, + clang::GlobalDecl D, bool ForVTable, + bool DontDefer = false, bool IsThunk = false, + ForDefinition_t IsForDefinition = NotForDefinition, + mlir::ArrayAttr ExtraAttrs = {}); + // Effectively create the CIR instruction, properly handling insertion + // points. + mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, + mlir::cir::FuncType Ty, + const clang::FunctionDecl *FD); + + mlir::cir::FuncOp createRuntimeFunction(mlir::cir::FuncType Ty, + StringRef Name, mlir::ArrayAttr = {}, + bool Local = false, + bool AssumeConvergent = false); + + /// Emit type info if type of an expression is a variably modified + /// type. Also emit proper debug info for cast types. + void buildExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF = nullptr); + + static constexpr const char *builtinCoroId = "__builtin_coro_id"; + static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; + static constexpr const char *builtinCoroBegin = "__builtin_coro_begin"; + static constexpr const char *builtinCoroEnd = "__builtin_coro_end"; + + /// Given a builtin id for a function like "__builtin_fabsf", return a + /// Function* for "fabsf". + mlir::cir::FuncOp getBuiltinLibFunction(const FunctionDecl *FD, + unsigned BuiltinID); + + /// Emit a general error that something can't be done. + void Error(SourceLocation loc, StringRef error); + + /// Print out an error that codegen doesn't support the specified stmt yet. + void ErrorUnsupported(const Stmt *S, const char *Type); + + /// Print out an error that codegen doesn't support the specified decl yet. + void ErrorUnsupported(const Decl *D, const char *Type); + + /// Return a reference to the configured OpenMP runtime. 
+  CIRGenOpenMPRuntime &getOpenMPRuntime() {
+    assert(openMPRuntime != nullptr);
+    return *openMPRuntime;
+  }
+
+private:
+  // An ordered map of canonical GlobalDecls to their mangled names.
+  llvm::MapVector<clang::GlobalDecl, llvm::StringRef> MangledDeclNames;
+  llvm::StringMap<clang::GlobalDecl, llvm::BumpPtrAllocator> Manglings;
+
+  // FIXME: should we use llvm::TrackingVH here?
+  typedef llvm::StringMap<mlir::Operation *> ReplacementsTy;
+  ReplacementsTy Replacements;
+  /// Call replaceAllUsesWith on all pairs in Replacements.
+  void applyReplacements();
+
+  /// Map the source language used to a CIR attribute.
+  mlir::cir::SourceLanguage getCIRSourceLanguage();
+};
+} // namespace cir
+
+#endif // LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp
new file mode 100644
index 000000000000..2060ce8e2d31
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp
@@ -0,0 +1,54 @@
+//===--- CIRGenOpenMPRuntime.cpp - Interface to OpenMP Runtimes -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime MLIR code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenOpenMPRuntime.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+
+using namespace cir;
+using namespace clang;
+
+CIRGenOpenMPRuntime::CIRGenOpenMPRuntime(CIRGenModule &CGM) : CGM(CGM) {}
+
+Address CIRGenOpenMPRuntime::getAddressOfLocalVariable(CIRGenFunction &CGF,
+                                                       const VarDecl *VD) {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return Address::invalid();
+}
+
+void CIRGenOpenMPRuntime::checkAndEmitLastprivateConditional(
+    CIRGenFunction &CGF, const Expr *LHS) {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return;
+}
+
+void CIRGenOpenMPRuntime::registerTargetGlobalVariable(
+    const clang::VarDecl *VD, mlir::cir::GlobalOp globalOp) {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return;
+}
+
+void CIRGenOpenMPRuntime::emitDeferredTargetDecls() const {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return;
+}
+
+void CIRGenOpenMPRuntime::emitFunctionProlog(CIRGenFunction &CGF,
+                                             const clang::Decl *D) {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return;
+}
+
+bool CIRGenOpenMPRuntime::emitTargetGlobal(clang::GlobalDecl &GD) {
+  assert(!UnimplementedFeature::openMPRuntime());
+  return false;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h
new file mode 100644
index 000000000000..c4a53db44c92
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h
@@ -0,0 +1,77 @@
+//===--- CIRGenOpenMPRuntime.h - Interface to OpenMP Runtimes -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for OpenMP runtime MLIR code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H
+#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H
+
+#include "CIRGenValue.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+
+namespace clang {
+class Decl;
+class Expr;
+class GlobalDecl;
+class VarDecl;
+} // namespace clang
+
+namespace cir {
+class CIRGenModule;
+class CIRGenFunction;
+
+class CIRGenOpenMPRuntime {
+public:
+  explicit CIRGenOpenMPRuntime(CIRGenModule &CGM);
+  virtual ~CIRGenOpenMPRuntime() {}
+
+  /// Gets the OpenMP-specific address of the local variable.
+  virtual Address getAddressOfLocalVariable(CIRGenFunction &CGF,
+                                            const clang::VarDecl *VD);
+
+  /// Checks if the provided \p LHS is lastprivate conditional and emits the
+  /// code to update the value of the original variable.
+  /// \code
+  /// lastprivate(conditional: a)
+  /// ...
+  /// <type> a;
+  /// lp_a = ...;
+  /// #pragma omp critical(a)
+  /// if (last_iv_a <= iv) {
+  ///   last_iv_a = iv;
+  ///   global_a = lp_a;
+  /// }
+  /// \endcode
+  virtual void checkAndEmitLastprivateConditional(CIRGenFunction &CGF,
+                                                  const clang::Expr *LHS);
+
+  /// Checks if the provided global decl \a VD is a declare target variable and
+  /// registers it when emitting code for the host.
+  virtual void registerTargetGlobalVariable(const clang::VarDecl *VD,
+                                            mlir::cir::GlobalOp globalOp);
+
+  /// Emit declare target variables whose emission was deferred.
+  void emitDeferredTargetDecls() const;
+
+  /// Emits the OpenMP-specific function prolog.
+  /// Required for device constructs.
+  virtual void emitFunctionProlog(CIRGenFunction &CGF, const clang::Decl *D);
+
+  /// Emit the global \a GD if it is meaningful for the target. Returns true
+  /// if it was emitted successfully.
+  /// \param GD Global to scan.
+  virtual bool emitTargetGlobal(clang::GlobalDecl &GD);
+
+protected:
+  CIRGenModule &CGM;
+};
+} // namespace cir
+
+#endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
new file mode 100644
index 000000000000..16a8a1e2894e
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
@@ -0,0 +1,210 @@
+//===--- CIRGenRecordLayout.h - CIR Record Layout Information ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H
+#define LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H
+
+#include "clang/AST/Decl.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace cir {
+
+/// Structure with information about how a bitfield should be accessed. This is
+/// very similar to what LLVM codegen does; once CIR evolves we may be able to
+/// use a higher-level representation.
+/// TODO(cir): the comment below is extracted from LLVM, build a CIR version of
+/// this.
+///
+/// Often we lay out a sequence of bitfields as a contiguous sequence of bits.
+/// When the AST record layout does this, we represent it in the LLVM IR's type
+/// as either a sequence of i8 members or a byte array to reserve the number of
+/// bytes touched without forcing any particular alignment beyond the basic
+/// character alignment.
+///
+/// Then accessing a particular bitfield involves converting this byte array
+/// into a single integer of that size (i24 or i40 -- may not be power-of-two
+/// size), loading it, and shifting and masking to extract the particular
+/// subsequence of bits which make up that particular bitfield. This structure
+/// encodes the information used to construct the extraction code sequences.
+/// The CIRGenRecordLayout also has a field index which encodes which
+/// byte-sequence this bitfield falls within. Let's assume the following C
+/// struct:
+///
+///   struct S {
+///     char a, b, c;
+///     unsigned bits : 3;
+///     unsigned more_bits : 4;
+///     unsigned still_more_bits : 7;
+///   };
+///
+/// This will end up as the following LLVM type. The fourth and fifth i8
+/// members hold the bitfield run, and the trailing array is the padding out
+/// to a 4-byte alignment.
+///
+///   %t = type { i8, i8, i8, i8, i8, [3 x i8] }
+///
+/// When generating code to access more_bits, we'll generate something
+/// essentially like this:
+///
+///   define i32 @foo(%t* %base) {
+///     %0 = gep %t* %base, i32 0, i32 3
+///     %1 = load i8* %0
+///     %2 = lshr i8 %1, 3
+///     %3 = and i8 %2, 15
+///     %4 = zext i8 %3 to i32
+///     ret i32 %4
+///   }
+///
+struct CIRGenBitFieldInfo {
+  /// The offset within a contiguous run of bitfields that are represented as
+  /// a single "field" within the LLVM struct type. This offset is in bits.
+  unsigned Offset : 16;
+
+  /// The total size of the bit-field, in bits.
+  unsigned Size : 15;
+
+  /// Whether the bit-field is signed.
+  unsigned IsSigned : 1;
+
+  /// The storage size in bits which should be used when accessing this
+  /// bitfield.
+  unsigned StorageSize;
+
+  /// The offset of the bitfield storage from the start of the struct.
+  clang::CharUnits StorageOffset;
+
+  /// The offset within a contiguous run of bitfields that are represented as a
+  /// single "field" within the LLVM struct type, taking into account the AAPCS
+  /// rules for volatile bitfields. This offset is in bits.
+  unsigned VolatileOffset : 16;
+
+  /// The storage size in bits which should be used when accessing this
+  /// bitfield.
+  unsigned VolatileStorageSize;
+
+  /// The offset of the bitfield storage from the start of the struct.
+  clang::CharUnits VolatileStorageOffset;
+
+  /// The name of the bitfield.
+  llvm::StringRef Name;
+
+  /// The actual storage type for the bitfield.
+  mlir::Type StorageType;
+
+  CIRGenBitFieldInfo()
+      : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(),
+        VolatileStorageSize() {}
+
+  CIRGenBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
+                     unsigned StorageSize, clang::CharUnits StorageOffset)
+      : Offset(Offset), Size(Size), IsSigned(IsSigned),
+        StorageSize(StorageSize), StorageOffset(StorageOffset) {}
+
+  void print(llvm::raw_ostream &OS) const;
+  void dump() const;
+
+  /// Given a bit-field decl, build an appropriate helper object for
+  /// accessing that field (which is expected to have the given offset and
+  /// size).
+  static CIRGenBitFieldInfo MakeInfo(class CIRGenTypes &Types,
+                                     const clang::FieldDecl *FD,
+                                     uint64_t Offset, uint64_t Size,
+                                     uint64_t StorageSize,
+                                     clang::CharUnits StorageOffset);
+};
+
+/// This class handles struct and union layout info while lowering AST types
+/// to CIR types.
+///
+/// These layout objects are only created on demand as CIR generation requires.
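+///
+/// For example, given `struct P { int a; int b : 3; int c; };`, one would
+/// expect getCIRFieldNo below to map `a` and `c` to their element indices in
+/// the generated cir struct, while `b` is looked up through getBitFieldInfo
+/// instead (a sketch of intended usage, not a guarantee about exact indices).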
+class CIRGenRecordLayout {
+  friend class CIRGenTypes;
+
+  CIRGenRecordLayout(const CIRGenRecordLayout &) = delete;
+  void operator=(const CIRGenRecordLayout &) = delete;
+
+private:
+  /// The CIR type corresponding to this record layout; used when laying it out
+  /// as a complete object.
+  mlir::cir::StructType CompleteObjectType;
+
+  /// The CIR type for the non-virtual part of this record layout; used when
+  /// laying it out as a base subobject.
+  mlir::cir::StructType BaseSubobjectType;
+
+  /// Map from (non-bit-field) struct field to the corresponding cir struct
+  /// type field no. This info is populated by the record builder.
+  llvm::DenseMap<const clang::FieldDecl *, unsigned> FieldInfo;
+
+  /// Map from (bit-field) struct field to the corresponding CIR struct type
+  /// field no. This info is populated by the record builder.
+  llvm::DenseMap<const clang::FieldDecl *, CIRGenBitFieldInfo> BitFields;
+
+  // FIXME: Maybe we could use CXXBaseSpecifier as the key and use a single map
+  // for both virtual and non-virtual bases.
+  llvm::DenseMap<const clang::CXXRecordDecl *, unsigned> NonVirtualBases;
+
+  /// Map from virtual bases to their field index in the complete object.
+  llvm::DenseMap<const clang::CXXRecordDecl *, unsigned>
+      CompleteObjectVirtualBases;
+
+  /// False if any direct or indirect subobject of this class, when considered
+  /// as a complete object, requires a non-zero bitpattern when
+  /// zero-initialized.
+  bool IsZeroInitializable : 1;
+
+  /// False if any direct or indirect subobject of this class, when considered
+  /// as a base subobject, requires a non-zero bitpattern when zero-initialized.
+  bool IsZeroInitializableAsBase : 1;
+
+public:
+  CIRGenRecordLayout(mlir::cir::StructType CompleteObjectType,
+                     mlir::cir::StructType BaseSubobjectType,
+                     bool IsZeroInitializable, bool IsZeroInitializableAsBase)
+      : CompleteObjectType(CompleteObjectType),
+        BaseSubobjectType(BaseSubobjectType),
+        IsZeroInitializable(IsZeroInitializable),
+        IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+  /// Return the "complete object" CIR type associated with
+  /// this record.
+  mlir::cir::StructType getCIRType() const { return CompleteObjectType; }
+
+  /// Return the "base subobject" CIR type associated with
+  /// this record.
+  mlir::cir::StructType getBaseSubobjectCIRType() const {
+    return BaseSubobjectType;
+  }
+
+  /// Return cir::StructType element number that corresponds to the field FD.
+  unsigned getCIRFieldNo(const clang::FieldDecl *FD) const {
+    FD = FD->getCanonicalDecl();
+    assert(FieldInfo.count(FD) && "Invalid field for record!");
+    return FieldInfo.lookup(FD);
+  }
+
+  /// Check whether this struct can be C++ zero-initialized with a
+  /// zeroinitializer.
+  bool isZeroInitializable() const { return IsZeroInitializable; }
+
+  /// Return the BitFieldInfo that corresponds to the field FD.
+  const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const {
+    FD = FD->getCanonicalDecl();
+    assert(FD->isBitField() && "Invalid call for non-bit-field decl!");
+    llvm::DenseMap<const clang::FieldDecl *,
+                   CIRGenBitFieldInfo>::const_iterator it = BitFields.find(FD);
+    assert(it != BitFields.end() && "Unable to find bitfield info");
+    return it->second;
+  }
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
new file mode 100644
index 000000000000..981804892ebb
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -0,0 +1,1096 @@
+//===--- CIRGenStmt.cpp - Emit CIR Code from Statements -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as CIR code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Address.h"
+#include "CIRGenBuilder.h"
+#include "CIRGenFunction.h"
+#include "mlir/IR/Value.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Stmt.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace cir;
+using namespace clang;
+using namespace mlir::cir;
+
+Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S,
+                                                      bool getLast,
+                                                      AggValueSlot slot) {
+  const Stmt *ExprResult = S.getStmtExprResult();
+  assert((!getLast || (getLast && ExprResult)) &&
+         "If getLast is true then the CompoundStmt must have a StmtExprResult");
+
+  Address retAlloca = Address::invalid();
+
+  for (auto *CurStmt : S.body()) {
+    if (getLast && ExprResult == CurStmt) {
+      while (!isa<Expr>(ExprResult)) {
+        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult))
+          llvm_unreachable("labels are NYI");
+        else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult))
+          llvm_unreachable("statement attributes are NYI");
+        else
+          llvm_unreachable("Unknown value statement");
+      }
+
+      const Expr *E = cast<Expr>(ExprResult);
+      QualType exprTy = E->getType();
+      if (hasAggregateEvaluationKind(exprTy)) {
+        buildAggExpr(E, slot);
+      } else {
+        // We can't return an RValue here because there might be cleanups at
+        // the end of the StmtExpr. Because of that, we have to emit the result
+        // here into a temporary alloca.
+        retAlloca = CreateMemTemp(exprTy, getLoc(E->getSourceRange()));
+        buildAnyExprToMem(E, retAlloca, Qualifiers(),
+                          /*IsInit*/ false);
+      }
+    } else {
+      if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed())
+        llvm_unreachable("failed to build statement");
+    }
+  }
+
+  return retAlloca;
+}
+
+Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast,
+                                          AggValueSlot slot) {
+  Address retAlloca = Address::invalid();
+
+  // Add a local scope to track newly declared variables.
+  SymTableScopeTy varScope(symbolTable);
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        retAlloca = buildCompoundStmtWithoutScope(S, getLast, slot);
+      });
+
+  return retAlloca;
+}
+
+void CIRGenFunction::buildStopPoint(const Stmt *S) {
+  assert(!UnimplementedFeature::generateDebugInfo());
+}
+
+// Build CIR for a statement. useCurrentScope should be true if no new scopes
+// need to be created when finding a compound statement.
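+// For example, a bare expression statement such as `foo();` falls through to
+// the EXPR cases below and is emitted via buildIgnoredExpr, while an IfStmt
+// is dispatched to buildIfStmt, which opens its own cir.scope (a sketch of
+// the dispatch below, not an exhaustive description).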
+mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S,
+                                              bool useCurrentScope,
+                                              ArrayRef<const Attr *> Attrs) {
+  if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope)))
+    return mlir::success();
+
+  if (getContext().getLangOpts().OpenMP &&
+      getContext().getLangOpts().OpenMPSimd)
+    assert(0 && "not implemented");
+
+  switch (S->getStmtClass()) {
+  case Stmt::OMPScopeDirectiveClass:
+    llvm_unreachable("NYI");
+  case Stmt::OpenACCComputeConstructClass:
+  case Stmt::OMPErrorDirectiveClass:
+  case Stmt::NoStmtClass:
+  case Stmt::CXXCatchStmtClass:
+  case Stmt::SEHExceptStmtClass:
+  case Stmt::SEHFinallyStmtClass:
+  case Stmt::MSDependentExistsStmtClass:
+    llvm_unreachable("invalid statement class to emit generically");
+  case Stmt::NullStmtClass:
+  case Stmt::CompoundStmtClass:
+  case Stmt::DeclStmtClass:
+  case Stmt::LabelStmtClass:
+  case Stmt::AttributedStmtClass:
+  case Stmt::GotoStmtClass:
+  case Stmt::BreakStmtClass:
+  case Stmt::ContinueStmtClass:
+  case Stmt::DefaultStmtClass:
+  case Stmt::CaseStmtClass:
+  case Stmt::SEHLeaveStmtClass:
+    llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+    {
+      // Remember the block we came in on.
+      mlir::Block *incoming = builder.getInsertionBlock();
+      assert(incoming && "expression emission must have an insertion point");
+
+      buildIgnoredExpr(cast<Expr>(S));
+
+      mlir::Block *outgoing = builder.getInsertionBlock();
+      assert(outgoing && "expression emission cleared block!");
+
+      break;
+    }
+
+  case Stmt::IfStmtClass:
+    if (buildIfStmt(cast<IfStmt>(*S)).failed())
+      return mlir::failure();
+    break;
+  case Stmt::SwitchStmtClass:
+    if (buildSwitchStmt(cast<SwitchStmt>(*S)).failed())
+      return mlir::failure();
+    break;
+  case Stmt::ForStmtClass:
+    if (buildForStmt(cast<ForStmt>(*S)).failed())
+      return mlir::failure();
+    break;
+  case Stmt::WhileStmtClass:
+    if (buildWhileStmt(cast<WhileStmt>(*S)).failed())
+      return mlir::failure();
+    break;
+  case Stmt::DoStmtClass:
+    if (buildDoStmt(cast<DoStmt>(*S)).failed())
+      return mlir::failure();
+    break;
+
+  case Stmt::CoroutineBodyStmtClass:
+    return buildCoroutineBody(cast<CoroutineBodyStmt>(*S));
+  case Stmt::CoreturnStmtClass:
+    return buildCoreturnStmt(cast<CoreturnStmt>(*S));
+
+  case Stmt::CXXTryStmtClass:
+    return buildCXXTryStmt(cast<CXXTryStmt>(*S));
+
+  case Stmt::CXXForRangeStmtClass:
+    return buildCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
+
+  case Stmt::IndirectGotoStmtClass:
+  case Stmt::ReturnStmtClass:
+  // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass.
+  case Stmt::GCCAsmStmtClass:
+  case Stmt::MSAsmStmtClass:
+    return buildAsmStmt(cast<AsmStmt>(*S));
+  // OMP directives:
+  case Stmt::OMPParallelDirectiveClass:
+    return buildOMPParallelDirective(cast<OMPParallelDirective>(*S));
+  // Unsupported AST nodes:
+  case Stmt::CapturedStmtClass:
+  case Stmt::ObjCAtTryStmtClass:
+  case Stmt::ObjCAtThrowStmtClass:
+  case Stmt::ObjCAtSynchronizedStmtClass:
+  case Stmt::ObjCForCollectionStmtClass:
+  case Stmt::ObjCAutoreleasePoolStmtClass:
+  case Stmt::SEHTryStmtClass:
+  case Stmt::OMPMetaDirectiveClass:
+  case Stmt::OMPCanonicalLoopClass:
+  case Stmt::OMPSimdDirectiveClass:
+  case Stmt::OMPTileDirectiveClass:
+  case Stmt::OMPUnrollDirectiveClass:
+  case Stmt::OMPForDirectiveClass:
+  case Stmt::OMPForSimdDirectiveClass:
+  case Stmt::OMPSectionsDirectiveClass:
+  case Stmt::OMPSectionDirectiveClass:
+  case Stmt::OMPSingleDirectiveClass:
+  case Stmt::OMPMasterDirectiveClass:
+  case Stmt::OMPCriticalDirectiveClass:
+  case Stmt::OMPParallelForDirectiveClass:
+  case Stmt::OMPParallelForSimdDirectiveClass:
+  case Stmt::OMPParallelMasterDirectiveClass:
+  case Stmt::OMPParallelSectionsDirectiveClass:
+  case Stmt::OMPTaskDirectiveClass:
+  case Stmt::OMPTaskyieldDirectiveClass:
+  case Stmt::OMPBarrierDirectiveClass:
+  case Stmt::OMPTaskwaitDirectiveClass:
+  case Stmt::OMPTaskgroupDirectiveClass:
+  case Stmt::OMPFlushDirectiveClass:
+  case Stmt::OMPDepobjDirectiveClass:
+  case Stmt::OMPScanDirectiveClass:
+  case Stmt::OMPOrderedDirectiveClass:
+  case Stmt::OMPAtomicDirectiveClass:
+  case Stmt::OMPTargetDirectiveClass:
+  case Stmt::OMPTeamsDirectiveClass:
+  case Stmt::OMPCancellationPointDirectiveClass:
+  case Stmt::OMPCancelDirectiveClass:
+  case Stmt::OMPTargetDataDirectiveClass:
+  case Stmt::OMPTargetEnterDataDirectiveClass:
+  case Stmt::OMPTargetExitDataDirectiveClass:
+  case Stmt::OMPTargetParallelDirectiveClass:
+  case Stmt::OMPTargetParallelForDirectiveClass:
+  case Stmt::OMPTaskLoopDirectiveClass:
+  case Stmt::OMPTaskLoopSimdDirectiveClass:
+  case Stmt::OMPMaskedTaskLoopDirectiveClass:
+  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
+  case Stmt::OMPMasterTaskLoopDirectiveClass:
+  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
+  case Stmt::OMPParallelGenericLoopDirectiveClass:
+  case Stmt::OMPParallelMaskedDirectiveClass:
+  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
+  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
+  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
+  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
+  case Stmt::OMPDistributeDirectiveClass:
+  case Stmt::OMPDistributeParallelForDirectiveClass:
+  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
+  case Stmt::OMPDistributeSimdDirectiveClass:
+  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
+  case Stmt::OMPTargetParallelForSimdDirectiveClass:
+  case Stmt::OMPTargetSimdDirectiveClass:
+  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+  case Stmt::OMPTargetUpdateDirectiveClass:
+  case Stmt::OMPTeamsDistributeDirectiveClass:
+  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
+  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
+  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
+  case Stmt::OMPTeamsGenericLoopDirectiveClass:
+  case Stmt::OMPTargetTeamsDirectiveClass:
+  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
+  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
+  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
+  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
+  case Stmt::OMPInteropDirectiveClass:
+  case Stmt::OMPDispatchDirectiveClass:
+  case Stmt::OMPGenericLoopDirectiveClass:
+  case Stmt::OMPMaskedDirectiveClass: {
+    llvm::errs() << "CIR codegen for '" << S->getStmtClassName()
+                 << "' not implemented\n";
+    assert(0 && "not implemented");
+    break;
+  }
+  case Stmt::ObjCAtCatchStmtClass:
+    llvm_unreachable(
+        "@catch statements should be handled by EmitObjCAtTryStmt");
+  case Stmt::ObjCAtFinallyStmtClass:
+    llvm_unreachable(
+        "@finally statements should be handled by EmitObjCAtTryStmt");
+  }
+
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S,
+                                                    bool useCurrentScope) {
+  switch (S->getStmtClass()) {
+  default:
+    return mlir::failure();
+  case Stmt::DeclStmtClass:
+    return buildDeclStmt(cast<DeclStmt>(*S));
+  case Stmt::CompoundStmtClass:
+    useCurrentScope ? buildCompoundStmtWithoutScope(cast<CompoundStmt>(*S))
+                    : buildCompoundStmt(cast<CompoundStmt>(*S));
+    break;
+  case Stmt::ReturnStmtClass:
+    return buildReturnStmt(cast<ReturnStmt>(*S));
+  case Stmt::GotoStmtClass:
+    return buildGotoStmt(cast<GotoStmt>(*S));
+  case Stmt::ContinueStmtClass:
+    return buildContinueStmt(cast<ContinueStmt>(*S));
+  case Stmt::NullStmtClass:
+    break;
+
+  case Stmt::LabelStmtClass:
+    return buildLabelStmt(cast<LabelStmt>(*S));
+
+  case Stmt::CaseStmtClass:
+  case Stmt::DefaultStmtClass:
+    assert(0 &&
+           "Should not get here, currently handled directly from SwitchStmt");
+    break;
+
+  case Stmt::BreakStmtClass:
+    return buildBreakStmt(cast<BreakStmt>(*S));
+
+  case Stmt::AttributedStmtClass:
+    return buildAttributedStmt(cast<AttributedStmt>(*S));
+
+  case Stmt::SEHLeaveStmtClass:
+    llvm::errs() << "CIR codegen for '" << S->getStmtClassName()
+                 << "' not implemented\n";
+    assert(0 && "not implemented");
+  }
+
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) {
+  if (buildLabel(S.getDecl()).failed())
+    return mlir::failure();
+
+  // IsEHa: not implemented.
+  assert(!(getContext().getLangOpts().EHAsynch && S.isSideEntry()));
+
+  return buildStmt(S.getSubStmt(), /* useCurrentScope */ true);
+}
+
+mlir::LogicalResult
+CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) {
+  for (const auto *A : S.getAttrs()) {
+    switch (A->getKind()) {
+    case attr::NoMerge:
+    case attr::NoInline:
+    case attr::AlwaysInline:
+    case attr::MustTail:
+      llvm_unreachable("NYI attributes");
+    default:
+      break;
+    }
+  }
+
+  return buildStmt(S.getSubStmt(), true, S.getAttrs());
+}
+
+// Add a terminating yield on body regions (loops, ...) in case there are no
+// other terminators used.
+// FIXME: make terminateCaseRegion use this too.
+static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
+                          mlir::Location loc) {
+  if (r.empty())
+    return;
+
+  SmallVector<mlir::Block *> eraseBlocks;
+  unsigned numBlocks = r.getBlocks().size();
+  for (auto &block : r.getBlocks()) {
+    // Returns were already cleaned up, which can leave behind empty blocks
+    // when a return is the last statement.
+    if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
+        block.hasNoSuccessors())
+      eraseBlocks.push_back(&block);
+
+    if (block.empty() ||
+        !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
+      mlir::OpBuilder::InsertionGuard guardCase(builder);
+      builder.setInsertionPointToEnd(&block);
+      builder.createYield(loc);
+    }
+  }
+
+  for (auto *b : eraseBlocks)
+    b->erase();
+}
+
+mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) {
+  mlir::LogicalResult res = mlir::success();
+  // The else branch of a consteval if statement is always the only branch
+  // that can be runtime evaluated.
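+  // For example, under C++23 `if consteval` semantics:
+  //
+  //   if consteval { ct(); } else { rt(); }   // only rt() can run at runtime
+  //   if !consteval { rt(); }                 // here the then-branch is the
+  //                                           // runtime branch
+  //
+  // which is why the negated form selects getThen() below.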
+  const Stmt *ConstevalExecuted = nullptr;
+  if (S.isConsteval()) {
+    ConstevalExecuted = S.isNegatedConsteval() ? S.getThen() : S.getElse();
+    if (!ConstevalExecuted)
+      // No runtime code execution required.
+      return res;
+  }
+
+  // C99 6.8.4.1: The first substatement is executed if the expression
+  // compares unequal to 0. The condition must be a scalar type.
+  auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
+    if (S.isConsteval())
+      return buildStmt(ConstevalExecuted, /*useCurrentScope=*/true);
+
+    if (S.getInit())
+      if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+
+    if (S.getConditionVariable())
+      buildDecl(*S.getConditionVariable());
+
+    // During LLVM codegen, if the condition constant folds and can be elided,
+    // it tries to avoid emitting the condition and the dead arm of the
+    // if/else.
+    // TODO(cir): we skip this in CIRGen, but should implement this as part of
+    // SSCP or a specific CIR pass.
+    bool CondConstant;
+    if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
+                                     S.isConstexpr())) {
+      if (S.isConstexpr()) {
+        // Handle "if constexpr" explicitly here to avoid generating some
+        // ill-formed code since in CIR the "if" is no longer simplified
+        // in this lambda like in Clang but postponed to other MLIR
+        // passes.
+        if (const Stmt *Executed = CondConstant ? S.getThen() : S.getElse())
+          return buildStmt(Executed, /*useCurrentScope=*/true);
+        // There is nothing to execute at runtime.
+        // TODO(cir): there is still an empty cir.scope generated by the caller.
+        return mlir::success();
+      }
+      assert(!UnimplementedFeature::constantFoldsToSimpleInteger());
+    }
+
+    assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic());
+    assert(!UnimplementedFeature::incrementProfileCounter());
+    return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse());
+  };
+
+  // TODO: Add a new scoped symbol table.
+  // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
+  // The if scope contains the full source range for IfStmt.
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()};
+        res = ifStmtBuilder();
+      });
+
+  return res;
+}
+
+mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) {
+  if (!builder.getInsertionBlock()) {
+    CGM.emitError("Seems like this is unreachable code, what should we do?");
+    return mlir::failure();
+  }
+
+  for (const auto *I : S.decls()) {
+    buildDecl(*I);
+  }
+
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) {
+  assert(!UnimplementedFeature::requiresReturnValueCheck());
+  auto loc = getLoc(S.getSourceRange());
+
+  // Emit the result value, even if unused, to evaluate the side effects.
+  const Expr *RV = S.getRetValue();
+
+  // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we
+  // should model this in face of dtors.
+
+  bool createNewScope = false;
+  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV)) {
+    RV = EWC->getSubExpr();
+    createNewScope = true;
+  }
+
+  auto handleReturnVal = [&]() {
+    if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
+        S.getNRVOCandidate()->isNRVOVariable()) {
+      assert(!UnimplementedFeature::openMP());
+      // Apply the named return value optimization for this return statement,
+      // which means doing nothing: the appropriate result has already been
+      // constructed into the NRVO variable.
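+      // For instance (illustrative only):
+      //
+      //   Big make() { Big b; use(b); return b; }
+      //
+      // with NRVO, `b` is constructed directly in the caller-provided return
+      // slot, so nothing is copied here.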
+
+      // If there is an NRVO flag for this variable, set it to 1 to indicate
+      // that the cleanup code should not destroy the variable.
+      if (auto NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+        getBuilder().createFlagStore(loc, true, NRVOFlag);
+    } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
+      // Make sure not to return anything, but evaluate the expression
+      // for side effects.
+      if (RV) {
+        assert(0 && "not implemented");
+      }
+    } else if (!RV) {
+      // Do nothing (return value is left uninitialized).
+    } else if (FnRetTy->isReferenceType()) {
+      // If this function returns a reference, take the address of the
+      // expression rather than the value.
+      RValue Result = buildReferenceBindingToExpr(RV);
+      builder.createStore(loc, Result.getScalarVal(), ReturnValue);
+    } else {
+      mlir::Value V = nullptr;
+      switch (CIRGenFunction::getEvaluationKind(RV->getType())) {
+      case TEK_Scalar:
+        V = buildScalarExpr(RV);
+        builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca);
+        break;
+      case TEK_Complex:
+        llvm_unreachable("NYI");
+        break;
+      case TEK_Aggregate:
+        buildAggExpr(
+            RV, AggValueSlot::forAddr(
+                    ReturnValue, Qualifiers(), AggValueSlot::IsDestructed,
+                    AggValueSlot::DoesNotNeedGCBarriers,
+                    AggValueSlot::IsNotAliased, getOverlapForReturnValue()));
+        break;
+      }
+    }
+  };
+
+  if (!createNewScope)
+    handleReturnVal();
+  else {
+    mlir::Location scopeLoc =
+        getLoc(RV ? RV->getSourceRange() : S.getSourceRange());
+    builder.create<mlir::cir::ScopeOp>(
+        scopeLoc, /*scopeBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          CIRGenFunction::LexicalScope lexScope{*this, loc,
+                                                builder.getInsertionBlock()};
+          handleReturnVal();
+        });
+  }
+
+  // Create a new return block (if not existent) and add a branch to
+  // it. The actual return instruction is only inserted during current
+  // scope cleanup handling.
+  auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
+  builder.create<mlir::cir::BrOp>(loc, retBlock);
+
+  // Insert the new block to continue codegen after branch to ret block.
+  builder.createBlock(builder.getBlock()->getParent());
+
+  // TODO(cir): LLVM codegen for a cleanup on cleanupScope here.
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) {
+  // FIXME: LLVM codegen inserts emit stop point here for debug info
+  // sake when the insertion point is available, but doesn't do
+  // anything special when there isn't. We haven't implemented debug
+  // info support just yet, look at this again once we have it.
+  assert(builder.getInsertionBlock() && "not yet implemented");
+
+  // A goto marks the end of a block; create a new one so that codegen can
+  // resume building into it after buildGotoStmt.
+
+  // Build a cir.br to the target label.
+  auto &JD = LabelMap[S.getLabel()];
+  auto brOp = buildBranchThroughCleanup(getLoc(S.getSourceRange()), JD);
+  if (!JD.isValid())
+    currLexScope->PendingGotos.push_back(std::make_pair(brOp, S.getLabel()));
+
+  // Insert the new block to continue codegen after goto.
+  builder.createBlock(builder.getBlock()->getParent());
+
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) {
+  JumpDest &Dest = LabelMap[D];
+
+  // Create a new block to tag with a label and add a branch from
+  // the current one to it. If the block is empty, just attach the
+  // label to it.
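+  // A sketch of the expected shape, for `goto done; ... done: return;`
+  // (block names illustrative):
+  //
+  //   cir.br ^done          // emitted by buildGotoStmt above
+  //   ...
+  //   ^done:                // block created and tagged below
+  //     cir.return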
+  mlir::Block *currBlock = builder.getBlock();
+  mlir::Block *labelBlock = currBlock;
+  if (!currBlock->empty()) {
+
+    {
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      labelBlock = builder.createBlock(builder.getBlock()->getParent());
+    }
+
+    builder.create<mlir::cir::BrOp>(getLoc(D->getSourceRange()), labelBlock);
+    builder.setInsertionPointToEnd(labelBlock);
+  }
+
+  if (!Dest.isValid()) {
+    Dest.Block = labelBlock;
+    currLexScope->SolvedLabels.insert(D);
+    // FIXME: add a label attribute to block...
+  } else {
+    assert(0 && "unimplemented");
+  }
+
+  // FIXME: emit debug info for labels, incrementProfileCounter
+  return mlir::success();
+}
+
+mlir::LogicalResult
+CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) {
+  builder.createContinue(getLoc(S.getContinueLoc()));
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) {
+  builder.createBreak(getLoc(S.getBreakLoc()));
+  return mlir::success();
+}
+
+const CaseStmt *CIRGenFunction::foldCaseStmt(
+    const clang::CaseStmt &S, mlir::Type condType,
+    SmallVector<mlir::Attribute, 4> &caseAttrs) {
+  const CaseStmt *caseStmt = &S;
+  const CaseStmt *lastCase = &S;
+  SmallVector<mlir::Attribute, 4> caseEltValueListAttr;
+
+  // Fold cascading cases whenever possible to simplify codegen a bit.
+  while (caseStmt) {
+    lastCase = caseStmt;
+    auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext());
+    caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal));
+    caseStmt = dyn_cast_or_null<CaseStmt>(caseStmt->getSubStmt());
+  }
+
+  auto *ctxt = builder.getContext();
+
+  auto caseAttr = mlir::cir::CaseAttr::get(
+      ctxt, builder.getArrayAttr(caseEltValueListAttr),
+      CaseOpKindAttr::get(ctxt, caseEltValueListAttr.size() > 1
+                                    ? mlir::cir::CaseOpKind::Anyof
+                                    : mlir::cir::CaseOpKind::Equal));
+
+  caseAttrs.push_back(caseAttr);
+
+  return lastCase;
+}
+
+template <typename T>
+mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade(
+    const T *stmt, mlir::Type condType,
+    SmallVector<mlir::Attribute, 4> &caseAttrs) {
+
+  assert((isa<CaseStmt, DefaultStmt>(stmt)) &&
+         "only case or default stmt go here");
+
+  auto res = mlir::success();
+
+  // Update scope information with the current region we are
+  // emitting code for. This is useful to allow return blocks to be
+  // automatically and properly placed during cleanup.
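+  // Note that foldCaseStmt above collapses a cascade such as
+  //
+  //   case 1: case 2: case 3: body;
+  //
+  // into a single case attribute holding [1, 2, 3] with CaseOpKind::Anyof,
+  // so only one region is created here for the whole cascade.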
+  auto *region = currLexScope->createSwitchRegion();
+  auto *block = builder.createBlock(region);
+  builder.setInsertionPointToEnd(block);
+
+  auto *sub = stmt->getSubStmt();
+
+  if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
+    builder.createYield(getLoc(stmt->getBeginLoc()));
+    res = buildDefaultStmt(*dyn_cast<DefaultStmt>(sub), condType, caseAttrs);
+  } else if (isa<CaseStmt>(sub) && isa<DefaultStmt>(stmt)) {
+    builder.createYield(getLoc(stmt->getBeginLoc()));
+    res = buildCaseStmt(*dyn_cast<CaseStmt>(sub), condType, caseAttrs);
+  } else {
+    res = buildStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
+  }
+
+  return res;
+}
+
+mlir::LogicalResult CIRGenFunction::buildCaseStmt(
+    const CaseStmt &S, mlir::Type condType,
+    SmallVector<mlir::Attribute, 4> &caseAttrs) {
+  assert((!S.getRHS() || !S.caseStmtIsGNURange()) &&
+         "case ranges not implemented");
+
+  auto *caseStmt = foldCaseStmt(S, condType, caseAttrs);
+  return buildCaseDefaultCascade(caseStmt, condType, caseAttrs);
+}
+
+mlir::LogicalResult CIRGenFunction::buildDefaultStmt(
+    const DefaultStmt &S, mlir::Type condType,
+    SmallVector<mlir::Attribute, 4> &caseAttrs) {
+  auto ctxt = builder.getContext();
+
+  auto defAttr = mlir::cir::CaseAttr::get(
+      ctxt, builder.getArrayAttr({}),
+      CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Default));
+
+  caseAttrs.push_back(defAttr);
+  return buildCaseDefaultCascade(&S, condType, caseAttrs);
+}
+
+mlir::LogicalResult CIRGenFunction::buildSwitchCase(
+    const SwitchCase &S, mlir::Type condType,
+    SmallVector<mlir::Attribute, 4> &caseAttrs) {
+  if (S.getStmtClass() == Stmt::CaseStmtClass)
+    return buildCaseStmt(cast<CaseStmt>(S), condType, caseAttrs);
+
+  if (S.getStmtClass() == Stmt::DefaultStmtClass)
+    return buildDefaultStmt(cast<DefaultStmt>(S), condType, caseAttrs);
+
+  llvm_unreachable("expect case or default stmt");
+}
+
+mlir::LogicalResult
+CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S,
+                                     ArrayRef<const Attr *> ForAttrs) {
+  mlir::cir::ForOp forOp;
+
+  // TODO(cir): pass in array of attributes.
+  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
+    auto loopRes = mlir::success();
+    // Evaluate the first pieces before the loop.
+    if (S.getInit())
+      if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+    if (buildStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+    if (buildStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+    if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+
+    assert(!UnimplementedFeature::loopInfoStack());
+    // From LLVM: if there are any cleanups between here and the loop-exit
+    // scope, create a block to stage a loop exit along.
+    // We probably already do the right thing because of ScopeOp, but make
+    // sure we handle all cases.
+    assert(!UnimplementedFeature::requiresCleanups());
+
+    forOp = builder.createFor(
+        getLoc(S.getSourceRange()),
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          assert(!UnimplementedFeature::createProfileWeightsForLoop());
+          assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic());
+          mlir::Value condVal = evaluateExprAsBool(S.getCond());
+          builder.createCondition(condVal);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          // https://en.cppreference.com/w/cpp/language/for
+          // In C++, the scope of the init-statement and the scope of
+          // statement are one and the same.
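+          // Recall the desugaring being walked here (cf. [stmt.ranged]):
+          //
+          //   for (auto x : r) body;
+          //   // ~> auto &&__range = r;
+          //   //    auto __begin = begin-expr, __end = end-expr;
+          //   //    for (; __begin != __end; ++__begin)
+          //   //      { auto x = *__begin; body; }
+          //
+          // getRangeStmt/getBeginStmt/getEndStmt above map to the hidden
+          // declarations, and getLoopVarStmt below declares `x`.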
+          bool useCurrentScope = true;
+          if (buildStmt(S.getLoopVarStmt(), useCurrentScope).failed())
+            loopRes = mlir::failure();
+          if (buildStmt(S.getBody(), useCurrentScope).failed())
+            loopRes = mlir::failure();
+          buildStopPoint(&S);
+        },
+        /*stepBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          if (S.getInc())
+            if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed())
+              loopRes = mlir::failure();
+          builder.createYield(loc);
+        });
+    return loopRes;
+  };
+
+  auto res = mlir::success();
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        // Create a cleanup scope for the condition variable cleanups.
+        // Logical equivalent of LLVM codegen's
+        // LexicalScope ConditionScope(*this, S.getSourceRange())...
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        res = forStmtBuilder();
+      });
+
+  if (res.failed())
+    return res;
+
+  terminateBody(builder, forOp.getBody(), getLoc(S.getEndLoc()));
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) {
+  mlir::cir::ForOp forOp;
+
+  // TODO: pass in array of attributes.
+  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
+    auto loopRes = mlir::success();
+    // Evaluate the first part before the loop.
+    if (S.getInit())
+      if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+    assert(!UnimplementedFeature::loopInfoStack());
+    // From LLVM: if there are any cleanups between here and the loop-exit
+    // scope, create a block to stage a loop exit along.
+    // We probably already do the right thing because of ScopeOp, but make
+    // sure we handle all cases.
+    assert(!UnimplementedFeature::requiresCleanups());
+
+    forOp = builder.createFor(
+        getLoc(S.getSourceRange()),
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          assert(!UnimplementedFeature::createProfileWeightsForLoop());
+          assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic());
+          mlir::Value condVal;
+          if (S.getCond()) {
+            // If the for statement has a condition scope,
+            // emit the local variable declaration.
+            if (S.getConditionVariable())
+              buildDecl(*S.getConditionVariable());
+            // C99 6.8.5p2/p4: The first substatement is executed if the
+            // expression compares unequal to 0. The condition must be a
+            // scalar type.
+            condVal = evaluateExprAsBool(S.getCond());
+          } else {
+            auto boolTy = mlir::cir::BoolType::get(b.getContext());
+            condVal = b.create<mlir::cir::ConstantOp>(
+                loc, boolTy,
+                mlir::cir::BoolAttr::get(b.getContext(), boolTy, true));
+          }
+          builder.createCondition(condVal);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          // https://en.cppreference.com/w/cpp/language/for
+          // While in C++, the scope of the init-statement and the scope of
+          // statement are one and the same, in C the scope of statement is
+          // nested within the scope of init-statement.
+          bool useCurrentScope =
+              CGM.getASTContext().getLangOpts().CPlusPlus;
+          if (buildStmt(S.getBody(), useCurrentScope).failed())
+            loopRes = mlir::failure();
+          buildStopPoint(&S);
+        },
+        /*stepBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          if (S.getInc())
+            if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed())
+              loopRes = mlir::failure();
+          builder.createYield(loc);
+        });
+    return loopRes;
+  };
+
+  auto res = mlir::success();
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        res = forStmtBuilder();
+      });
+
+  if (res.failed())
+    return res;
+
+  terminateBody(builder, forOp.getBody(), getLoc(S.getEndLoc()));
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) {
+  mlir::cir::DoWhileOp doWhileOp;
+
+  // TODO: pass in array of attributes.
+  auto doStmtBuilder = [&]() -> mlir::LogicalResult {
+    auto loopRes = mlir::success();
+    assert(!UnimplementedFeature::loopInfoStack());
+    // From LLVM: if there are any cleanups between here and the loop-exit
+    // scope, create a block to stage a loop exit along.
+    // We probably already do the right thing because of ScopeOp, but make
+    // sure we handle all cases.
+    assert(!UnimplementedFeature::requiresCleanups());
+
+    doWhileOp = builder.createDoWhile(
+        getLoc(S.getSourceRange()),
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          assert(!UnimplementedFeature::createProfileWeightsForLoop());
+          assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic());
+          // C99 6.8.5p2/p4: The first substatement is executed if the
+          // expression compares unequal to 0. The condition must be a
+          // scalar type.
+          mlir::Value condVal = evaluateExprAsBool(S.getCond());
+          builder.createCondition(condVal);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed())
+            loopRes = mlir::failure();
+          buildStopPoint(&S);
+        });
+    return loopRes;
+  };
+
+  auto res = mlir::success();
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        res = doStmtBuilder();
+      });
+
+  if (res.failed())
+    return res;
+
+  terminateBody(builder, doWhileOp.getBody(), getLoc(S.getEndLoc()));
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) {
+  mlir::cir::WhileOp whileOp;
+
+  // TODO: pass in array of attributes.
+  auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
+    auto loopRes = mlir::success();
+    assert(!UnimplementedFeature::loopInfoStack());
+    // From LLVM: if there are any cleanups between here and the loop-exit
+    // scope, create a block to stage a loop exit along.
+    // We probably already do the right thing because of ScopeOp, but make
+    // sure we handle all cases.
+    assert(!UnimplementedFeature::requiresCleanups());
+
+    whileOp = builder.createWhile(
+        getLoc(S.getSourceRange()),
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          assert(!UnimplementedFeature::createProfileWeightsForLoop());
+          assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic());
+          mlir::Value condVal;
+          // If the while statement has a condition scope,
+          // emit the local variable declaration.
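+          // e.g. `while (int v = next()) use(v);` -- the declaration lives in
+          // the condition scope and is re-evaluated before each iteration,
+          // hence it is emitted inside the condition region.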
+          if (S.getConditionVariable())
+            buildDecl(*S.getConditionVariable());
+          // C99 6.8.5p2/p4: The first substatement is executed if the
+          // expression compares unequal to 0. The condition must be a
+          // scalar type.
+          condVal = evaluateExprAsBool(S.getCond());
+          builder.createCondition(condVal);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed())
+            loopRes = mlir::failure();
+          buildStopPoint(&S);
+        });
+    return loopRes;
+  };
+
+  auto res = mlir::success();
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        res = whileStmtBuilder();
+      });
+
+  if (res.failed())
+    return res;
+
+  terminateBody(builder, whileOp.getBody(), getLoc(S.getEndLoc()));
+  return mlir::success();
+}
+
+mlir::LogicalResult CIRGenFunction::buildSwitchBody(
+    const Stmt *S, mlir::Type condType,
+    llvm::SmallVector<mlir::Attribute, 4> &caseAttrs) {
+  if (auto *compoundStmt = dyn_cast<CompoundStmt>(S)) {
+    mlir::Block *lastCaseBlock = nullptr;
+    auto res = mlir::success();
+    for (auto *c : compoundStmt->body()) {
+      if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
+        res = buildSwitchCase(*switchCase, condType, caseAttrs);
+      } else if (lastCaseBlock) {
+        // This is a statement that follows a case; emit it as part of the
+        // preceding (known) case.
+        mlir::OpBuilder::InsertionGuard guardCase(builder);
+        builder.setInsertionPointToEnd(lastCaseBlock);
+        res = buildStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c));
+      } else {
+        llvm_unreachable("statement doesn't belong to any case region, NYI");
+      }
+
+      lastCaseBlock = builder.getBlock();
+
+      if (res.failed())
+        break;
+    }
+    return res;
+  }
+
+  llvm_unreachable("switch body is not CompoundStmt, NYI");
+}
+
+mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) {
+  // TODO: LLVM codegen does some early optimization to fold the condition and
+  // only emit live cases. CIR should use MLIR to achieve similar things,
+  // nothing to be done here.
+  // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
+
+  auto res = mlir::success();
+  SwitchOp swop;
+
+  auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
+    if (S.getInit())
+      if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+
+    if (S.getConditionVariable())
+      buildDecl(*S.getConditionVariable());
+
+    mlir::Value condV = buildScalarExpr(S.getCond());
+
+    // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
+    // TODO: if the switch has a condition wrapped by __builtin_unpredictable?
+
+    swop = builder.create<SwitchOp>(
+        getLoc(S.getBeginLoc()), condV,
+        /*switchBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
+          currLexScope->setAsSwitch();
+
+          llvm::SmallVector<mlir::Attribute, 4> caseAttrs;
+
+          res = buildSwitchBody(S.getBody(), condV.getType(), caseAttrs);
+
+          os.addRegions(currLexScope->getSwitchRegions());
+          os.addAttribute("cases", builder.getArrayAttr(caseAttrs));
+        });
+
+    if (res.failed())
+      return res;
+    return mlir::success();
+  };
+
+  // The switch scope contains the full source range for SwitchStmt.
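+  // As an illustration of buildSwitchBody above: in
+  //
+  //   switch (x) { case 1: a(); b(); break; }
+  //
+  // `b()` and `break` are not SwitchCases, so they are emitted into
+  // lastCaseBlock, i.e. the region opened for `case 1:`.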
+  auto scopeLoc = getLoc(S.getSourceRange());
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
+        res = switchStmtBuilder();
+      });
+
+  if (res.failed())
+    return res;
+
+  // Any block in a case region without a terminator is considered a
+  // fallthrough yield. In practice there shouldn't be more than one
+  // block without a terminator; we patch any block we see though and
+  // let MLIR's SwitchOp verifier enforce the rules.
+  auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) {
+    if (r.empty())
+      return;
+
+    SmallVector<mlir::Block *> eraseBlocks;
+    unsigned numBlocks = r.getBlocks().size();
+    for (auto &block : r.getBlocks()) {
+      // Returns were already cleaned up, which can leave behind empty blocks
+      // when a return is the last statement.
+      if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
+          block.hasNoSuccessors())
+        eraseBlocks.push_back(&block);
+
+      if (block.empty() ||
+          !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
+        mlir::OpBuilder::InsertionGuard guardCase(builder);
+        builder.setInsertionPointToEnd(&block);
+        builder.createYield(loc);
+      }
+    }
+
+    for (auto *b : eraseBlocks)
+      b->erase();
+  };
+
+  // Make sure all case regions are terminated by inserting fallthroughs
+  // when necessary.
+  // FIXME: find a better way to get accurate location information here.
+  for (auto &r : swop.getRegions())
+    terminateCaseRegion(r, swop.getLoc());
+  return mlir::success();
+}
+
+void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV,
+                                         QualType Ty) {
+  if (RV.isScalar()) {
+    builder.createStore(loc, RV.getScalarVal(), ReturnValue);
+  } else if (RV.isAggregate()) {
+    LValue Dest = makeAddrLValue(ReturnValue, Ty);
+    LValue Src = makeAddrLValue(RV.getAggregateAddress(), Ty);
+    buildAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
+  } else {
+    llvm_unreachable("NYI");
+  }
+  buildBranchThroughCleanup(loc, ReturnBlock());
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp
new file mode 100644
index 000000000000..3874ef3dcee6
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp
@@ -0,0 +1,45 @@
+//===--- CIRGenStmtOpenMP.cpp - Emit MLIR Code from OpenMP Statements -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit OpenMP Stmt nodes as MLIR code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "CIRGenOpenMPRuntime.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+
+using namespace cir;
+using namespace clang;
+using namespace mlir::omp;
+
+mlir::LogicalResult
+CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) {
+  mlir::LogicalResult res = mlir::success();
+  auto scopeLoc = getLoc(S.getSourceRange());
+  // Create an `omp.parallel` op.
+  auto parallelOp = builder.create<mlir::omp::ParallelOp>(scopeLoc);
+  mlir::Block &block = parallelOp.getRegion().emplaceBlock();
+  mlir::OpBuilder::InsertionGuard guardCase(builder);
+  builder.setInsertionPointToEnd(&block);
+  // Create a scope for the OpenMP region.
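+  // The intended overall shape is roughly (a sketch in the current omp/cir
+  // dialect syntax, which may drift):
+  //
+  //   omp.parallel {
+  //     cir.scope {
+  //       ... // emitted body of the parallel region
+  //     }
+  //     omp.terminator
+  //   }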
+  builder.create<mlir::cir::ScopeOp>(
+      scopeLoc, /*scopeBuilder=*/
+      [&](mlir::OpBuilder &b, mlir::Location loc) {
+        LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()};
+        // Emit the body of the region.
+        if (buildStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel)
+                          ->getCapturedStmt(),
+                      /*useCurrentScope=*/true)
+                .failed())
+          res = mlir::failure();
+      });
+  // Add the terminator for `omp.parallel`.
+  builder.create<mlir::omp::TerminatorOp>(getLoc(S.getSourceRange().getEnd()));
+  return res;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
new file mode 100644
index 000000000000..96d3ed851e8a
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -0,0 +1,139 @@
+//===--- CIRGenTypeCache.h - Commonly used LLVM types and info -*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This structure provides a set of common types useful during CIR emission.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H
+#define LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H
+
+#include "UnimplementedFeatureGuarding.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Types.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+namespace cir {
+
+/// This structure provides a set of types that are commonly used
+/// during IR emission. It's initialized once in CIRGenModule's
+/// constructor and then copied around into new CIRGenFunctions.
+struct CIRGenTypeCache {
+  CIRGenTypeCache() {}
+
+  /// void
+  mlir::cir::VoidType VoidTy;
+  // signed char, short, int, long
+  mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty;
+  // unsigned char, unsigned short, unsigned int, unsigned long
+  mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty;
+  /// half, bfloat, float, double
+  // mlir::Type HalfTy, BFloatTy;
+  // TODO(cir): perhaps we should abstract long double variations into a custom
+  // cir.long_double type. Said type would also hold the semantics for lowering.
+  mlir::cir::SingleType FloatTy;
+  mlir::cir::DoubleType DoubleTy;
+  mlir::cir::FP80Type FP80Ty;
+
+  /// unsigned int
+  mlir::Type UIntTy;
+
+  /// unsigned char
+  mlir::Type UCharTy;
+
+  /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
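+  /// (The anonymous unions here and below intentionally alias one storage
+  /// slot under two names, so e.g. UIntPtrTy and SizeTy can never get out of
+  /// sync by construction.)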
+  union {
+    mlir::Type UIntPtrTy;
+    mlir::Type SizeTy;
+  };
+
+  mlir::Type PtrDiffTy;
+
+  /// void* in address space 0
+  mlir::cir::PointerType VoidPtrTy;
+  mlir::cir::PointerType UInt8PtrTy;
+
+  /// void** in address space 0
+  union {
+    mlir::cir::PointerType VoidPtrPtrTy;
+    mlir::cir::PointerType UInt8PtrPtrTy;
+  };
+
+  /// void* in alloca address space
+  union {
+    mlir::cir::PointerType AllocaVoidPtrTy;
+    mlir::cir::PointerType AllocaInt8PtrTy;
+  };
+
+  /// void* in default globals address space
+  // union {
+  //   mlir::cir::PointerType GlobalsVoidPtrTy;
+  //   mlir::cir::PointerType GlobalsInt8PtrTy;
+  // };
+
+  /// void* in the address space for constant globals
+  // mlir::cir::PointerType ConstGlobalsPtrTy;
+
+  /// The size and alignment of the builtin C type 'int'. This comes
+  /// up enough in various ABI lowering tasks to be worth pre-computing.
+  // union {
+  //   unsigned char IntSizeInBytes;
+  //   unsigned char IntAlignInBytes;
+  // };
+  // clang::CharUnits getIntSize() const {
+  //   return clang::CharUnits::fromQuantity(IntSizeInBytes);
+  // }
+  // clang::CharUnits getIntAlign() const {
+  //   return clang::CharUnits::fromQuantity(IntAlignInBytes);
+  // }
+
+  /// The width of a pointer into the generic address space.
+  // unsigned char PointerWidthInBits;
+
+  /// The size and alignment of a pointer into the generic address space.
+  union {
+    unsigned char PointerAlignInBytes;
+    unsigned char PointerSizeInBytes;
+  };
+
+  /// The size and alignment of size_t.
+  // union {
+  //   unsigned char SizeSizeInBytes; // sizeof(size_t)
+  //   unsigned char SizeAlignInBytes;
+  // };
+
+  clang::LangAS ASTAllocaAddressSpace;
+
+  // clang::CharUnits getSizeSize() const {
+  //   return clang::CharUnits::fromQuantity(SizeSizeInBytes);
+  // }
+  // clang::CharUnits getSizeAlign() const {
+  //   return clang::CharUnits::fromQuantity(SizeAlignInBytes);
+  // }
+  clang::CharUnits getPointerSize() const {
+    return clang::CharUnits::fromQuantity(PointerSizeInBytes);
+  }
+  clang::CharUnits getPointerAlign() const {
+    return clang::CharUnits::fromQuantity(PointerAlignInBytes);
+  }
+
+  clang::LangAS getASTAllocaAddressSpace() const {
+    // Address spaces are not yet fully supported; for now the default alloca
+    // address space is only used for comparison with the
+    // default address space.
+    assert(!UnimplementedFeature::addressSpace());
+    assert(ASTAllocaAddressSpace == clang::LangAS::Default);
+    return ASTAllocaAddressSpace;
+  }
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
new file mode 100644
index 000000000000..157d68435571
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -0,0 +1,907 @@
+#include "CIRGenTypes.h"
+#include "CIRGenCall.h"
+#include "CIRGenFunctionInfo.h"
+#include "CIRGenModule.h"
+#include "CallingConv.h"
+#include "TargetInfo.h"
+
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using namespace cir;
+
+unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) {
+  assert(CC == CC_C && "No other calling conventions implemented.");
+  return cir::CallingConv::C;
+}
+
+CIRGenTypes::CIRGenTypes(CIRGenModule &cgm)
+    : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm},
+      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
+      TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {
+  SkippedLayout = false;
+}
+
+CIRGenTypes::~CIRGenTypes() {
+  for (llvm::FoldingSet<CIRGenFunctionInfo>::iterator
+           I = FunctionInfos.begin(),
+           E = FunctionInfos.end();
+       I != E;)
+    delete &*I++;
+}
+
+// This is the CIR counterpart of CodeGenTypes::addRecordTypeName.
+std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
+                                           StringRef suffix) {
+  llvm::SmallString<256> typeName;
+  llvm::raw_svector_ostream outStream(typeName);
+
+  PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
+  policy.SuppressInlineNamespace = false;
+
+  if (recordDecl->getIdentifier()) {
+    if (recordDecl->getDeclContext())
+      recordDecl->printQualifiedName(outStream, policy);
+    else
+      recordDecl->printName(outStream, policy);
+
+    // Ensure each template specialization has a unique name.
+    if (auto *templateSpecialization =
+            llvm::dyn_cast<ClassTemplateSpecializationDecl>(recordDecl)) {
+      outStream << '<';
+      const auto args = templateSpecialization->getTemplateArgs().asArray();
+      const auto printer = [&policy, &outStream](const TemplateArgument &arg) {
+        switch (arg.getKind()) {
+        case TemplateArgument::Integral:
+          outStream << arg.getAsIntegral();
+          break;
+        case TemplateArgument::Type:
+          arg.getAsType().print(outStream, policy);
+          break;
+        default:
+          llvm_unreachable("NYI");
+        }
+      };
+      llvm::interleaveComma(args, outStream, printer);
+      outStream << '>';
+    }
+
+  } else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl()) {
+    if (typedefNameDecl->getDeclContext())
+      typedefNameDecl->printQualifiedName(outStream, policy);
+    else
+      typedefNameDecl->printName(outStream);
+  } else {
+    outStream << Builder.getUniqueAnonRecordName();
+  }
+
+  if (!suffix.empty())
+    outStream << suffix;
+
+  return std::string(typeName);
+}
+
+/// Return true if the specified type is already completely laid out.
+bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+  llvm::DenseMap<const Type *, mlir::cir::StructType>::const_iterator I =
+      recordDeclTypes.find(Ty);
+  return I != recordDeclTypes.end() && I->second.isComplete();
+}
+
+static bool
+isSafeToConvert(QualType T, CIRGenTypes &CGT,
+                llvm::SmallPtrSet<const RecordDecl *, 16> &AlreadyChecked);
+
+/// Return true if it is safe to convert the specified record decl to CIR and
+/// lay it out, false if doing so would cause us to get into a recursive
+/// compilation mess.
+static bool
+isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT,
+                llvm::SmallPtrSet<const RecordDecl *, 16> &AlreadyChecked) {
+  // If we have already checked this type (maybe the same type is used by-value
+  // multiple times in multiple structure fields), don't check again.
+  if (!AlreadyChecked.insert(RD).second)
+    return true;
+
+  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+  // If this type is already laid out, converting it is a noop.
+  if (CGT.isRecordLayoutComplete(Key))
+    return true;
+
+  // If this type is currently being laid out, we can't recursively compile it.
+  if (CGT.isRecordBeingLaidOut(Key))
+    return false;
+
+  // If this type would require laying out bases that are currently being laid
+  // out, don't do it. This includes virtual base classes which get laid out
+  // when a class is translated, even though they aren't embedded by-value into
+  // the class.
+  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+    for (const auto &I : CRD->bases())
+      if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
+                           AlreadyChecked))
+        return false;
+  }
+
+  // If this type would require laying out members that are currently being
+  // laid out, don't do it.
+  for (const auto *I : RD->fields())
+    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+      return false;
+
+  // If there are no problems, let's do it.
+  return true;
+}
+
+/// Return true if it is safe to convert this field type, which requires the
+/// structure elements contained by-value to all be recursively safe to
+/// convert.
+static bool
+isSafeToConvert(QualType T, CIRGenTypes &CGT,
+                llvm::SmallPtrSet<const RecordDecl *, 16> &AlreadyChecked) {
+  // Strip off atomic type sugar.
+  if (const auto *AT = T->getAs<AtomicType>())
+    T = AT->getValueType();
+
+  // If this is a record, check it.
+  if (const auto *RT = T->getAs<RecordType>())
+    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+  // If this is an array, check the elements, which are embedded inline.
+  if (const auto *AT = CGT.getContext().getAsArrayType(T))
+    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+  // Otherwise, there is no concern about transforming this. We only care about
+  // things that are contained by-value in a structure that can have another
+  // structure as a member.
+  return true;
+}
+
+// Return true if it is safe to convert the specified record decl to CIR and
+// lay it out, false if doing so would cause us to get into a recursive
+// compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT) {
+  // If no structs are being laid out, we can certainly do this one.
+  if (CGT.noRecordsBeingLaidOut())
+    return true;
+
+  llvm::SmallPtrSet<const RecordDecl *, 16> AlreadyChecked;
+  return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
+
+/// Lay out a tagged decl type like struct or union.
+mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) {
+  // TagDecl's are not necessarily unique, instead use the (clang) type
+  // connected to the decl.
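+  // Illustrative sketch (not from the original patch): given
+  //   struct S { struct T *t; }; // T only referenced through a pointer
+  //   struct T { struct S s; };  // S embedded by value
+  // converting T while S is still mid-layout is unsafe, so the code below
+  // parks T on DeferredRecords and finishes it once the outer-most record
+  // completes.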
+  const auto *key = Context.getTagDeclType(RD).getTypePtr();
+  mlir::cir::StructType entry = recordDeclTypes[key];
+
+  // Handle forward decl / incomplete types.
+  if (!entry) {
+    auto name = getRecordTypeName(RD, "");
+    entry = Builder.getIncompleteStructTy(name, RD);
+    recordDeclTypes[key] = entry;
+  }
+
+  RD = RD->getDefinition();
+  if (!RD || !RD->isCompleteDefinition() || entry.isComplete())
+    return entry;
+
+  // If converting this type would cause us to infinitely loop, don't do it!
+  if (!isSafeToConvert(RD, *this)) {
+    DeferredRecords.push_back(RD);
+    return entry;
+  }
+
+  // Okay, this is a definition of a type. Compile the implementation now.
+  bool InsertResult = RecordsBeingLaidOut.insert(key).second;
+  (void)InsertResult;
+  assert(InsertResult && "Recursively compiling a struct?");
+
+  // Force conversion of non-virtual base classes recursively.
+  if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(RD)) {
+    for (const auto &I : cxxRecordDecl->bases()) {
+      if (I.isVirtual())
+        continue;
+      convertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
+    }
+  }
+
+  // Layout fields.
+  std::unique_ptr<CIRGenRecordLayout> Layout = computeRecordLayout(RD, &entry);
+  recordDeclTypes[key] = entry;
+  CIRGenRecordLayouts[key] = std::move(Layout);
+
+  // We're done laying out this struct.
+  bool EraseResult = RecordsBeingLaidOut.erase(key);
+  (void)EraseResult;
+  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+  // If this struct blocked a FunctionType conversion, then recompute whatever
+  // was derived from that.
+  // FIXME: This is hugely overconservative.
+  if (SkippedLayout)
+    TypeCache.clear();
+
+  // If we're done converting the outer-most record, then convert any deferred
+  // structs as well.
+  if (RecordsBeingLaidOut.empty())
+    while (!DeferredRecords.empty())
+      convertRecordDeclType(DeferredRecords.pop_back_val());
+
+  return entry;
+}
+
+mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
+                                          bool forBitField) {
+  assert(!qualType->isConstantMatrixType() && "Matrix types NYI");
+
+  mlir::Type convertedType = ConvertType(qualType);
+
+  assert(!forBitField && "Bit fields NYI");
+
+  // If this is a bit-precise integer type in a bitfield representation, map
+  // this integer to the target-specified size.
+  if (forBitField && qualType->isBitIntType())
+    assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");
+
+  return convertedType;
+}
+
+mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
+  return *Builder.getContext();
+}
+
+mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
+  assert(QFT.isCanonical());
+  const Type *Ty = QFT.getTypePtr();
+  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
+  // First, check whether we can build the full function type. If the function
+  // type depends on an incomplete type (e.g. a struct or enum), we cannot
+  // lower the function type.
+  assert(isFuncTypeConvertible(FT) && "NYI");
+
+  // The function type can be built; call the appropriate routines to build it
+  const CIRGenFunctionInfo *FI;
+  if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
+    FI = &arrangeFreeFunctionType(
+        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
+  } else {
+    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
+    FI = &arrangeFreeFunctionType(
+        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
+  }
+
+  mlir::Type ResultType = nullptr;
+  // If there is something higher level prodding our CIRGenFunctionInfo, then
+  // don't recurse into it again.
+  assert(!FunctionsBeingProcessed.count(FI) && "NYI");
+
+  // Otherwise, we're good to go; go ahead and convert it.
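+  // (Sketch of the recursion this guards against: for
+  //    struct Node { void (*visit)(struct Node); };
+  // arranging the function type needs Node itself, which may still be
+  // mid-layout; classic CodeGen falls back to a dummy type in that case,
+  // while this path currently asserts NYI above.)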
+ ResultType = GetFunctionType(*FI); + + RecordsBeingLaidOut.erase(Ty); + + assert(!SkippedLayout && "Shouldn't have skipped anything yet"); + + if (RecordsBeingLaidOut.empty()) + while (!DeferredRecords.empty()) + convertRecordDeclType(DeferredRecords.pop_back_val()); + + return ResultType; +} + +/// Return true if the specified type in a function parameter or result position +/// can be converted to a CIR type at this point. This boils down to being +/// whether it is complete, as well as whether we've temporarily deferred +/// expanding the type because we're in a recursive context. +bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType Ty) { + // Some ABIs cannot have their member pointers represented in LLVM IR unless + // certain circumstances have been reached. + assert(!Ty->getAs() && "NYI"); + + // If this isn't a tagged type, we can convert it! + const TagType *TT = Ty->getAs(); + if (!TT) + return true; + + // Incomplete types cannot be converted. + if (TT->isIncompleteType()) + return false; + + // If this is an enum, then it is always safe to convert. + const RecordType *RT = dyn_cast(TT); + if (!RT) + return true; + + // Otherwise, we have to be careful. If it is a struct that we're in the + // process of expanding, then we can't convert the function type. That's ok + // though because we must be in a pointer context under the struct, so we can + // just convert it to a dummy type. + // + // We decide this by checking whether ConvertRecordDeclType returns us an + // opaque type for a struct that we know is defined. + return isSafeToConvert(RT->getDecl(), *this); +} + +/// Code to verify a given function type is complete, i.e. the return type and +/// all of the parameter types are complete. Also check to see if we are in a +/// RS_StructPointer context, and if so whether any struct types have been +/// pended. If so, we don't want to ask the ABI lowering code to handle a type +/// that cannot be converted to a CIR type. +bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *FT) { + if (!isFuncParamTypeConvertible(FT->getReturnType())) + return false; + + if (const auto *FPT = dyn_cast(FT)) + for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) + if (!isFuncParamTypeConvertible(FPT->getParamType(i))) + return false; + + return true; +} + +/// ConvertType - Convert the specified type to its MLIR form. +mlir::Type CIRGenTypes::ConvertType(QualType T) { + T = Context.getCanonicalType(T); + const Type *Ty = T.getTypePtr(); + + // For the device-side compilation, CUDA device builtin surface/texture types + // may be represented in different types. + assert(!Context.getLangOpts().CUDAIsDevice && "not implemented"); + + if (const auto *recordType = dyn_cast(T)) + return convertRecordDeclType(recordType->getDecl()); + + // See if type is already cached. + TypeCacheTy::iterator TCI = TypeCache.find(Ty); + // If type is found in map then use it. Otherwise, convert type T. + if (TCI != TypeCache.end()) + return TCI->second; + + // If we don't have it in the cache, convert it now. + mlir::Type ResultType = nullptr; + switch (Ty->getTypeClass()) { + case Type::Record: // Handled above. 
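+  // The TypeNodes.inc X-macro below expands to a `case` label for every
+  // non-canonical or dependent type class (e.g. Type::Typedef); all such
+  // sugar was stripped by the getCanonicalType call above, hence the
+  // unreachable.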
+#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical or dependent types aren't possible."); + + case Type::ArrayParameter: + llvm_unreachable("NYI"); + + case Type::Builtin: { + switch (cast(Ty)->getKind()) { + case BuiltinType::WasmExternRef: + case BuiltinType::SveBoolx2: + case BuiltinType::SveBoolx4: + case BuiltinType::SveCount: + llvm_unreachable("NYI"); + case BuiltinType::Void: + // TODO(cir): how should we model this? + ResultType = CGM.VoidTy; + break; + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + // TODO(cir): probably same as BuiltinType::Void + assert(0 && "not implemented"); + break; + + case BuiltinType::Bool: + ResultType = ::mlir::cir::BoolType::get(Builder.getContext()); + break; + + // Signed types. + case BuiltinType::Accum: + case BuiltinType::Char_S: + case BuiltinType::Fract: + case BuiltinType::Int: + case BuiltinType::Long: + case BuiltinType::LongAccum: + case BuiltinType::LongFract: + case BuiltinType::LongLong: + case BuiltinType::SChar: + case BuiltinType::Short: + case BuiltinType::ShortAccum: + case BuiltinType::ShortFract: + case BuiltinType::WChar_S: + // Saturated signed types. + case BuiltinType::SatAccum: + case BuiltinType::SatFract: + case BuiltinType::SatLongAccum: + case BuiltinType::SatLongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatShortFract: + ResultType = + mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + /*isSigned=*/true); + break; + // Unsigned types. + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Char8: + case BuiltinType::Char_U: + case BuiltinType::UAccum: + case BuiltinType::UChar: + case BuiltinType::UFract: + case BuiltinType::UInt: + case BuiltinType::ULong: + case BuiltinType::ULongAccum: + case BuiltinType::ULongFract: + case BuiltinType::ULongLong: + case BuiltinType::UShort: + case BuiltinType::UShortAccum: + case BuiltinType::UShortFract: + case BuiltinType::WChar_U: + // Saturated unsigned types. + case BuiltinType::SatUAccum: + case BuiltinType::SatUFract: + case BuiltinType::SatULongAccum: + case BuiltinType::SatULongFract: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUShortFract: + ResultType = + mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + /*isSigned=*/false); + break; + + case BuiltinType::Float16: + ResultType = Builder.getF16Type(); + break; + case BuiltinType::Half: + // Should be the same as above? + assert(0 && "not implemented"); + break; + case BuiltinType::BFloat16: + ResultType = Builder.getBF16Type(); + break; + case BuiltinType::Float: + ResultType = CGM.FloatTy; + break; + case BuiltinType::Double: + ResultType = CGM.DoubleTy; + break; + case BuiltinType::LongDouble: + ResultType = Builder.getLongDoubleTy(Context.getFloatTypeSemantics(T)); + break; + case BuiltinType::Float128: + case BuiltinType::Ibm128: + // FIXME: look at Context.getFloatTypeSemantics(T) and getTypeForFormat + // on LLVM codegen. + assert(0 && "not implemented"); + break; + + case BuiltinType::NullPtr: + // Add proper CIR type for it? 
this looks mostly useful for sema related + // things (like for overloads accepting void), for now, given that + // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model + // std::nullptr_t as !cir.ptr + ResultType = Builder.getVoidPtrTy(); + break; + + case BuiltinType::UInt128: + case BuiltinType::Int128: + assert(0 && "not implemented"); + // FIXME: ResultType = Builder.getIntegerType(128); + break; + +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id: +#include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + assert(0 && "not implemented"); + break; + case BuiltinType::SveInt8: + case BuiltinType::SveUint8: + case BuiltinType::SveInt8x2: + case BuiltinType::SveUint8x2: + case BuiltinType::SveInt8x3: + case BuiltinType::SveUint8x3: + case BuiltinType::SveInt8x4: + case BuiltinType::SveUint8x4: + case BuiltinType::SveInt16: + case BuiltinType::SveUint16: + case BuiltinType::SveInt16x2: + case BuiltinType::SveUint16x2: + case BuiltinType::SveInt16x3: + case BuiltinType::SveUint16x3: + case BuiltinType::SveInt16x4: + case BuiltinType::SveUint16x4: + case BuiltinType::SveInt32: + case BuiltinType::SveUint32: + case BuiltinType::SveInt32x2: + case BuiltinType::SveUint32x2: + case BuiltinType::SveInt32x3: + case BuiltinType::SveUint32x3: + case BuiltinType::SveInt32x4: + case BuiltinType::SveUint32x4: + case BuiltinType::SveInt64: + case BuiltinType::SveUint64: + case BuiltinType::SveInt64x2: + case BuiltinType::SveUint64x2: + case BuiltinType::SveInt64x3: + case BuiltinType::SveUint64x3: + case BuiltinType::SveInt64x4: + case BuiltinType::SveUint64x4: + case BuiltinType::SveBool: + case BuiltinType::SveFloat16: + case BuiltinType::SveFloat16x2: + case BuiltinType::SveFloat16x3: + case BuiltinType::SveFloat16x4: + case BuiltinType::SveFloat32: + case BuiltinType::SveFloat32x2: + case BuiltinType::SveFloat32x3: + case BuiltinType::SveFloat32x4: + case BuiltinType::SveFloat64: + case BuiltinType::SveFloat64x2: + case BuiltinType::SveFloat64x3: + case BuiltinType::SveFloat64x4: + case BuiltinType::SveBFloat16: + case BuiltinType::SveBFloat16x2: + case BuiltinType::SveBFloat16x3: + case BuiltinType::SveBFloat16x4: { + assert(0 && "not implemented"); + break; + } +#define PPC_VECTOR_TYPE(Name, Id, Size) \ + case BuiltinType::Id: \ + assert(0 && "not implemented"); \ + break; +#include "clang/Basic/PPCTypes.def" +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" + { + assert(0 && "not implemented"); + break; + } + case BuiltinType::Dependent: +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("Unexpected placeholder builtin type!"); + } + break; + } + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Unexpected undeduced type!"); + case Type::Complex: { + assert(0 && "not implemented"); + break; + } + case Type::LValueReference: + case Type::RValueReference: { + const ReferenceType *RTy = cast(Ty); + QualType ETy = RTy->getPointeeType(); + auto PointeeType = convertTypeForMem(ETy); + // TODO(cir): use Context.getTargetAddressSpace(ETy) on pointer + ResultType = + 
::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + assert(ResultType && "Cannot get pointer type?"); + break; + } + case Type::Pointer: { + const PointerType *PTy = cast(Ty); + QualType ETy = PTy->getPointeeType(); + assert(!ETy->isConstantMatrixType() && "not implemented"); + + mlir::Type PointeeType = ConvertType(ETy); + + // Treat effectively as a *i8. + // if (PointeeType->isVoidTy()) + // PointeeType = Builder.getI8Type(); + + // FIXME: add address specifier to cir::PointerType? + ResultType = + ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + assert(ResultType && "Cannot get pointer type?"); + break; + } + + case Type::VariableArray: { + const VariableArrayType *A = cast(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && + "FIXME: We only handle trivial array types so far!"); + // VLAs resolve to the innermost element type; this matches + // the return of alloca, and there isn't any obviously better choice. + ResultType = convertTypeForMem(A->getElementType()); + break; + } + case Type::IncompleteArray: { + const IncompleteArrayType *A = cast(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && + "FIXME: We only handle trivial array types so far!"); + // int X[] -> [0 x int], unless the element type is not sized. If it is + // unsized (e.g. an incomplete struct) just use [0 x i8]. + ResultType = convertTypeForMem(A->getElementType()); + if (!Builder.isSized(ResultType)) { + SkippedLayout = true; + ResultType = Builder.getUInt8Ty(); + } + ResultType = Builder.getArrayType(ResultType, 0); + break; + } + case Type::ConstantArray: { + const ConstantArrayType *A = cast(Ty); + auto EltTy = convertTypeForMem(A->getElementType()); + + // FIXME: In LLVM, "lower arrays of undefined struct type to arrays of + // i8 just to have a concrete type". Not sure this makes sense in CIR yet. + assert(Builder.isSized(EltTy) && "not implemented"); + ResultType = ::mlir::cir::ArrayType::get(Builder.getContext(), EltTy, + A->getSize().getZExtValue()); + break; + } + case Type::ExtVector: + case Type::Vector: { + const VectorType *V = cast(Ty); + auto ElementType = convertTypeForMem(V->getElementType()); + ResultType = ::mlir::cir::VectorType::get(Builder.getContext(), ElementType, + V->getNumElements()); + break; + } + case Type::ConstantMatrix: { + assert(0 && "not implemented"); + break; + } + case Type::FunctionNoProto: + case Type::FunctionProto: + ResultType = ConvertFunctionTypeInternal(T); + break; + case Type::ObjCObject: + assert(0 && "not implemented"); + break; + + case Type::ObjCInterface: { + assert(0 && "not implemented"); + break; + } + + case Type::ObjCObjectPointer: { + assert(0 && "not implemented"); + break; + } + + case Type::Enum: { + const EnumDecl *ED = cast(Ty)->getDecl(); + if (ED->isCompleteDefinition() || ED->isFixed()) + return ConvertType(ED->getIntegerType()); + // Return a placeholder 'i32' type. This can be changed later when the + // type is defined (see UpdateCompletedType), but is likely to be the + // "right" answer. 
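+    // (Illustrative: a forward-declared `enum Color` with no fixed underlying
+    // type has no integer type to consult yet, so it gets this 32-bit
+    // placeholder; UpdateCompletedType revisits the guess once the definition
+    // is seen.)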
+    ResultType = CGM.UInt32Ty;
+    break;
+  }
+
+  case Type::BlockPointer: {
+    assert(0 && "not implemented");
+    break;
+  }
+
+  case Type::MemberPointer: {
+    const auto *MPT = cast<MemberPointerType>(Ty);
+    assert(MPT->isMemberDataPointer() && "ptr-to-member-function is NYI");
+
+    auto memberTy = ConvertType(MPT->getPointeeType());
+    auto clsTy = ConvertType(QualType(MPT->getClass(), 0))
+                     .cast<mlir::cir::StructType>();
+    ResultType =
+        mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy);
+    break;
+  }
+
+  case Type::Atomic: {
+    QualType valueType = cast<AtomicType>(Ty)->getValueType();
+    ResultType = convertTypeForMem(valueType);
+
+    // Pad out to the inflated size if necessary.
+    uint64_t valueSize = Context.getTypeSize(valueType);
+    uint64_t atomicSize = Context.getTypeSize(Ty);
+    if (valueSize != atomicSize) {
+      llvm_unreachable("NYI");
+    }
+    break;
+  }
+  case Type::Pipe: {
+    assert(0 && "not implemented");
+    break;
+  }
+  case Type::BitInt: {
+    const auto *bitIntTy = cast<BitIntType>(Ty);
+    ResultType = mlir::cir::IntType::get(
+        Builder.getContext(), bitIntTy->getNumBits(), bitIntTy->isSigned());
+    break;
+  }
+  }
+
+  assert(ResultType && "Didn't convert a type?");
+
+  TypeCache[Ty] = ResultType;
+  return ResultType;
+}
+
+const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo(
+    CanQualType resultType, FnInfoOpts opts,
+    llvm::ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
+    llvm::ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
+    RequiredArgs required) {
+  assert(llvm::all_of(argTypes,
+                      [](CanQualType T) { return T.isCanonicalAsParam(); }));
+  bool instanceMethod = opts == FnInfoOpts::IsInstanceMethod;
+  bool chainCall = opts == FnInfoOpts::IsChainCall;
+
+  // Lookup or create unique function info.
+  llvm::FoldingSetNodeID ID;
+  CIRGenFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
+                              required, resultType, argTypes);
+
+  void *insertPos = nullptr;
+  CIRGenFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
+  if (FI)
+    return *FI;
+
+  unsigned CC = ClangCallConvToCIRCallConv(info.getCC());
+
+  // Construct the function info. We co-allocate the ArgInfos.
+  FI = CIRGenFunctionInfo::create(CC, instanceMethod, chainCall, info,
+                                  paramInfos, resultType, argTypes, required);
+  FunctionInfos.InsertNode(FI, insertPos);
+
+  bool inserted = FunctionsBeingProcessed.insert(FI).second;
+  (void)inserted;
+  assert(inserted && "Recursively being processed?");
+
+  // Compute ABI information.
+  assert(info.getCC() != clang::CallingConv::CC_SpirFunction && "NYI");
+  assert(info.getCC() != CC_Swift && info.getCC() != CC_SwiftAsync &&
+         "Swift NYI");
+  getABIInfo().computeInfo(*FI);
+
+  // Loop over all of the computed argument and return value info. If any of
+  // them are direct or extend without a specified coerce type, specify the
+  // default now.
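+  // (What "coerce type" means here, as a sketch assuming an x86-64-style ABI:
+  // a small struct classified as Direct may be coerced to a single 64-bit
+  // integer for passing; when the ABI code leaves the coerce type unset, the
+  // default is simply the converted Clang type below.)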
+ ABIArgInfo &retInfo = FI->getReturnInfo(); + if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) + retInfo.setCoerceToType(ConvertType(FI->getReturnType())); + + for (auto &I : FI->arguments()) + if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) + I.info.setCoerceToType(ConvertType(I.type)); + + bool erased = FunctionsBeingProcessed.erase(FI); + (void)erased; + assert(erased && "Not in set?"); + + return *FI; +} + +const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { + assert(!dyn_cast(GD.getDecl()) && + "This is reported as a FIXME in LLVM codegen"); + const auto *FD = cast(GD.getDecl()); + + if (isa(GD.getDecl()) || + isa(GD.getDecl())) + return arrangeCXXStructorDeclaration(GD); + + return arrangeFunctionDeclaration(FD); +} + +// When we find the full definition for a TagDecl, replace the 'opaque' type we +// previously made for it if applicable. +void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { + // If this is an enum being completed, then we flush all non-struct types + // from the cache. This allows function types and other things that may be + // derived from the enum to be recomputed. + if (const auto *ED = dyn_cast(TD)) { + // Only flush the cache if we've actually already converted this type. + if (TypeCache.count(ED->getTypeForDecl())) { + // Okay, we formed some types based on this. We speculated that the enum + // would be lowered to i32, so we only need to flush the cache if this + // didn't happen. + if (!ConvertType(ED->getIntegerType()).isInteger(32)) + TypeCache.clear(); + } + // If necessary, provide the full definition of a type only used with a + // declaration so far. + assert(!UnimplementedFeature::generateDebugInfo()); + return; + } + + // If we completed a RecordDecl that we previously used and converted to an + // anonymous type, then go ahead and complete it now. + const auto *RD = cast(TD); + if (RD->isDependentType()) + return; + + // Only complete if we converted it already. If we haven't converted it yet, + // we'll just do it lazily. + if (recordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) + convertRecordDeclType(RD); + + // If necessary, provide the full definition of a type only used with a + // declaration so far. + if (CGM.getModuleDebugInfo()) + llvm_unreachable("NYI"); +} + +/// Return record layout info for the given record decl. +const CIRGenRecordLayout & +CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { + const auto *Key = Context.getTagDeclType(RD).getTypePtr(); + + auto I = CIRGenRecordLayouts.find(Key); + if (I != CIRGenRecordLayouts.end()) + return *I->second; + + // Compute the type information. + convertRecordDeclType(RD); + + // Now try again. + I = CIRGenRecordLayouts.find(Key); + + assert(I != CIRGenRecordLayouts.end() && + "Unable to find record layout information for type"); + return *I->second; +} + +bool CIRGenTypes::isPointerZeroInitializable(clang::QualType T) { + assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type"); + return isZeroInitializable(T); +} + +bool CIRGenTypes::isZeroInitializable(QualType T) { + if (T->getAs()) + return Context.getTargetNullPointerValue(T) == 0; + + if (const auto *AT = Context.getAsArrayType(T)) { + if (isa(AT)) + return true; + if (const auto *CAT = dyn_cast(AT)) + if (Context.getConstantArrayElementCount(CAT) == 0) + return true; + T = Context.getBaseElementType(T); + } + + // Records are non-zero-initializable if they contain any + // non-zero-initializable subobjects. 
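+  // (Illustrative case: under the Itanium ABI a null pointer to data member
+  // is represented as -1 rather than 0, so a record containing an `int S::*`
+  // field is not zero-initializable by an all-zero bit pattern.)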
+  if (const RecordType *RT = T->getAs<RecordType>()) {
+    const RecordDecl *RD = RT->getDecl();
+    return isZeroInitializable(RD);
+  }
+
+  // We have to ask the ABI about member pointers.
+  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+    llvm_unreachable("NYI");
+
+  // Everything else is okay.
+  return true;
+}
+
+bool CIRGenTypes::isZeroInitializable(const RecordDecl *RD) {
+  return getCIRGenRecordLayout(RD).isZeroInitializable();
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h
new file mode 100644
index 000000000000..51350c9ea70e
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h
@@ -0,0 +1,280 @@
+//===--- CIRGenTypes.h - Type translation for CIR CodeGen -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> CIR type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
+
+#include "ABIInfo.h"
+#include "CIRGenCall.h"
+#include "CIRGenFunctionInfo.h"
+#include "CIRGenRecordLayout.h"
+
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/ABI.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/ADT/SmallPtrSet.h"
+
+#include "mlir/IR/MLIRContext.h"
+
+#include <memory>
+
+namespace llvm {
+class FunctionType;
+class DataLayout;
+class Type;
+class LLVMContext;
+class StructType;
+} // namespace llvm
+
+namespace clang {
+class ASTContext;
+template <typename> class CanQual;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CodeGenOptions;
+class FieldDecl;
+class FunctionProtoType;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
+class PointerType;
+class QualType;
+class RecordDecl;
+class TagDecl;
+class TargetInfo;
+class Type;
+typedef CanQual<Type> CanQualType;
+class GlobalDecl;
+
+} // end namespace clang
+
+namespace mlir {
+class Type;
+namespace cir {
+class StructType;
+} // namespace cir
+} // namespace mlir
+
+namespace cir {
+class CallArgList;
+class CIRGenCXXABI;
+class CIRGenModule;
+class CIRGenFunctionInfo;
+class CIRGenBuilderTy;
+
+/// This class organizes the cross-module state that is used while lowering
+/// AST types to CIR types.
+class CIRGenTypes {
+  clang::ASTContext &Context;
+  cir::CIRGenBuilderTy &Builder;
+  CIRGenModule &CGM;
+  const clang::TargetInfo &Target;
+  CIRGenCXXABI &TheCXXABI;
+
+  // This should not be moved earlier, since its initialization depends on some
+  // of the previous reference members being already initialized
+  const ABIInfo &TheABIInfo;
+
+  /// Contains the CIR record layout information for any converted RecordDecl.
+  llvm::DenseMap<const clang::Type *, std::unique_ptr<CIRGenRecordLayout>>
+      CIRGenRecordLayouts;
+
+  /// Contains the CIR type for any converted RecordDecl
+  llvm::DenseMap<const clang::Type *, mlir::cir::StructType> recordDeclTypes;
+
+  /// Hold memoized CIRGenFunctionInfo results
+  llvm::FoldingSet<CIRGenFunctionInfo> FunctionInfos;
+
+  /// This set keeps track of records that we're currently converting to a CIR
+  /// type. For example, when converting:
+  /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B'
+  /// types will be in this set.
+  llvm::SmallPtrSet<const clang::Type *, 32> RecordsBeingLaidOut;
+
+  llvm::SmallPtrSet<const CIRGenFunctionInfo *, 4> FunctionsBeingProcessed;
+
+  /// True if we didn't lay out a function because we were inside a recursive
+  /// struct conversion.
+  bool SkippedLayout;
+
+  llvm::SmallVector<const clang::RecordDecl *, 8> DeferredRecords;
+
+  /// Helper for ConvertType.
+  mlir::Type ConvertFunctionTypeInternal(clang::QualType FT);
+
+public:
+  CIRGenTypes(CIRGenModule &cgm);
+  ~CIRGenTypes();
+
+  cir::CIRGenBuilderTy &getBuilder() const { return Builder; }
+  CIRGenModule &getModule() const { return CGM; }
+
+  /// Utility to check whether a function type can be converted to a CIR type
+  /// (i.e. doesn't depend on an incomplete tag type).
+  bool isFuncTypeConvertible(const clang::FunctionType *FT);
+  bool isFuncParamTypeConvertible(clang::QualType Ty);
+
+  /// Convert a clang calling convention to a CIR calling convention.
+  unsigned ClangCallConvToCIRCallConv(clang::CallingConv CC);
+
+  /// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR
+  /// qualification.
+  clang::CanQualType DeriveThisType(const clang::CXXRecordDecl *RD,
+                                    const clang::CXXMethodDecl *MD);
+
+  /// This map caches the mlir::Type corresponding to each converted
+  /// clang::Type.
+  using TypeCacheTy = llvm::DenseMap<const clang::Type *, mlir::Type>;
+  TypeCacheTy TypeCache;
+
+  clang::ASTContext &getContext() const { return Context; }
+  mlir::MLIRContext &getMLIRContext() const;
+
+  bool isRecordLayoutComplete(const clang::Type *Ty) const;
+  bool noRecordsBeingLaidOut() const { return RecordsBeingLaidOut.empty(); }
+  bool isRecordBeingLaidOut(const clang::Type *Ty) const {
+    return RecordsBeingLaidOut.count(Ty);
+  }
+
+  /// Return whether a type can be zero-initialized (in the C++ sense) with an
+  /// LLVM zeroinitializer.
+  bool isZeroInitializable(clang::QualType T);
+
+  /// Check if the pointer type can be zero-initialized (in the C++ sense)
+  /// with an LLVM zeroinitializer.
+  bool isPointerZeroInitializable(clang::QualType T);
+
+  /// Return whether a record type can be zero-initialized (in the C++ sense)
+  /// with an LLVM zeroinitializer.
+  bool isZeroInitializable(const clang::RecordDecl *RD);
+
+  const ABIInfo &getABIInfo() const { return TheABIInfo; }
+  CIRGenCXXABI &getCXXABI() const { return TheCXXABI; }
+
+  /// Convert type T into a mlir::Type.
+  mlir::Type ConvertType(clang::QualType T);
+
+  mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl);
+
+  std::unique_ptr<CIRGenRecordLayout>
+  computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType *Ty);
+
+  std::string getRecordTypeName(const clang::RecordDecl *,
+                                llvm::StringRef suffix);
+
+  /// Determine if a C++ inheriting constructor should have parameters matching
+  /// those of its inherited constructor.
+  bool inheritingCtorHasParams(const clang::InheritedConstructor &Inherited,
+                               clang::CXXCtorType Type);
+
+  const CIRGenRecordLayout &getCIRGenRecordLayout(const clang::RecordDecl *RD);
+
+  /// Convert type T into an mlir::Type. This differs from
+  /// convertType in that it is used to convert to the memory representation
+  /// for a type. For example, the scalar representation for _Bool is i1, but
+  /// the memory representation is usually i8 or i32, depending on the target.
+  // TODO: convert this comment to account for MLIR's equivalence
+  mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false);
+
+  /// Get the CIR function type for \arg Info.
+  mlir::cir::FuncType GetFunctionType(const CIRGenFunctionInfo &Info);
+
+  mlir::cir::FuncType GetFunctionType(clang::GlobalDecl GD);
+
+  /// Get the CIR function type for use in a vtable, given a CXXMethodDecl. If
+  /// the method has an incomplete return type, and/or incomplete argument
+  /// types, this will return the opaque type.
+  mlir::cir::FuncType GetFunctionTypeForVTable(clang::GlobalDecl GD);
+
+  // The arrangement methods are split into three families:
+  //   - those meant to drive the signature and prologue/epilogue
+  //     of a function declaration or definition,
+  //   - those meant for the computation of the CIR type for an abstract
+  //     appearance of a function, and
+  //   - those meant for performing the CIR-generation of a call.
+  // They differ mainly in how they deal with optional (i.e. variadic)
+  // arguments, as well as unprototyped functions.
+  //
+  // Key points:
+  // - The CIRGenFunctionInfo for emitting a specific call site must include
+  //   entries for the optional arguments.
+  // - The function type used at the call site must reflect the formal
+  //   signature of the declaration being called, or else the call will go
+  //   awry.
+  // - For the most part, unprototyped functions are called by casting to a
+  //   formal signature inferred from the specific argument types used at the
+  //   call-site. However, some targets (e.g. x86-64) screw with this for
+  //   compatibility reasons.
+
+  const CIRGenFunctionInfo &arrangeGlobalDeclaration(clang::GlobalDecl GD);
+
+  /// UpdateCompletedType - when we find the full definition for a TagDecl,
+  /// replace the 'opaque' type we previously made for it if applicable.
+  void UpdateCompletedType(const clang::TagDecl *TD);
+
+  /// Free functions are functions that are compatible with an ordinary C
+  /// function pointer type.
+  const CIRGenFunctionInfo &
+  arrangeFunctionDeclaration(const clang::FunctionDecl *FD);
+
+  const CIRGenFunctionInfo &
+  arrangeBuiltinFunctionCall(clang::QualType resultType,
+                             const CallArgList &args);
+
+  const CIRGenFunctionInfo &arrangeCXXConstructorCall(
+      const CallArgList &Args, const clang::CXXConstructorDecl *D,
+      clang::CXXCtorType CtorKind, unsigned ExtraPrefixArgs,
+      unsigned ExtraSuffixArgs, bool PassProtoArgs = true);
+
+  const CIRGenFunctionInfo &
+  arrangeCXXMethodCall(const CallArgList &args,
+                       const clang::FunctionProtoType *type,
+                       RequiredArgs required, unsigned numPrefixArgs);
+
+  /// C++ methods have some special rules and also have implicit parameters.
+  const CIRGenFunctionInfo &
+  arrangeCXXMethodDeclaration(const clang::CXXMethodDecl *MD);
+  const CIRGenFunctionInfo &arrangeCXXStructorDeclaration(clang::GlobalDecl GD);
+
+  const CIRGenFunctionInfo &
+  arrangeCXXMethodType(const clang::CXXRecordDecl *RD,
+                       const clang::FunctionProtoType *FTP,
+                       const clang::CXXMethodDecl *MD);
+
+  const CIRGenFunctionInfo &
+  arrangeFreeFunctionCall(const CallArgList &Args,
+                          const clang::FunctionType *Ty, bool ChainCall);
+
+  const CIRGenFunctionInfo &
+  arrangeFreeFunctionType(clang::CanQual<clang::FunctionProtoType> Ty);
+
+  const CIRGenFunctionInfo &
+  arrangeFreeFunctionType(clang::CanQual<clang::FunctionNoProtoType> FTNP);
+
+  /// "Arrange" the CIR information for a call or type with the given
+  /// signature. This is largely an internal method; other clients
+  /// should use one of the above routines, which ultimately defer to
+  /// this.
+  ///
+  /// \param argTypes - must all actually be canonical as params
+  const CIRGenFunctionInfo &arrangeCIRFunctionInfo(
+      clang::CanQualType returnType, FnInfoOpts opts,
+      llvm::ArrayRef<clang::CanQualType> argTypes,
+      clang::FunctionType::ExtInfo info,
+      llvm::ArrayRef<clang::FunctionProtoType::ExtParameterInfo> paramInfos,
+      RequiredArgs args);
+};
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
new file mode 100644
index 000000000000..fb5a7ac876b9
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -0,0 +1,584 @@
+//===--- CIRGenVTables.cpp - Emit CIR Code for C++ vtables ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "mlir/IR/Attributes.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/VTTBuilder.h"
+#include "clang/Basic/CodeGenOptions.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/ConstantInitBuilder.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace cir;
+
+CIRGenVTables::CIRGenVTables(CIRGenModule &CGM)
+    : CGM(CGM), VTContext(CGM.getASTContext().getVTableContext()) {}
+
+static bool UseRelativeLayout(const CIRGenModule &CGM) {
+  return CGM.getTarget().getCXXABI().isItaniumFamily() &&
+         CGM.getItaniumVTableContext().isRelativeLayout();
+}
+
+bool CIRGenVTables::useRelativeLayout() const { return UseRelativeLayout(CGM); }
+
+mlir::Type CIRGenModule::getVTableComponentType() {
+  mlir::Type ptrTy = builder.getUInt8PtrTy();
+  if (UseRelativeLayout(*this))
+    ptrTy = builder.getUInt32PtrTy();
+  return ptrTy;
+}
+
+mlir::Type CIRGenVTables::getVTableComponentType() {
+  return CGM.getVTableComponentType();
+}
+
+mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) {
+  SmallVector<mlir::Type, 4> tys;
+  auto ctx = CGM.getBuilder().getContext();
+  auto componentType = getVTableComponentType();
+  for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
+    tys.push_back(
+        mlir::cir::ArrayType::get(ctx, componentType, layout.getVTableSize(i)));
+
+  // FIXME(cir): should VTableLayout be encoded like we do for some
+  // AST nodes?
+  return CGM.getBuilder().getAnonStructTy(tys, /*incomplete=*/false);
+}
+
+/// At this point in the translation unit, does it appear that we can
+/// rely on the vtable being defined elsewhere in the program?
+///
+/// The response is really only definitive when called at the end of
+/// the translation unit.
+///
+/// The only semantic restriction here is that the object file should
+/// not contain a vtable definition when that vtable is defined
+/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
+/// vtables when unnecessary.
+/// TODO(cir): this should be merged into common AST helper for codegen.
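+///
+/// Illustrative example (hypothetical class name): given
+///   extern template class Widget<int>; // explicit instantiation declaration
+/// where Widget has virtual functions, the vtable for Widget<int> can be
+/// assumed to live in whichever TU provides the corresponding explicit
+/// instantiation definition.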
+bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *RD) { + assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable."); + + // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't + // emit them even if there is an explicit template instantiation. + if (CGM.getTarget().getCXXABI().isMicrosoft()) + return false; + + // If we have an explicit instantiation declaration (and not a + // definition), the vtable is defined elsewhere. + TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind(); + if (TSK == TSK_ExplicitInstantiationDeclaration) + return true; + + // Otherwise, if the class is an instantiated template, the + // vtable must be defined here. + if (TSK == TSK_ImplicitInstantiation || + TSK == TSK_ExplicitInstantiationDefinition) + return false; + + // Otherwise, if the class doesn't have a key function (possibly + // anymore), the vtable must be defined here. + const CXXMethodDecl *keyFunction = + CGM.getASTContext().getCurrentKeyFunction(RD); + if (!keyFunction) + return false; + + // Otherwise, if we don't have a definition of the key function, the + // vtable must be defined somewhere else. + return !keyFunction->hasBody(); +} + +static bool shouldEmitAvailableExternallyVTable(const CIRGenModule &CGM, + const CXXRecordDecl *RD) { + return CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCXXABI().canSpeculativelyEmitVTable(RD); +} + +/// Given that we're currently at the end of the translation unit, and +/// we've emitted a reference to the vtable for this class, should +/// we define that vtable? +static bool shouldEmitVTableAtEndOfTranslationUnit(CIRGenModule &CGM, + const CXXRecordDecl *RD) { + // If vtable is internal then it has to be done. + if (!CGM.getVTables().isVTableExternal(RD)) + return true; + + // If it's external then maybe we will need it as available_externally. + return shouldEmitAvailableExternallyVTable(CGM, RD); +} + +/// Given that at some point we emitted a reference to one or more +/// vtables, and that we are now at the end of the translation unit, +/// decide whether we should emit them. +void CIRGenModule::buildDeferredVTables() { +#ifndef NDEBUG + // Remember the size of DeferredVTables, because we're going to assume + // that this entire operation doesn't modify it. 
+  size_t savedSize = DeferredVTables.size();
+#endif
+
+  for (const CXXRecordDecl *RD : DeferredVTables)
+    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD)) {
+      VTables.GenerateClassData(RD);
+    } else if (shouldOpportunisticallyEmitVTables()) {
+      llvm_unreachable("NYI");
+    }
+
+  assert(savedSize == DeferredVTables.size() &&
+         "deferred extra vtables during vtable emission?");
+  DeferredVTables.clear();
+}
+
+void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
+  assert(!UnimplementedFeature::generateDebugInfo());
+
+  if (RD->getNumVBases())
+    CGM.getCXXABI().emitVirtualInheritanceTables(RD);
+
+  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
+}
+
+static void AddPointerLayoutOffset(CIRGenModule &CGM,
+                                   ConstantArrayBuilder &builder,
+                                   CharUnits offset) {
+  builder.add(mlir::cir::ConstPtrAttr::get(CGM.getBuilder().getContext(),
+                                           CGM.getBuilder().getUInt8PtrTy(),
+                                           offset.getQuantity()));
+}
+
+static void AddRelativeLayoutOffset(CIRGenModule &CGM,
+                                    ConstantArrayBuilder &builder,
+                                    CharUnits offset) {
+  llvm_unreachable("NYI");
+  // builder.add(llvm::ConstantInt::get(CGM.Int32Ty, offset.getQuantity()));
+}
+
+void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
+                                       const VTableLayout &layout,
+                                       unsigned componentIndex,
+                                       mlir::Attribute rtti,
+                                       unsigned &nextVTableThunkIndex,
+                                       unsigned vtableAddressPoint,
+                                       bool vtableHasLocalLinkage) {
+  auto &component = layout.vtable_components()[componentIndex];
+
+  auto addOffsetConstant =
+      useRelativeLayout() ? AddRelativeLayoutOffset : AddPointerLayoutOffset;
+
+  switch (component.getKind()) {
+  case VTableComponent::CK_VCallOffset:
+    return addOffsetConstant(CGM, builder, component.getVCallOffset());
+
+  case VTableComponent::CK_VBaseOffset:
+    return addOffsetConstant(CGM, builder, component.getVBaseOffset());
+
+  case VTableComponent::CK_OffsetToTop:
+    return addOffsetConstant(CGM, builder, component.getOffsetToTop());
+
+  case VTableComponent::CK_RTTI:
+    if (useRelativeLayout()) {
+      llvm_unreachable("NYI");
+      // return addRelativeComponent(builder, rtti, vtableAddressPoint,
+      //                             vtableHasLocalLinkage,
+      //                             /*isCompleteDtor=*/false);
+    } else {
+      assert((rtti.isa<mlir::cir::GlobalViewAttr>() ||
+              rtti.isa<mlir::cir::ConstPtrAttr>()) &&
+             "expected GlobalViewAttr or ConstPtrAttr");
+      return builder.add(rtti);
+    }
+
+  case VTableComponent::CK_FunctionPointer:
+  case VTableComponent::CK_CompleteDtorPointer:
+  case VTableComponent::CK_DeletingDtorPointer: {
+    GlobalDecl GD = component.getGlobalDecl();
+
+    if (CGM.getLangOpts().CUDA) {
+      llvm_unreachable("NYI");
+    }
+
+    [[maybe_unused]] auto getSpecialVirtualFn =
+        [&](StringRef name) -> mlir::Attribute {
+      // FIXME(PR43094): When merging comdat groups, lld can select a local
+      // symbol as the signature symbol even though it cannot be accessed
+      // outside that symbol's TU. The relative vtables ABI would make
+      // __cxa_pure_virtual and __cxa_deleted_virtual local symbols, and
+      // depending on link order, the comdat groups could resolve to the one
+      // with the local symbol. As a temporary solution, fill these components
+      // with zero. We shouldn't be calling these in the first place anyway.
+      if (useRelativeLayout())
+        llvm_unreachable("NYI");
+
+      // For NVPTX devices in OpenMP emit special functions as null pointers,
+      // otherwise linking ends up with unresolved references.
+ if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsTargetDevice && + CGM.getTriple().isNVPTX()) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + // llvm::FunctionType *fnTy = + // llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); + // llvm::Constant *fn = cast( + // CGM.CreateRuntimeFunction(fnTy, name).getCallee()); + // if (auto f = dyn_cast(fn)) + // f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); + // return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy); + }; + + mlir::cir::FuncOp fnPtr; + // Pure virtual member functions. + if (cast(GD.getDecl())->isPureVirtual()) { + llvm_unreachable("NYI"); + // if (!PureVirtualFn) + // PureVirtualFn = + // getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName()); + // fnPtr = PureVirtualFn; + + // Deleted virtual member functions. + } else if (cast(GD.getDecl())->isDeleted()) { + llvm_unreachable("NYI"); + // if (!DeletedVirtualFn) + // DeletedVirtualFn = + // getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName()); + // fnPtr = DeletedVirtualFn; + + // Thunks. + } else if (nextVTableThunkIndex < layout.vtable_thunks().size() && + layout.vtable_thunks()[nextVTableThunkIndex].first == + componentIndex) { + llvm_unreachable("NYI"); + // auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second; + + // nextVTableThunkIndex++; + // fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true); + + // Otherwise we can use the method definition directly. + } else { + auto fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); + fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); + } + + if (useRelativeLayout()) { + llvm_unreachable("NYI"); + } else { + return builder.add(mlir::cir::GlobalViewAttr::get( + CGM.getBuilder().getUInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()))); + } + } + + case VTableComponent::CK_UnusedFunctionPointer: + if (useRelativeLayout()) + llvm_unreachable("NYI"); + else { + llvm_unreachable("NYI"); + // return builder.addNullPointer(CGM.Int8PtrTy); + } + } + + llvm_unreachable("Unexpected vtable component kind"); +} + +void CIRGenVTables::createVTableInitializer(ConstantStructBuilder &builder, + const VTableLayout &layout, + mlir::Attribute rtti, + bool vtableHasLocalLinkage) { + auto componentType = getVTableComponentType(); + + const auto &addressPoints = layout.getAddressPointIndices(); + unsigned nextVTableThunkIndex = 0; + for (unsigned vtableIndex = 0, endIndex = layout.getNumVTables(); + vtableIndex != endIndex; ++vtableIndex) { + auto vtableElem = builder.beginArray(componentType); + + size_t vtableStart = layout.getVTableOffset(vtableIndex); + size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex); + for (size_t componentIndex = vtableStart; componentIndex < vtableEnd; + ++componentIndex) { + addVTableComponent(vtableElem, layout, componentIndex, rtti, + nextVTableThunkIndex, addressPoints[vtableIndex], + vtableHasLocalLinkage); + } + vtableElem.finishAndAddTo(rtti.getContext(), builder); + } +} + +/// Compute the required linkage of the vtable for the given class. +/// +/// Note that we only call this at the end of the translation unit. +mlir::cir::GlobalLinkageKind +CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { + if (!RD->isExternallyVisible()) + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + // We're at the end of the translation unit, so the current key + // function is fully correct. 
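+  // (Key-function recap, as a sketch: for `struct S { virtual void f(); };`
+  // with `void S::f() {}` defined in exactly one TU, the Itanium ABI emits
+  // the strong vtable definition in that TU; the switch below refines this
+  // for inline key functions and template instantiations.)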
+ const CXXMethodDecl *keyFunction = astCtx.getCurrentKeyFunction(RD); + if (keyFunction && !RD->hasAttr()) { + // If this class has a key function, use that to determine the + // linkage of the vtable. + const FunctionDecl *def = nullptr; + if (keyFunction->hasBody(def)) + keyFunction = cast(def); + + switch (keyFunction->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + assert( + (def || codeGenOpts.OptimizationLevel > 0 || + codeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) && + "Shouldn't query vtable linkage without key function, " + "optimizations, or debug info"); + if (!def && codeGenOpts.OptimizationLevel > 0) + return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + + if (keyFunction->isInlined()) + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + return mlir::cir::GlobalLinkageKind::ExternalLinkage; + + case TSK_ImplicitInstantiation: + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + case TSK_ExplicitInstantiationDefinition: + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::WeakODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + case TSK_ExplicitInstantiationDeclaration: + llvm_unreachable("Should not have been asked to emit this"); + } + } + + // -fapple-kext mode does not support weak linkage, so we must use + // internal linkage. + if (astCtx.getLangOpts().AppleKext) + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + auto DiscardableODRLinkage = mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + auto NonDiscardableODRLinkage = mlir::cir::GlobalLinkageKind::WeakODRLinkage; + if (RD->hasAttr()) { + // Cannot discard exported vtables. + DiscardableODRLinkage = NonDiscardableODRLinkage; + } else if (RD->hasAttr()) { + // Imported vtables are available externally. + DiscardableODRLinkage = + mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + NonDiscardableODRLinkage = + mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + } + + switch (RD->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + case TSK_ImplicitInstantiation: + return DiscardableODRLinkage; + + case TSK_ExplicitInstantiationDeclaration: { + // Explicit instantiations in MSVC do not provide vtables, so we must emit + // our own. + if (getTarget().getCXXABI().isMicrosoft()) + return DiscardableODRLinkage; + auto r = shouldEmitAvailableExternallyVTable(*this, RD) + ? mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage + : mlir::cir::GlobalLinkageKind::ExternalLinkage; + assert(r == mlir::cir::GlobalLinkageKind::ExternalLinkage && + "available external NYI"); + return r; + } + + case TSK_ExplicitInstantiationDefinition: + return NonDiscardableODRLinkage; + } + + llvm_unreachable("Invalid TemplateSpecializationKind!"); +} + +mlir::cir::GlobalOp +getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, + const CXXRecordDecl *MostDerivedClass, + const VTTVTable &vtable, + mlir::cir::GlobalLinkageKind linkage, + VTableLayout::AddressPointsMapTy &addressPoints) { + if (vtable.getBase() == MostDerivedClass) { + assert(vtable.getBaseOffset().isZero() && + "Most derived class vtable must have a zero offset!"); + // This is a regular vtable. 
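+    // (Background, as a sketch: only the most-derived-class case is handled
+    // here; "construction vtables" for base subobjects of classes with
+    // virtual bases would take the NYI path below.)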
+ return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits()); + } + + llvm_unreachable("generateConstructionVTable NYI"); +} + +mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { + assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT"); + + SmallString<256> OutName; + llvm::raw_svector_ostream Out(OutName); + cast(CGM.getCXXABI().getMangleContext()) + .mangleCXXVTT(RD, Out); + StringRef Name = OutName.str(); + + // This will also defer the definition of the VTT. + (void)CGM.getCXXABI().getAddrOfVTable(RD, CharUnits()); + + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/false); + + auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); + auto Align = + CGM.getDataLayout().getABITypeAlign(CGM.getBuilder().getUInt8PtrTy()); + auto VTT = CGM.createOrReplaceCXXRuntimeVariable( + CGM.getLoc(RD->getSourceRange()), Name, ArrayType, + mlir::cir::GlobalLinkageKind::ExternalLinkage, + CharUnits::fromQuantity(Align)); + CGM.setGVProperties(VTT, RD); + return VTT; +} + +/// Emit the definition of the given vtable. +void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, + mlir::cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD) { + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/true); + + auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); + + SmallVector VTables; + SmallVector VTableAddressPoints; + for (const VTTVTable *i = Builder.getVTTVTables().begin(), + *e = Builder.getVTTVTables().end(); + i != e; ++i) { + VTableAddressPoints.push_back(VTableAddressPointsMapTy()); + VTables.push_back(getAddrOfVTTVTable(*this, CGM, RD, *i, Linkage, + VTableAddressPoints.back())); + } + + SmallVector VTTComponents; + for (const VTTComponent *i = Builder.getVTTComponents().begin(), + *e = Builder.getVTTComponents().end(); + i != e; ++i) { + const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex]; + mlir::cir::GlobalOp VTable = VTables[i->VTableIndex]; + VTableLayout::AddressPointLocation AddressPoint; + if (VTTVT.getBase() == RD) { + // Just get the address point for the regular vtable. + AddressPoint = + getItaniumVTableContext().getVTableLayout(RD).getAddressPoint( + i->VTableBase); + } else { + AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase); + assert(AddressPoint.AddressPointIndex != 0 && + "Did not find ctor vtable address point!"); + } + + mlir::Attribute Idxs[3] = { + CGM.getBuilder().getI32IntegerAttr(0), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.VTableIndex), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.AddressPointIndex), + }; + + auto Indices = mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Idxs); + auto Init = CGM.getBuilder().getGlobalViewAttr( + CGM.getBuilder().getUInt8PtrTy(), VTable, Indices); + + VTTComponents.push_back(Init); + } + + auto Init = CGM.getBuilder().getConstArray( + mlir::ArrayAttr::get(CGM.getBuilder().getContext(), VTTComponents), + ArrayType); + + VTT.setInitialValueAttr(Init); + + // Set the correct linkage. 
+ VTT.setLinkage(Linkage); + mlir::SymbolTable::setSymbolVisibility(VTT, + CIRGenModule::getMLIRVisibility(VTT)); + + if (CGM.supportsCOMDAT() && VTT.isWeakForLinker()) { + assert(!UnimplementedFeature::setComdat()); + } +} + +void CIRGenVTables::buildThunks(GlobalDecl GD) { + const CXXMethodDecl *MD = + cast(GD.getDecl())->getCanonicalDecl(); + + // We don't need to generate thunks for the base destructor. + if (isa(MD) && GD.getDtorType() == Dtor_Base) + return; + + const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector = + VTContext->getThunkInfo(GD); + + if (!ThunkInfoVector) + return; + + for ([[maybe_unused]] const ThunkInfo &Thunk : *ThunkInfoVector) + llvm_unreachable("NYI"); +} + +bool CIRGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) { + if (RD->hasAttr() || RD->hasAttr() || + RD->hasAttr() || RD->hasAttr()) + return true; + + if (!getCodeGenOpts().LTOVisibilityPublicStd) + return false; + + const DeclContext *DC = RD; + while (true) { + auto *D = cast(DC); + DC = DC->getParent(); + if (isa(DC->getRedeclContext())) { + if (auto *ND = dyn_cast(D)) + if (const IdentifierInfo *II = ND->getIdentifier()) + if (II->isStr("std") || II->isStr("stdext")) + return true; + break; + } + } + + return false; +} + +bool CIRGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) { + LinkageInfo LV = RD->getLinkageAndVisibility(); + if (!isExternallyVisible(LV.getLinkage())) + return true; + + if (!getTriple().isOSBinFormatCOFF() && + LV.getVisibility() != HiddenVisibility) + return false; + + return !AlwaysHasLTOVisibilityPublic(RD); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h new file mode 100644 index 000000000000..e92f60394270 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -0,0 +1,178 @@ +//===--- CIRGenVTables.h - Emit LLVM Code for C++ vtables -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation of virtual tables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H + +#include "ConstantInitBuilder.h" +#include "clang/AST/BaseSubobject.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/VTableBuilder.h" +#include "clang/Basic/ABI.h" +#include "llvm/ADT/DenseMap.h" + +namespace clang { +class CXXRecordDecl; +} + +namespace cir { +class CIRGenModule; +// class ConstantArrayBuilder; +// class ConstantStructBuilder; + +class CIRGenVTables { + CIRGenModule &CGM; + + clang::VTableContextBase *VTContext; + + /// VTableAddressPointsMapTy - Address points for a single vtable. + typedef clang::VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy; + + typedef std::pair + BaseSubobjectPairTy; + typedef llvm::DenseMap SubVTTIndiciesMapTy; + + /// SubVTTIndicies - Contains indices into the various sub-VTTs. + SubVTTIndiciesMapTy SubVTTIndicies; + + typedef llvm::DenseMap + SecondaryVirtualPointerIndicesMapTy; + + /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer + /// indices. 
+  SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+  // /// Cache for the pure virtual member call function.
+  // llvm::Constant *PureVirtualFn = nullptr;
+
+  // /// Cache for the deleted virtual member call function.
+  // llvm::Constant *DeletedVirtualFn = nullptr;
+
+  // /// Get the address of a thunk and emit it if necessary.
+  // llvm::Constant *maybeEmitThunk(GlobalDecl GD,
+  //                                const ThunkInfo &ThunkAdjustments,
+  //                                bool ForVTable);
+
+  void addVTableComponent(ConstantArrayBuilder &builder,
+                          const VTableLayout &layout, unsigned componentIndex,
+                          mlir::Attribute rtti, unsigned &nextVTableThunkIndex,
+                          unsigned vtableAddressPoint,
+                          bool vtableHasLocalLinkage);
+
+  // /// Add a 32-bit offset to a component relative to the vtable when using
+  // /// the relative vtables ABI. The array builder points to the start of
+  // /// the vtable.
+  // void addRelativeComponent(ConstantArrayBuilder &builder,
+  //                           llvm::Constant *component,
+  //                           unsigned vtableAddressPoint,
+  //                           bool vtableHasLocalLinkage,
+  //                           bool isCompleteDtor) const;
+
+  // /// Create a dso_local stub that will be used for a relative reference
+  // /// in the relative vtable layout. This stub will just be a tail call to
+  // /// the original function and propagate any function attributes from the
+  // /// original. If the original function is already dso_local, the original
+  // /// is returned instead and a stub is not created.
+  // llvm::Function *
+  // getOrCreateRelativeStub(llvm::Function *func,
+  //                         llvm::GlobalValue::LinkageTypes stubLinkage,
+  //                         bool isCompleteDtor) const;
+
+  bool useRelativeLayout() const;
+
+  mlir::Type getVTableComponentType();
+
+public:
+  /// Add vtable components for the given vtable layout to the given
+  /// global initializer.
+  void createVTableInitializer(ConstantStructBuilder &builder,
+                               const VTableLayout &layout, mlir::Attribute rtti,
+                               bool vtableHasLocalLinkage);
+
+  CIRGenVTables(CIRGenModule &CGM);
+
+  clang::ItaniumVTableContext &getItaniumVTableContext() {
+    return *llvm::cast<clang::ItaniumVTableContext>(VTContext);
+  }
+
+  const clang::ItaniumVTableContext &getItaniumVTableContext() const {
+    return *llvm::cast<clang::ItaniumVTableContext>(VTContext);
+  }
+
+  // MicrosoftVTableContext &getMicrosoftVTableContext() {
+  //   return *cast<MicrosoftVTableContext>(VTContext);
+  // }
+
+  // /// getSubVTTIndex - Return the index of the sub-VTT for the base class
+  // /// of the given record decl.
+  // uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+  // /// getSecondaryVirtualPointerIndex - Return the index in the VTT where
+  // /// the virtual pointer for the given subobject is located.
+  // uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+  //                                          BaseSubobject Base);
+
+  // /// GenerateConstructionVTable - Generate a construction vtable for the
+  // /// given base subobject.
+  // llvm::GlobalVariable *
+  // GenerateConstructionVTable(const CXXRecordDecl *RD,
+  //                            const BaseSubobject &Base, bool BaseIsVirtual,
+  //                            llvm::GlobalVariable::LinkageTypes Linkage,
+  //                            VTableAddressPointsMapTy &AddressPoints);
+
+  /// Get the address of the VTT for the given record decl.
+  mlir::cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD);
+
+  /// Emit the definition of the given VTT.
+  void buildVTTDefinition(mlir::cir::GlobalOp VTT,
+                          mlir::cir::GlobalLinkageKind Linkage,
+                          const CXXRecordDecl *RD);
+
+  /// Emit the associated thunks for the given global decl.
+  void buildThunks(GlobalDecl GD);
+
+  /// Generate all the class data required to be generated upon definition of
+  /// a KeyFunction. This includes the vtable, the RTTI data structure (if
+  /// RTTI is enabled) and the VTT (if the class has virtual bases).
+  void GenerateClassData(const clang::CXXRecordDecl *RD);
+
+  bool isVTableExternal(const clang::CXXRecordDecl *RD);
+
+  /// Returns the type of a vtable with the given layout. Normally a struct of
+  /// arrays of pointers, with one struct element for each vtable in the
+  /// vtable group.
+  mlir::Type getVTableType(const clang::VTableLayout &layout);
+
+  // /// Generate a public facing alias for the vtable and make the vtable
+  // /// either hidden or private. The alias will have the original linkage
+  // /// and visibility of the vtable. This is used for cases under the
+  // /// relative vtables ABI when a vtable may not be dso_local.
+  // void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
+  //                                  llvm::StringRef AliasNameRef);
+
+  // /// Specify a global should not be instrumented with hwasan.
+  // void RemoveHwasanMetadata(llvm::GlobalValue *GV) const;
+};
+
+} // end namespace cir
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
new file mode 100644
index 000000000000..4862d32df245
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -0,0 +1,514 @@
+//===-- CIRGenValue.h - CIRGen wrappers for mlir::Value ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around mlir::Value in order to fully
+// represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_CIRGENVALUE_H
+#define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H
+
+#include "Address.h"
+#include "CIRGenRecordLayout.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "llvm/ADT/PointerIntPair.h"
+
+#include "mlir/IR/Value.h"
+
+namespace cir {
+
+/// This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple MLIR SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+  enum Flavor { Scalar, Complex, Aggregate };
+
+  // The shift to make to an aggregate's alignment to make it look
+  // like a pointer.
+  enum { AggAlignShift = 4 };
+
+  // Stores first value and flavor.
+  llvm::PointerIntPair<mlir::Value, 2, Flavor> V1;
+  // Stores second value and volatility.
+  llvm::PointerIntPair<llvm::PointerUnion<mlir::Value, int *>, 1, bool> V2;
+  // Stores element type for aggregate values.
+  mlir::Type ElementType;
+
+public:
+  bool isScalar() const { return V1.getInt() == Scalar; }
+  bool isComplex() const { return V1.getInt() == Complex; }
+  bool isAggregate() const { return V1.getInt() == Aggregate; }
+  bool isIgnored() const { return isScalar() && !getScalarVal(); }
+
+  bool isVolatileQualified() const { return V2.getInt(); }
+
+  /// Return the mlir::Value of this scalar value.
+  mlir::Value getScalarVal() const {
+    assert(isScalar() && "Not a scalar!");
+    return V1.getPointer();
+  }
+
+  /// Return the real/imag components of this complex value.
+  std::pair<mlir::Value, mlir::Value> getComplexVal() const {
+    assert(0 && "not implemented");
+    return {};
+  }
+
+  /// Return the mlir::Value of the address of the aggregate.
+  Address getAggregateAddress() const {
+    assert(isAggregate() && "Not an aggregate!");
+    auto align = reinterpret_cast<uintptr_t>(V2.getPointer().get<int *>()) >>
+                 AggAlignShift;
+    return Address(V1.getPointer(), ElementType,
+                   clang::CharUnits::fromQuantity(align));
+  }
+
+  mlir::Value getAggregatePointer() const {
+    assert(isAggregate() && "Not an aggregate!");
+    return V1.getPointer();
+  }
+
+  static RValue getIgnored() {
+    // FIXME: should we make this a more explicit state?
+    return get(nullptr);
+  }
+
+  static RValue get(mlir::Value V) {
+    RValue ER;
+    ER.V1.setPointer(V);
+    ER.V1.setInt(Scalar);
+    ER.V2.setInt(false);
+    return ER;
+  }
+  static RValue getComplex(mlir::Value V1, mlir::Value V2) {
+    assert(0 && "not implemented");
+    return RValue{};
+  }
+  static RValue getComplex(const std::pair<mlir::Value, mlir::Value> &C) {
+    assert(0 && "not implemented");
+    return RValue{};
+  }
+  // FIXME: Aggregate rvalues need to retain information about whether they
+  // are volatile or not. Remove default to find all places that probably get
+  // this wrong.
+  static RValue getAggregate(Address addr, bool isVolatile = false) {
+    RValue ER;
+    ER.V1.setPointer(addr.getPointer());
+    ER.V1.setInt(Aggregate);
+    ER.ElementType = addr.getElementType();
+
+    auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
+    ER.V2.setPointer(reinterpret_cast<int *>(align << AggAlignShift));
+    ER.V2.setInt(isVolatile);
+    return ER;
+  }
+};
+
+/// The source of the alignment of an l-value; an expression of
+/// confidence in the alignment actually matching the estimate.
+enum class AlignmentSource {
+  /// The l-value was an access to a declared entity or something
+  /// equivalently strong, like the address of an array allocated by a
+  /// language runtime.
+  Decl,
+
+  /// The l-value was considered opaque, so the alignment was
+  /// determined from a type, but that type was an explicitly-aligned
+  /// typedef.
+  AttributedType,
+
+  /// The l-value was considered opaque, so the alignment was
+  /// determined from a type.
+  Type
+};
+
+/// Given that the base address has the given alignment source, what's
+/// our confidence in the alignment of the field?
+static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) {
+  // For now, we don't distinguish fields of opaque pointers from
+  // top-level declarations, but maybe we should.
+  return AlignmentSource::Decl;
+}
+
+class LValueBaseInfo {
+  AlignmentSource AlignSource;
+
+public:
+  explicit LValueBaseInfo(AlignmentSource Source = AlignmentSource::Type)
+      : AlignSource(Source) {}
+  AlignmentSource getAlignmentSource() const { return AlignSource; }
+  void setAlignmentSource(AlignmentSource Source) { AlignSource = Source; }
+
+  void mergeForCast(const LValueBaseInfo &Info) {
+    setAlignmentSource(Info.getAlignmentSource());
+  }
+};
+
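+// Illustrative note, not from the original patch: the LValue kinds below map
+// to C/C++ expressions roughly as follows (examples assumed for exposition):
+//   int x; ... x = 1;            -> Simple, accessed via getAddress()
+//   v[i] on a vector type        -> VectorElt, accessed via getVector*()
+//   struct S { int b : 3; }; s.b -> BitField, carrying a CIRGenBitFieldInfo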
+class LValue {
+  enum {
+    Simple,       // This is a normal l-value, use getAddress().
+    VectorElt,    // This is a vector element l-value (V[i]), use getVector*
+    BitField,     // This is a bitfield l-value, use getBitfield*.
+    ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
+    GlobalReg,    // This is a register l-value, use getGlobalReg()
+    MatrixElt     // This is a matrix element, use getVector*
+  } LVType;
+  clang::QualType Type;
+  clang::Qualifiers Quals;
+
+  // LValue is non-gc'able for any reason, including being a parameter or
+  // local variable.
+  bool NonGC : 1;
+
+  // This flag shows if a nontemporal load/stores should be used when
+  // accessing this lvalue.
+  bool Nontemporal : 1;
+
+private:
+  void Initialize(clang::QualType Type, clang::Qualifiers Quals,
+                  clang::CharUnits Alignment, LValueBaseInfo BaseInfo) {
+    assert((!Alignment.isZero() || Type->isIncompleteType()) &&
+           "initializing l-value with zero alignment!");
+    if (isGlobalReg())
+      assert(ElementType == nullptr && "Global reg does not store elem type");
+
+    this->Type = Type;
+    this->Quals = Quals;
+    const unsigned MaxAlign = 1U << 31;
+    this->Alignment = Alignment.getQuantity() <= MaxAlign
+                          ? Alignment.getQuantity()
+                          : MaxAlign;
+    assert(this->Alignment == Alignment.getQuantity() &&
+           "Alignment exceeds allowed max!");
+    this->BaseInfo = BaseInfo;
+
+    // TODO: ObjC flags
+    // Initialize Objective-C flags.
+    this->NonGC = false;
+    this->Nontemporal = false;
+  }
+
+  // The alignment to use when accessing this lvalue. (For vector elements,
+  // this is the alignment of the whole vector.)
+  unsigned Alignment;
+  mlir::Value V;
+  mlir::Type ElementType;
+  mlir::Value VectorIdx; // Index for vector subscript
+  LValueBaseInfo BaseInfo;
+  const CIRGenBitFieldInfo *BitFieldInfo{nullptr};
+
+public:
+  bool isSimple() const { return LVType == Simple; }
+  bool isVectorElt() const { return LVType == VectorElt; }
+  bool isBitField() const { return LVType == BitField; }
+  bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+  bool isGlobalReg() const { return LVType == GlobalReg; }
+  bool isMatrixElt() const { return LVType == MatrixElt; }
+
+  bool isVolatileQualified() const { return Quals.hasVolatile(); }
+
+  unsigned getVRQualifiers() const {
+    return Quals.getCVRQualifiers() & ~clang::Qualifiers::Const;
+  }
+
+  bool isNonGC() const { return NonGC; }
+  void setNonGC(bool Value) { NonGC = Value; }
+
+  bool isNontemporal() const { return Nontemporal; }
+
+  bool isObjCWeak() const {
+    return Quals.getObjCGCAttr() == clang::Qualifiers::Weak;
+  }
+  bool isObjCStrong() const {
+    return Quals.getObjCGCAttr() == clang::Qualifiers::Strong;
+  }
+
+  bool isVolatile() const { return Quals.hasVolatile(); }
+
+  clang::QualType getType() const { return Type; }
+
+  mlir::Value getPointer() const { return V; }
+
+  clang::CharUnits getAlignment() const {
+    return clang::CharUnits::fromQuantity(Alignment);
+  }
+  void setAlignment(clang::CharUnits A) { Alignment = A.getQuantity(); }
+
+  Address getAddress() const {
+    return Address(getPointer(), ElementType, getAlignment());
+  }
+
+  void setAddress(Address address) {
+    assert(isSimple());
+    V = address.getPointer();
+    ElementType = address.getElementType();
+    Alignment = address.getAlignment().getQuantity();
+    // TODO(cir): IsKnownNonNull = address.isKnownNonNull();
+  }
+
+  LValueBaseInfo getBaseInfo() const { return BaseInfo; }
+  void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
+
+  static LValue makeAddr(Address address, clang::QualType T,
+                         AlignmentSource Source = AlignmentSource::Type) {
+    LValue R;
+    R.LVType = Simple;
+    R.V = address.getPointer();
+    R.ElementType = address.getElementType();
+    R.Initialize(T, T.getQualifiers(), address.getAlignment(),
+                 LValueBaseInfo(Source));
+    return R;
+  }
+
+  // FIXME: only have one of these static methods.
+  static LValue makeAddr(Address address, clang::QualType T,
+                         LValueBaseInfo LBI) {
+    LValue R;
+    R.LVType = Simple;
+    R.V = address.getPointer();
+    R.ElementType = address.getElementType();
+    R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI);
+    return R;
+  }
+
+  static LValue makeAddr(Address address, clang::QualType type,
+                         clang::ASTContext &Context, LValueBaseInfo BaseInfo) {
+    clang::Qualifiers qs = type.getQualifiers();
+    qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
+
+    LValue R;
+    R.LVType = Simple;
+    assert(address.getPointer().getType().cast<mlir::cir::PointerType>());
+    R.V = address.getPointer();
+    R.ElementType = address.getElementType();
+    R.Initialize(type, qs, address.getAlignment(),
+                 BaseInfo); // TODO: TBAAInfo);
+    return R;
+  }
+
+  const clang::Qualifiers &getQuals() const { return Quals; }
+  clang::Qualifiers &getQuals() { return Quals; }
+
+  // vector element lvalue
+  Address getVectorAddress() const {
+    return Address(getVectorPointer(), ElementType, getAlignment());
+  }
+  mlir::Value getVectorPointer() const {
+    assert(isVectorElt());
+    return V;
+  }
+  mlir::Value getVectorIdx() const {
+    assert(isVectorElt());
+    return VectorIdx;
+  }
+
+  static LValue MakeVectorElt(Address vecAddress, mlir::Value Index,
+                              clang::QualType type, LValueBaseInfo BaseInfo) {
+    LValue R;
+    R.LVType = VectorElt;
+    R.V = vecAddress.getPointer();
+    R.ElementType = vecAddress.getElementType();
+    R.VectorIdx = Index;
+    R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+                 BaseInfo);
+    return R;
+  }
+
+  // bitfield lvalue
+  Address getBitFieldAddress() const {
+    return Address(getBitFieldPointer(), ElementType, getAlignment());
+  }
+
+  mlir::Value getBitFieldPointer() const {
+    assert(isBitField());
+    return V;
+  }
+
+  const CIRGenBitFieldInfo &getBitFieldInfo() const {
+    assert(isBitField());
+    return *BitFieldInfo;
+  }
+
+  /// Create a new object to represent a bit-field access.
+  ///
+  /// \param Addr - The base address of the bit-field sequence this
+  /// bit-field refers to.
+  /// \param Info - The information describing how to perform the bit-field
+  /// access.
+  static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info,
+                             clang::QualType type, LValueBaseInfo BaseInfo) {
+    LValue R;
+    R.LVType = BitField;
+    R.V = Addr.getPointer();
+    R.ElementType = Addr.getElementType();
+    R.BitFieldInfo = &Info;
+    R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo);
+    return R;
+  }
+};
+
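+// Hypothetical usage sketch (names assumed, not part of this patch): lowering
+// a reference to a simple local variable produces a Simple LValue, e.g.
+//   LValue lv = LValue::makeAddr(declAddr, varDecl->getType(),
+//                                AlignmentSource::Decl);
+//   assert(lv.isSimple() && lv.getAddress().isValid());
+// where `declAddr` is the Address of the variable's storage slot.
+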
+/// An aggregate value slot.
+class AggValueSlot {
+  /// The address.
+  Address Addr;
+
+  // Qualifiers
+  clang::Qualifiers Quals;
+
+  /// This is set to true if some external code is responsible for setting up
+  /// a destructor for the slot. Otherwise the code which constructs it should
+  /// push the appropriate cleanup.
+  bool DestructedFlag : 1;
+
+  /// This is set to true if writing to the memory in the slot might require
+  /// calling an appropriate Objective-C GC barrier. The exact interaction
+  /// here is unnecessarily mysterious.
+  bool ObjCGCFlag : 1;
+
+  /// This is set to true if the memory in the slot is known to be zero before
+  /// the assignment into it. This means that zero fields don't need to be
+  /// set.
+  bool ZeroedFlag : 1;
+
+  /// This is set to true if the slot might be aliased and it's not undefined
+  /// behavior to access it through such an alias. Note that it's always
+  /// undefined behavior to access a C++ object that's under construction
+  /// through an alias derived from outside the construction process.
+  ///
+  /// This flag controls whether calls that produce the aggregate
+  /// value may be evaluated directly into the slot, or whether they
+  /// must be evaluated into an unaliased temporary and then memcpy'ed
+  /// over. Since it's invalid in general to memcpy a non-POD C++
+  /// object, it's important that this flag never be set when
+  /// evaluating an expression which constructs such an object.
+  bool AliasedFlag : 1;
+
+  /// This is set to true if the tail padding of this slot might overlap
+  /// another object that may have already been initialized (and whose
+  /// value must be preserved by this initialization). If so, we may only
+  /// store up to the dsize of the type. Otherwise we can widen stores to
+  /// the size of the type.
+  bool OverlapFlag : 1;
+
+  /// This is set to true if sanitizer checks are already generated for this
+  /// address or are not required. For instance, if this address represents an
+  /// object created in a 'new' expression, sanitizer checks for the memory
+  /// are made as a part of 'operator new' emission and the object constructor
+  /// should not generate them.
+  bool SanitizerCheckedFlag : 1;
+
+  AggValueSlot(Address Addr, clang::Qualifiers Quals, bool DestructedFlag,
+               bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag,
+               bool OverlapFlag, bool SanitizerCheckedFlag)
+      : Addr(Addr), Quals(Quals), DestructedFlag(DestructedFlag),
+        ObjCGCFlag(ObjCGCFlag), ZeroedFlag(ZeroedFlag),
+        AliasedFlag(AliasedFlag), OverlapFlag(OverlapFlag),
+        SanitizerCheckedFlag(SanitizerCheckedFlag) {}
+
+public:
+  enum IsAliased_t { IsNotAliased, IsAliased };
+  enum IsDestructed_t { IsNotDestructed, IsDestructed };
+  enum IsZeroed_t { IsNotZeroed, IsZeroed };
+  enum Overlap_t { DoesNotOverlap, MayOverlap };
+  enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
+  enum IsSanitizerChecked_t { IsNotSanitizerChecked, IsSanitizerChecked };
+
+  /// ignored - Returns an aggregate value slot indicating that the aggregate
+  /// value is being ignored.
+  static AggValueSlot ignored() {
+    return forAddr(Address::invalid(), clang::Qualifiers(), IsNotDestructed,
+                   DoesNotNeedGCBarriers, IsNotAliased, DoesNotOverlap);
+  }
+
+  /// forAddr - Make a slot for an aggregate value.
+  ///
+  /// \param quals - The qualifiers that dictate how the slot should be
+  /// initialized. Only 'volatile' and the Objective-C lifetime qualifiers
+  /// matter.
+  ///
+  /// \param isDestructed - true if something else is responsible for calling
+  /// destructors on this object
+  /// \param needsGC - true if the slot is potentially located somewhere that
+  /// ObjC GC calls should be emitted for
+  static AggValueSlot
+  forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed,
+          NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
+          Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
+          IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
+    return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed,
+                        isAliased, mayOverlap, isChecked);
+  }
+
+  static AggValueSlot
+  forLValue(const LValue &LV, IsDestructed_t isDestructed,
+            NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
+            Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
+            IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
+    return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
+                   isAliased, mayOverlap, isZeroed, isChecked);
+  }
+
+  IsDestructed_t isExternallyDestructed() const {
+    return IsDestructed_t(DestructedFlag);
+  }
+  void setExternallyDestructed(bool destructed = true) {
+    DestructedFlag = destructed;
+  }
+
+  clang::Qualifiers getQualifiers() const { return Quals; }
+
+  bool isVolatile() const { return Quals.hasVolatile(); }
+
+  Address getAddress() const { return Addr; }
+
+  bool isIgnored() const { return !Addr.isValid(); }
+
+  mlir::Value getPointer() const { return Addr.getPointer(); }
+
+  Overlap_t mayOverlap() const { return Overlap_t(OverlapFlag); }
+
+  bool isSanitizerChecked() const { return SanitizerCheckedFlag; }
+
+  IsZeroed_t isZeroed() const { return IsZeroed_t(ZeroedFlag); }
+  void setZeroed(bool V = true) { ZeroedFlag = V; }
+
+  NeedsGCBarriers_t requiresGCollection() const {
+    return NeedsGCBarriers_t(ObjCGCFlag);
+  }
+
+  IsAliased_t isPotentiallyAliased() const { return IsAliased_t(AliasedFlag); }
+
+  RValue asRValue() const {
+    if (isIgnored()) {
+      return RValue::getIgnored();
+    } else {
+      return RValue::getAggregate(getAddress(), isVolatile());
+    }
+  }
+
+  /// Get the preferred size to use when storing a value to this slot. This
+  /// is the type size unless that might overlap another object, in which
+  /// case it's the dsize.
+  clang::CharUnits getPreferredSize(clang::ASTContext &Ctx,
+                                    clang::QualType Type) {
+    return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width
+                        : Ctx.getTypeSizeInChars(Type);
+  }
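+
+  // Hypothetical usage sketch (names assumed, not part of this patch):
+  // preparing a slot before emitting an aggregate initializer:
+  //   AggValueSlot slot = AggValueSlot::forAddr(
+  //       addr, qualTy.getQualifiers(), AggValueSlot::IsDestructed,
+  //       AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+  //       AggValueSlot::DoesNotOverlap);
+  //   RValue rv = slot.asRValue();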
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
new file mode 100644
index 000000000000..4d6a6c6c5d84
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
@@ -0,0 +1,192 @@
+//===--- CIRGenerator.cpp - Emit CIR from ASTs ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to CIR.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenModule.h"
+
+#include "mlir/Dialect/DLTI/DLTI.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/Target/LLVMIR/Import.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/CIR/CIRGenerator.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+
+using namespace cir;
+using namespace clang;
+
+void CIRGenerator::anchor() {}
+
+CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags,
+                           llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> vfs,
+                           const CodeGenOptions &CGO)
+    : Diags(diags), fs(std::move(vfs)), codeGenOpts{CGO},
+      HandlingTopLevelDecls(0) {}
+CIRGenerator::~CIRGenerator() {
+  // There should normally not be any leftover inline method definitions.
+  assert(DeferredInlineMemberFuncDefs.empty() || Diags.hasErrorOccurred());
+}
+
+static void setMLIRDataLayout(mlir::ModuleOp &mod,
+                              const llvm::DataLayout &dl) {
+  auto *context = mod.getContext();
+  mod->setAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName(),
+               mlir::StringAttr::get(context, dl.getStringRepresentation()));
+  mlir::DataLayoutSpecInterface dlSpec =
+      mlir::translateDataLayout(dl, context);
+  mod->setAttr(mlir::DLTIDialect::kDataLayoutAttrName, dlSpec);
+}
+
+void CIRGenerator::Initialize(ASTContext &astCtx) {
+  using namespace llvm;
+
+  this->astCtx = &astCtx;
+
+  mlirCtx = std::make_unique<mlir::MLIRContext>();
+  mlirCtx->getOrLoadDialect<mlir::cir::CIRDialect>();
+  mlirCtx->getOrLoadDialect<mlir::func::FuncDialect>();
+  mlirCtx->getOrLoadDialect<mlir::LLVM::LLVMDialect>();
+  mlirCtx->getOrLoadDialect<mlir::memref::MemRefDialect>();
+  mlirCtx->getOrLoadDialect<mlir::omp::OpenMPDialect>();
+  mlirCtx->getOrLoadDialect<mlir::DLTIDialect>();
+  CGM = std::make_unique<CIRGenModule>(*mlirCtx.get(), astCtx, codeGenOpts,
+                                       Diags);
+  auto mod = CGM->getModule();
+  auto layout = llvm::DataLayout(astCtx.getTargetInfo().getDataLayoutString());
+  setMLIRDataLayout(mod, layout);
+}
+
+bool CIRGenerator::verifyModule() { return CGM->verifyModule(); }
+
+bool CIRGenerator::EmitFunction(const FunctionDecl *FD) {
+  llvm_unreachable("NYI");
+}
+
+mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); }
+
+bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) {
+  if (Diags.hasErrorOccurred())
+    return true;
+
+  HandlingTopLevelDeclRAII HandlingDecl(*this);
+
+  for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+    CGM->buildTopLevelDecl(*I);
+  }
+
+  return true;
+}
+
+void CIRGenerator::HandleTranslationUnit(ASTContext &C) {
+  // Release the Builder when there is no error.
+  if (!Diags.hasErrorOccurred() && CGM)
+    CGM->Release();
+
+  // If there are errors before or when releasing the CGM, reset the module to
+  // stop here before invoking the backend.
+  if (Diags.hasErrorOccurred()) {
+    if (CGM)
+      // TODO: CGM->clear();
+      // TODO: M.reset();
+      return;
+  }
+}
+
+void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) {
+  if (Diags.hasErrorOccurred())
+    return;
+
+  assert(D->doesThisDeclarationHaveABody());
+
+  // We may want to emit this definition. However, that decision might be
+  // based on computing the linkage, and we have to defer that in case we are
+  // inside of something that will change the method's final linkage, e.g.
+  //   typedef struct {
+  //     void bar();
+  //     void foo() { bar(); }
+  //   } A;
+  DeferredInlineMemberFuncDefs.push_back(D);
+
+  // Provide some coverage mapping even for methods that aren't emitted.
+  // Don't do this for templated classes though, as they may not be
+  // instantiable.
+  if (!D->getLexicalDeclContext()->isDependentContext())
+    CGM->AddDeferredUnusedCoverageMapping(D);
+}
+
+void CIRGenerator::buildDefaultMethods() { CGM->buildDefaultMethods(); }
+
+void CIRGenerator::buildDeferredDecls() {
+  if (DeferredInlineMemberFuncDefs.empty())
+    return;
+
+  // Emit any deferred inline method definitions. Note that more deferred
+  // methods may be added during this loop, since ASTConsumer callbacks can be
+  // invoked if AST inspection results in declarations being added.
+  HandlingTopLevelDeclRAII HandlingDecls(*this);
+  for (unsigned I = 0; I != DeferredInlineMemberFuncDefs.size(); ++I)
+    CGM->buildTopLevelDecl(DeferredInlineMemberFuncDefs[I]);
+  DeferredInlineMemberFuncDefs.clear();
+}
+
+/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+/// (e.g. struct, union, enum, class) is completed. This allows the client to
+/// hack on the type, which can occur at any point in the file (because these
+/// can be defined in declspecs).
+void CIRGenerator::HandleTagDeclDefinition(TagDecl *D) {
+  if (Diags.hasErrorOccurred())
+    return;
+
+  // Don't allow re-entrant calls to CIRGen triggered by PCH deserialization
+  // to emit deferred decls.
+  HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false);
+
+  CGM->UpdateCompletedType(D);
+
+  // For MSVC compatibility, treat declarations of static data members with
+  // inline initializers as definitions.
+  if (astCtx->getTargetInfo().getCXXABI().isMicrosoft()) {
+    llvm_unreachable("NYI");
+  }
+  // For OpenMP emit declare reduction functions, if required.
+  if (astCtx->getLangOpts().OpenMP) {
+    llvm_unreachable("NYI");
+  }
+}
+
+void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) {
+  if (Diags.hasErrorOccurred())
+    return;
+
+  // Don't allow re-entrant calls to CIRGen triggered by PCH deserialization
+  // to emit deferred decls.
+  HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false);
+
+  if (CGM->getModuleDebugInfo())
+    llvm_unreachable("NYI");
+}
+
+void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) {
+  if (Diags.hasErrorOccurred())
+    return;
+
+  CGM->HandleCXXStaticMemberVarInstantiation(D);
+}
+
+void CIRGenerator::CompleteTentativeDefinition(VarDecl *D) {
+  if (Diags.hasErrorOccurred())
+    return;
+
+  CGM->buildTentativeDefinition(D);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp
new file mode 100644
index 000000000000..7819d6db21ea
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp
@@ -0,0 +1,88 @@
+//====- CIRPasses.cpp - CIR to CIR passes driver --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements machinery for any CIR <-> CIR passes used by clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/CIR/Dialect/Passes.h"
+
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Support/LogicalResult.h"
+
+namespace cir {
+mlir::LogicalResult
+runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx,
+                  clang::ASTContext &astCtx, bool enableVerifier,
+                  bool enableLifetime, llvm::StringRef lifetimeOpts,
+                  bool enableIdiomRecognizer,
+                  llvm::StringRef idiomRecognizerOpts, bool enableLibOpt,
+                  llvm::StringRef libOptOpts,
+                  std::string &passOptParsingFailure, bool flattenCIR) {
+  mlir::PassManager pm(mlirCtx);
+  pm.addPass(mlir::createMergeCleanupsPass());
+
+  // TODO(CIR): Make this actually propagate errors correctly. This is stubbed
+  // in to get rebases going.
+  auto errorHandler = [](const llvm::Twine &) -> mlir::LogicalResult {
+    return mlir::LogicalResult::failure();
+  };
+
+  if (enableLifetime) {
+    auto lifetimePass = mlir::createLifetimeCheckPass(&astCtx);
+    if (lifetimePass->initializeOptions(lifetimeOpts, errorHandler).failed()) {
+      passOptParsingFailure = lifetimeOpts;
+      return mlir::failure();
+    }
+    pm.addPass(std::move(lifetimePass));
+  }
+
+  if (enableIdiomRecognizer) {
+    auto idiomPass = mlir::createIdiomRecognizerPass(&astCtx);
+    if (idiomPass->initializeOptions(idiomRecognizerOpts, errorHandler)
+            .failed()) {
+      passOptParsingFailure = idiomRecognizerOpts;
+      return mlir::failure();
+    }
+    pm.addPass(std::move(idiomPass));
+  }
+
+  if (enableLibOpt) {
+    auto libOpPass = mlir::createLibOptPass(&astCtx);
+    if (libOpPass->initializeOptions(libOptOpts, errorHandler).failed()) {
+      passOptParsingFailure = libOptOpts;
+      return mlir::failure();
+    }
+    pm.addPass(std::move(libOpPass));
+  }
+
+  pm.addPass(mlir::createLoweringPreparePass(&astCtx));
+  if (flattenCIR)
+    mlir::populateCIRPreLoweringPasses(pm);
+
+  // FIXME: once CIRCodeGenAction fixes emission other than CIR we
+  // need to run this right before dialect emission.
+  pm.addPass(mlir::createDropASTPass());
+  pm.enableVerifier(enableVerifier);
+  (void)mlir::applyPassManagerCLOptions(pm);
+  return pm.run(theModule);
+}
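+
+// For exposition only (hypothetical call site, simplified): a frontend action
+// is expected to drive this pipeline roughly as
+//   std::string optParsingFailure;
+//   if (runCIRToCIRPasses(module, mlirCtx, astCtx, /*enableVerifier=*/true,
+//                         /*enableLifetime=*/false, "", false, "", false, "",
+//                         optParsingFailure, /*flattenCIR=*/false)
+//           .failed())
+//     return; // report the failure, e.g. via diagnostics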
+
+} // namespace cir
+
+namespace mlir {
+
+void populateCIRPreLoweringPasses(OpPassManager &pm) {
+  pm.addPass(createFlattenCFGPass());
+  // add other passes here
+}
+
+} // namespace mlir
diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp
new file mode 100644
index 000000000000..485b89f75125
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp
@@ -0,0 +1,750 @@
+
+#include "CIRDataLayout.h"
+#include "CIRGenBuilder.h"
+#include "CIRGenModule.h"
+#include "CIRGenTypes.h"
+
+#include "mlir/IR/BuiltinTypes.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <memory>
+
+using namespace llvm;
+using namespace clang;
+using namespace cir;
+
+namespace {
+/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to a
+/// mlir::Type. Some of the lowering is straightforward, some is not.
+/// TODO: Detail some of the complexities and weirdnesses here.
+struct CIRRecordLowering final {
+
+  // MemberInfo is a helper structure that contains information about a record
+  // member. In addition to the standard member types, there exists a sentinel
+  // member type that ensures correct rounding.
+  struct MemberInfo final {
+    CharUnits offset;
+    enum class InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } kind;
+    mlir::Type data;
+    union {
+      const FieldDecl *fieldDecl;
+      const CXXRecordDecl *cxxRecordDecl;
+    };
+    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
+               const FieldDecl *fieldDecl = nullptr)
+        : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {};
+    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
+               const CXXRecordDecl *RD)
+        : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{RD} {}
+    // MemberInfos are sorted so we define a < operator.
+    bool operator<(const MemberInfo &other) const {
+      return offset < other.offset;
+    }
+  };
+  // The constructor.
+  CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
+                    bool isPacked);
+
+  /// ----------------------
+  /// Short helper routines.
+
+  /// Constructs a MemberInfo instance from an offset and mlir::Type.
+  MemberInfo StorageInfo(CharUnits Offset, mlir::Type Data) {
+    return MemberInfo(Offset, MemberInfo::InfoKind::Field, Data);
+  }
+
+  // Layout routines.
+  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
+                       mlir::Type StorageType);
+
+  void lower(bool nonVirtualBaseType);
+  void lowerUnion();
+
+  /// Determines if we need a packed llvm struct.
+  void determinePacked(bool NVBaseType);
+
+  void computeVolatileBitfields();
+  void accumulateBases();
+  void accumulateVPtrs();
+  void accumulateVBases();
+  void accumulateFields();
+  void accumulateBitFields(RecordDecl::field_iterator Field,
+                           RecordDecl::field_iterator FieldEnd);
+
+  mlir::Type getVFPtrType();
+
+  // Helper function to check if we are targeting AAPCS.
+  bool isAAPCS() const {
+    return astContext.getTargetInfo().getABI().starts_with("aapcs");
+  }
+
+  /// Helper function to check if the target machine is BigEndian.
+  bool isBE() const { return astContext.getTargetInfo().isBigEndian(); }
+
+  /// The Microsoft bitfield layout rule allocates discrete storage
+  /// units of the field's formal type and only combines adjacent
+  /// fields of the same formal type. We want to emit a layout with
+  /// these discrete storage units instead of combining them into a
+  /// continuous run.
+  bool isDiscreteBitFieldABI() {
+    return astContext.getTargetInfo().getCXXABI().isMicrosoft() ||
+           recordDecl->isMsStruct(astContext);
+  }
+
+  // The Itanium base layout rule allows virtual bases to overlap
+  // other bases, which complicates layout in specific ways.
+  //
+  // Note specifically that the ms_struct attribute doesn't change this.
+  bool isOverlappingVBaseABI() {
+    return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
+  }
+  // Recursively searches all of the bases to find out if a vbase is
+  // not the primary vbase of some base class.
+  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
+
+  CharUnits bitsToCharUnits(uint64_t bitOffset) {
+    return astContext.toCharUnitsFromBits(bitOffset);
+  }
+
+  void calculateZeroInit();
+
+  CharUnits getSize(mlir::Type Ty) {
+    return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty));
+  }
+  CharUnits getSizeInBits(mlir::Type Ty) {
+    return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(Ty));
+  }
+  CharUnits getAlignment(mlir::Type Ty) {
+    return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty));
+  }
+  bool isZeroInitializable(const FieldDecl *FD) {
+    return cirGenTypes.isZeroInitializable(FD->getType());
+  }
+  bool isZeroInitializable(const RecordDecl *RD) {
+    return cirGenTypes.isZeroInitializable(RD);
+  }
+
+  mlir::Type getCharType() {
+    return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(),
+                                   astContext.getCharWidth(),
+                                   /*isSigned=*/false);
+  }
+
+  /// Wraps mlir::cir::IntType with some implicit arguments.
+  mlir::Type getUIntNType(uint64_t NumBits) {
+    unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits);
+    AlignedBits = std::max(8u, AlignedBits);
+    return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits,
+                                   /*isSigned=*/false);
+  }
+
+  mlir::Type getByteArrayType(CharUnits numberOfChars) {
+    assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
+    mlir::Type type = getCharType();
+    return numberOfChars == CharUnits::One()
+               ? type
+               : mlir::cir::ArrayType::get(type.getContext(), type,
+                                           numberOfChars.getQuantity());
+  }
+
+  // This is different from LLVM traditional codegen because CIRGen uses
+  // arrays of bytes instead of arbitrary-sized integers. This is important
+  // for packed structures support.
+  mlir::Type getBitfieldStorageType(unsigned numBits) {
+    unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
+    if (mlir::cir::IntType::isValidPrimitiveIntBitwidth(alignedBits)) {
+      return builder.getUIntNTy(alignedBits);
+    } else {
+      mlir::Type type = getCharType();
+      return mlir::cir::ArrayType::get(type.getContext(), type,
+                                       alignedBits / astContext.getCharWidth());
+    }
+  }
+
+  // Gets the base subobject type from a CXXRecordDecl.
+  mlir::Type getStorageType(const CXXRecordDecl *RD) {
+    return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType();
+  }
+
+  mlir::Type getStorageType(const FieldDecl *fieldDecl) {
+    auto type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
+    assert(!fieldDecl->isBitField() && "bit fields NYI");
+    if (!fieldDecl->isBitField())
+      return type;
+
+    // if (isDiscreteBitFieldABI())
+    //   return type;
+
+    // return getUIntNType(std::min(fieldDecl->getBitWidthValue(astContext),
+    //     static_cast<uint64_t>(astContext.toBits(getSize(type)))));
+    llvm_unreachable("getStorageType only supports nonBitFields at this point");
+  }
+
+  uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
+    return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
+  }
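+
+  // For exposition (illustrative printed forms, assumed): getUIntNType(17)
+  // rounds up to an unsigned 32-bit integer (power of two, minimum 8 bits),
+  // while getByteArrayType(CharUnits::fromQuantity(3)) produces a 3-element
+  // array of the char type, e.g. !cir.array<!u8i x 3>.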
+
+  /// Fills out the structures that are ultimately consumed.
+  void fillOutputFields();
+
+  void appendPaddingBytes(CharUnits Size) {
+    if (!Size.isZero())
+      fieldTypes.push_back(getByteArrayType(Size));
+  }
+
+  CIRGenTypes &cirGenTypes;
+  CIRGenBuilderTy &builder;
+  const ASTContext &astContext;
+  const RecordDecl *recordDecl;
+  const CXXRecordDecl *cxxRecordDecl;
+  const ASTRecordLayout &astRecordLayout;
+  // Helpful intermediate data-structures
+  std::vector<MemberInfo> members;
+  // Output fields, consumed by CIRGenTypes::computeRecordLayout
+  llvm::SmallVector<mlir::Type, 16> fieldTypes;
+  llvm::DenseMap<const FieldDecl *, unsigned> fields;
+  llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
+  llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
+  llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
+  CIRDataLayout dataLayout;
+  bool IsZeroInitializable : 1;
+  bool IsZeroInitializableAsBase : 1;
+  bool isPacked : 1;
+
+private:
+  CIRRecordLowering(const CIRRecordLowering &) = delete;
+  void operator=(const CIRRecordLowering &) = delete;
+};
+} // namespace
+
+CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
+                                     const RecordDecl *recordDecl,
+                                     bool isPacked)
+    : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
+      astContext{cirGenTypes.getContext()}, recordDecl{recordDecl},
+      cxxRecordDecl{llvm::dyn_cast<CXXRecordDecl>(recordDecl)},
+      astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)},
+      dataLayout{cirGenTypes.getModule().getModule()},
+      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
+      isPacked{isPacked} {}
+
+void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD,
+                                        CharUnits StartOffset,
+                                        mlir::Type StorageType) {
+  CIRGenBitFieldInfo &Info = bitFields[FD->getCanonicalDecl()];
+  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+  Info.Offset =
+      (unsigned)(getFieldBitOffset(FD) - astContext.toBits(StartOffset));
+  Info.Size = FD->getBitWidthValue(astContext);
+  Info.StorageSize = getSizeInBits(StorageType).getQuantity();
+  Info.StorageOffset = StartOffset;
+  Info.StorageType = StorageType;
+  Info.Name = FD->getName();
+
+  if (Info.Size > Info.StorageSize)
+    Info.Size = Info.StorageSize;
+  // Reverse the bit offsets for big endian machines. Because we represent
+  // a bitfield as a single large integer load, we can imagine the bits
+  // counting from the most-significant-bit instead of the
+  // least-significant-bit.
+  if (dataLayout.isBigEndian())
+    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
+
+  Info.VolatileStorageSize = 0;
+  Info.VolatileOffset = 0;
+  Info.VolatileStorageOffset = CharUnits::Zero();
+}
+
+void CIRRecordLowering::lower(bool nonVirtualBaseType) {
+  if (recordDecl->isUnion()) {
+    lowerUnion();
+    computeVolatileBitfields();
+    return;
+  }
+
+  CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
+                                      : astRecordLayout.getSize();
+  if (recordDecl->isUnion()) {
+    llvm_unreachable("NYI");
+    // lowerUnion();
+    // computeVolatileBitfields();
+    return;
+  }
+  accumulateFields();
+
+  // RD implies C++
+  if (cxxRecordDecl) {
+    accumulateVPtrs();
+    accumulateBases();
+    if (members.empty()) {
+      appendPaddingBytes(Size);
+      computeVolatileBitfields();
+      return;
+    }
+    if (!nonVirtualBaseType)
+      accumulateVBases();
+  }
+
+  llvm::stable_sort(members);
+  // TODO: implement clipTailPadding once bitfields are implemented
+  // TODO: implement packed structs
+  // TODO: implement padding
+  // TODO: support zeroInit
+
+  members.push_back(StorageInfo(Size, getUIntNType(8)));
+  determinePacked(nonVirtualBaseType);
+  members.pop_back();
+
+  fillOutputFields();
+  computeVolatileBitfields();
+}
+
+void CIRRecordLowering::lowerUnion() {
+  CharUnits LayoutSize = astRecordLayout.getSize();
+  mlir::Type StorageType = nullptr;
+  bool SeenNamedMember = false;
+  // Iterate through the fields setting bitFieldInfo and the Fields array.
+  // Also locate the "most appropriate" storage type. The heuristic for
+  // finding the storage type isn't necessary, the first
+  // (non-0-length-bitfield) field's type would work fine and be simpler but
+  // would be different than what we've been doing and cause lit tests to
+  // change.
+  for (const auto *Field : recordDecl->fields()) {
+    if (Field->isBitField()) {
+      if (Field->isZeroLengthBitField(astContext))
+        continue;
+      llvm_unreachable("NYI");
+    }
+    fields[Field->getCanonicalDecl()] = 0;
+    auto FieldType = getStorageType(Field);
+    // Compute zero-initializable status.
+    // This union might not be zero initialized: it may contain a pointer to
+    // data member which might have some exotic initialization sequence.
+    // If this is the case, then we ought not try to come up with a "better"
+    // type, it might not be very easy to come up with a Constant which
+    // correctly initializes it.
+    if (!SeenNamedMember) {
+      SeenNamedMember = Field->getIdentifier();
+      if (!SeenNamedMember)
+        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
+          SeenNamedMember = FieldRD->findFirstNamedDataMember();
+      if (SeenNamedMember && !isZeroInitializable(Field)) {
+        IsZeroInitializable = IsZeroInitializableAsBase = false;
+        StorageType = FieldType;
+      }
+    }
+    // Because our union isn't zero initializable, we won't be getting a
+    // better storage type.
+    if (!IsZeroInitializable)
+      continue;
+
+    // Conditionally update our storage type if we've got a new "better" one.
+    if (!StorageType || getAlignment(FieldType) > getAlignment(StorageType) ||
+        (getAlignment(FieldType) == getAlignment(StorageType) &&
+         getSize(FieldType) > getSize(StorageType)))
+      StorageType = FieldType;
+
+    // NOTE(cir): Track all union member's types, not just the largest one. It
+    // allows for proper type-checking and retains more info for analysis.
+    fieldTypes.push_back(FieldType);
+  }
+  // If we have no storage type just pad to the appropriate size and return.
+  if (!StorageType)
+    llvm_unreachable("no-storage union NYI");
+  // If our storage size was bigger than our required size (can happen in the
+  // case of packed bitfields on Itanium) then just use an I8 array.
+  if (LayoutSize < getSize(StorageType))
+    StorageType = getByteArrayType(LayoutSize);
+  // NOTE(cir): Defer padding calculations to the lowering process.
+  // appendPaddingBytes(LayoutSize - getSize(StorageType));
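+
+  // Illustration (assumed lowering, for exposition): for
+  //   union U { int i; char c; };
+  // fieldTypes records both member types, while StorageType resolves to the
+  // int member (largest alignment, then largest size), so U's storage is
+  // equivalent to a single int-sized slot on typical targets.
+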
+  // Set packed if we need it.
+  if (LayoutSize % getAlignment(StorageType))
+    isPacked = true;
+}
+
+bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
+                                      const CXXRecordDecl *Query) {
+  const ASTRecordLayout &DeclLayout = astContext.getASTRecordLayout(Decl);
+  if (DeclLayout.isPrimaryBaseVirtual() &&
+      DeclLayout.getPrimaryBase() == Query)
+    return false;
+  for (const auto &Base : Decl->bases())
+    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
+      return false;
+  return true;
+}
+
+/// The AAPCS defines that, when possible, bit-fields should
+/// be accessed using containers of the declared type width:
+/// When a volatile bit-field is read, and its container does not overlap with
+/// any non-bit-field member or any zero length bit-field member, its
+/// container must be read exactly once using the access width appropriate to
+/// the type of the container. When a volatile bit-field is written, and its
+/// container does not overlap with any non-bit-field member or any
+/// zero-length bit-field member, its container must be read exactly once and
+/// written exactly once using the access width appropriate to the type of the
+/// container. The two accesses are not atomic.
+///
+/// Enforcing the width restriction can be disabled using
+/// -fno-aapcs-bitfield-width.
+void CIRRecordLowering::computeVolatileBitfields() {
+  if (!isAAPCS() ||
+      !cirGenTypes.getModule().getCodeGenOpts().AAPCSBitfieldWidth)
+    return;
+
+  for ([[maybe_unused]] auto &I : bitFields) {
+    assert(!UnimplementedFeature::armComputeVolatileBitfields());
+  }
+}
+
+void CIRRecordLowering::accumulateBases() {
+  // If we've got a primary virtual base, we need to add it with the bases.
+  if (astRecordLayout.isPrimaryBaseVirtual()) {
+    llvm_unreachable("NYI");
+  }
+
+  // Accumulate the non-virtual bases.
+  for ([[maybe_unused]] const auto &Base : cxxRecordDecl->bases()) {
+    if (Base.isVirtual())
+      continue;
+    // Bases can be zero-sized even if not technically empty if they
+    // contain only a trailing array member.
+    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+    if (!BaseDecl->isEmpty() &&
+        !astContext.getASTRecordLayout(BaseDecl)
+             .getNonVirtualSize()
+             .isZero()) {
+      members.push_back(
+          MemberInfo(astRecordLayout.getBaseClassOffset(BaseDecl),
+                     MemberInfo::InfoKind::Base, getStorageType(BaseDecl),
+                     BaseDecl));
+    }
+  }
+}
+
+void CIRRecordLowering::accumulateVBases() {
+  CharUnits ScissorOffset = astRecordLayout.getNonVirtualSize();
+  // In the itanium ABI, it's possible to place a vbase at a dsize that is
+  // smaller than the nvsize. Here we check to see if such a base is placed
+  // before the nvsize and set the scissor offset to that, instead of the
+  // nvsize.
+  if (isOverlappingVBaseABI())
+    for (const auto &Base : cxxRecordDecl->vbases()) {
+      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+      if (BaseDecl->isEmpty())
+        continue;
+      // If the vbase is a primary virtual base of some base, then it doesn't
+      // get its own storage location but instead lives inside of that base.
+      if (astContext.isNearlyEmpty(BaseDecl) &&
+          !hasOwnStorage(cxxRecordDecl, BaseDecl))
+        continue;
+      ScissorOffset = std::min(ScissorOffset,
+                               astRecordLayout.getVBaseClassOffset(BaseDecl));
+    }
+  members.push_back(MemberInfo(ScissorOffset, MemberInfo::InfoKind::Scissor,
+                               mlir::Type{}, cxxRecordDecl));
+  for (const auto &Base : cxxRecordDecl->vbases()) {
+    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+    if (BaseDecl->isEmpty())
+      continue;
+    CharUnits Offset = astRecordLayout.getVBaseClassOffset(BaseDecl);
+    // If the vbase is a primary virtual base of some base, then it doesn't
+    // get its own storage location but instead lives inside of that base.
+    if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(BaseDecl) &&
+        !hasOwnStorage(cxxRecordDecl, BaseDecl)) {
+      members.push_back(
+          MemberInfo(Offset, MemberInfo::InfoKind::VBase, nullptr, BaseDecl));
+      continue;
+    }
+    // If we've got a vtordisp, add it as a storage type.
+    if (astRecordLayout.getVBaseOffsetsMap()
+            .find(BaseDecl)
+            ->second.hasVtorDisp())
+      members.push_back(
+          StorageInfo(Offset - CharUnits::fromQuantity(4), getUIntNType(32)));
+    members.push_back(MemberInfo(Offset, MemberInfo::InfoKind::VBase,
+                                 getStorageType(BaseDecl), BaseDecl));
+  }
+}
+
+void CIRRecordLowering::accumulateVPtrs() {
+  if (astRecordLayout.hasOwnVFPtr())
+    members.push_back(MemberInfo(CharUnits::Zero(),
+                                 MemberInfo::InfoKind::VFPtr, getVFPtrType()));
+  if (astRecordLayout.hasOwnVBPtr())
+    llvm_unreachable("NYI");
+}
+
+mlir::Type CIRRecordLowering::getVFPtrType() {
+  // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special
+  // type so it's a bit more clear and C++ idiomatic.
+  return builder.getVirtualFnPtrType();
+}
+
+void CIRRecordLowering::fillOutputFields() {
+  for (auto &member : members) {
+    if (member.data)
+      fieldTypes.push_back(member.data);
+    if (member.kind == MemberInfo::InfoKind::Field) {
+      if (member.fieldDecl)
+        fields[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1;
+      // A field without storage must be a bitfield.
+      if (!member.data)
+        setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
+    } else if (member.kind == MemberInfo::InfoKind::Base) {
+      nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
+    } else if (member.kind == MemberInfo::InfoKind::VBase) {
+      virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
+    }
+  }
+}
+
+void CIRRecordLowering::accumulateBitFields(
+    RecordDecl::field_iterator Field, RecordDecl::field_iterator FieldEnd) {
+  // Run stores the first element of the current run of bitfields. FieldEnd is
+  // used as a special value to note that we don't have a current run. A
+  // bitfield run is a contiguous collection of bitfields that can be stored
+  // in the same storage block. Zero-sized bitfields and bitfields that would
+  // cross an alignment boundary break a run and start a new one.
+  RecordDecl::field_iterator Run = FieldEnd;
+  // Tail is the offset of the first bit off the end of the current run. It's
+  // used to determine if the ASTRecordLayout is treating these two bitfields
+  // as contiguous. StartBitOffset is the offset of the beginning of the Run.
+  uint64_t StartBitOffset, Tail = 0;
+  if (isDiscreteBitFieldABI()) {
+    llvm_unreachable("NYI");
+  }
+
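+  // Worked example (for exposition; actual offsets come from the
+  // ASTRecordLayout): given
+  //   struct S { unsigned a : 3; unsigned b : 5; unsigned : 0;
+  //              unsigned c : 9; };
+  // a and b are contiguous (bits 0..7) and share one run with 8-bit storage;
+  // the zero-length bitfield realigns c, breaking contiguity, so c starts a
+  // new run whose 9 bits round up to 16-bit storage via
+  // getBitfieldStorageType.
+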
+  // Check if OffsetInRecord (the size in bits of the current run) is better
+  // as a single field run. When OffsetInRecord has legal integer width, and
+  // its bitfield offset is naturally aligned, it is better to make the
+  // bitfield a separate storage component so that it can be accessed directly
+  // with lower cost.
+  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
+                                      uint64_t StartBitOffset,
+                                      uint64_t nextTail = 0) {
+    if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses)
+      return false;
+    llvm_unreachable("NYI");
+    // if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
+    //     !DataLayout.fitsInLegalInteger(OffsetInRecord))
+    //   return false;
+    // Make sure StartBitOffset is naturally aligned if it is treated as an
+    // IType integer.
+    // if (StartBitOffset %
+    //         astContext.toBits(getAlignment(getUIntNType(OffsetInRecord))) !=
+    //     0)
+    //   return false;
+    return true;
+  };
+
+  // The start field is better as a single field run.
+  bool StartFieldAsSingleRun = false;
+  for (;;) {
+    // Check to see if we need to start a new run.
+    if (Run == FieldEnd) {
+      // If we're out of fields, return.
+      if (Field == FieldEnd)
+        break;
+      // Any non-zero-length bitfield can start a new run.
+      if (!Field->isZeroLengthBitField(astContext)) {
+        Run = Field;
+        StartBitOffset = getFieldBitOffset(*Field);
+        Tail = StartBitOffset + Field->getBitWidthValue(astContext);
+        StartFieldAsSingleRun =
+            IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset);
+      }
+      ++Field;
+      continue;
+    }
+
+    // If the start field of a new run is better as a single run, or if
+    // current field (or consecutive fields) is better as a single run, or if
+    // current field has zero width bitfield and either
+    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
+    // true, or if the offset of current field is inconsistent with the offset
+    // of previous field plus its offset, skip the block below and go ahead to
+    // emit the storage. Otherwise, try to add bitfields to the run.
+    uint64_t nextTail = Tail;
+    if (Field != FieldEnd)
+      nextTail += Field->getBitWidthValue(astContext);
+
+    if (!StartFieldAsSingleRun && Field != FieldEnd &&
+        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset,
+                                  nextTail) &&
+        (!Field->isZeroLengthBitField(astContext) ||
+         (!astContext.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+          !astContext.getTargetInfo().useBitFieldTypeAlignment())) &&
+        Tail == getFieldBitOffset(*Field)) {
+      Tail = nextTail;
+      ++Field;
+      continue;
+    }
+
+    // We've hit a break-point in the run and need to emit a storage field.
+    auto Type = getBitfieldStorageType(Tail - StartBitOffset);
+
+    // Add the storage member to the record and set the bitfield info for all
+    // of the bitfields in the run. Bitfields get the offset of their storage
+    // but come afterward and remain there after a stable sort.
+    members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
+    for (; Run != Field; ++Run)
+      members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
+                                   MemberInfo::InfoKind::Field, nullptr,
+                                   *Run));
+    Run = FieldEnd;
+    StartFieldAsSingleRun = false;
+  }
+}
+
+void CIRRecordLowering::accumulateFields() {
+  for (RecordDecl::field_iterator field = recordDecl->field_begin(),
+                                  fieldEnd = recordDecl->field_end();
+       field != fieldEnd;) {
+    if (field->isBitField()) {
+      RecordDecl::field_iterator start = field;
+      // Iterate to gather the list of bitfields.
+      for (++field; field != fieldEnd && field->isBitField(); ++field)
+        ;
+      accumulateBitFields(start, field);
+    } else if (!field->isZeroSize(astContext)) {
+      members.push_back(MemberInfo{bitsToCharUnits(getFieldBitOffset(*field)),
+                                   MemberInfo::InfoKind::Field,
+                                   getStorageType(*field), *field});
+      ++field;
+    } else {
+      // TODO(cir): do we want to do anything special about zero size
+      // members?
+      ++field;
+    }
+  }
+}
+
+void CIRRecordLowering::determinePacked(bool NVBaseType) {
+  if (isPacked)
+    return;
+  CharUnits Alignment = CharUnits::One();
+  CharUnits NVAlignment = CharUnits::One();
+  CharUnits NVSize = !NVBaseType && cxxRecordDecl
+                         ? astRecordLayout.getNonVirtualSize()
+                         : CharUnits::Zero();
+  for (std::vector<MemberInfo>::const_iterator Member = members.begin(),
+                                               MemberEnd = members.end();
+       Member != MemberEnd; ++Member) {
+    if (!Member->data)
+      continue;
+    // If any member falls at an offset that is not a multiple of its
+    // alignment, then the entire record must be packed.
+    if (Member->offset % getAlignment(Member->data))
+      isPacked = true;
+    if (Member->offset < NVSize)
+      NVAlignment = std::max(NVAlignment, getAlignment(Member->data));
+    Alignment = std::max(Alignment, getAlignment(Member->data));
+  }
+  // If the size of the record (the capstone's offset) is not a multiple of
+  // the record's alignment, it must be packed.
+  if (members.back().offset % Alignment)
+    isPacked = true;
+  // If the non-virtual sub-object is not a multiple of the non-virtual
+  // sub-object's alignment, it must be packed. We cannot have a packed
+  // non-virtual sub-object and an unpacked complete object or vice versa.
+  if (NVSize % NVAlignment)
+    isPacked = true;
+  // Update the alignment of the sentinel.
+  if (!isPacked)
+    members.back().data = getUIntNType(astContext.toBits(Alignment));
+}
+
+std::unique_ptr<CIRGenRecordLayout>
+CIRGenTypes::computeRecordLayout(const RecordDecl *D,
+                                 mlir::cir::StructType *Ty) {
+  CIRRecordLowering builder(*this, D, /*packed=*/false);
+  assert(Ty->isIncomplete() && "recomputing record layout?");
+  builder.lower(/*nonVirtualBaseType=*/false);
+
+  // If we're in C++, compute the base subobject type.
+  mlir::cir::StructType BaseTy;
+  if (llvm::isa<CXXRecordDecl>(D) && !D->isUnion() &&
+      !D->hasAttr<FinalAttr>()) {
+    BaseTy = *Ty;
+    if (builder.astRecordLayout.getNonVirtualSize() !=
+        builder.astRecordLayout.getSize()) {
+      CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked);
+      baseBuilder.lower(/*NonVirtualBaseType=*/true);
+      auto baseIdentifier = getRecordTypeName(D, ".base");
+      BaseTy =
+          Builder.getCompleteStructTy(baseBuilder.fieldTypes, baseIdentifier,
+                                      /*packed=*/false, D);
+      // TODO(cir): add something like addRecordTypeName
+
+      // BaseTy and Ty must agree on their packedness for getCIRFieldNo to
+      // work on both of them with the same index.
+      assert(builder.isPacked == baseBuilder.isPacked &&
+             "Non-virtual and complete types must agree on packedness");
+    }
+  }
+
+  // Fill in the struct *after* computing the base type. Filling in the body
+  // signifies that the type is no longer opaque and record layout is
+  // complete, but we may need to recursively layout D while laying D out as a
+  // base type.
+  *Ty = Builder.getCompleteStructTy(
+      builder.fieldTypes, getRecordTypeName(D, ""), builder.isPacked, D);
+
+  auto RL = std::make_unique<CIRGenRecordLayout>(
+      Ty ? *Ty : mlir::cir::StructType{},
+      BaseTy ? BaseTy : mlir::cir::StructType{},
+      (bool)builder.IsZeroInitializable,
+      (bool)builder.IsZeroInitializableAsBase);
+      (bool)builder.IsZeroInitializable,
+      (bool)builder.IsZeroInitializableAsBase);
+
+  RL->NonVirtualBases.swap(builder.nonVirtualBases);
+  RL->CompleteObjectVirtualBases.swap(builder.virtualBases);
+
+  // Add all the field numbers.
+  RL->FieldInfo.swap(builder.fields);
+
+  // Add bitfield info.
+  RL->BitFields.swap(builder.bitFields);
+
+  // Dump the layout, if requested.
+  if (getContext().getLangOpts().DumpRecordLayouts) {
+    llvm_unreachable("NYI");
+  }
+
+  // TODO: implement verification
+  return RL;
+}
+
+CIRGenBitFieldInfo CIRGenBitFieldInfo::MakeInfo(CIRGenTypes &Types,
+                                                const FieldDecl *FD,
+                                                uint64_t Offset, uint64_t Size,
+                                                uint64_t StorageSize,
+                                                CharUnits StorageOffset) {
+  llvm_unreachable("NYI");
+}
+
+CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} {
+  auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName)
+                    .dyn_cast<mlir::DataLayoutSpecAttr>();
+  assert(dlSpec && "expected dl_spec in the module");
+  auto entries = dlSpec.getEntries();
+
+  for (auto entry : entries) {
+    auto entryKey = entry.getKey();
+    auto strKey = entryKey.dyn_cast<mlir::StringAttr>();
+    if (!strKey)
+      continue;
+    auto entryName = strKey.strref();
+    if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) {
+      auto value = entry.getValue().dyn_cast<mlir::StringAttr>();
+      assert(value && "expected string attribute");
+      auto endian = value.getValue();
+      if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig)
+        bigEndian = true;
+      else if (endian == mlir::DLTIDialect::kDataLayoutEndiannessLittle)
+        bigEndian = false;
+      else
+        llvm_unreachable("unknown endianness");
+    }
+  }
+}
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
new file mode 100644
index 000000000000..154aefbdba02
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -0,0 +1,77 @@
+set(
+  LLVM_LINK_COMPONENTS
+  Core
+  Support
+)
+
+get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
+
+add_clang_library(clangCIR
+  CIRAsm.cpp
+  CIRGenAtomic.cpp
+  CIRGenBuiltin.cpp
+  CIRGenCXX.cpp
+  CIRGenCXXABI.cpp
+  CIRGenCall.cpp
+  CIRGenClass.cpp
+  CIRGenCleanup.cpp
+  CIRGenCoroutine.cpp
+  CIRGenDecl.cpp
+  CIRGenDeclCXX.cpp
+  CIRGenException.cpp
+  CIRGenExpr.cpp
+  CIRGenExprConst.cpp
+  CIRGenExprAgg.cpp
+  CIRGenExprCXX.cpp
+  CIRGenExprScalar.cpp
+  CIRGenFunction.cpp
+  CIRGenItaniumCXXABI.cpp
+  CIRGenModule.cpp
+  CIRGenOpenMPRuntime.cpp
+  CIRGenStmt.cpp
+  CIRGenStmtOpenMP.cpp
+  CIRGenTBAA.cpp
+  CIRGenTypes.cpp
+  CIRGenVTables.cpp
+  CIRGenerator.cpp
+  CIRPasses.cpp
+  CIRRecordLayoutBuilder.cpp
+  ConstantInitBuilder.cpp
+  TargetInfo.cpp
+
+  DEPENDS
+  MLIRCIR
+  MLIRCIROpsIncGen
+  MLIRCIRASTAttrInterfacesIncGen
+  MLIRCIROpInterfacesIncGen
+  MLIRCIRLoopOpInterfaceIncGen
+  ${dialect_libs}
+
+  LINK_LIBS
+  clangAST
+  clangBasic
+  clangLex
+  ${dialect_libs}
+  MLIRCIR
+  MLIRCIRTransforms
+  MLIRCIRInterfaces
+  MLIRAffineToStandard
+  MLIRAnalysis
+  MLIRDLTIDialect
+  MLIRFuncToLLVM
+  MLIRIR
+  MLIRLLVMCommonConversion
+  MLIRLLVMDialect
+  MLIROpenMPDialect
+  MLIRLLVMToLLVMIRTranslation
+  MLIRMemRefDialect
+  MLIRMemRefToLLVM
+  MLIRParser
+  MLIRPass
+  MLIRSCFToControlFlow
+  MLIRSideEffectInterfaces
+  MLIRSupport
+  MLIRTargetLLVMIRImport
+  MLIRTargetLLVMIRExport
+  MLIRTransforms
+)
diff --git a/clang/lib/CIR/CodeGen/CallingConv.h b/clang/lib/CIR/CodeGen/CallingConv.h
new file mode 100644
index 000000000000..e6b41cdb550c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CallingConv.h
@@ -0,0 +1,43 @@
+//===- CallingConv.h - CIR Calling Conventions ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CIR's set of calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CIR_CALLINGCONV_H
+#define CLANG_CIR_CALLINGCONV_H
+
+// TODO: This whole file needs to be translated to CIR.
+
+namespace cir {
+
+/// CallingConv Namespace - This namespace contains an enum with a value for
+/// the well-known calling conventions.
+namespace CallingConv {
+
+/// LLVM IR allows arbitrary numbers to be used as calling convention
+/// identifiers.
+/// TODO: What should we do about this for CIR?
+using ID = unsigned;
+
+/// A set of enums which specify the assigned numeric values for known llvm
+/// calling conventions.
+/// LLVM Calling Convention Representation
+enum {
+  /// C - The default llvm calling convention, compatible with C. This
+  /// convention is the only calling convention that supports varargs calls.
+  /// As with typical C calling conventions, the callee/caller have to
+  /// tolerate certain amounts of prototype mismatch.
+  C = 0,
+};
+
+} // namespace CallingConv
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp
new file mode 100644
index 000000000000..89852f29e648
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp
@@ -0,0 +1,327 @@
+//===--- ConstantInitBuilder.cpp - Global initializer builder -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines out-of-line routines for building initializers for
+// global variables, in particular the kind of globals that are implicitly
+// introduced by various language ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ConstantInitBuilder.h"
+#include "CIRGenModule.h"
+
+using namespace clang;
+using namespace cir;
+
+ConstantInitBuilderBase::ConstantInitBuilderBase(CIRGenModule &CGM)
+    : CGM(CGM), builder(CGM.getBuilder()) {}
+
+mlir::Type ConstantInitFuture::getType() const {
+  assert(Data && "dereferencing null future");
+  if (Data.is<mlir::Attribute>()) {
+    auto attr = Data.get<mlir::Attribute>().dyn_cast<mlir::TypedAttr>();
+    assert(attr && "expected typed attribute");
+    return attr.getType();
+  } else {
+    llvm_unreachable("Only support typed attributes here");
+  }
+}
+
+void ConstantInitFuture::abandon() {
+  assert(Data && "abandoning null future");
+  if (auto builder = Data.dyn_cast<ConstantInitBuilderBase *>()) {
+    builder->abandon(0);
+  }
+  Data = nullptr;
+}
+
+void ConstantInitFuture::installInGlobal(mlir::cir::GlobalOp GV) {
+  assert(Data && "installing null future");
+  if (Data.is<mlir::Attribute>()) {
+    CIRGenModule::setInitializer(GV, Data.get<mlir::Attribute>());
+  } else {
+    llvm_unreachable("NYI");
+    // auto &builder = *Data.get<ConstantInitBuilderBase *>();
+    // assert(builder.Buffer.size() == 1);
+    // builder.setGlobalInitializer(GV, builder.Buffer[0]);
+    // builder.Buffer.clear();
+    // Data = nullptr;
+  }
+}
+
+ConstantInitFuture
+ConstantInitBuilderBase::createFuture(mlir::Attribute initializer) {
+  assert(Buffer.empty() && "buffer not currently empty");
+  Buffer.push_back(initializer);
+  return ConstantInitFuture(this);
+}
+
+// Only used in this file.
+inline ConstantInitFuture::ConstantInitFuture(ConstantInitBuilderBase *builder)
+    : Data(builder) {
+  assert(!builder->Frozen);
+  assert(builder->Buffer.size() == 1);
+  assert(builder->Buffer[0] != nullptr);
+}
+
+mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal(
+    mlir::Attribute initializer, const llvm::Twine &name, CharUnits alignment,
+    bool constant, mlir::cir::GlobalLinkageKind linkage,
+    unsigned addressSpace) {
+  llvm_unreachable("NYI");
+  // auto GV =
+  //     new llvm::GlobalVariable(CGM.getModule(), initializer->getType(),
+  //                              constant, linkage, initializer, name,
+  //                              /*insert before*/ nullptr,
+  //                              llvm::GlobalValue::NotThreadLocal,
+  //                              addressSpace);
+  // GV->setAlignment(alignment.getAsAlign());
+  // resolveSelfReferences(GV);
+  // return GV;
+}
+
+void ConstantInitBuilderBase::setGlobalInitializer(
+    mlir::cir::GlobalOp GV, mlir::Attribute initializer) {
+  CIRGenModule::setInitializer(GV, initializer);
+
+  if (!SelfReferences.empty())
+    resolveSelfReferences(GV);
+}
+
+void ConstantInitBuilderBase::resolveSelfReferences(mlir::cir::GlobalOp GV) {
+  llvm_unreachable("NYI");
+  // for (auto &entry : SelfReferences) {
+  //   mlir::Attribute resolvedReference =
+  //       llvm::ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
+  //                                                    entry.Indices);
+  //   auto dummy = entry.Dummy;
+  //   dummy->replaceAllUsesWith(resolvedReference);
+  //   dummy->eraseFromParent();
+  // }
+  // SelfReferences.clear();
+}
+
+void ConstantInitBuilderBase::abandon(size_t newEnd) {
+  llvm_unreachable("NYI");
+  // // Remove all the entries we've added.
+  // Buffer.erase(Buffer.begin() + newEnd, Buffer.end());
+
+  // // If we're abandoning all the way to the beginning, destroy
+  // // all the self-references, because we might not get another
+  // // opportunity.
+  // if (newEnd == 0) {
+  //   for (auto &entry : SelfReferences) {
+  //     auto dummy = entry.Dummy;
+  //     dummy->replaceAllUsesWith(llvm::PoisonValue::get(dummy->getType()));
+  //     dummy->eraseFromParent();
+  //   }
+  //   SelfReferences.clear();
+  // }
+}
+
+void ConstantAggregateBuilderBase::addSize(CharUnits size) {
+  add(Builder.CGM.getSize(size));
+}
+
+mlir::Attribute
+ConstantAggregateBuilderBase::getRelativeOffset(mlir::cir::IntType offsetType,
+                                                mlir::Attribute target) {
+  return getRelativeOffsetToPosition(offsetType, target,
+                                     Builder.Buffer.size() - Begin);
+}
+
+mlir::Attribute ConstantAggregateBuilderBase::getRelativeOffsetToPosition(
+    mlir::cir::IntType offsetType, mlir::Attribute target, size_t position) {
+  llvm_unreachable("NYI");
+  // // Compute the address of the relative-address slot.
+  // auto base = getAddrOfPosition(offsetType, position);
+
+  // // Subtract.
+  // base = llvm::ConstantExpr::getPtrToInt(base, Builder.CGM.IntPtrTy);
+  // target = llvm::ConstantExpr::getPtrToInt(target, Builder.CGM.IntPtrTy);
+  // mlir::Attribute offset = llvm::ConstantExpr::getSub(target, base);
+
+  // // Truncate to the relative-address type if necessary.
+  // if (Builder.CGM.IntPtrTy != offsetType) {
+  //   offset = llvm::ConstantExpr::getTrunc(offset, offsetType);
+  // }
+
+  // return offset;
+}
+
+mlir::Attribute
+ConstantAggregateBuilderBase::getAddrOfPosition(mlir::Type type,
+                                                size_t position) {
+  llvm_unreachable("NYI");
+  // // Make a global variable. We will replace this with a GEP to this
+  // // position after installing the initializer.
+  // auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true,
+  //                                       llvm::GlobalVariable::PrivateLinkage,
+  //                                       nullptr, "");
+  // Builder.SelfReferences.emplace_back(dummy);
+  // auto &entry = Builder.SelfReferences.back();
+  // (void)getGEPIndicesTo(entry.Indices, position + Begin);
+  // return dummy;
+}
+
+mlir::Attribute
+ConstantAggregateBuilderBase::getAddrOfCurrentPosition(mlir::Type type) {
+  llvm_unreachable("NYI");
+  // // Make a global variable. We will replace this with a GEP to this
+  // // position after installing the initializer.
+  // auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true,
+  //                                       llvm::GlobalVariable::PrivateLinkage,
+  //                                       nullptr, "");
+  // Builder.SelfReferences.emplace_back(dummy);
+  // auto &entry = Builder.SelfReferences.back();
+  // (void)getGEPIndicesToCurrentPosition(entry.Indices);
+  // return dummy;
+}
+
+void ConstantAggregateBuilderBase::getGEPIndicesTo(
+    llvm::SmallVectorImpl<mlir::Attribute> &indices, size_t position) const {
+  llvm_unreachable("NYI");
+  // // Recurse on the parent builder if present.
+  // if (Parent) {
+  //   Parent->getGEPIndicesTo(indices, Begin);
+
+  //   // Otherwise, add an index to drill into the first level of pointer.
+  // } else {
+  //   assert(indices.empty());
+  //   indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty, 0));
+  // }
+
+  // assert(position >= Begin);
+  // // We have to use i32 here because struct GEPs demand i32 indices.
+  // // It's rather unlikely to matter in practice.
+  // indices.push_back(
+  //     llvm::ConstantInt::get(Builder.CGM.Int32Ty, position - Begin));
+}
+
+ConstantAggregateBuilderBase::PlaceholderPosition
+ConstantAggregateBuilderBase::addPlaceholderWithSize(mlir::Type type) {
+  llvm_unreachable("NYI");
+  // // Bring the offset up to the last field.
+  // CharUnits offset = getNextOffsetFromGlobal();
+
+  // // Create the placeholder.
+  // auto position = addPlaceholder();
+
+  // // Advance the offset past that field.
+  // auto &layout = Builder.CGM.getDataLayout();
+  // if (!Packed)
+  //   offset =
+  //       offset.alignTo(CharUnits::fromQuantity(layout.getABITypeAlign(type)));
+  // offset += CharUnits::fromQuantity(layout.getTypeStoreSize(type));
+
+  // CachedOffsetEnd = Builder.Buffer.size();
+  // CachedOffsetFromGlobal = offset;
+
+  // return position;
+}
+
+CharUnits
+ConstantAggregateBuilderBase::getOffsetFromGlobalTo(size_t end) const {
+  size_t cacheEnd = CachedOffsetEnd;
+  assert(cacheEnd <= end);
+
+  // Fast path: if the cache is valid, just use it.
+  if (cacheEnd == end) {
+    return CachedOffsetFromGlobal;
+  }
+
+  // If the cached range ends before the index at which the current
+  // aggregate starts, recurse for the parent.
+  CharUnits offset;
+  if (cacheEnd < Begin) {
+    assert(cacheEnd == 0);
+    assert(Parent && "Begin != 0 for root builder");
+    cacheEnd = Begin;
+    offset = Parent->getOffsetFromGlobalTo(Begin);
+  } else {
+    offset = CachedOffsetFromGlobal;
+  }
+
+  // Perform simple layout on the elements in cacheEnd..end.
+  if (cacheEnd != end) {
+    llvm_unreachable("NYI");
+    // auto &layout = Builder.CGM.getDataLayout();
+    // do {
+    //   mlir::Attribute element = Builder.Buffer[cacheEnd];
+    //   assert(element != nullptr &&
+    //          "cannot compute offset when a placeholder is present");
+    //   mlir::Type elementType = element->getType();
+    //   if (!Packed)
+    //     offset = offset.alignTo(
+    //         CharUnits::fromQuantity(layout.getABITypeAlign(elementType)));
+    //   offset +=
+    //       CharUnits::fromQuantity(layout.getTypeStoreSize(elementType));
+    // } while (++cacheEnd != end);
+  }
+
+  // Cache and return.
+  CachedOffsetEnd = cacheEnd;
+  CachedOffsetFromGlobal = offset;
+  return offset;
+}
+
+// FIXME(cir): ideally we should use CIRGenBuilder for both static functions
+// below by threading ConstantAggregateBuilderBase through
+// ConstantAggregateBuilderBase.
+static mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs,
+                                               mlir::cir::ArrayType arrayTy) {
+  return mlir::cir::ConstArrayAttr::get(arrayTy, attrs);
+}
+
+mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) {
+  markFinished();
+
+  auto &buffer = getBuffer();
+  assert((Begin < buffer.size() || (Begin == buffer.size() && eltTy)) &&
+         "didn't add any array elements without element type");
+  auto elts = llvm::ArrayRef(buffer).slice(Begin);
+  if (!eltTy) {
+    llvm_unreachable("NYI");
+    // Uncomment this once we get a testcase.
+    // auto tAttr = elts[0].dyn_cast<mlir::TypedAttr>();
+    // assert(tAttr && "expected typed attribute");
+    // eltTy = tAttr.getType();
+  }
+
+  auto constant = getConstArray(
+      mlir::ArrayAttr::get(eltTy.getContext(), elts),
+      mlir::cir::ArrayType::get(eltTy.getContext(), eltTy, elts.size()));
+  buffer.erase(buffer.begin() + Begin, buffer.end());
+  return constant;
+}
+
+mlir::Attribute
+ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx,
+                                           mlir::cir::StructType ty) {
+  markFinished();
+
+  auto &buffer = getBuffer();
+  auto elts = llvm::ArrayRef(buffer).slice(Begin);
+
+  if (ty == nullptr && elts.empty()) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Attribute constant;
+  if (ty) {
+    llvm_unreachable("NYI");
+    // assert(ty->isPacked() == Packed);
+    // constant = llvm::ConstantStruct::get(ty, elts);
+  } else {
+    const auto members = mlir::ArrayAttr::get(ctx, elts);
+    constant = Builder.CGM.getBuilder().getAnonConstStruct(members, Packed);
+  }
+
+  buffer.erase(buffer.begin() + Begin, buffer.end());
+  return constant;
+}
diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h
new file mode 100644
index 000000000000..7a32aa591182
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h
@@ -0,0 +1,589 @@
+//===- ConstantInitBuilder.h - Builder for CIR attributes -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a convenient interface for building complex
+// global initializers of the sort that are frequently required for
+// language ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CIR_CODEGEN_CONSTANTINITBUILDER_H
+#define LLVM_CLANG_CIR_CODEGEN_CONSTANTINITBUILDER_H
+
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include "CIRGenBuilder.h"
+#include "ConstantInitFuture.h"
+
+#include <map>
+#include <vector>
+
+using namespace clang;
+
+namespace cir {
+
+class CIRGenModule;
+
+/// A convenience builder class for complex constant initializers,
+/// especially for anonymous global structures used by various language
+/// runtimes.
+///
+/// The basic usage pattern is expected to be something like:
+///   ConstantInitBuilder builder(CGM);
+///   auto toplevel = builder.beginStruct();
+///   toplevel.addInt(CGM.SizeTy, widgets.size());
+///   auto widgetArray = builder.beginArray();
+///   for (auto &widget : widgets) {
+///     auto widgetDesc = widgetArray.beginStruct();
+///     widgetDesc.addInt(CGM.SizeTy, widget.getPower());
+///     widgetDesc.add(CGM.GetAddrOfConstantString(widget.getName()));
+///     widgetDesc.add(CGM.GetAddrOfGlobal(widget.getInitializerDecl()));
+///     widgetDesc.finishAndAddTo(widgetArray);
+///   }
+///   widgetArray.finishAndAddTo(toplevel);
+///   auto global = toplevel.finishAndCreateGlobal("WIDGET_LIST", Align,
+///                                                /*constant*/ true);
+class ConstantInitBuilderBase {
+  struct SelfReference {
+    mlir::cir::GlobalOp Dummy;
+    llvm::SmallVector<mlir::Attribute, 4> Indices;
+
+    SelfReference(mlir::cir::GlobalOp dummy) : Dummy(dummy) {}
+  };
+  CIRGenModule &CGM;
+  CIRGenBuilderTy &builder;
+  llvm::SmallVector<mlir::Attribute, 16> Buffer;
+  std::vector<SelfReference> SelfReferences;
+  bool Frozen = false;
+
+  friend class ConstantInitFuture;
+  friend class ConstantAggregateBuilderBase;
+  template <class, class> friend class ConstantAggregateBuilderTemplateBase;
+
+protected:
+  explicit ConstantInitBuilderBase(CIRGenModule &CGM);
+
+  ~ConstantInitBuilderBase() {
+    assert(Buffer.empty() && "didn't claim all values out of buffer");
+    assert(SelfReferences.empty() && "didn't apply all self-references");
+  }
+
+private:
+  mlir::cir::GlobalOp
+  createGlobal(mlir::Attribute initializer, const llvm::Twine &name,
+               CharUnits alignment, bool constant = false,
+               mlir::cir::GlobalLinkageKind linkage =
+                   mlir::cir::GlobalLinkageKind::InternalLinkage,
+               unsigned addressSpace = 0);
+
+  ConstantInitFuture createFuture(mlir::Attribute initializer);
+
+  void setGlobalInitializer(mlir::cir::GlobalOp GV,
+                            mlir::Attribute initializer);
+
+  void resolveSelfReferences(mlir::cir::GlobalOp GV);
+
+  void abandon(size_t newEnd);
+};
+
+/// A concrete base class for struct and array aggregate
+/// initializer builders.
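+///
+/// A minimal sketch of the nesting discipline enforced by the freeze/finish
+/// asserts below (`CGM`, `someAttr`, `eltTy`, and `ctx` are assumed to exist
+/// in the caller):
+/// \code
+///   ConstantInitBuilder builder(CGM);
+///   auto agg = builder.beginStruct();   // freezes `builder`
+///   agg.add(someAttr);                  // appends to the shared buffer
+///   auto inner = agg.beginArray(eltTy); // freezes `agg` until finished
+///   inner.finishAndAddTo(ctx, agg);     // unfreezes `agg` again
+/// \endcode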
+class ConstantAggregateBuilderBase {
+protected:
+  ConstantInitBuilderBase &Builder;
+  ConstantAggregateBuilderBase *Parent;
+  size_t Begin;
+  mutable size_t CachedOffsetEnd = 0;
+  bool Finished = false;
+  bool Frozen = false;
+  bool Packed = false;
+  mutable CharUnits CachedOffsetFromGlobal;
+
+  llvm::SmallVectorImpl<mlir::Attribute> &getBuffer() {
+    return Builder.Buffer;
+  }
+
+  const llvm::SmallVectorImpl<mlir::Attribute> &getBuffer() const {
+    return Builder.Buffer;
+  }
+
+  ConstantAggregateBuilderBase(ConstantInitBuilderBase &builder,
+                               ConstantAggregateBuilderBase *parent)
+      : Builder(builder), Parent(parent), Begin(builder.Buffer.size()) {
+    if (parent) {
+      assert(!parent->Frozen && "parent already has child builder active");
+      parent->Frozen = true;
+    } else {
+      assert(!builder.Frozen && "builder already has child builder active");
+      builder.Frozen = true;
+    }
+  }
+
+  ~ConstantAggregateBuilderBase() {
+    assert(Finished && "didn't finish aggregate builder");
+  }
+
+  void markFinished() {
+    assert(!Frozen && "child builder still active");
+    assert(!Finished && "builder already finished");
+    Finished = true;
+    if (Parent) {
+      assert(Parent->Frozen && "parent not frozen while child builder active");
+      Parent->Frozen = false;
+    } else {
+      assert(Builder.Frozen && "builder not frozen while child builder active");
+      Builder.Frozen = false;
+    }
+  }
+
+public:
+  // Not copyable.
+  ConstantAggregateBuilderBase(const ConstantAggregateBuilderBase &) = delete;
+  ConstantAggregateBuilderBase &
+  operator=(const ConstantAggregateBuilderBase &) = delete;
+
+  // Movable, mostly to allow returning. But we have to write this out
+  // properly to satisfy the assert in the destructor.
+  ConstantAggregateBuilderBase(ConstantAggregateBuilderBase &&other)
+      : Builder(other.Builder), Parent(other.Parent), Begin(other.Begin),
+        CachedOffsetEnd(other.CachedOffsetEnd), Finished(other.Finished),
+        Frozen(other.Frozen), Packed(other.Packed),
+        CachedOffsetFromGlobal(other.CachedOffsetFromGlobal) {
+    other.Finished = true;
+  }
+  ConstantAggregateBuilderBase &
+  operator=(ConstantAggregateBuilderBase &&other) = delete;
+
+  /// Return the number of elements that have been added to
+  /// this struct or array.
+  size_t size() const {
+    assert(!this->Finished && "cannot query after finishing builder");
+    assert(!this->Frozen && "cannot query while sub-builder is active");
+    assert(this->Begin <= this->getBuffer().size());
+    return this->getBuffer().size() - this->Begin;
+  }
+
+  /// Return true if no elements have yet been added to this struct or array.
+  bool empty() const { return size() == 0; }
+
+  /// Abandon this builder completely.
+  void abandon() {
+    markFinished();
+    Builder.abandon(Begin);
+  }
+
+  /// Add a new value to this initializer.
+  void add(mlir::Attribute value) {
+    assert(value && "adding null value to constant initializer");
+    assert(!Finished && "cannot add more values after finishing builder");
+    assert(!Frozen && "cannot add values while subbuilder is active");
+    Builder.Buffer.push_back(value);
+  }
+
+  /// Add an integer value of type size_t.
+  void addSize(CharUnits size);
+
+  /// Add an integer value of a specific type.
+  void addInt(mlir::cir::IntType intTy, uint64_t value,
+              bool isSigned = false) {
+    add(mlir::IntegerAttr::get(
+        intTy, llvm::APInt{intTy.getWidth(), value, isSigned}));
+  }
+
+  /// Add a pointer of a specific type.
+  void addPointer(mlir::cir::PointerType ptrTy, uint64_t value) {
+    add(mlir::cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, value));
+  }
+
+  /// Add a bitcast of a value to a specific type.
+  void addBitCast(mlir::Attribute value, mlir::Type type) {
+    llvm_unreachable("NYI");
+    // add(llvm::ConstantExpr::getBitCast(value, type));
+  }
+
+  /// Add a bunch of new values to this initializer.
+  void addAll(llvm::ArrayRef<mlir::Attribute> values) {
+    assert(!Finished && "cannot add more values after finishing builder");
+    assert(!Frozen && "cannot add values while subbuilder is active");
+    Builder.Buffer.append(values.begin(), values.end());
+  }
+
+  /// Add a relative offset to the given target address, i.e. the
+  /// static difference between the target address and the address
+  /// of the relative offset. The target must be known to be defined
+  /// in the current linkage unit. The offset will have the given
+  /// integer type, which must be no wider than intptr_t. Some
+  /// targets may not fully support this operation.
+  void addRelativeOffset(mlir::cir::IntType type, mlir::Attribute target) {
+    llvm_unreachable("NYI");
+    // add(getRelativeOffset(type, target));
+  }
+
+  /// Same as addRelativeOffset(), but instead relative to an element in this
+  /// aggregate, identified by its index.
+  void addRelativeOffsetToPosition(mlir::cir::IntType type,
+                                   mlir::Attribute target, size_t position) {
+    llvm_unreachable("NYI");
+    // add(getRelativeOffsetToPosition(type, target, position));
+  }
+
+  /// Add a relative offset to the target address, plus a small
+  /// constant offset. This is primarily useful when the relative
+  /// offset is known to be a multiple of (say) four and therefore
+  /// the tag can be used to express an extra two bits of information.
+  void addTaggedRelativeOffset(mlir::cir::IntType type,
+                               mlir::Attribute address, unsigned tag) {
+    llvm_unreachable("NYI");
+    // mlir::Attribute offset = getRelativeOffset(type, address);
+    // if (tag) {
+    //   offset = llvm::ConstantExpr::getAdd(
+    //       offset, llvm::ConstantInt::get(type, tag));
+    // }
+    // add(offset);
+  }
+
+  /// Return the offset from the start of the initializer to the
+  /// next position, assuming no padding is required prior to it.
+  ///
+  /// This operation will not succeed if any unsized placeholders are
+  /// currently in place in the initializer.
+  CharUnits getNextOffsetFromGlobal() const {
+    assert(!Finished && "cannot add more values after finishing builder");
+    assert(!Frozen && "cannot add values while subbuilder is active");
+    return getOffsetFromGlobalTo(Builder.Buffer.size());
+  }
+
+  /// An opaque class to hold the abstract position of a placeholder.
+  class PlaceholderPosition {
+    size_t Index;
+    friend class ConstantAggregateBuilderBase;
+    PlaceholderPosition(size_t index) : Index(index) {}
+  };
+
+  /// Add a placeholder value to the structure. The returned position
+  /// can be used to set the value later; it will not be invalidated by
+  /// any intermediate operations except (1) filling the same position or
+  /// (2) finishing the entire builder.
+  ///
+  /// This is useful for emitting certain kinds of structure which
+  /// contain some sort of summary field, generally a count, before any
+  /// of the data. By emitting a placeholder first, the structure can
+  /// be emitted eagerly.
+  PlaceholderPosition addPlaceholder() {
+    assert(!Finished && "cannot add more values after finishing builder");
+    assert(!Frozen && "cannot add values while subbuilder is active");
+    Builder.Buffer.push_back(nullptr);
+    return Builder.Buffer.size() - 1;
+  }
+
+  /// Add a placeholder, giving the expected type that will be filled in.
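+  ///
+  /// A sketch of the count-then-payload pattern described above (the struct
+  /// builder `desc`, the CIR int type `sizeTy`, and the `lowerEntry` helper
+  /// are assumed to exist in the caller):
+  /// \code
+  ///   auto countSlot = desc.addPlaceholder(); // reserve the summary field
+  ///   for (auto &e : entries)
+  ///     desc.add(lowerEntry(e));              // emit the payload eagerly
+  ///   desc.fillPlaceholderWithInt(countSlot, sizeTy, entries.size());
+  /// \endcode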
+  PlaceholderPosition addPlaceholderWithSize(mlir::Type expectedType);
+
+  /// Fill a previously-added placeholder.
+  void fillPlaceholderWithInt(PlaceholderPosition position,
+                              mlir::cir::IntType type, uint64_t value,
+                              bool isSigned = false) {
+    llvm_unreachable("NYI");
+    // fillPlaceholder(position, llvm::ConstantInt::get(type, value, isSigned));
+  }
+
+  /// Fill a previously-added placeholder.
+  void fillPlaceholder(PlaceholderPosition position, mlir::Attribute value) {
+    assert(!Finished && "cannot change values after finishing builder");
+    assert(!Frozen && "cannot add values while subbuilder is active");
+    mlir::Attribute &slot = Builder.Buffer[position.Index];
+    assert(slot == nullptr && "placeholder already filled");
+    slot = value;
+  }
+
+  /// Produce an address which will eventually point to the next
+  /// position to be filled. This is computed with an indexed
+  /// getelementptr rather than by computing offsets.
+  ///
+  /// The returned pointer will have type T*, where T is the given type. This
+  /// type can differ from the type of the actual element.
+  mlir::Attribute getAddrOfCurrentPosition(mlir::Type type);
+
+  /// Produce an address which points to a position in the aggregate being
+  /// constructed. This is computed with an indexed getelementptr rather than
+  /// by computing offsets.
+  ///
+  /// The returned pointer will have type T*, where T is the given type. This
+  /// type can differ from the type of the actual element.
+  mlir::Attribute getAddrOfPosition(mlir::Type type, size_t position);
+
+  llvm::ArrayRef<mlir::Attribute> getGEPIndicesToCurrentPosition(
+      llvm::SmallVectorImpl<mlir::Attribute> &indices) {
+    getGEPIndicesTo(indices, Builder.Buffer.size());
+    return indices;
+  }
+
+protected:
+  mlir::Attribute finishArray(mlir::Type eltTy);
+  mlir::Attribute finishStruct(mlir::MLIRContext *ctx,
+                               mlir::cir::StructType structTy);
+
+private:
+  void getGEPIndicesTo(llvm::SmallVectorImpl<mlir::Attribute> &indices,
+                       size_t position) const;
+
+  mlir::Attribute getRelativeOffset(mlir::cir::IntType offsetType,
+                                    mlir::Attribute target);
+
+  mlir::Attribute getRelativeOffsetToPosition(mlir::cir::IntType offsetType,
+                                              mlir::Attribute target,
+                                              size_t position);
+
+  CharUnits getOffsetFromGlobalTo(size_t index) const;
+};
+
+template <class Impl, class Traits>
+class ConstantAggregateBuilderTemplateBase
+    : public Traits::AggregateBuilderBase {
+  using super = typename Traits::AggregateBuilderBase;
+
+public:
+  using InitBuilder = typename Traits::InitBuilder;
+  using ArrayBuilder = typename Traits::ArrayBuilder;
+  using StructBuilder = typename Traits::StructBuilder;
+  using AggregateBuilderBase = typename Traits::AggregateBuilderBase;
+
+protected:
+  ConstantAggregateBuilderTemplateBase(InitBuilder &builder,
+                                       AggregateBuilderBase *parent)
+      : super(builder, parent) {}
+
+  Impl &asImpl() { return *static_cast<Impl *>(this); }
+
+public:
+  ArrayBuilder beginArray(mlir::Type eltTy = nullptr) {
+    return ArrayBuilder(static_cast<InitBuilder &>(this->Builder), this,
+                        eltTy);
+  }
+
+  StructBuilder beginStruct(mlir::cir::StructType ty = nullptr) {
+    return StructBuilder(static_cast<InitBuilder &>(this->Builder), this, ty);
+  }
+
+  /// Given that this builder was created by beginning an array or struct
+  /// component on the given parent builder, finish the array/struct
+  /// component and add it to the parent.
+  ///
+  /// It is an intentional choice that the parent is passed in explicitly
+  /// despite it being redundant with information already kept in the
+  /// builder. This aids in readability by making it easier to find the
+  /// places that add components to a builder, as well as "bookending"
+  /// the sub-builder more explicitly.
+  void finishAndAddTo(mlir::MLIRContext *ctx, AggregateBuilderBase &parent) {
+    assert(this->Parent == &parent && "adding to non-parent builder");
+    parent.add(asImpl().finishImpl(ctx));
+  }
+
+  /// Given that this builder was created by beginning an array or struct
+  /// directly on a ConstantInitBuilder, finish the array/struct and
+  /// create a global variable with it as the initializer.
+  template <class... As>
+  mlir::cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *ctx,
+                                            As &&...args) {
+    assert(!this->Parent && "finishing non-root builder");
+    return this->Builder.createGlobal(asImpl().finishImpl(ctx),
+                                      std::forward<As>(args)...);
+  }
+
+  /// Given that this builder was created by beginning an array or struct
+  /// directly on a ConstantInitBuilder, finish the array/struct and
+  /// set it as the initializer of the given global variable.
+  void finishAndSetAsInitializer(mlir::cir::GlobalOp global,
+                                 bool forVTable = false) {
+    assert(!this->Parent && "finishing non-root builder");
+    mlir::Attribute init = asImpl().finishImpl(global.getContext());
+    auto initCSA = init.dyn_cast<mlir::cir::ConstStructAttr>();
+    assert(initCSA &&
+           "expected #cir.const_struct attribute to represent vtable data");
+    return this->Builder.setGlobalInitializer(
+        global, forVTable ? mlir::cir::VTableAttr::get(initCSA.getType(),
+                                                       initCSA.getMembers())
+                          : init);
+  }
+
+  /// Given that this builder was created by beginning an array or struct
+  /// directly on a ConstantInitBuilder, finish the array/struct and
+  /// return a future which can be used to install the initializer in
+  /// a global later.
+  ///
+  /// This is useful for allowing a finished initializer to be passed to
+  /// an API which will build the global. However, the "future" preserves
+  /// a dependency on the original builder; it is an error to pass it aside.
+  ConstantInitFuture finishAndCreateFuture(mlir::MLIRContext *ctx) {
+    assert(!this->Parent && "finishing non-root builder");
+    return this->Builder.createFuture(asImpl().finishImpl(ctx));
+  }
+};
+
+template <class Traits>
+class ConstantArrayBuilderTemplateBase
+    : public ConstantAggregateBuilderTemplateBase<
+          typename Traits::ArrayBuilder, Traits> {
+  using super =
+      ConstantAggregateBuilderTemplateBase<typename Traits::ArrayBuilder,
+                                           Traits>;
+
+public:
+  using InitBuilder = typename Traits::InitBuilder;
+  using AggregateBuilderBase = typename Traits::AggregateBuilderBase;
+
+private:
+  mlir::Type EltTy;
+
+  template <class, class> friend class ConstantAggregateBuilderTemplateBase;
+
+protected:
+  ConstantArrayBuilderTemplateBase(InitBuilder &builder,
+                                   AggregateBuilderBase *parent,
+                                   mlir::Type eltTy)
+      : super(builder, parent), EltTy(eltTy) {}
+
+private:
+  /// Form an array constant from the values that have been added to this
+  /// builder.
+  mlir::Attribute finishImpl([[maybe_unused]] mlir::MLIRContext *ctx) {
+    return AggregateBuilderBase::finishArray(EltTy);
+  }
+};
+
+/// A template class designed to allow other frontends to
+/// easily customize the builder classes used by ConstantInitBuilder,
+/// and thus to extend the API to work with the abstractions they
+/// prefer. This would probably not be necessary if C++ just
+/// supported extension methods.
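+///
+/// For illustration, a frontend-side traits bundle would mirror the
+/// ConstantInitBuilderTraits definition further below (the `My*` names here
+/// are hypothetical):
+/// \code
+///   struct MyBuilderTraits {
+///     using InitBuilder = MyInitBuilder;
+///     using AggregateBuilderBase = ConstantAggregateBuilderBase;
+///     using ArrayBuilder = MyArrayBuilder;
+///     using StructBuilder = MyStructBuilder;
+///   };
+/// \endcode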
+template <class Traits>
+class ConstantStructBuilderTemplateBase
+    : public ConstantAggregateBuilderTemplateBase<
+          typename Traits::StructBuilder, Traits> {
+  using super =
+      ConstantAggregateBuilderTemplateBase<typename Traits::StructBuilder,
+                                           Traits>;
+
+public:
+  using InitBuilder = typename Traits::InitBuilder;
+  using AggregateBuilderBase = typename Traits::AggregateBuilderBase;
+
+private:
+  mlir::cir::StructType StructTy;
+
+  template <class, class> friend class ConstantAggregateBuilderTemplateBase;
+
+protected:
+  ConstantStructBuilderTemplateBase(InitBuilder &builder,
+                                    AggregateBuilderBase *parent,
+                                    mlir::cir::StructType structTy)
+      : super(builder, parent), StructTy(structTy) {
+    if (structTy) {
+      llvm_unreachable("NYI");
+      // this->Packed = structTy->isPacked();
+    }
+  }
+
+public:
+  void setPacked(bool packed) { this->Packed = packed; }
+
+  /// Use the given type for the struct if its element count is correct.
+  /// Don't add more elements after calling this.
+  void suggestType(mlir::cir::StructType structTy) {
+    if (this->size() == structTy.getNumElements()) {
+      StructTy = structTy;
+    }
+  }
+
+private:
+  /// Form a struct constant from the values that have been added to this
+  /// builder.
+  mlir::Attribute finishImpl(mlir::MLIRContext *ctx) {
+    return AggregateBuilderBase::finishStruct(ctx, StructTy);
+  }
+};
+
+/// A template class designed to allow other frontends to
+/// easily customize the builder classes used by ConstantInitBuilder,
+/// and thus to extend the API to work with the abstractions they
+/// prefer. This would probably not be necessary if C++ just
+/// supported extension methods.
+template <class Traits>
+class ConstantInitBuilderTemplateBase : public ConstantInitBuilderBase {
+protected:
+  ConstantInitBuilderTemplateBase(CIRGenModule &CGM)
+      : ConstantInitBuilderBase(CGM) {}
+
+public:
+  using InitBuilder = typename Traits::InitBuilder;
+  using ArrayBuilder = typename Traits::ArrayBuilder;
+  using StructBuilder = typename Traits::StructBuilder;
+
+  ArrayBuilder beginArray(mlir::Type eltTy = nullptr) {
+    return ArrayBuilder(static_cast<InitBuilder &>(*this), nullptr, eltTy);
+  }
+
+  StructBuilder beginStruct(mlir::cir::StructType structTy = nullptr) {
+    return StructBuilder(static_cast<InitBuilder &>(*this), nullptr, structTy);
+  }
+};
+
+class ConstantInitBuilder;
+class ConstantStructBuilder;
+class ConstantArrayBuilder;
+
+struct ConstantInitBuilderTraits {
+  using InitBuilder = ConstantInitBuilder;
+  using AggregateBuilderBase = ConstantAggregateBuilderBase;
+  using ArrayBuilder = ConstantArrayBuilder;
+  using StructBuilder = ConstantStructBuilder;
+};
+
+/// The standard implementation of ConstantInitBuilder used in Clang.
+class ConstantInitBuilder
+    : public ConstantInitBuilderTemplateBase<ConstantInitBuilderTraits> {
+public:
+  explicit ConstantInitBuilder(CIRGenModule &CGM)
+      : ConstantInitBuilderTemplateBase(CGM) {}
+};
+
+/// A helper class of ConstantInitBuilder, used for building constant
+/// array initializers.
+class ConstantArrayBuilder
+    : public ConstantArrayBuilderTemplateBase<ConstantInitBuilderTraits> {
+  template <class Traits> friend class ConstantInitBuilderTemplateBase;
+
+  // The use of explicit qualification is a GCC workaround.
+  template <class Impl, class Traits>
+  friend class cir::ConstantAggregateBuilderTemplateBase;
+
+  ConstantArrayBuilder(ConstantInitBuilder &builder,
+                       ConstantAggregateBuilderBase *parent, mlir::Type eltTy)
+      : ConstantArrayBuilderTemplateBase(builder, parent, eltTy) {}
+};
+
+/// A helper class of ConstantInitBuilder, used for building constant
+/// struct initializers.
+class ConstantStructBuilder
+    : public ConstantStructBuilderTemplateBase<ConstantInitBuilderTraits> {
+  template <class Traits> friend class ConstantInitBuilderTemplateBase;
+
+  // The use of explicit qualification is a GCC workaround.
+  template <class Impl, class Traits>
+  friend class cir::ConstantAggregateBuilderTemplateBase;
+
+  ConstantStructBuilder(ConstantInitBuilder &builder,
+                        ConstantAggregateBuilderBase *parent,
+                        mlir::cir::StructType structTy)
+      : ConstantStructBuilderTemplateBase(builder, parent, structTy) {}
+};
+
+} // end namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/ConstantInitFuture.h b/clang/lib/CIR/CodeGen/ConstantInitFuture.h
new file mode 100644
index 000000000000..97631d5da88c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/ConstantInitFuture.h
@@ -0,0 +1,102 @@
+//===- ConstantInitFuture.h - "Future" constant initializers ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class defines the ConstantInitFuture class. This is split out
+// from ConstantInitBuilder.h in order to allow APIs to work with it
+// without having to include that entire header. This is particularly
+// important because it is often useful to be able to default-construct
+// a future in, say, a default argument.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CIR_CODEGEN_CONSTANTINITFUTURE_H
+#define LLVM_CLANG_CIR_CODEGEN_CONSTANTINITFUTURE_H
+
+#include "mlir/IR/Attributes.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "llvm/ADT/PointerUnion.h"
+
+// Forward-declare ConstantInitBuilderBase and give it a
+// PointerLikeTypeTraits specialization so that we can safely use it
+// in a PointerUnion below.
+namespace cir {
+class ConstantInitBuilderBase;
+} // namespace cir
+
+namespace llvm {
+template <> struct PointerLikeTypeTraits<::cir::ConstantInitBuilderBase *> {
+  using T = ::cir::ConstantInitBuilderBase *;
+
+  static inline void *getAsVoidPointer(T p) { return p; }
+  static inline T getFromVoidPointer(void *p) { return static_cast<T>(p); }
+  static constexpr int NumLowBitsAvailable = 2;
+};
+} // namespace llvm
+
+namespace cir {
+
+/// A "future" for a completed constant initializer, which can be passed
+/// around independently of any sub-builders (but not the original parent).
+class ConstantInitFuture {
+  using PairTy =
+      llvm::PointerUnion<ConstantInitBuilderBase *, mlir::Attribute>;
+
+  PairTy Data;
+
+  friend class ConstantInitBuilderBase;
+  explicit ConstantInitFuture(ConstantInitBuilderBase *builder);
+
+public:
+  ConstantInitFuture() {}
+
+  /// A future can be explicitly created from a fixed initializer.
+  explicit ConstantInitFuture(mlir::Attribute initializer)
+      : Data(initializer) {
+    assert(initializer && "creating null future");
+  }
+
+  /// Is this future non-null?
+  explicit operator bool() const { return bool(Data); }
+
+  /// Return the type of the initializer.
+  mlir::Type getType() const;
+
+  /// Abandon this initializer.
+  void abandon();
+
+  /// Install the initializer into a global variable. This cannot
+  /// be called multiple times.
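+  ///
+  /// A minimal sketch of the intended flow (the global op `gv` and the
+  /// attribute `init` are assumed to exist in the caller):
+  /// \code
+  ///   ConstantInitFuture future(init); // wrap a finished initializer
+  ///   if (future)
+  ///     future.installInGlobal(gv);    // may only be done once (see above)
+  /// \endcode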
+  void installInGlobal(mlir::cir::GlobalOp global);
+
+  void *getOpaqueValue() const { return Data.getOpaqueValue(); }
+  static ConstantInitFuture getFromOpaqueValue(void *value) {
+    ConstantInitFuture result;
+    result.Data = PairTy::getFromOpaqueValue(value);
+    return result;
+  }
+  static constexpr int NumLowBitsAvailable =
+      llvm::PointerLikeTypeTraits<PairTy>::NumLowBitsAvailable;
+};
+
+} // namespace cir
+
+namespace llvm {
+
+template <> struct PointerLikeTypeTraits<::cir::ConstantInitFuture> {
+  using T = ::cir::ConstantInitFuture;
+
+  static inline void *getAsVoidPointer(T future) {
+    return future.getOpaqueValue();
+  }
+  static inline T getFromVoidPointer(void *p) {
+    return T::getFromOpaqueValue(p);
+  }
+  static constexpr int NumLowBitsAvailable = T::NumLowBitsAvailable;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
new file mode 100644
index 000000000000..5ab356df319f
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -0,0 +1,421 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIRGEN_EHSCOPESTACK_H
+#define LLVM_CLANG_LIB_CIRGEN_EHSCOPESTACK_H
+
+#include "mlir/IR/Value.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace cir {
+
+class CIRGenFunction;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+  // /// The block containing the terminator which needs to be modified
+  // /// into a switch if this fixup is resolved into the current scope.
+  // /// If null, LatestBranch points directly to the destination.
+  // llvm::BasicBlock *OptimisticBranchBlock;
+
+  // /// The ultimate destination of the branch.
+  // ///
+  // /// This can be set to null to indicate that this fixup was
+  // /// successfully resolved.
+  // llvm::BasicBlock *Destination;
+
+  // /// The destination index value.
+  // unsigned DestinationIndex;
+
+  // /// The initial branch of the fixup.
+  // llvm::BranchInst *InitialBranch;
+};
+
+template <class T> struct InvariantValue {
+  typedef T type;
+  typedef T saved_type;
+  static bool needsSaving(type value) { return false; }
+  static saved_type save(CIRGenFunction &CGF, type value) { return value; }
+  static type restore(CIRGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+                       (std::is_base_of<mlir::Value, T>::value ||
+                        std::is_base_of<mlir::Operation, T>::value) &&
+                       !std::is_base_of<mlir::Attribute, T>::value &&
+                       !std::is_base_of<mlir::Block, T>::value>
+struct DominatingPointer;
+template <class T>
+struct DominatingPointer<T, false> : InvariantValue<T> {};
+// template <class T> struct DominatingPointer<T, true> at end of file
+
+template <class T> struct DominatingValue<T *> : DominatingPointer<T *> {};
+
+enum CleanupKind : unsigned {
+  /// Denotes a cleanup that should run when a scope is exited using
+  /// exceptional control flow (a throw statement leading to stack unwinding).
+  EHCleanup = 0x1,
+
+  /// Denotes a cleanup that should run when a scope is exited using normal
+  /// control flow (falling off the end of the scope, return, goto, ...).
+  NormalCleanup = 0x2,
+
+  NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+  LifetimeMarker = 0x8,
+  NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+  /* Should switch to alignof(uint64_t) instead of 8, when EHCleanupScope can */
+  enum { ScopeStackAlignment = 8 };
+
+  /// A saved depth on the scope stack. This is necessary because
+  /// pushing scopes onto the stack invalidates iterators.
+  class stable_iterator {
+    friend class EHScopeStack;
+
+    /// Offset from StartOfData to EndOfBuffer.
+    ptrdiff_t Size;
+
+    stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+  public:
+    static stable_iterator invalid() { return stable_iterator(-1); }
+    stable_iterator() : Size(-1) {}
+
+    bool isValid() const { return Size >= 0; }
+
+    /// Returns true if this scope encloses I.
+    /// Returns false if I is invalid.
+    /// This scope must be valid.
+    bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+    /// Returns true if this scope strictly encloses I: that is,
+    /// if it encloses I and is not I.
+    /// Returns false if I is invalid.
+    /// This scope must be valid.
+    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
+    friend bool operator==(stable_iterator A, stable_iterator B) {
+      return A.Size == B.Size;
+    }
+    friend bool operator!=(stable_iterator A, stable_iterator B) {
+      return A.Size != B.Size;
+    }
+  };
+
+  /// Information for lazily generating a cleanup. Subclasses must be
+  /// POD-like: cleanups will not be destructed, and they will be
+  /// allocated on the cleanup stack and freely copied and moved
+  /// around.
+  ///
+  /// Cleanup implementations should generally be declared in an
+  /// anonymous namespace.
+  class Cleanup {
+    // Anchor the construction vtable.
+    virtual void anchor();
+
+  protected:
+    ~Cleanup() = default;
+
+  public:
+    Cleanup(const Cleanup &) = default;
+    Cleanup(Cleanup &&) {}
+    Cleanup() = default;
+
+    virtual bool isRedundantBeforeReturn() { return false; }
+
+    /// Generation flags.
+    class Flags {
+      enum {
+        F_IsForEH = 0x1,
+        F_IsNormalCleanupKind = 0x2,
+        F_IsEHCleanupKind = 0x4,
+        F_HasExitSwitch = 0x8,
+      };
+      unsigned flags;
+
+    public:
+      Flags() : flags(0) {}
+
+      /// isForEH - true if the current emission is for an EH cleanup.
+      bool isForEHCleanup() const { return flags & F_IsForEH; }
+      bool isForNormalCleanup() const { return !isForEHCleanup(); }
+      void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+      bool isNormalCleanupKind() const {
+        return flags & F_IsNormalCleanupKind;
+      }
+      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+      /// isEHCleanupKind - true if the cleanup was pushed as an EH
+      /// cleanup.
+      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+
+      bool hasExitSwitch() const { return flags & F_HasExitSwitch; }
+      void setHasExitSwitch() { flags |= F_HasExitSwitch; }
+    };
+
+    /// Emit the cleanup. For normal cleanups, this is run in the
+    /// same EH context as when the cleanup was pushed, i.e. the
+    /// immediately-enclosing context of the cleanup scope. For
+    /// EH cleanups, this is run in a terminate context.
+    ///
+    /// \param flags cleanup kind.
+    virtual void Emit(CIRGenFunction &CGF, Flags flags) = 0;
+  };
+
+  /// ConditionalCleanup stores the saved form of its parameters,
+  /// then restores them and performs the cleanup.
+  template <class T, class... As>
+  class ConditionalCleanup final : public Cleanup {
+    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+    SavedTuple Saved;
+
+    template <std::size_t... Indices>
+    T restore(CIRGenFunction &CGF, std::index_sequence<Indices...>) {
+      // It's important that the restores are emitted in order. The braced
+      // init list guarantees that.
+      return T{DominatingValue<As>::restore(CGF, std::get<Indices>(Saved))...};
+    }
+
+    void Emit(CIRGenFunction &CGF, Flags flags) override {
+      restore(CGF, std::index_sequence_for<As...>()).Emit(CGF, flags);
+    }
+
+  public:
+    ConditionalCleanup(typename DominatingValue<As>::saved_type... A)
+        : Saved(A...) {}
+
+    ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {}
+  };
+
+private:
+  // The implementation for this class is in CGException.h and
+  // CGException.cpp; the definition is here because it's used as a
+  // member of CIRGenFunction.
+
+  /// The start of the scope-stack buffer, i.e. the allocated pointer
+  /// for the buffer. All of these pointers are either simultaneously
+  /// null or simultaneously valid.
+  char *StartOfBuffer;
+
+  /// The end of the buffer.
+  char *EndOfBuffer;
+
+  /// The first valid entry in the buffer.
+  char *StartOfData;
+
+  /// The innermost normal cleanup on the stack.
+  stable_iterator InnermostNormalCleanup;
+
+  /// The innermost EH scope on the stack.
+  stable_iterator InnermostEHScope;
+
+  /// The CGF this stack belongs to.
+  CIRGenFunction *CGF;
+
+  /// The current set of branch fixups. A branch fixup is a jump to
+  /// an as-yet unemitted label, i.e. a label for which we don't yet
+  /// know the EH stack depth. Whenever we pop a cleanup, we have
+  /// to thread all the current branch fixups through it.
+  ///
+  /// Fixups are recorded as the Use of the respective branch or
+  /// switch statement. The use points to the final destination.
+  /// When popping out of a cleanup, these uses are threaded through
+  /// the cleanup and adjusted to point to the new cleanup.
+  ///
+  /// Note that branches are allowed to jump into protected scopes
+  /// in certain situations; e.g. the following code is legal:
+  ///   struct A { ~A(); }; // trivial ctor, non-trivial dtor
+  ///   goto foo;
+  ///   A a;
+  /// foo:
+  ///   bar();
+  llvm::SmallVector<BranchFixup, 8> BranchFixups;
+
+  char *allocate(size_t Size);
+  void deallocate(size_t Size);
+
+  void *pushCleanup(CleanupKind K, size_t DataSize);
+
+public:
+  EHScopeStack()
+      : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr),
+        InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()),
+        CGF(nullptr) {}
+  ~EHScopeStack() { delete[] StartOfBuffer; }
+
+  /// Push a lazily-created cleanup on the stack.
+  template <class T, class... As>
+  void pushCleanup(CleanupKind Kind, As... A) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *Buffer = pushCleanup(Kind, sizeof(T));
+    Cleanup *Obj = new (Buffer) T(A...);
+    (void)Obj;
+  }
+
+  /// Push a lazily-created cleanup on the stack. Tuple version.
+  template <class T, class... As>
+  void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *Buffer = pushCleanup(Kind, sizeof(T));
+    Cleanup *Obj = new (Buffer) T(std::move(A));
+    (void)Obj;
+  }
+
+  // Feel free to add more variants of the following:
+
+  /// Push a cleanup with non-constant storage requirements on the
+  /// stack. The cleanup type must provide an additional static method:
+  ///   static size_t getExtraSize(size_t);
+  /// The argument to this method will be the value N, which will also
+  /// be passed as the first argument to the constructor.
+  ///
+  /// The data stored in the extra storage must obey the same
+  /// restrictions as normal cleanup member data.
+  ///
+  /// The pointer returned from this method is valid until the cleanup
+  /// stack is modified.
+  template <class T, class... As>
+  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+    return new (Buffer) T(N, A...);
+  }
+
+  void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) {
+    void *Buffer = pushCleanup(Kind, Size);
+    std::memcpy(Buffer, Cleanup, Size);
+  }
+
+  void setCGF(CIRGenFunction *inCGF) { CGF = inCGF; }
+
+  /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
+  void popCleanup();
+
+  /// Push a set of catch handlers on the stack. The catch is
+  /// uninitialized and will need to have the given number of handlers
+  /// set on it.
+  class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+  /// Pops a catch scope off the stack. This is private to CGException.cpp.
+  void popCatch();
+
+  /// Push an exceptions filter on the stack.
+  class EHFilterScope *pushFilter(unsigned NumFilters);
+
+  /// Pops an exceptions filter off the stack.
+  void popFilter();
+
+  /// Push a terminate handler on the stack.
+  void pushTerminate();
+
+  /// Pops a terminate handler off the stack.
+  void popTerminate();
+
+  // Returns true iff the current scope is either empty or contains only
+  // lifetime markers, i.e. no real cleanup code
+  bool containsOnlyLifetimeMarkers(stable_iterator Old) const;
+
+  /// Determines whether the exception-scopes stack is empty.
+  bool empty() const { return StartOfData == EndOfBuffer; }
+
+  bool requiresLandingPad() const;
+
+  /// Determines whether there are any normal cleanups on the stack.
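+  ///
+  /// For orientation, a typical emit-time pattern pairs a push with the
+  /// corresponding pop (the cleanup type and its argument here are
+  /// hypothetical):
+  /// \code
+  ///   EHStack.pushCleanup<CallDtorCleanup>(NormalAndEHCleanup, addr);
+  ///   // ... emit the scope body ...
+  ///   // then thread fixups and pop the scope via popCleanup()
+  /// \endcode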
+  bool hasNormalCleanups() const {
+    return InnermostNormalCleanup != stable_end();
+  }
+
+  /// Returns the innermost normal cleanup on the stack, or
+  /// stable_end() if there are no normal cleanups.
+  stable_iterator getInnermostNormalCleanup() const {
+    return InnermostNormalCleanup;
+  }
+  stable_iterator getInnermostActiveNormalCleanup() const;
+
+  stable_iterator getInnermostEHScope() const { return InnermostEHScope; }
+
+  /// An unstable reference to a scope-stack depth. Invalidated by
+  /// pushes but not pops.
+  class iterator;
+
+  /// Returns an iterator pointing to the innermost EH scope.
+  iterator begin() const;
+
+  /// Returns an iterator pointing to the outermost EH scope.
+  iterator end() const;
+
+  /// Create a stable reference to the top of the EH stack. The
+  /// returned reference is valid until that scope is popped off the
+  /// stack.
+  stable_iterator stable_begin() const {
+    return stable_iterator(EndOfBuffer - StartOfData);
+  }
+
+  /// Create a stable reference to the bottom of the EH stack.
+  static stable_iterator stable_end() { return stable_iterator(0); }
+
+  /// Translates an iterator into a stable_iterator.
+  stable_iterator stabilize(iterator it) const;
+
+  /// Turn a stable reference to a scope depth into an unstable pointer
+  /// to the EH stack.
+  iterator find(stable_iterator save) const;
+
+  /// Add a branch fixup to the current cleanup scope.
+  BranchFixup &addBranchFixup() {
+    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+    BranchFixups.push_back(BranchFixup());
+    return BranchFixups.back();
+  }
+
+  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+  BranchFixup &getBranchFixup(unsigned I) {
+    assert(I < getNumBranchFixups());
+    return BranchFixups[I];
+  }
+
+  /// Pops lazily-removed fixups from the end of the list. This
+  /// should only be called by procedures which have just popped a
+  /// cleanup or resolved one or more fixups.
+  void popNullFixups();
+
+  /// Clears the branch-fixups list. This should only be called by
+  /// ResolveAllBranchFixups.
+  void clearFixups() { BranchFixups.clear(); }
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp
new file mode 100644
index 000000000000..dc5ab92b4121
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp
@@ -0,0 +1,460 @@
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunctionInfo.h"
+#include "CIRGenTypes.h"
+#include "CallingConv.h"
+
+#include "clang/Basic/TargetInfo.h"
+
+using namespace cir;
+using namespace clang;
+
+static bool testIfIsVoidTy(QualType Ty) {
+  const auto *BT = Ty->getAs<BuiltinType>();
+  if (!BT)
+    return false;
+
+  BuiltinType::Kind k = BT->getKind();
+  return k == BuiltinType::Void;
+}
+
+//===----------------------------------------------------------------------===//
+// AArch64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AArch64ABIInfo : public ABIInfo {
+public:
+  enum ABIKind { AAPCS = 0, DarwinPCS, Win64 };
+
+private:
+  ABIKind Kind;
+
+public:
+  AArch64ABIInfo(CIRGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
+
+private:
+  ABIKind getABIKind() const { return Kind; }
+  bool isDarwinPCS() const { return Kind == DarwinPCS; }
+
+  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
+                                  unsigned CallingConvention) const;
+
+  void computeInfo(CIRGenFunctionInfo &FI) const override {
+    // Top level CIR has unlimited arguments and return types. Lowering for
+    // ABI specific concerns should happen during a lowering phase. Assume
+    // everything is direct for now.
+    for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(),
+                                          ie = FI.arg_end();
+         it != ie; ++it) {
+      if (testIfIsVoidTy(it->type))
+        it->info = ABIArgInfo::getIgnore();
+      else
+        it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type));
+    }
+    auto RetTy = FI.getReturnType();
+    if (testIfIsVoidTy(RetTy))
+      FI.getReturnInfo() = ABIArgInfo::getIgnore();
+    else
+      FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy));
+  }
+};
+
+class AArch64TargetCIRGenInfo : public TargetCIRGenInfo {
+public:
+  AArch64TargetCIRGenInfo(CIRGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
+      : TargetCIRGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
+};
+
+} // namespace
+
+namespace {
+
+/// The AVX ABI level for X86 targets.
+enum class X86AVXABILevel { None, AVX, AVX512 };
+
+class X86_64ABIInfo : public ABIInfo {
+  enum Class {
+    Integer = 0,
+    SSE,
+    SSEUp,
+    X87,
+    X87Up,
+    ComplexX87,
+    NoClass,
+    Memory
+  };
+
+  // X86AVXABILevel AVXLevel;
+  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
+  // 64-bit hardware.
+  // bool Has64BitPointers;
+
+public:
+  X86_64ABIInfo(CIRGenTypes &CGT, X86AVXABILevel AVXLevel)
+      : ABIInfo(CGT)
+  // , AVXLevel(AVXLevel)
+  // , Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8)
+  {}
+
+  virtual void computeInfo(CIRGenFunctionInfo &FI) const override;
+
+  /// classify - Determine the x86_64 register classes in which the given
+  /// type T should be passed.
+  ///
+  /// \param Lo - The classification for the parts of the type residing in the
+  /// low word of the containing object.
+  ///
+  /// \param Hi - The classification for the parts of the type residing in the
+  /// high word of the containing object.
+  ///
+  /// \param OffsetBase - The bit offset of this type in the containing object.
+  /// Some parameters are classified differently depending on whether they
+  /// straddle an eightbyte boundary.
+  ///
+  /// \param isNamedArg - Whether the argument in question is a "named"
+  /// argument, as used in AMD64-ABI 3.5.7.
+  ///
+  /// If a word is unused its result will be NoClass; if a type should be
+  /// passed in Memory then at least the classification of \arg Lo will be
+  /// Memory.
+  ///
+  /// The \arg Lo class will be NoClass iff the argument is ignored.
+  ///
+  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will also be
+  /// ComplexX87.
+  void classify(clang::QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
+                bool isNamedArg) const;
+
+  mlir::Type GetSSETypeAtOffset(mlir::Type CIRType, unsigned CIROffset,
+                                clang::QualType SourceTy,
+                                unsigned SourceOffset) const;
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+  ABIArgInfo classifyArgumentType(clang::QualType Ty, unsigned freeIntRegs,
+                                  unsigned &neededInt, unsigned &neededSSE,
+                                  bool isNamedArg) const;
+
+  mlir::Type GetINTEGERTypeAtOffset(mlir::Type CIRType, unsigned CIROffset,
+                                    QualType SourceTy,
+                                    unsigned SourceOffset) const;
+
+  /// getIndirectResult - Given a source type \arg Ty, return a suitable
+  /// result such that the argument will be passed in memory.
+  ///
+  /// \param freeIntRegs - The number of free integer registers remaining
+  /// available.
+  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
+};
+
+class X86_64TargetCIRGenInfo : public TargetCIRGenInfo {
+public:
+  X86_64TargetCIRGenInfo(CIRGenTypes &CGT, X86AVXABILevel AVXLevel)
+      : TargetCIRGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
+};
+} // namespace
+
+// TODO(cir): remove the attribute once this gets used.
+LLVM_ATTRIBUTE_UNUSED
+static bool classifyReturnType(const CIRGenCXXABI &CXXABI,
+                               CIRGenFunctionInfo &FI, const ABIInfo &Info) {
+  QualType Ty = FI.getReturnType();
+
+  assert(!Ty->getAs<RecordType>() && "RecordType returns NYI");
+
+  return CXXABI.classifyReturnType(FI);
+}
+
+CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
+
+clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+                                            unsigned freeIntRegs) const {
+  assert(false && "NYI");
+}
+
+void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const {
+  // Top level CIR has unlimited arguments and return types. Lowering for ABI
+  // specific concerns should happen during a lowering phase. Assume everything
+  // is direct for now.
+  for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+       it != ie; ++it) {
+    if (testIfIsVoidTy(it->type))
+      it->info = ABIArgInfo::getIgnore();
+    else
+      it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type));
+  }
+  auto RetTy = FI.getReturnType();
+  if (testIfIsVoidTy(RetTy))
+    FI.getReturnInfo() = ABIArgInfo::getIgnore();
+  else
+    FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy));
+}
+
+/// Pass transparent unions as if they were the type of the first element.
+/// Sema should ensure that all elements of the union have the same "machine
+/// type".
+static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
+  assert(!Ty->getAsUnionType() && "NYI");
+  return Ty;
+}
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct.
+/// This routine picks the best CIR type to represent this, which may be i64
+/// or may be anything else that the backend will pass in a GPR that works
+/// better (e.g. i8, %foo*, etc).
+///
+/// PrefType is a CIR type that corresponds to (part of) the IR type for the
+/// source type. CIROffset is an offset in bytes into the CIR type that the
+/// 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source-level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type CIRType,
+                                                 unsigned CIROffset,
+                                                 QualType SourceTy,
+                                                 unsigned SourceOffset) const {
+  // TODO: entirely stubbed out
+  assert(CIROffset == 0 && "NYI");
+  assert(SourceOffset == 0 && "NYI");
+  return CIRType;
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
+                                               unsigned int freeIntRegs,
+                                               unsigned int &neededInt,
+                                               unsigned int &neededSSE,
+                                               bool isNamedArg) const {
+  Ty = useFirstFieldIfTransparentUnion(Ty);
+
+  X86_64ABIInfo::Class Lo, Hi;
+  classify(Ty, 0, Lo, Hi, isNamedArg);
+
+  // Check some invariants
+  // FIXME: Enforce these by construction.
+  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+  neededInt = 0;
+  neededSSE = 0;
+  mlir::Type ResType = nullptr;
+  switch (Lo) {
+  default:
+    assert(false && "NYI");
+
+  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next available
+  // register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 and %r9 is used.
+  case Integer:
+    ++neededInt;
+
+    // Pick an 8-byte type based on the preferred type.
+    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+    // If we have a sign or zero extended integer, make sure to return Extend
+    // so that the parameter gets the right LLVM IR attributes.
+    if (Hi == NoClass && ResType.isa<mlir::cir::IntType>()) {
+      assert(!Ty->getAs<EnumType>() && "NYI");
+      if (Ty->isSignedIntegerOrEnumerationType() &&
+          isPromotableIntegerTypeForABI(Ty))
+        return ABIArgInfo::getExtend(Ty);
+    }
+
+    break;
+
+  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next available SSE
+  // register is used, the registers are taken in the order from %xmm0 to
+  // %xmm7.
+  case SSE: {
+    mlir::Type CIRType = CGT.ConvertType(Ty);
+    ResType = GetSSETypeAtOffset(CIRType, 0, Ty, 0);
+    ++neededSSE;
+    break;
+  }
+  }
+
+  mlir::Type HighPart = nullptr;
+  switch (Hi) {
+  default:
+    assert(false && "NYI");
+  case NoClass:
+    break;
+  }
+
+  assert(!HighPart && "NYI");
+
+  return ABIArgInfo::getDirect(ResType);
+}
+
+ABIInfo::~ABIInfo() {}
+
+bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+  if (getContext().isPromotableIntegerType(Ty))
+    return true;
+
+  assert(!Ty->getAs<BitIntType>() && "NYI");
+
+  return false;
+}
+
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
+                             Class &Hi, bool isNamedArg) const {
+  // FIXME: This code can be simplified by introducing a simple value class
+  // for Class pairs with appropriate constructor methods for the various
+  // situations.
+
+  // FIXME: Some of the split computations are wrong; unaligned vectors
+  // shouldn't be passed in registers for example, so there is no chance they
+  // can straddle an eightbyte. Verify & simplify.
+
+  Lo = Hi = NoClass;
+  Class &Current = OffsetBase < 64 ? Lo : Hi;
+  Current = Memory;
+
+  if (const auto *BT = Ty->getAs<BuiltinType>()) {
+    BuiltinType::Kind k = BT->getKind();
+    if (k == BuiltinType::Void) {
+      Current = NoClass;
+    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+      assert(false && "NYI");
+      Lo = Integer;
+      Hi = Integer;
+    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+      Current = Integer;
+    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
+               k == BuiltinType::Float16) {
+      Current = SSE;
+    } else if (k == BuiltinType::LongDouble) {
+      assert(false && "NYI");
+    } else
+      assert(false && "Only void, integer and floating-point builtin types "
+                      "supported so far");
+    // FIXME: _Decimal32 and _Decimal64 are SSE.
+    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+    return;
+  }
+
+  assert(!Ty->getAs<EnumType>() && "Enums NYI");
+  if (Ty->hasPointerRepresentation()) {
+    Current = Integer;
+    return;
+  }
+
+  assert(false && "Nothing else implemented yet");
+}
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
+/// the low 8 bytes of an XMM register, corresponding to the SSE class.
+mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type CIRType,
+                                             unsigned int CIROffset,
+                                             clang::QualType SourceTy,
+                                             unsigned int SourceOffset) const {
+  // TODO: entirely stubbed out
+  assert(CIROffset == 0 && "NYI");
+  assert(SourceOffset == 0 && "NYI");
+  return CIRType;
+}
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
+  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+  // classification algorithm.
+  X86_64ABIInfo::Class Lo, Hi;
+  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
+
+  // Check some invariants.
+  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+  mlir::Type ResType = nullptr;
+  assert((Lo == NoClass || Lo == Integer || Lo == SSE) &&
+         "Only NoClass, Integer and SSE supported so far");
+
+  switch (Lo) {
+  case NoClass:
+    assert(Hi == NoClass && "Only NoClass supported so far for Hi");
+    return ABIArgInfo::getIgnore();
+
+  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next available
+  // register of the sequence %rax, %rdx is used.
+  case Integer:
+    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+    // If we have a sign or zero extended integer, make sure to return Extend
+    // so that the parameter gets the right LLVM IR attributes.
+    // TODO: extend the above consideration to MLIR
+    if (Hi == NoClass && ResType.isa<mlir::cir::IntType>()) {
+      // Treat an enum type as its underlying type.
+      if (const auto *EnumTy = RetTy->getAs<EnumType>())
+        RetTy = EnumTy->getDecl()->getIntegerType();
+
+      if (RetTy->isIntegralOrEnumerationType() &&
+          isPromotableIntegerTypeForABI(RetTy)) {
+        return ABIArgInfo::getExtend(RetTy);
+      }
+    }
+    break;
+
+  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next available SSE
+  // register of the sequence %xmm0, %xmm1 is used.
+  case SSE:
+    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+    break;
+
+  default:
+    llvm_unreachable("NYI");
+  }
+
+  mlir::Type HighPart = nullptr;
+
+  if (HighPart)
+    assert(false && "NYI");
+
+  return ABIArgInfo::getDirect(ResType);
+}
+
+const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() {
+  if (TheTargetCIRGenInfo)
+    return *TheTargetCIRGenInfo;
+
+  // Helper to set the unique_ptr while still keeping the return value.
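+  // A sketch of the intended call shape (illustrative, mirroring the
+  // aarch64 case below):
+  //   return SetCIRGenInfo(new AArch64TargetCIRGenInfo(genTypes, Kind));
+  // stores the newly built info object and hands back a reference to it.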
+  auto SetCIRGenInfo = [&](TargetCIRGenInfo *P) -> const TargetCIRGenInfo & {
+    this->TheTargetCIRGenInfo.reset(P);
+    return *P;
+  };
+
+  const llvm::Triple &Triple = getTarget().getTriple();
+
+  switch (Triple.getArch()) {
+  default:
+    assert(false && "Target not yet supported!");
+  case llvm::Triple::aarch64: {
+    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
+    assert((getTarget().getABI() == "aapcs" ||
+            getTarget().getABI() == "darwinpcs") &&
+           "Only Darwin supported for aarch64");
+    Kind = AArch64ABIInfo::DarwinPCS;
+    return SetCIRGenInfo(new AArch64TargetCIRGenInfo(genTypes, Kind));
+  }
+
+  case llvm::Triple::x86_64: {
+    StringRef ABI = getTarget().getABI();
+    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
+                               : ABI == "avx"  ? X86AVXABILevel::AVX
+                                               : X86AVXABILevel::None);
+
+    switch (Triple.getOS()) {
+    default:
+      assert(false && "OSType NYI");
+    case llvm::Triple::Linux:
+      return SetCIRGenInfo(new X86_64TargetCIRGenInfo(genTypes, AVXLevel));
+    }
+  }
+  }
+}
diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h
new file mode 100644
index 000000000000..e4fee4f2c330
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/TargetInfo.h
@@ -0,0 +1,73 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_TARGETINFO_H
+#define LLVM_CLANG_LIB_CIR_TARGETINFO_H
+
+#include "ABIInfo.h"
+#include "CIRGenValue.h"
+#include "mlir/IR/Types.h"
+
+#include <memory>
+
+namespace cir {
+
+class CIRGenFunction;
+
+/// This class organizes various target-specific code generation issues, like
+/// target-specific attributes, builtins and so on.
+/// Equivalent to LLVM's TargetCodeGenInfo.
+class TargetCIRGenInfo {
+  std::unique_ptr<ABIInfo> Info = nullptr;
+
+public:
+  TargetCIRGenInfo(std::unique_ptr<ABIInfo> Info) : Info(std::move(Info)) {}
+
+  /// Returns ABI info helper for the target.
+  const ABIInfo &getABIInfo() const { return *Info; }
+
+  virtual bool isScalarizableAsmOperand(CIRGenFunction &CGF,
+                                        mlir::Type Ty) const {
+    return false;
+  }
+
+  /// Corrects the MLIR type for a given constraint and "usual"
+  /// type.
+  ///
+  /// \returns A new MLIR type, possibly the same as the original
+  /// on success
+  virtual mlir::Type adjustInlineAsmType(CIRGenFunction &CGF,
+                                         llvm::StringRef Constraint,
+                                         mlir::Type Ty) const {
+    return Ty;
+  }
+
+  virtual void
+  addReturnRegisterOutputs(CIRGenFunction &CGF, LValue ReturnValue,
+                           std::string &Constraints,
+                           std::vector<mlir::Type> &ResultRegTypes,
+                           std::vector<mlir::Type> &ResultTruncRegTypes,
+                           std::vector<LValue> &ResultRegDests,
+                           std::string &AsmString, unsigned NumOutputs) const {
+  }
+
+  /// Get the AST address space for alloca.
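+  /// (The default below returns clang::LangAS::Default; address-space-aware
+  /// targets would be expected to override it, an assumption based on the
+  /// equivalent LLVM TargetCodeGenInfo hook.)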
+  virtual clang::LangAS getASTAllocaAddressSpace() const {
+    return clang::LangAS::Default;
+  }
+
+  virtual ~TargetCIRGenInfo() {}
+};
+
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h
new file mode 100644
index 000000000000..e93a564ce076
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h
@@ -0,0 +1,178 @@
+//===---- UnimplementedFeatureGuarding.h - Checks against NYI ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file introduces some helper classes to guard against features that
+// traditional CodeGen supports but CIR does not yet implement, and that we
+// otherwise have no good way to assert against.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_UFG
+#define LLVM_CLANG_LIB_CIR_UFG
+
+namespace cir {
+struct UnimplementedFeature {
+  // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that
+  // handles sanitizer-related type check features
+  static bool buildTypeCheck() { return false; }
+  static bool tbaa() { return false; }
+  static bool cleanups() { return false; }
+
+  // GNU vectors are done, but other kinds of vectors haven't been implemented.
+  static bool scalableVectors() { return false; }
+  static bool vectorConstants() { return false; }
+
+  // Address space related
+  static bool addressSpace() { return false; }
+  static bool addressSpaceInGlobalVar() { return false; }
+
+  // Clang codegen options
+  static bool strictVTablePointers() { return false; }
+
+  // Unhandled global/linkage information.
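+  // (Each guard in this group is meant to be queried at CodeGen sites,
+  // e.g. `assert(!UnimplementedFeature::unnamedAddr() && "NYI");`; an
+  // illustrative pattern, inferred from this header's stated purpose.)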
+  static bool unnamedAddr() { return false; }
+  static bool setComdat() { return false; }
+  static bool setDSOLocal() { return false; }
+  static bool threadLocal() { return false; }
+  static bool setDLLStorageClass() { return false; }
+  static bool setDLLImportDLLExport() { return false; }
+  static bool setPartition() { return false; }
+  static bool setGlobalVisibility() { return false; }
+  static bool hiddenVisibility() { return false; }
+  static bool protectedVisibility() { return false; }
+  static bool addCompilerUsedGlobal() { return false; }
+
+  // Sanitizers
+  static bool reportGlobalToASan() { return false; }
+  static bool emitAsanPrologueOrEpilogue() { return false; }
+  static bool emitCheckedInBoundsGEP() { return false; }
+  static bool pointerOverflowSanitizer() { return false; }
+  static bool sanitizeDtor() { return false; }
+  static bool sanitizeVLABound() { return false; }
+  static bool sanitizerBuiltin() { return false; }
+  static bool sanitizerReturn() { return false; }
+
+  // ObjC
+  static bool setObjCGCLValueClass() { return false; }
+
+  // Debug info
+  static bool generateDebugInfo() { return false; }
+
+  // LLVM Attributes
+  static bool setFunctionAttributes() { return false; }
+  static bool attributeBuiltin() { return false; }
+  static bool attributeNoBuiltin() { return false; }
+  static bool parameterAttributes() { return false; }
+  static bool minLegalVectorWidthAttr() { return false; }
+  static bool vscaleRangeAttr() { return false; }
+
+  // Coroutines
+  static bool unhandledException() { return false; }
+
+  // Missing Emissions
+  static bool variablyModifiedTypeEmission() { return false; }
+  static bool buildLValueAlignmentAssumption() { return false; }
+  static bool buildDerivedToBaseCastForDevirt() { return false; }
+  static bool emitFunctionEpilog() { return false; }
+
+  // Data layout
+  static bool dataLayoutGetIndexTypeSizeInBits() { return false; }
+
+  // Reference-related stuff
+  static bool ARC() { return false; } // Automatic reference counting
+
+  // Clang early optimizations or things deferred to LLVM lowering.
+  static bool shouldUseBZeroPlusStoresToInitialize() { return false; }
+  static bool shouldUseMemSetToInitialize() { return false; }
+  static bool shouldSplitConstantStore() { return false; }
+  static bool shouldCreateMemCpyFromGlobal() { return false; }
+  static bool shouldReverseUnaryCondOnBoolExpr() { return false; }
+  static bool fieldMemcpyizerBuildMemcpy() { return false; }
+  static bool isTrivialAndisDefaultConstructor() { return false; }
+  static bool isMemcpyEquivalentSpecialMember() { return false; }
+  static bool constructABIArgDirectExtend() { return false; }
+  static bool mayHaveIntegerOverflow() { return false; }
+  static bool llvmLoweringPtrDiffConsidersPointee() { return false; }
+  static bool emitNullCheckForDeleteCalls() { return false; }
+
+  // Folding methods.
+  static bool foldBinOpFMF() { return false; }
+
+  // Fast math.
+  static bool fastMathGuard() { return false; }
+  static bool fastMathFlags() { return false; }
+  static bool fastMathFuncAttributes() { return false; }
+
+  // Exception handling
+  static bool setLandingPadCleanup() { return false; }
+  static bool isSEHTryScope() { return false; }
+  static bool ehStack() { return false; }
+  static bool emitStartEHSpec() { return false; }
+  static bool emitEndEHSpec() { return false; }
+  static bool simplifyCleanupEntry() { return false; }
+
+  // Type qualifiers.
+  static bool atomicTypes() { return false; }
+  static bool volatileTypes() { return false; }
+  static bool syncScopeID() { return false; }
+
+  static bool capturedByInit() { return false; }
+  static bool tryEmitAsConstant() { return false; }
+  static bool incrementProfileCounter() { return false; }
+  static bool createProfileWeightsForLoop() { return false; }
+  static bool emitCondLikelihoodViaExpectIntrinsic() { return false; }
+  static bool requiresReturnValueCheck() { return false; }
+  static bool shouldEmitLifetimeMarkers() { return false; }
+  static bool peepholeProtection() { return false; }
+  static bool CGCapturedStmtInfo() { return false; }
+  static bool cxxABI() { return false; }
+  static bool openCL() { return false; }
+  static bool CUDA() { return false; }
+  static bool openMP() { return false; }
+  static bool openMPRuntime() { return false; }
+  static bool openMPTarget() { return false; }
+  static bool isVarArg() { return false; }
+  static bool setNonGC() { return false; }
+  static bool volatileLoadOrStore() { return false; }
+  static bool armComputeVolatileBitfields() { return false; }
+  static bool setCommonAttributes() { return false; }
+  static bool insertBuiltinUnpredictable() { return false; }
+  static bool createInvariantGroup() { return false; }
+  static bool addAutoInitAnnotation() { return false; }
+  static bool addHeapAllocSiteMetadata() { return false; }
+  static bool loopInfoStack() { return false; }
+  static bool requiresCleanups() { return false; }
+  static bool constantFoldsToSimpleInteger() { return false; }
+  static bool alignedLoad() { return false; }
+  static bool checkFunctionCallABI() { return false; }
+  static bool zeroInitializer() { return false; }
+  static bool targetCodeGenInfoIsProtoCallVariadic() { return false; }
+  static bool chainCalls() { return false; }
+  static bool operandBundles() { return false; }
+  static bool exceptions() { return false; }
+  static bool metaDataNode() { return false; }
+  static bool emitDeclMetadata() { return false; }
+  static bool emitScalarRangeCheck() { return false; }
+  static bool stmtExprEvaluation() { return false; }
+  static bool setCallingConv() { return false; }
+  static bool tryMarkNoThrow() { return false; }
+  static bool indirectBranch() { return false; }
+  static bool escapedLocals() { return false; }
+  static bool deferredReplacements() { return false; }
+  static bool shouldInstrumentFunction() { return false; }
+
+  // Inline assembly
+  static bool asm_goto() { return false; }
+  static bool asm_unwind_clobber() { return false; }
+  static bool asm_memory_effects() { return false; }
+  static bool asm_vector_type() { return false; }
+  static bool asm_llvm_assume() { return false; }
+};
+} // namespace cir
+
+#endif
diff --git a/clang/lib/CIR/Dialect/CMakeLists.txt b/clang/lib/CIR/Dialect/CMakeLists.txt
index f33061b2d87c..9f57627c321f 100644
--- a/clang/lib/CIR/Dialect/CMakeLists.txt
+++ b/clang/lib/CIR/Dialect/CMakeLists.txt
@@ -1 +1,2 @@
 add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
new file mode 100644
index 000000000000..6702a5d8f276
--- /dev/null
+++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
@@ -0,0 +1,499 @@
+//===- CIRAttrs.cpp - MLIR CIR Attributes ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the attributes in the CIR dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinAttributeInterfaces.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/Location.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/Support/LLVM.h"
+#include "mlir/Support/LogicalResult.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/TypeSwitch.h"
+
+// ClangIR holds back AST references when available.
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+
+static void printStructMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members);
+static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser,
+                                            mlir::ArrayAttr &members);
+
+static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value,
+                              mlir::Type ty);
+static mlir::ParseResult
+parseFloatLiteral(mlir::AsmParser &parser,
+                  mlir::FailureOr<llvm::APFloat> &value,
+                  mlir::cir::CIRFPTypeInterface fpType);
+
+#define GET_ATTRDEF_CLASSES
+#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc"
+
+using namespace mlir;
+using namespace mlir::cir;
+
+//===----------------------------------------------------------------------===//
+// CIR AST Attr helpers
+//===----------------------------------------------------------------------===//
+
+namespace mlir {
+namespace cir {
+
+mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl,
+                                 mlir::MLIRContext *ctx) {
+  return llvm::TypeSwitch<const clang::Decl *, mlir::Attribute>(decl)
+      .Case([ctx](const clang::CXXConstructorDecl *ast) {
+        return ASTCXXConstructorDeclAttr::get(ctx, ast);
+      })
+      .Case([ctx](const clang::CXXConversionDecl *ast) {
+        return ASTCXXConversionDeclAttr::get(ctx, ast);
+      })
+      .Case([ctx](const clang::CXXDestructorDecl *ast) {
+        return ASTCXXDestructorDeclAttr::get(ctx, ast);
+      })
+      .Case([ctx](const clang::CXXMethodDecl *ast) {
+        return ASTCXXMethodDeclAttr::get(ctx, ast);
+      })
+      .Case([ctx](const clang::FunctionDecl *ast) {
+        return ASTFunctionDeclAttr::get(ctx, ast);
+      })
+      .Default([](auto) {
+        llvm_unreachable("unexpected Decl kind");
+        return mlir::Attribute();
+      });
+}
+
+} // namespace cir
+} // namespace mlir
+
+//===----------------------------------------------------------------------===//
+// General CIR parsing / printing
+//===----------------------------------------------------------------------===//
+
+Attribute CIRDialect::parseAttribute(DialectAsmParser &parser,
+                                     Type type) const {
+  llvm::SMLoc typeLoc = parser.getCurrentLocation();
+  StringRef mnemonic;
+  Attribute genAttr;
+  OptionalParseResult parseResult =
+      generatedAttributeParser(parser, &mnemonic, type, genAttr);
+  if (parseResult.has_value())
+    return genAttr;
+  parser.emitError(typeLoc, "unknown attribute in CIR dialect");
+  return Attribute();
+}
+
+void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const {
+  if (failed(generatedAttributePrinter(attr, os)))
+    llvm_unreachable("unexpected CIR type kind");
+}
+
+static void printStructMembers(mlir::AsmPrinter &printer,
+                               mlir::ArrayAttr members) {
+  printer << '{';
+  llvm::interleaveComma(members, printer);
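+  // The printed form reads e.g. `{#cir.int<1> : !s32i, #cir.int<2> : !s32i}`
+  // (illustrative; each member is a typed attribute).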
+  printer << '}';
+}
+
+static ParseResult parseStructMembers(mlir::AsmParser &parser,
+                                      mlir::ArrayAttr &members) {
+  SmallVector<mlir::Attribute> elts;
+
+  auto delimiter = AsmParser::Delimiter::Braces;
+  auto result = parser.parseCommaSeparatedList(delimiter, [&]() {
+    mlir::TypedAttr attr;
+    if (parser.parseAttribute(attr).failed())
+      return mlir::failure();
+    elts.push_back(attr);
+    return mlir::success();
+  });
+
+  if (result.failed())
+    return mlir::failure();
+
+  members = mlir::ArrayAttr::get(parser.getContext(), elts);
+  return mlir::success();
+}
+
+LogicalResult ConstStructAttr::verify(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    mlir::Type type, ArrayAttr members) {
+  auto sTy = type.dyn_cast_or_null<mlir::cir::StructType>();
+  if (!sTy) {
+    emitError() << "expected !cir.struct type";
+    return failure();
+  }
+
+  if (sTy.getMembers().size() != members.size()) {
+    emitError() << "number of elements must match";
+    return failure();
+  }
+
+  unsigned attrIdx = 0;
+  for (auto &member : sTy.getMembers()) {
+    auto m = members[attrIdx].dyn_cast_or_null<mlir::TypedAttr>();
+    if (!m) {
+      emitError() << "expected mlir::TypedAttr attribute";
+      return failure();
+    }
+    if (member != m.getType()) {
+      emitError() << "element at index " << attrIdx << " has type "
+                  << m.getType() << " but return type for this element is "
+                  << member;
+      return failure();
+    }
+    attrIdx++;
+  }
+
+  return success();
+}
+
+LogicalResult StructLayoutAttr::verify(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    unsigned size, unsigned alignment, bool padded, mlir::Type largest_member,
+    mlir::ArrayAttr offsets) {
+  if (not std::all_of(offsets.begin(), offsets.end(), [](mlir::Attribute attr) {
+        return attr.isa<mlir::IntegerAttr>();
+      })) {
+    return emitError() << "all index values must be integers";
+  }
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// LangAttr definitions
+//===----------------------------------------------------------------------===//
+
+Attribute LangAttr::parse(AsmParser &parser, Type odsType) {
+  auto loc = parser.getCurrentLocation();
+  if (parser.parseLess())
+    return {};
+
+  // Parse variable 'lang'.
+  llvm::StringRef lang;
+  if (parser.parseKeyword(&lang))
+    return {};
+
+  // Check if parsed value is a valid language.
+  auto langEnum = symbolizeSourceLanguage(lang);
+  if (!langEnum.has_value()) {
+    parser.emitError(loc) << "invalid language keyword '" << lang << "'";
+    return {};
+  }
+
+  if (parser.parseGreater())
+    return {};
+
+  return get(parser.getContext(), langEnum.value());
+}
+
+void LangAttr::print(AsmPrinter &printer) const {
+  printer << "<" << getLang() << '>';
+}
+
+//===----------------------------------------------------------------------===//
+// ConstPtrAttr definitions
+//===----------------------------------------------------------------------===//
+
+Attribute ConstPtrAttr::parse(AsmParser &parser, Type odsType) {
+  uint64_t value;
+
+  if (!odsType.isa<mlir::cir::PointerType>())
+    return {};
+
+  // Consume the '<' symbol.
+  if (parser.parseLess())
+    return {};
+
+  if (parser.parseOptionalKeyword("null").succeeded()) {
+    value = 0;
+  } else {
+    if (parser.parseInteger(value))
+      parser.emitError(parser.getCurrentLocation(), "expected integer value");
+  }
+
+  // Consume the '>' symbol.
+  if (parser.parseGreater())
+    return {};
+
+  return ConstPtrAttr::get(odsType, value);
+}
+
+void ConstPtrAttr::print(AsmPrinter &printer) const {
+  printer << '<';
+  if (isNullValue())
+    printer << "null";
+  else
+    printer << getValue();
+  printer << '>';
+}
+
+//===----------------------------------------------------------------------===//
+// IntAttr definitions
+//===----------------------------------------------------------------------===//
+
+Attribute IntAttr::parse(AsmParser &parser, Type odsType) {
+  mlir::APInt APValue;
+
+  if (!odsType.isa<mlir::cir::IntType>())
+    return {};
+  auto type = odsType.cast<mlir::cir::IntType>();
+
+  // Consume the '<' symbol.
+  if (parser.parseLess())
+    return {};
+
+  // Fetch arbitrary precision integer value.
+  if (type.isSigned()) {
+    int64_t value;
+    if (parser.parseInteger(value))
+      parser.emitError(parser.getCurrentLocation(), "expected integer value");
+    APValue = mlir::APInt(type.getWidth(), value, type.isSigned());
+    if (APValue.getSExtValue() != value)
+      parser.emitError(parser.getCurrentLocation(),
+                       "integer value too large for the given type");
+  } else {
+    uint64_t value;
+    if (parser.parseInteger(value))
+      parser.emitError(parser.getCurrentLocation(), "expected integer value");
+    APValue = mlir::APInt(type.getWidth(), value, type.isSigned());
+    if (APValue.getZExtValue() != value)
+      parser.emitError(parser.getCurrentLocation(),
+                       "integer value too large for the given type");
+  }
+
+  // Consume the '>' symbol.
+  if (parser.parseGreater())
+    return {};
+
+  return IntAttr::get(type, APValue);
+}
+
+void IntAttr::print(AsmPrinter &printer) const {
+  auto type = getType().cast<mlir::cir::IntType>();
+  printer << '<';
+  if (type.isSigned())
+    printer << getSInt();
+  else
+    printer << getUInt();
+  printer << '>';
+}
+
+LogicalResult IntAttr::verify(function_ref<InFlightDiagnostic()> emitError,
+                              Type type, APInt value) {
+  if (!type.isa<mlir::cir::IntType>()) {
+    emitError() << "expected 'cir.int' type";
+    return failure();
+  }
+
+  auto intType = type.cast<mlir::cir::IntType>();
+  if (value.getBitWidth() != intType.getWidth()) {
+    emitError() << "type and value bitwidth mismatch: " << intType.getWidth()
+                << " != " << value.getBitWidth();
+    return failure();
+  }
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// FPAttr definitions
+//===----------------------------------------------------------------------===//
+
+static void printFloatLiteral(AsmPrinter &p, APFloat value, Type ty) {
+  p << value;
+}
+
+static ParseResult parseFloatLiteral(AsmParser &parser,
+                                     FailureOr<APFloat> &value,
+                                     CIRFPTypeInterface fpType) {
+
+  APFloat parsedValue(0.0);
+  if (parser.parseFloat(fpType.getFloatSemantics(), parsedValue))
+    return failure();
+
+  value.emplace(parsedValue);
+  return success();
+}
+
+FPAttr FPAttr::getZero(Type type) {
+  return get(type,
+             APFloat::getZero(
+                 type.cast<CIRFPTypeInterface>().getFloatSemantics()));
+}
+
+LogicalResult FPAttr::verify(function_ref<InFlightDiagnostic()> emitError,
+                             CIRFPTypeInterface fpType, APFloat value) {
+  if (APFloat::SemanticsToEnum(fpType.getFloatSemantics()) !=
+      APFloat::SemanticsToEnum(value.getSemantics())) {
+    emitError() << "floating-point semantics mismatch";
+    return failure();
+  }
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CmpThreeWayInfoAttr definitions
+//===----------------------------------------------------------------------===//
+
+std::string CmpThreeWayInfoAttr::getAlias() const {
+  std::string alias = "cmp3way_info";
+
+  if (getOrdering() == CmpOrdering::Strong)
+    alias.append("_strong_");
+  else
alias.append("_partial_"); + + auto appendInt = [&](int64_t value) { + if (value < 0) { + alias.push_back('n'); + value = -value; + } + alias.append(std::to_string(value)); + }; + + alias.append("lt"); + appendInt(getLt()); + alias.append("eq"); + appendInt(getEq()); + alias.append("gt"); + appendInt(getGt()); + + if (auto unordered = getUnordered()) { + alias.append("un"); + appendInt(unordered.value()); + } + + return alias; +} + +LogicalResult +CmpThreeWayInfoAttr::verify(function_ref emitError, + CmpOrdering ordering, int64_t lt, int64_t eq, + int64_t gt, std::optional unordered) { + // The presense of unordered must match the value of ordering. + if (ordering == CmpOrdering::Strong && unordered) { + emitError() << "strong ordering does not include unordered ordering"; + return failure(); + } + if (ordering == CmpOrdering::Partial && !unordered) { + emitError() << "partial ordering lacks unordered ordering"; + return failure(); + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// DataMemberAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult +DataMemberAttr::verify(function_ref emitError, + mlir::cir::DataMemberType ty, + std::optional memberIndex) { + if (!memberIndex.has_value()) { + // DataMemberAttr without a given index represents a null value. + return success(); + } + + auto clsStructTy = ty.getClsTy(); + if (clsStructTy.isIncomplete()) { + emitError() << "incomplete 'cir.struct' cannot be used to build a non-null " + "data member pointer"; + return failure(); + } + + auto memberIndexValue = memberIndex.value(); + if (memberIndexValue >= clsStructTy.getNumElements()) { + emitError() + << "member index of a #cir.data_member attribute is out of range"; + return failure(); + } + + auto memberTy = clsStructTy.getMembers()[memberIndexValue]; + if (memberTy != ty.getMemberTy()) { + emitError() << "member type of a #cir.data_member attribute must match the " + "attribute type"; + return failure(); + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// DynamicCastInfoAtttr definitions +//===----------------------------------------------------------------------===// + +std::string DynamicCastInfoAttr::getAlias() const { + // The alias looks like: `dyn_cast_info__` + + std::string alias = "dyn_cast_info_"; + + alias.append(getSrcRtti().getSymbol().getValue()); + alias.push_back('_'); + alias.append(getDestRtti().getSymbol().getValue()); + + return alias; +} + +LogicalResult DynamicCastInfoAttr::verify( + function_ref emitError, + mlir::cir::GlobalViewAttr srcRtti, mlir::cir::GlobalViewAttr destRtti, + mlir::FlatSymbolRefAttr runtimeFunc, mlir::FlatSymbolRefAttr badCastFunc, + mlir::cir::IntAttr offsetHint) { + auto isRttiPtr = [](mlir::Type ty) { + // RTTI pointers are !cir.ptr. 
+
+    auto ptrTy = ty.dyn_cast<mlir::cir::PointerType>();
+    if (!ptrTy)
+      return false;
+
+    auto pointeeIntTy = ptrTy.getPointee().dyn_cast<mlir::cir::IntType>();
+    if (!pointeeIntTy)
+      return false;
+
+    return pointeeIntTy.isUnsigned() && pointeeIntTy.getWidth() == 8;
+  };
+
+  if (!isRttiPtr(srcRtti.getType())) {
+    emitError() << "srcRtti must be an RTTI pointer";
+    return failure();
+  }
+
+  if (!isRttiPtr(destRtti.getType())) {
+    emitError() << "destRtti must be an RTTI pointer";
+    return failure();
+  }
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CIR Dialect
+//===----------------------------------------------------------------------===//
+
+void CIRDialect::registerAttributes() {
+  addAttributes<
+#define GET_ATTRDEF_LIST
+#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc"
+      >();
+}
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index c2829c3ff2af..44e69e533b93 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -10,4 +10,3062 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/AST/Attrs.inc"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/Interfaces/CIRLoopOpInterface.h"
+#include "llvm/Support/ErrorHandling.h"
+#include
+#include
+
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/DialectInterface.h"
+#include "mlir/IR/Location.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/StorageUniquerSupport.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/Interfaces/DataLayoutInterfaces.h"
+#include "mlir/Interfaces/FunctionImplementation.h"
+#include "mlir/Interfaces/InferTypeOpInterface.h"
+#include "mlir/Support/LLVM.h"
+#include "mlir/Support/LogicalResult.h"
+
+using namespace mlir;
+using namespace mlir::cir;
+
+#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc"
+#include "clang/CIR/Dialect/IR/CIROpsStructs.cpp.inc"
+
+#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc"
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "clang/CIR/Interfaces/CIROpInterfaces.h"
+
+//===----------------------------------------------------------------------===//
+// CIR Dialect
+//===----------------------------------------------------------------------===//
+namespace {
+struct CIROpAsmDialectInterface : public OpAsmDialectInterface {
+  using OpAsmDialectInterface::OpAsmDialectInterface;
+
+  AliasResult getAlias(Type type, raw_ostream &os) const final {
+    if (auto structType = type.dyn_cast<mlir::cir::StructType>()) {
+      if (!structType.getName()) {
+        os << "ty_anon_" << structType.getKindAsStr();
+        return AliasResult::OverridableAlias;
+      }
+      os << "ty_" << structType.getName();
+      return AliasResult::OverridableAlias;
+    }
+    if (auto intType = type.dyn_cast<mlir::cir::IntType>()) {
+      // We only provide alias for standard integer types (i.e. integer types
+      // whose width is divisible by 8).
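+      // For example, a 32-bit signed !cir.int prints through its alias
+      // (`s32i`-style, as produced by getAlias() below), while an odd width
+      // such as i7 keeps its full form (illustrative).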
+      if (intType.getWidth() % 8 != 0)
+        return AliasResult::NoAlias;
+      os << intType.getAlias();
+      return AliasResult::OverridableAlias;
+    }
+    if (auto voidType = type.dyn_cast<mlir::cir::VoidType>()) {
+      os << voidType.getAlias();
+      return AliasResult::OverridableAlias;
+    }
+
+    return AliasResult::NoAlias;
+  }
+
+  AliasResult getAlias(Attribute attr, raw_ostream &os) const final {
+    if (auto boolAttr = attr.dyn_cast<mlir::cir::BoolAttr>()) {
+      os << (boolAttr.getValue() ? "true" : "false");
+      return AliasResult::FinalAlias;
+    }
+    if (auto bitfield = attr.dyn_cast<mlir::cir::BitfieldInfoAttr>()) {
+      os << "bfi_" << bitfield.getName().str();
+      return AliasResult::FinalAlias;
+    }
+    if (auto extraFuncAttr =
+            attr.dyn_cast<mlir::cir::ExtraFuncAttributesAttr>()) {
+      os << "fn_attr";
+      return AliasResult::FinalAlias;
+    }
+    if (auto cmpThreeWayInfoAttr =
+            attr.dyn_cast<mlir::cir::CmpThreeWayInfoAttr>()) {
+      os << cmpThreeWayInfoAttr.getAlias();
+      return AliasResult::FinalAlias;
+    }
+    if (auto dynCastInfoAttr =
+            attr.dyn_cast<mlir::cir::DynamicCastInfoAttr>()) {
+      os << dynCastInfoAttr.getAlias();
+      return AliasResult::FinalAlias;
+    }
+
+    return AliasResult::NoAlias;
+  }
+};
+} // namespace
+
+/// Dialect initialization, the instance will be owned by the context. This is
+/// the point of registration of types and operations for the dialect.
+void cir::CIRDialect::initialize() {
+  registerTypes();
+  registerAttributes();
+  addOperations<
+#define GET_OP_LIST
+#include "clang/CIR/Dialect/IR/CIROps.cpp.inc"
+      >();
+  addInterfaces<CIROpAsmDialectInterface>();
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers
+//===----------------------------------------------------------------------===//
+
+// Parses one of the keywords provided in the list `keywords` and returns the
+// position of the parsed keyword in the list. If none of the keywords from the
+// list is parsed, returns -1.
+static int parseOptionalKeywordAlternative(AsmParser &parser,
+                                           ArrayRef<StringRef> keywords) {
+  for (auto en : llvm::enumerate(keywords)) {
+    if (succeeded(parser.parseOptionalKeyword(en.value())))
+      return en.index();
+  }
+  return -1;
+}
+
+namespace {
+template <typename Ty> struct EnumTraits {};
+
+#define REGISTER_ENUM_TYPE(Ty)                                                 \
+  template <> struct EnumTraits<Ty> {                                          \
+    static StringRef stringify(Ty value) { return stringify##Ty(value); }      \
+    static unsigned getMaxEnumVal() { return getMaxEnumValFor##Ty(); }         \
+  }
+#define REGISTER_ENUM_TYPE_WITH_NS(NS, Ty)                                     \
+  template <> struct EnumTraits<NS::Ty> {                                      \
+    static StringRef stringify(NS::Ty value) {                                 \
+      return NS::stringify##Ty(value);                                         \
+    }                                                                          \
+    static unsigned getMaxEnumVal() { return NS::getMaxEnumValFor##Ty(); }     \
+  }
+
+REGISTER_ENUM_TYPE(GlobalLinkageKind);
+REGISTER_ENUM_TYPE_WITH_NS(sob, SignedOverflowBehavior);
+} // namespace
+
+/// Parse an enum from the keyword, or default to the provided default value.
+/// The return type is the enum type by default, unless overridden with the
+/// second template argument.
+/// TODO: teach other places in this file to use this function.
+template <typename EnumTy, typename RetTy = EnumTy>
+static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) {
+  SmallVector<StringRef> names;
+  for (unsigned i = 0, e = EnumTraits<EnumTy>::getMaxEnumVal(); i <= e; ++i)
+    names.push_back(EnumTraits<EnumTy>::stringify(static_cast<EnumTy>(i)));
+
+  int index = parseOptionalKeywordAlternative(parser, names);
+  if (index == -1)
+    return static_cast<RetTy>(defaultValue);
+  return static_cast<RetTy>(index);
+}
+
+// Checks if a region's terminator omission is valid and, if so, creates and
+// inserts the omitted terminator into the region.
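+// For example (illustrative CIR), a single-block region such as
+//   cir.scope {
+//     %0 = cir.const(#cir.int<0> : !s32i) : !s32i
+//   }
+// may omit its trailing `cir.yield`, which this helper re-creates; a
+// multi-block region must always spell its terminator out.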
+LogicalResult ensureRegionTerm(OpAsmParser &parser, Region &region,
+                               SMLoc errLoc) {
+  Location eLoc = parser.getEncodedSourceLoc(parser.getCurrentLocation());
+  OpBuilder builder(parser.getBuilder().getContext());
+
+  // Region is empty or properly terminated: nothing to do.
+  if (region.empty() ||
+      (region.back().mightHaveTerminator() && region.back().getTerminator()))
+    return success();
+
+  // Check for invalid terminator omissions.
+  if (!region.hasOneBlock())
+    return parser.emitError(errLoc,
+                            "multi-block region must not omit terminator");
+  if (region.back().empty())
+    return parser.emitError(errLoc, "empty region must not omit terminator");
+
+  // Terminator was omitted correctly: recreate it.
+  region.back().push_back(builder.create<cir::YieldOp>(eLoc));
+  return success();
+}
+
+// True if the region's terminator should be omitted.
+bool omitRegionTerm(mlir::Region &r) {
+  const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty();
+  const auto yieldsNothing = [&r]() {
+    YieldOp y = dyn_cast<YieldOp>(r.back().getTerminator());
+    return y && y.getArgs().empty();
+  };
+  return singleNonEmptyBlock && yieldsNothing();
+}
+
+//===----------------------------------------------------------------------===//
+// CIR Custom Parsers/Printers
+//===----------------------------------------------------------------------===//
+
+static mlir::ParseResult parseOmittedTerminatorRegion(mlir::OpAsmParser &parser,
+                                                      mlir::Region &region) {
+  auto regionLoc = parser.getCurrentLocation();
+  if (parser.parseRegion(region))
+    return failure();
+  if (ensureRegionTerm(parser, region, regionLoc).failed())
+    return failure();
+  return success();
+}
+
+static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer,
+                                         mlir::cir::ScopeOp &op,
+                                         mlir::Region &region) {
+  printer.printRegion(region,
+                      /*printEntryBlockArgs=*/false,
+                      /*printBlockTerminators=*/!omitRegionTerm(region));
+}
+
+//===----------------------------------------------------------------------===//
+// AllocaOp
+//===----------------------------------------------------------------------===//
+
+void AllocaOp::build(::mlir::OpBuilder &odsBuilder,
+                     ::mlir::OperationState &odsState, ::mlir::Type addr,
+                     ::mlir::Type allocaType, ::llvm::StringRef name,
+                     ::mlir::IntegerAttr alignment) {
+  odsState.addAttribute(getAllocaTypeAttrName(odsState.name),
+                        ::mlir::TypeAttr::get(allocaType));
+  odsState.addAttribute(getNameAttrName(odsState.name),
+                        odsBuilder.getStringAttr(name));
+  if (alignment) {
+    odsState.addAttribute(getAlignmentAttrName(odsState.name), alignment);
+  }
+  odsState.addTypes(addr);
+}
+
+//===----------------------------------------------------------------------===//
+// BreakOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult BreakOp::verify() {
+  if (!getOperation()->getParentOfType<LoopOpInterface>() &&
+      !getOperation()->getParentOfType<SwitchOp>())
+    return emitOpError("must be within a loop or switch");
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// ConditionOp
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------
+// BranchOpTerminatorInterface Methods
+
+void ConditionOp::getSuccessorRegions(
+    ArrayRef<Attribute> operands, SmallVectorImpl<RegionSuccessor> &regions) {
+  // TODO(cir): The condition value may be folded to a constant, narrowing
+  // down its list of possible successors.
+
+  // Parent is a loop: condition may branch to the body or to the parent op.
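+  // e.g. (illustrative) in `cir.while { ... cir.condition(%c) } do { ... }`
+  // the condition either enters the body region or exits the loop.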
+  if (auto loopOp = dyn_cast<LoopOpInterface>(getOperation()->getParentOp())) {
+    regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments());
+    regions.emplace_back(loopOp->getResults());
+  }
+
+  // Parent is an await: condition may branch to resume or suspend regions.
+  auto await = cast<AwaitOp>(getOperation()->getParentOp());
+  regions.emplace_back(&await.getResume(), await.getResume().getArguments());
+  regions.emplace_back(&await.getSuspend(), await.getSuspend().getArguments());
+}
+
+MutableOperandRange
+ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) {
+  // No values are yielded to the successor region.
+  return MutableOperandRange(getOperation(), 0, 0);
+}
+
+LogicalResult ConditionOp::verify() {
+  if (!isa<LoopOpInterface, AwaitOp>(getOperation()->getParentOp()))
+    return emitOpError("condition must be within a conditional region");
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantOp
+//===----------------------------------------------------------------------===//
+
+static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType,
+                                        mlir::Attribute attrType) {
+  if (attrType.isa<ConstPtrAttr>()) {
+    if (opType.isa<::mlir::cir::PointerType>())
+      return success();
+    return op->emitOpError("nullptr expects pointer type");
+  }
+
+  if (attrType.isa<DataMemberAttr>()) {
+    // More detailed type verifications are already done in
+    // DataMemberAttr::verify. Don't need to repeat here.
+    return success();
+  }
+
+  if (attrType.isa<ZeroAttr>()) {
+    if (opType.isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>())
+      return success();
+    return op->emitOpError("zero expects struct or array type");
+  }
+
+  if (attrType.isa<mlir::cir::BoolAttr>()) {
+    if (!opType.isa<mlir::cir::BoolType>())
+      return op->emitOpError("result type (")
+             << opType << ") must be '!cir.bool' for '" << attrType << "'";
+    return success();
+  }
+
+  if (attrType.isa<IntAttr, FPAttr>()) {
+    auto at = attrType.cast<TypedAttr>();
+    if (at.getType() != opType) {
+      return op->emitOpError("result type (")
+             << opType << ") does not match value type (" << at.getType()
+             << ")";
+    }
+    return success();
+  }
+
+  if (attrType.isa<SymbolRefAttr>()) {
+    if (opType.isa<::mlir::cir::PointerType>())
+      return success();
+    return op->emitOpError("symbolref expects pointer type");
+  }
+
+  if (attrType.isa() || attrType.isa() ||
+      attrType.isa() || attrType.isa() ||
+      attrType.isa())
+    return success();
+  if (attrType.isa())
+    return success();
+
+  assert(attrType.isa<TypedAttr>() && "What else could we be looking at here?");
+  return op->emitOpError("global with type ")
+         << attrType.cast<TypedAttr>().getType() << " not supported";
+}
+
+LogicalResult ConstantOp::verify() {
+  // ODS already generates checks to make sure the result type is valid. We
+  // just need to additionally check that the value's attribute type is
+  // consistent with the result type.
+  return checkConstantTypes(getOperation(), getType(), getValue());
+}
+
+static ParseResult parseConstantValue(OpAsmParser &parser,
+                                      mlir::Attribute &valueAttr) {
+  NamedAttrList attr;
+  return parser.parseAttribute(valueAttr, "value", attr);
+}
+
+// FIXME: create a CIRConstAttr and hide this away for both global
+// initialization and cir.const operation.
+static void printConstant(OpAsmPrinter &p, Attribute value) {
+  p.printAttribute(value);
+}
+
+static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op,
+                               Attribute value) {
+  printConstant(p, value);
+}
+
+OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); }
+
+//===----------------------------------------------------------------------===//
+// ContinueOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult ContinueOp::verify() {
+  if (!this->getOperation()->getParentOfType<LoopOpInterface>())
+    return emitOpError("must be within a loop");
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CastOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult CastOp::verify() {
+  auto resType = getResult().getType();
+  auto srcType = getSrc().getType();
+
+  if (srcType.isa<mlir::cir::VectorType>() &&
+      resType.isa<mlir::cir::VectorType>()) {
+    // Use the element type of the vector to verify the cast kind. (Except for
+    // bitcast, see below.)
+    srcType = srcType.dyn_cast<mlir::cir::VectorType>().getEltType();
+    resType = resType.dyn_cast<mlir::cir::VectorType>().getEltType();
+  }
+
+  switch (getKind()) {
+  case cir::CastKind::int_to_bool: {
+    if (!resType.isa<mlir::cir::BoolType>())
+      return emitOpError() << "requires !cir.bool type for result";
+    if (!srcType.isa<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for source";
+    return success();
+  }
+  case cir::CastKind::ptr_to_bool: {
+    if (!resType.isa<mlir::cir::BoolType>())
+      return emitOpError() << "requires !cir.bool type for result";
+    if (!srcType.isa<mlir::cir::PointerType>())
+      return emitOpError() << "requires !cir.ptr type for source";
+    return success();
+  }
+  case cir::CastKind::integral: {
+    if (!resType.isa<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for result";
+    if (!srcType.isa<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for source";
+    return success();
+  }
+  case cir::CastKind::array_to_ptrdecay: {
+    auto arrayPtrTy = srcType.dyn_cast<mlir::cir::PointerType>();
+    auto flatPtrTy = resType.dyn_cast<mlir::cir::PointerType>();
+    if (!arrayPtrTy || !flatPtrTy)
+      return emitOpError() << "requires !cir.ptr type for source and result";
+
+    auto arrayTy = arrayPtrTy.getPointee().dyn_cast<mlir::cir::ArrayType>();
+    if (!arrayTy)
+      return emitOpError() << "requires !cir.array pointee";
+
+    if (arrayTy.getEltType() != flatPtrTy.getPointee())
+      return emitOpError()
+             << "requires same type for array element and pointee result";
+    return success();
+  }
+  case cir::CastKind::bitcast: {
+    // This is the only cast kind where we don't want vector types to decay
+    // into the element type.
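+    // For instance (illustrative), a bitcast between two vector types, say
+    // !cir.vector<!s32i x 4> and !cir.vector<!u32i x 4>, must compare the
+    // vector types as a whole rather than their element types.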
+    if ((!getSrc().getType().isa<mlir::cir::PointerType>() ||
+         !getResult().getType().isa<mlir::cir::PointerType>()) &&
+        (!getSrc().getType().isa<mlir::cir::VectorType>() ||
+         !getResult().getType().isa<mlir::cir::VectorType>()))
+      return emitOpError()
+             << "requires !cir.ptr or !cir.vector type for source and result";
+    return success();
+  }
+  case cir::CastKind::floating: {
+    if (!srcType.isa<mlir::cir::CIRFPTypeInterface>() ||
+        !resType.isa<mlir::cir::CIRFPTypeInterface>())
+      return emitOpError() << "requires !cir.float type for source and result";
+    return success();
+  }
+  case cir::CastKind::float_to_int: {
+    if (!srcType.isa<mlir::cir::CIRFPTypeInterface>())
+      return emitOpError() << "requires !cir.float type for source";
+    if (!resType.dyn_cast<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for result";
+    return success();
+  }
+  case cir::CastKind::int_to_ptr: {
+    if (!srcType.dyn_cast<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for source";
+    if (!resType.dyn_cast<mlir::cir::PointerType>())
+      return emitOpError() << "requires !cir.ptr type for result";
+    return success();
+  }
+  case cir::CastKind::ptr_to_int: {
+    if (!srcType.dyn_cast<mlir::cir::PointerType>())
+      return emitOpError() << "requires !cir.ptr type for source";
+    if (!resType.dyn_cast<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for result";
+    return success();
+  }
+  case cir::CastKind::float_to_bool: {
+    if (!srcType.isa<mlir::cir::CIRFPTypeInterface>())
+      return emitOpError() << "requires !cir.float type for source";
+    if (!resType.isa<mlir::cir::BoolType>())
+      return emitOpError() << "requires !cir.bool type for result";
+    return success();
+  }
+  case cir::CastKind::bool_to_int: {
+    if (!srcType.isa<mlir::cir::BoolType>())
+      return emitOpError() << "requires !cir.bool type for source";
+    if (!resType.isa<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for result";
+    return success();
+  }
+  case cir::CastKind::int_to_float: {
+    if (!srcType.isa<mlir::cir::IntType>())
+      return emitOpError() << "requires !cir.int type for source";
+    if (!resType.isa<mlir::cir::CIRFPTypeInterface>())
+      return emitOpError() << "requires !cir.float type for result";
+    return success();
+  }
+  case cir::CastKind::bool_to_float: {
+    if (!srcType.isa<mlir::cir::BoolType>())
+      return emitOpError() << "requires !cir.bool type for source";
+    if (!resType.isa<mlir::cir::CIRFPTypeInterface>())
+      return emitOpError() << "requires !cir.float type for result";
+    return success();
+  }
+  }
+
+  llvm_unreachable("Unknown CastOp kind?");
+}
+
+OpFoldResult CastOp::fold(FoldAdaptor adaptor) {
+  if (getKind() != mlir::cir::CastKind::integral)
+    return {};
+  if (getSrc().getType() != getResult().getType())
+    return {};
+  // TODO: for sign differences, it's possible in certain conditions to
+  // create a new attribute that's capable of representing the source.
+  SmallVector<mlir::OpFoldResult> foldResults;
+  auto foldOrder = getSrc().getDefiningOp()->fold(foldResults);
+  if (foldOrder.succeeded() && foldResults[0].is<mlir::Attribute>())
+    return foldResults[0].get<mlir::Attribute>();
+  return {};
+}
+
+//===----------------------------------------------------------------------===//
+// VecCreateOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult VecCreateOp::verify() {
+  // Verify that the number of arguments matches the number of elements in the
+  // vector, and that the type of all the arguments matches the type of the
+  // elements in the vector.
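+  // For instance (illustrative), building a !cir.vector<!s32i x 4> requires
+  // exactly four operands of type !s32i.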
+  auto VecTy = getResult().getType();
+  if (getElements().size() != VecTy.getSize()) {
+    return emitOpError() << "operand count of " << getElements().size()
+                         << " doesn't match vector type " << VecTy
+                         << " element count of " << VecTy.getSize();
+  }
+  auto ElementType = VecTy.getEltType();
+  for (auto Element : getElements()) {
+    if (Element.getType() != ElementType) {
+      return emitOpError() << "operand type " << Element.getType()
+                           << " doesn't match vector element type "
+                           << ElementType;
+    }
+  }
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VecTernaryOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult VecTernaryOp::verify() {
+  // Verify that the condition operand has the same number of elements as the
+  // other operands. (The automatic verification already checked that all
+  // operands are vector types and that the second and third operands are the
+  // same type.)
+  if (getCond().getType().cast<mlir::cir::VectorType>().getSize() !=
+      getVec1().getType().getSize()) {
+    return emitOpError() << ": the number of elements in "
+                         << getCond().getType() << " and "
+                         << getVec1().getType() << " don't match";
+  }
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VecShuffle
+//===----------------------------------------------------------------------===//
+
+LogicalResult VecShuffleOp::verify() {
+  // The number of elements in the indices array must match the number of
+  // elements in the result type.
+  if (getIndices().size() != getResult().getType().getSize()) {
+    return emitOpError() << ": the number of elements in " << getIndices()
+                         << " and " << getResult().getType() << " don't match";
+  }
+  // The element types of the two input vectors and of the result type must
+  // match.
+  if (getVec1().getType().getEltType() != getResult().getType().getEltType()) {
+    return emitOpError() << ": element types of " << getVec1().getType()
+                         << " and " << getResult().getType() << " don't match";
+  }
+  // The indices must all be integer constants.
+  if (not std::all_of(getIndices().begin(), getIndices().end(),
+                      [](mlir::Attribute attr) {
+                        return attr.isa<mlir::cir::IntAttr>();
+                      })) {
+    return emitOpError() << "all index values must be integers";
+  }
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VecShuffleDynamic
+//===----------------------------------------------------------------------===//
+
+LogicalResult VecShuffleDynamicOp::verify() {
+  // The number of elements in the two input vectors must match.
+  if (getVec().getType().getSize() !=
+      getIndices().getType().cast<mlir::cir::VectorType>().getSize()) {
+    return emitOpError() << ": the number of elements in "
+                         << getVec().getType() << " and "
+                         << getIndices().getType() << " don't match";
+  }
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// ReturnOp
+//===----------------------------------------------------------------------===//
+
+static mlir::LogicalResult checkReturnAndFunction(ReturnOp op,
+                                                  cir::FuncOp function) {
+  // ReturnOps currently only have a single optional operand.
+  if (op.getNumOperands() > 1)
+    return op.emitOpError() << "expects at most 1 return operand";
+
+  // Ensure returned type matches the function signature.
+  auto expectedTy = function.getFunctionType().getReturnType();
+  auto actualTy =
+      (op.getNumOperands() == 0 ? mlir::cir::VoidType::get(op.getContext())
+                                : op.getOperand(0).getType());
+//===----------------------------------------------------------------------===//
+// ReturnOp
+//===----------------------------------------------------------------------===//
+
+static mlir::LogicalResult checkReturnAndFunction(ReturnOp op,
+                                                  cir::FuncOp function) {
+  // ReturnOps currently only have a single optional operand.
+  if (op.getNumOperands() > 1)
+    return op.emitOpError() << "expects at most 1 return operand";
+
+  // Ensure returned type matches the function signature.
+  auto expectedTy = function.getFunctionType().getReturnType();
+  auto actualTy =
+      (op.getNumOperands() == 0 ? mlir::cir::VoidType::get(op.getContext())
+                                : op.getOperand(0).getType());
+  if (actualTy != expectedTy)
+    return op.emitOpError() << "returns " << actualTy
+                            << " but enclosing function returns "
+                            << expectedTy;
+
+  return mlir::success();
+}
+
+mlir::LogicalResult ReturnOp::verify() {
+  // Returns can be present in multiple different scopes, get the
+  // wrapping function and start from there.
+  auto *fnOp = getOperation()->getParentOp();
+  while (!isa<cir::FuncOp>(fnOp))
+    fnOp = fnOp->getParentOp();
+
+  // Make sure return types match function return type.
+  if (checkReturnAndFunction(*this, cast<cir::FuncOp>(fnOp)).failed())
+    return failure();
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// ThrowOp
+//===----------------------------------------------------------------------===//
+
+mlir::LogicalResult ThrowOp::verify() {
+  // The rethrow version takes no operands.
+  if (rethrows())
+    return success();
+
+  // The non-rethrow version must have at least the exception pointer.
+  if (getNumOperands() == 1) {
+    if (!getTypeInfo())
+      return emitOpError() << "'type_info' symbol attribute missing";
+    return success();
+  }
+
+  return failure();
+}
+
+//===----------------------------------------------------------------------===//
+// IfOp
+//===----------------------------------------------------------------------===//
+
+ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) {
+  // Create the regions for 'then' and 'else'.
+  result.regions.reserve(2);
+  Region *thenRegion = result.addRegion();
+  Region *elseRegion = result.addRegion();
+
+  auto &builder = parser.getBuilder();
+  OpAsmParser::UnresolvedOperand cond;
+  Type boolType = ::mlir::cir::BoolType::get(builder.getContext());
+
+  if (parser.parseOperand(cond) ||
+      parser.resolveOperand(cond, boolType, result.operands))
+    return failure();
+
+  // Parse the 'then' region.
+  auto parseThenLoc = parser.getCurrentLocation();
+  if (parser.parseRegion(*thenRegion, /*arguments=*/{},
+                         /*argTypes=*/{}))
+    return failure();
+  if (ensureRegionTerm(parser, *thenRegion, parseThenLoc).failed())
+    return failure();
+
+  // If we find an 'else' keyword, parse the 'else' region.
+  if (!parser.parseOptionalKeyword("else")) {
+    auto parseElseLoc = parser.getCurrentLocation();
+    if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{}))
+      return failure();
+    if (ensureRegionTerm(parser, *elseRegion, parseElseLoc).failed())
+      return failure();
+  }
+
+  // Parse the optional attribute list.
+  if (parser.parseOptionalAttrDict(result.attributes))
+    return failure();
+  return success();
+}
+
+void cir::IfOp::print(OpAsmPrinter &p) {
+  p << " " << getCondition() << " ";
+  auto &thenRegion = this->getThenRegion();
+  p.printRegion(thenRegion,
+                /*printEntryBlockArgs=*/false,
+                /*printBlockTerminators=*/!omitRegionTerm(thenRegion));
+
+  // Print the 'else' region if it exists and has a block.
+  auto &elseRegion = this->getElseRegion();
+  if (!elseRegion.empty()) {
+    p << " else ";
+    p.printRegion(elseRegion,
+                  /*printEntryBlockArgs=*/false,
+                  /*printBlockTerminators=*/!omitRegionTerm(elseRegion));
+  }
+
+  p.printOptionalAttrDict(getOperation()->getAttrs());
+}
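+// As an illustration, the parser and printer above round-trip forms like
+// (bodies elided; terminators omitted when trivially terminated):
+//   cir.if %cond {
+//     ...
+//   } else {
+//     ...
+//   }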
+/// Default callback for IfOp builders. Inserts nothing for now.
+void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {}
+
+/// Given a branch point `point` (one of this operation's regions, or the
+/// parent operation itself), return the successor regions: the regions that
+/// may be selected next during the flow of control.
+void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                               SmallVectorImpl<RegionSuccessor> &regions) {
+  // The `then` and the `else` region branch back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor());
+    return;
+  }
+
+  // Don't consider the else region if it is empty.
+  Region *elseRegion = &this->getElseRegion();
+  if (elseRegion->empty())
+    elseRegion = nullptr;
+
+  // Otherwise, the successor is dependent on the condition.
+  // bool condition;
+  // if (auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>()) {
+  //   assert(0 && "not implemented");
+  //   condition = condAttr.getValue().isOneValue();
+  //   // Add the successor regions using the condition.
+  //   regions.push_back(RegionSuccessor(condition ? &thenRegion() :
+  //                                     elseRegion));
+  //   return;
+  // }
+
+  // If the condition isn't constant, both regions may be executed.
+  regions.push_back(RegionSuccessor(&getThenRegion()));
+  // If the else region does not exist, it is not a viable successor.
+  if (elseRegion)
+    regions.push_back(RegionSuccessor(elseRegion));
+  return;
+}
+
+void IfOp::build(OpBuilder &builder, OperationState &result, Value cond,
+                 bool withElseRegion,
+                 function_ref<void(OpBuilder &, Location)> thenBuilder,
+                 function_ref<void(OpBuilder &, Location)> elseBuilder) {
+  assert(thenBuilder && "the builder callback for 'then' must be present");
+
+  result.addOperands(cond);
+
+  OpBuilder::InsertionGuard guard(builder);
+  Region *thenRegion = result.addRegion();
+  builder.createBlock(thenRegion);
+  thenBuilder(builder, result.location);
+
+  Region *elseRegion = result.addRegion();
+  if (!withElseRegion)
+    return;
+
+  builder.createBlock(elseRegion);
+  elseBuilder(builder, result.location);
+}
+
+LogicalResult IfOp::verify() { return success(); }
+
+//===----------------------------------------------------------------------===//
+// ScopeOp
+//===----------------------------------------------------------------------===//
+
+/// Given a branch point `point` (the scope region, or the parent operation
+/// itself), return the successor regions: the regions that may be selected
+/// next during the flow of control.
+void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                  SmallVectorImpl<RegionSuccessor> &regions) {
+  // The only region always branches back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor(getODSResults(0)));
+    return;
+  }
+
+  // The scope region is always entered from the parent.
+  regions.push_back(RegionSuccessor(&getScopeRegion()));
+}
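+// As an illustration (CIR assembly approximated), a scope region:
+//   cir.scope {
+//     ...
+//   }
+// A scope may also yield a value via cir.yield, in which case the operation
+// has a result; the first builder overload below covers that case.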
+void ScopeOp::build(
+    OpBuilder &builder, OperationState &result,
+    function_ref<void(OpBuilder &, Type &, Location)> scopeBuilder) {
+  assert(scopeBuilder && "the builder callback for the scope must be present");
+
+  OpBuilder::InsertionGuard guard(builder);
+  Region *scopeRegion = result.addRegion();
+  builder.createBlock(scopeRegion);
+
+  mlir::Type yieldTy;
+  scopeBuilder(builder, yieldTy, result.location);
+
+  if (yieldTy)
+    result.addTypes(TypeRange{yieldTy});
+}
+
+void ScopeOp::build(OpBuilder &builder, OperationState &result,
+                    function_ref<void(OpBuilder &, Location)> scopeBuilder) {
+  assert(scopeBuilder && "the builder callback for the scope must be present");
+  OpBuilder::InsertionGuard guard(builder);
+  Region *scopeRegion = result.addRegion();
+  builder.createBlock(scopeRegion);
+  scopeBuilder(builder, result.location);
+}
+
+LogicalResult ScopeOp::verify() { return success(); }
+
+//===----------------------------------------------------------------------===//
+// TryOp
+//===----------------------------------------------------------------------===//
+
+void TryOp::build(
+    OpBuilder &builder, OperationState &result,
+    function_ref<void(OpBuilder &, Type &, Location)> scopeBuilder) {
+  assert(scopeBuilder && "the builder callback for the body must be present");
+
+  OpBuilder::InsertionGuard guard(builder);
+  Region *scopeRegion = result.addRegion();
+  builder.createBlock(scopeRegion);
+
+  mlir::Type yieldTy;
+  scopeBuilder(builder, yieldTy, result.location);
+
+  if (yieldTy)
+    result.addTypes(TypeRange{yieldTy});
+}
+
+void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                SmallVectorImpl<RegionSuccessor> &regions) {
+  // The only region always branches back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor(this->getODSResults(0)));
+    return;
+  }
+
+  // The body region is always entered from the parent.
+  regions.push_back(RegionSuccessor(&getBody()));
+}
+
+//===----------------------------------------------------------------------===//
+// TernaryOp
+//===----------------------------------------------------------------------===//
+
+/// Given a branch point `point` (one of the `true`/`false` regions, or the
+/// parent operation itself), return the successor regions: the regions that
+/// may be selected next during the flow of control.
+void TernaryOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                    SmallVectorImpl<RegionSuccessor> &regions) {
+  // The `true` and the `false` region branch back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor(this->getODSResults(0)));
+    return;
+  }
+
+  // Try to optimize if we have more information.
+  // if (auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>()) {
+  //   assert(0 && "not implemented");
+  // }
+
+  // If the condition isn't constant, both regions may be executed.
+  regions.push_back(RegionSuccessor(&getTrueRegion()));
+  regions.push_back(RegionSuccessor(&getFalseRegion()));
+  return;
+}
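+// As an illustration (CIR assembly approximated), a ternary yielding a value,
+// which the builder below assembles from the two region callbacks:
+//   %r = cir.ternary(%cond, true {
+//     cir.yield %a : !s32i
+//   }, false {
+//     cir.yield %b : !s32i
+//   }) : (!cir.bool) -> !s32i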
+void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond,
+                      function_ref<void(OpBuilder &, Location)> trueBuilder,
+                      function_ref<void(OpBuilder &, Location)> falseBuilder) {
+  result.addOperands(cond);
+  OpBuilder::InsertionGuard guard(builder);
+  Region *trueRegion = result.addRegion();
+  auto *block = builder.createBlock(trueRegion);
+  trueBuilder(builder, result.location);
+  Region *falseRegion = result.addRegion();
+  builder.createBlock(falseRegion);
+  falseBuilder(builder, result.location);
+
+  auto yield = dyn_cast<YieldOp>(block->getTerminator());
+  assert((yield && yield.getNumOperands() <= 1) &&
+         "expected zero or one result type");
+  if (yield.getNumOperands() == 1)
+    result.addTypes(TypeRange{yield.getOperandTypes().front()});
+}
+
+//===----------------------------------------------------------------------===//
+// BrOp
+//===----------------------------------------------------------------------===//
+
+mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) {
+  assert(index == 0 && "invalid successor index");
+  return mlir::SuccessorOperands(getDestOperandsMutable());
+}
+
+Block *BrOp::getSuccessorForOperands(ArrayRef<Attribute>) { return getDest(); }
+
+//===----------------------------------------------------------------------===//
+// BrCondOp
+//===----------------------------------------------------------------------===//
+
+mlir::SuccessorOperands BrCondOp::getSuccessorOperands(unsigned index) {
+  assert(index < getNumSuccessors() && "invalid successor index");
+  return SuccessorOperands(index == 0 ? getDestOperandsTrueMutable()
+                                      : getDestOperandsFalseMutable());
+}
+
+Block *BrCondOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
+  if (IntegerAttr condAttr = operands.front().dyn_cast_or_null<IntegerAttr>())
+    return condAttr.getValue().isOne() ? getDestTrue() : getDestFalse();
+  return nullptr;
+}
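+// As an illustration (CIR assembly approximated), a conditional branch that
+// forwards block operands to its successors:
+//   cir.brcond %cond ^bb1(%a : !s32i), ^bb2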
+//===----------------------------------------------------------------------===//
+// SwitchOp
+//===----------------------------------------------------------------------===//
+
+ParseResult
+parseSwitchOp(OpAsmParser &parser,
+              llvm::SmallVectorImpl<std::unique_ptr<Region>> &regions,
+              ::mlir::ArrayAttr &casesAttr,
+              mlir::OpAsmParser::UnresolvedOperand &cond,
+              mlir::Type &condType) {
+  mlir::cir::IntType intCondType;
+  SmallVector<mlir::Attribute, 4> cases;
+
+  auto parseAndCheckRegion = [&]() -> ParseResult {
+    // Parse region attached to case
+    regions.emplace_back(new Region);
+    Region &currRegion = *regions.back().get();
+    auto parserLoc = parser.getCurrentLocation();
+    if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) {
+      regions.clear();
+      return failure();
+    }
+
+    if (currRegion.empty()) {
+      return parser.emitError(parser.getCurrentLocation(),
+                              "case region shall not be empty");
+    }
+
+    if (!(currRegion.back().mightHaveTerminator() &&
+          currRegion.back().getTerminator()))
+      return parser.emitError(parserLoc,
+                              "case regions must be explicitly terminated");
+
+    return success();
+  };
+
+  auto parseCase = [&]() -> ParseResult {
+    auto loc = parser.getCurrentLocation();
+    if (parser.parseKeyword("case").failed())
+      return parser.emitError(loc, "expected 'case' keyword here");
+
+    if (parser.parseLParen().failed())
+      return parser.emitError(parser.getCurrentLocation(), "expected '('");
+
+    ::llvm::StringRef attrStr;
+    ::mlir::NamedAttrList attrStorage;
+
+    // case (equal, 20) {
+    // ...
+    // 1. Get the case kind
+    // 2. Get the value (next in list)
+
+    // These need to be in sync with CIROps.td
+    if (parser.parseOptionalKeyword(&attrStr, {"default", "equal", "anyof"})) {
+      ::mlir::StringAttr attrVal;
+      ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute(
+          attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage);
+      if (parseResult.has_value()) {
+        if (failed(*parseResult))
+          return ::mlir::failure();
+        attrStr = attrVal.getValue();
+      }
+    }
+
+    if (attrStr.empty()) {
+      return parser.emitError(
+          loc, "expected string or keyword containing one of the following "
+               "enum values for attribute 'kind' [default, equal, anyof]");
+    }
+
+    auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr.str());
+    if (!attrOptional)
+      return parser.emitError(loc, "invalid ")
+             << "kind attribute specification: \"" << attrStr << '"';
+
+    auto kindAttr = ::mlir::cir::CaseOpKindAttr::get(
+        parser.getBuilder().getContext(), attrOptional.value());
+
+    // `,` value or `,` [values,...]
+    SmallVector<mlir::Attribute, 4> caseEltValueListAttr;
+    mlir::ArrayAttr caseValueList;
+
+    switch (kindAttr.getValue()) {
+    case cir::CaseOpKind::Equal: {
+      if (parser.parseComma().failed())
+        return mlir::failure();
+      int64_t val = 0;
+      if (parser.parseInteger(val).failed())
+        return ::mlir::failure();
+      caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(intCondType, val));
+      break;
+    }
+    case cir::CaseOpKind::Anyof: {
+      if (parser.parseComma().failed())
+        return mlir::failure();
+      if (parser.parseLSquare().failed())
+        return mlir::failure();
+      if (parser.parseCommaSeparatedList([&]() {
+            int64_t val = 0;
+            if (parser.parseInteger(val).failed())
+              return ::mlir::failure();
+            caseEltValueListAttr.push_back(
+                mlir::cir::IntAttr::get(intCondType, val));
+            return ::mlir::success();
+          }))
+        return mlir::failure();
+      if (parser.parseRSquare().failed())
+        return mlir::failure();
+      break;
+    }
+    case cir::CaseOpKind::Default: {
+      if (parser.parseRParen().failed())
+        return parser.emitError(parser.getCurrentLocation(), "expected ')'");
+      cases.push_back(cir::CaseAttr::get(
+          parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr));
+      return parseAndCheckRegion();
+    }
+    }
+
+    caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr);
+    cases.push_back(
+        cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr));
+    if (succeeded(parser.parseOptionalColon())) {
+      Type caseIntTy;
+      if (parser.parseType(caseIntTy).failed())
+        return parser.emitError(parser.getCurrentLocation(), "expected type");
+      if (intCondType != caseIntTy)
+        return parser.emitError(parser.getCurrentLocation(),
+                                "expected a match with the condition type");
+    }
+    if (parser.parseRParen().failed())
+      return parser.emitError(parser.getCurrentLocation(), "expected ')'");
+    return parseAndCheckRegion();
+  };
+
+  if (parser.parseLParen())
+    return ::mlir::failure();
+
+  if (parser.parseOperand(cond))
+    return ::mlir::failure();
+  if (parser.parseColon())
+    return ::mlir::failure();
+  if (parser.parseCustomTypeWithFallback(intCondType))
+    return ::mlir::failure();
+  condType = intCondType;
+  if (parser.parseRParen())
+    return ::mlir::failure();
+
+  if (parser
+          .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, parseCase,
+                                   " in cases list")
+          .failed())
+    return failure();
+
+  casesAttr = parser.getBuilder().getArrayAttr(cases);
+  return ::mlir::success();
+}
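+// As an illustration, the parser above accepts forms like (case bodies
+// elided; each case region must be explicitly terminated):
+//   cir.switch (%v : !s32i) [
+//     case (equal, 20) { ... },
+//     case (anyof, [1, 2, 3] : !s32i) { ... },
+//     case (default) { ... }
+//   ]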
+void printSwitchOp(OpAsmPrinter &p, SwitchOp op,
+                   mlir::MutableArrayRef<::mlir::Region> regions,
+                   mlir::ArrayAttr casesAttr, mlir::Value condition,
+                   mlir::Type condType) {
+  int idx = 0, lastIdx = regions.size() - 1;
+
+  p << "(";
+  p << condition;
+  p << " : ";
+  p.printStrippedAttrOrType(condType);
+  p << ") [";
+  // FIXME: ideally we want some extra indentation for "cases", but it's too
+  // cumbersome to pull out now, since most of the handling is private.
+  // Perhaps better to improve the overall mechanism.
+  p.printNewline();
+  for (auto &r : regions) {
+    p << "case (";
+
+    auto attr = casesAttr[idx].cast<cir::CaseAttr>();
+    auto kind = attr.getKind().getValue();
+    assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal ||
+            kind == CaseOpKind::Anyof) &&
+           "unknown case");
+
+    // Case kind
+    p << stringifyCaseOpKind(kind);
+
+    // Case value
+    switch (kind) {
+    case cir::CaseOpKind::Equal: {
+      p << ", ";
+      auto intAttr = attr.getValue()[0].cast<cir::IntAttr>();
+      auto intAttrTy = intAttr.getType().cast<cir::IntType>();
+      (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt());
+      break;
+    }
+    case cir::CaseOpKind::Anyof: {
+      p << ", [";
+      llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) {
+        auto intAttr = a.cast<cir::IntAttr>();
+        auto intAttrTy = intAttr.getType().cast<cir::IntType>();
+        (intAttrTy.isSigned() ? p << intAttr.getSInt()
+                              : p << intAttr.getUInt());
+      });
+      p << "] : ";
+      auto typedAttr = attr.getValue()[0].dyn_cast<TypedAttr>();
+      assert(typedAttr && "this should never not have a type!");
+      p.printType(typedAttr.getType());
+      break;
+    }
+    case cir::CaseOpKind::Default:
+      break;
+    }
+
+    p << ") ";
+    p.printRegion(r, /*printEntryBlockArgs=*/false,
+                  /*printBlockTerminators=*/true);
+    if (idx < lastIdx)
+      p << ",";
+    p.printNewline();
+    idx++;
+  }
+  p << "]";
+}
+
+/// Given a branch point `point` (one of the case regions, or the parent
+/// operation itself), return the successor regions: the regions that may be
+/// selected next during the flow of control.
+void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                   SmallVectorImpl<RegionSuccessor> &regions) {
+  // From any of the case regions, control branches back to the parent
+  // operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor());
+    return;
+  }
+
+  // for (auto &r : this->getRegions()) {
+  //   If we can figure out the case stmt we are landing on, this can be
+  //   greatly simplified.
+  //   bool condition;
+  //   if (auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>()) {
+  //     assert(0 && "not implemented");
+  //     (void)r;
+  //     condition = condAttr.getValue().isOneValue();
+  //     Add the successor regions using the condition.
+  //     regions.push_back(RegionSuccessor(condition ? &thenRegion() :
+  //                                       elseRegion));
+  //     return;
+  //   }
+  // }
+
+  // If the condition isn't constant, all regions may be executed.
+  for (auto &r : this->getRegions())
+    regions.push_back(RegionSuccessor(&r));
+}
+LogicalResult SwitchOp::verify() {
+  if (getCases().has_value() && getCases()->size() != getNumRegions())
+    return emitOpError("number of cases attributes and regions must match");
+  return success();
+}
+
+void SwitchOp::build(
+    OpBuilder &builder, OperationState &result, Value cond,
+    function_ref<void(OpBuilder &, Location, OperationState &)> switchBuilder) {
+  assert(switchBuilder && "the builder callback for regions must be present");
+  OpBuilder::InsertionGuard guardSwitch(builder);
+  result.addOperands({cond});
+  switchBuilder(builder, result.location, result);
+}
+
+//===----------------------------------------------------------------------===//
+// SwitchFlatOp
+//===----------------------------------------------------------------------===//
+
+void SwitchFlatOp::build(OpBuilder &builder, OperationState &result,
+                         Value value, Block *defaultDestination,
+                         ValueRange defaultOperands,
+                         ArrayRef<llvm::APInt> caseValues,
+                         BlockRange caseDestinations,
+                         ArrayRef<ValueRange> caseOperands) {
+
+  std::vector<mlir::Attribute> caseValuesAttrs;
+  for (auto &val : caseValues) {
+    caseValuesAttrs.push_back(mlir::cir::IntAttr::get(value.getType(), val));
+  }
+  auto attrs = ArrayAttr::get(builder.getContext(), caseValuesAttrs);
+
+  build(builder, result, value, defaultOperands, caseOperands, attrs,
+        defaultDestination, caseDestinations);
+}
+
+/// <cases> ::= `[` (case (`,` case )* )? `]`
+/// <case>  ::= integer `:` bb-id (`(` ssa-use-and-type-list `)`)?
+static ParseResult parseSwitchFlatOpCases(
+    OpAsmParser &parser, Type flagType, mlir::ArrayAttr &caseValues,
+    SmallVectorImpl<Block *> &caseDestinations,
+    SmallVectorImpl<SmallVector<OpAsmParser::UnresolvedOperand>> &caseOperands,
+    SmallVectorImpl<SmallVector<Type>> &caseOperandTypes) {
+  if (failed(parser.parseLSquare()))
+    return failure();
+  if (succeeded(parser.parseOptionalRSquare()))
+    return success();
+  SmallVector<mlir::Attribute> values;
+
+  auto parseCase = [&]() {
+    int64_t value = 0;
+    if (failed(parser.parseInteger(value)))
+      return failure();
+
+    values.push_back(IntAttr::get(flagType, value));
+
+    Block *destination;
+    SmallVector<OpAsmParser::UnresolvedOperand> operands;
+    SmallVector<Type> operandTypes;
+    if (parser.parseColon() || parser.parseSuccessor(destination))
+      return failure();
+    if (!parser.parseOptionalLParen()) {
+      if (parser.parseOperandList(operands, OpAsmParser::Delimiter::None,
+                                  /*allowResultNumber=*/false) ||
+          parser.parseColonTypeList(operandTypes) || parser.parseRParen())
+        return failure();
+    }
+    caseDestinations.push_back(destination);
+    caseOperands.emplace_back(operands);
+    caseOperandTypes.emplace_back(operandTypes);
+    return success();
+  };
+  if (failed(parser.parseCommaSeparatedList(parseCase)))
+    return failure();
+
+  caseValues = ArrayAttr::get(flagType.getContext(), values);
+
+  return parser.parseRSquare();
+}
+
+static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op,
+                                   Type flagType, mlir::ArrayAttr caseValues,
+                                   SuccessorRange caseDestinations,
+                                   OperandRangeRange caseOperands,
+                                   const TypeRangeRange &caseOperandTypes) {
+  p << '[';
+  p.printNewline();
+  if (!caseValues) {
+    p << ']';
+    return;
+  }
+
+  size_t index = 0;
+  llvm::interleave(
+      llvm::zip(caseValues, caseDestinations),
+      [&](auto i) {
+        p << "  ";
+        mlir::Attribute a = std::get<0>(i);
+        p << a.cast<mlir::cir::IntAttr>().getValue();
+        p << ": ";
+        p.printSuccessorAndUseList(std::get<1>(i), caseOperands[index++]);
+      },
+      [&] {
+        p << ',';
+        p.printNewline();
+      });
+  p.printNewline();
+  p << ']';
+}
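+// As an illustration, a case list matching the grammar above:
+//   [
+//     0: ^bb1,
+//     1: ^bb2(%arg : !s32i)
+//   ]
+// (the overall cir.switch.flat form is approximated from this parser/printer).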
+//===----------------------------------------------------------------------===//
+// CatchOp
+//===----------------------------------------------------------------------===//
+
+ParseResult
+parseCatchOp(OpAsmParser &parser,
+             llvm::SmallVectorImpl<std::unique_ptr<Region>> &regions,
+             ::mlir::ArrayAttr &catchersAttr) {
+  SmallVector<mlir::Attribute, 4> catchList;
+
+  auto parseAndCheckRegion = [&]() -> ParseResult {
+    // Parse region attached to catch
+    regions.emplace_back(new Region);
+    Region &currRegion = *regions.back().get();
+    auto parserLoc = parser.getCurrentLocation();
+    if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) {
+      regions.clear();
+      return failure();
+    }
+
+    if (currRegion.empty()) {
+      return parser.emitError(parser.getCurrentLocation(),
+                              "catch region shall not be empty");
+    }
+
+    if (!(currRegion.back().mightHaveTerminator() &&
+          currRegion.back().getTerminator()))
+      return parser.emitError(
+          parserLoc, "blocks are expected to be explicitly terminated");
+
+    return success();
+  };
+
+  auto parseCatchEntry = [&]() -> ParseResult {
+    mlir::Type exceptionType;
+    mlir::Attribute exceptionTypeInfo;
+
+    // cir.catch(..., [
+    //   type (!cir.ptr, @type_info_char_star) {
+    //     ...
+    //   },
+    //   all {
+    //     ...
+    //   }
+    // ]
+    ::llvm::StringRef attrStr;
+    if (parser.parseOptionalKeyword(&attrStr, {"all"})) {
+      if (parser.parseKeyword("type").failed())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "expected 'type' keyword here");
+
+      if (parser.parseLParen().failed())
+        return parser.emitError(parser.getCurrentLocation(), "expected '('");
+
+      if (parser.parseType(exceptionType).failed())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "expected valid exception type");
+      if (parser.parseAttribute(exceptionTypeInfo).failed())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "expected valid RTTI info attribute");
+      if (parser.parseRParen().failed())
+        return parser.emitError(parser.getCurrentLocation(), "expected ')'");
+    }
+    catchList.push_back(exceptionTypeInfo);
+    return parseAndCheckRegion();
+  };
+
+  if (parser
+          .parseCommaSeparatedList(OpAsmParser::Delimiter::Square,
+                                   parseCatchEntry, " in catch list")
+          .failed())
+    return failure();
+
+  catchersAttr = parser.getBuilder().getArrayAttr(catchList);
+  return ::mlir::success();
+}
+
+void printCatchOp(OpAsmPrinter &p, CatchOp op,
+                  mlir::MutableArrayRef<::mlir::Region> regions,
+                  mlir::ArrayAttr catchList) {
+
+  int currCatchIdx = 0;
+  p << "[";
+  llvm::interleaveComma(catchList, p, [&](const Attribute &a) {
+    p.printNewline();
+    p.increaseIndent();
+    auto exRtti = a;
+
+    if (a.isa<mlir::cir::CatchUnwindAttr>()) {
+      p.printAttribute(a);
+    } else if (!exRtti) {
+      p << "all";
+    } else {
+      p << "type (";
+      p.printAttribute(exRtti);
+      p << ") ";
+    }
+    p.printNewline();
+    p.increaseIndent();
+    p.printRegion(regions[currCatchIdx], /*printEntryBlockArgs=*/false,
+                  /*printBlockTerminators=*/true);
+    currCatchIdx++;
+    p.decreaseIndent();
+    p.decreaseIndent();
+  });
+  p << "]";
+}
+
+/// Given a branch point `point` (one of the catch regions, or the parent
+/// operation itself), return the successor regions: the regions that may be
+/// selected next during the flow of control.
+void CatchOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                  SmallVectorImpl<RegionSuccessor> &regions) {
+  // From any of the catch regions, control branches back to the parent
+  // operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor());
+    return;
+  }
+
+  // FIXME: optimize, ideas include:
+  // - If we know a target function never throws a specific type, we can
+  //   remove the catch handler.
+  // - ???
+
+  // Without further information, any of the catch handlers may be executed.
+  for (auto &r : this->getRegions())
+    regions.push_back(RegionSuccessor(&r));
+}
+
+void CatchOp::build(
+    OpBuilder &builder, OperationState &result, mlir::Value exceptionInfo,
+    function_ref<void(OpBuilder &, Location, OperationState &)> catchBuilder) {
+  assert(catchBuilder && "the builder callback for regions must be present");
+  result.addOperands(ValueRange{exceptionInfo});
+  OpBuilder::InsertionGuard guardCatch(builder);
+  catchBuilder(builder, result.location, result);
+}
+
+//===----------------------------------------------------------------------===//
+// LoopOpInterface Methods
+//===----------------------------------------------------------------------===//
+
+void DoWhileOp::getSuccessorRegions(
+    ::mlir::RegionBranchPoint point,
+    ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &regions) {
+  LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions);
+}
+
+::llvm::SmallVector<Region *> DoWhileOp::getLoopRegions() {
+  return {&getBody()};
+}
+
+void WhileOp::getSuccessorRegions(
+    ::mlir::RegionBranchPoint point,
+    ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &regions) {
+  LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions);
+}
+
+::llvm::SmallVector<Region *> WhileOp::getLoopRegions() {
+  return {&getBody()};
+}
+
+void ForOp::getSuccessorRegions(
+    ::mlir::RegionBranchPoint point,
+    ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &regions) {
+  LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions);
+}
+
+::llvm::SmallVector<Region *> ForOp::getLoopRegions() { return {&getBody()}; }
+
+//===----------------------------------------------------------------------===//
+// GlobalOp
+//===----------------------------------------------------------------------===//
+
+static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
+                                             TypeAttr type, Attribute initAttr,
+                                             mlir::Region &ctorRegion,
+                                             mlir::Region &dtorRegion) {
+  auto printType = [&]() { p << ": " << type; };
+  if (!op.isDeclaration()) {
+    p << "= ";
+    if (!ctorRegion.empty()) {
+      p << "ctor ";
+      printType();
+      p << " ";
+      p.printRegion(ctorRegion,
+                    /*printEntryBlockArgs=*/false,
+                    /*printBlockTerminators=*/false);
+    } else {
+      // This also prints the type...
+      if (initAttr)
+        printConstant(p, initAttr);
+    }
+
+    if (!dtorRegion.empty()) {
+      p << " dtor ";
+      p.printRegion(dtorRegion,
+                    /*printEntryBlockArgs=*/false,
+                    /*printBlockTerminators=*/false);
+    }
+  } else {
+    printType();
+  }
+}
+
+static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser,
+                                                    TypeAttr &typeAttr,
+                                                    Attribute &initialValueAttr,
+                                                    mlir::Region &ctorRegion,
+                                                    mlir::Region &dtorRegion) {
+  mlir::Type opTy;
+  if (parser.parseOptionalEqual().failed()) {
+    // Absence of '=' means a declaration, so we need to parse the type.
+    //   cir.global @a : i32
+    if (parser.parseColonType(opTy))
+      return failure();
+  } else {
+    // Parse constructor, example:
+    //   cir.global @rgb = ctor : type { ... }
+    if (!parser.parseOptionalKeyword("ctor")) {
+      if (parser.parseColonType(opTy))
+        return failure();
+      auto parseLoc = parser.getCurrentLocation();
+      if (parser.parseRegion(ctorRegion, /*arguments=*/{}, /*argTypes=*/{}))
+        return failure();
+      if (!ctorRegion.hasOneBlock())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "ctor region must have exactly one block");
+      if (ctorRegion.back().empty())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "ctor region shall not be empty");
+      if (ensureRegionTerm(parser, ctorRegion, parseLoc).failed())
+        return failure();
+    } else {
+      // Parse constant with initializer, examples:
+      //   cir.global @y = 3.400000e+00 : f32
+      //   cir.global @rgb = #cir.const_array<[...] : !cir.array>
+      if (parseConstantValue(parser, initialValueAttr).failed())
+        return failure();
+
+      assert(initialValueAttr.isa<mlir::TypedAttr>() &&
+             "Non-typed attrs shouldn't appear here.");
+      auto typedAttr = initialValueAttr.cast<mlir::TypedAttr>();
+      opTy = typedAttr.getType();
+    }
+
+    // Parse destructor, example:
+    //   dtor { ... }
+    if (!parser.parseOptionalKeyword("dtor")) {
+      auto parseLoc = parser.getCurrentLocation();
+      if (parser.parseRegion(dtorRegion, /*arguments=*/{}, /*argTypes=*/{}))
+        return failure();
+      if (!dtorRegion.hasOneBlock())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "dtor region must have exactly one block");
+      if (dtorRegion.back().empty())
+        return parser.emitError(parser.getCurrentLocation(),
+                                "dtor region shall not be empty");
+      if (ensureRegionTerm(parser, dtorRegion, parseLoc).failed())
+        return failure();
+    }
+  }
+
+  typeAttr = TypeAttr::get(opTy);
+  return success();
+}
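+// As an illustration, forms accepted by the parser above, mirroring the
+// examples in the comments (bodies elided):
+//   cir.global @y = 3.400000e+00 : f32
+//   cir.global @rgb = ctor : !s32i { ... } dtor { ... }
+//   cir.global @a : i32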
+LogicalResult GlobalOp::verify() {
+  // Verify that the initial value, if present, is either a unit attribute or
+  // an attribute CIR supports.
+  if (getInitialValue().has_value()) {
+    if (checkConstantTypes(getOperation(), getSymType(), *getInitialValue())
+            .failed())
+      return failure();
+  }
+
+  // Verify that the constructor region, if present, has exactly one
+  // non-empty block.
+  auto &ctorRegion = getCtorRegion();
+  if (!ctorRegion.empty()) {
+    if (!ctorRegion.hasOneBlock()) {
+      return emitError() << "ctor region must have exactly one block.";
+    }
+
+    auto &block = ctorRegion.front();
+    if (block.empty()) {
+      return emitError() << "ctor region shall not be empty.";
+    }
+  }
+
+  // Verify that the destructor region, if present, has exactly one
+  // non-empty block.
+  auto &dtorRegion = getDtorRegion();
+  if (!dtorRegion.empty()) {
+    if (!dtorRegion.hasOneBlock()) {
+      return emitError() << "dtor region must have exactly one block.";
+    }
+
+    auto &block = dtorRegion.front();
+    if (block.empty()) {
+      return emitError() << "dtor region shall not be empty.";
+    }
+  }
+
+  if (std::optional<uint64_t> alignAttr = getAlignment()) {
+    uint64_t alignment = alignAttr.value();
+    if (!llvm::isPowerOf2_64(alignment))
+      return emitError() << "alignment attribute value " << alignment
+                         << " is not a power of 2";
+  }
+
+  switch (getLinkage()) {
+  case GlobalLinkageKind::InternalLinkage:
+  case GlobalLinkageKind::PrivateLinkage:
+    if (isPublic())
+      return emitError() << "public visibility not allowed with '"
+                         << stringifyGlobalLinkageKind(getLinkage())
+                         << "' linkage";
+    break;
+  case GlobalLinkageKind::ExternalLinkage:
+  case GlobalLinkageKind::ExternalWeakLinkage:
+  case GlobalLinkageKind::LinkOnceODRLinkage:
+  case GlobalLinkageKind::LinkOnceAnyLinkage:
+  case GlobalLinkageKind::CommonLinkage:
+    // FIXME: mlir's concept of visibility gets tricky with LLVM ones,
+    // for instance, symbol declarations cannot be "public", so we
+    // have to mark them "private" to workaround the symbol verifier.
+    if (isPrivate() && !isDeclaration())
+      return emitError() << "private visibility not allowed with '"
+                         << stringifyGlobalLinkageKind(getLinkage())
+                         << "' linkage";
+    break;
+  default:
+    emitError() << stringifyGlobalLinkageKind(getLinkage())
+                << ": verifier not implemented\n";
+    return failure();
+  }
+
+  // TODO: verify visibility for declarations?
+  return success();
+}
+
+void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState,
+                     StringRef sym_name, Type sym_type, bool isConstant,
+                     cir::GlobalLinkageKind linkage,
+                     function_ref<void(OpBuilder &, Location)> ctorBuilder,
+                     function_ref<void(OpBuilder &, Location)> dtorBuilder) {
+  odsState.addAttribute(getSymNameAttrName(odsState.name),
+                        odsBuilder.getStringAttr(sym_name));
+  odsState.addAttribute(getSymTypeAttrName(odsState.name),
+                        ::mlir::TypeAttr::get(sym_type));
+  if (isConstant)
+    odsState.addAttribute(getConstantAttrName(odsState.name),
+                          odsBuilder.getUnitAttr());
+
+  ::mlir::cir::GlobalLinkageKindAttr linkageAttr =
+      cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage);
+  odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr);
+
+  Region *ctorRegion = odsState.addRegion();
+  if (ctorBuilder) {
+    odsBuilder.createBlock(ctorRegion);
+    ctorBuilder(odsBuilder, odsState.location);
+  }
+
+  Region *dtorRegion = odsState.addRegion();
+  if (dtorBuilder) {
+    odsBuilder.createBlock(dtorRegion);
+    dtorBuilder(odsBuilder, odsState.location);
+  }
+}
+
+/// Given a branch point `point` (the ctor/dtor regions, or the parent
+/// operation itself), return the successor regions: the regions that may be
+/// selected next during the flow of control.
+void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                   SmallVectorImpl<RegionSuccessor> &regions) {
+  // The `ctor` and `dtor` regions always branch back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor());
+    return;
+  }
+
+  // Don't consider the ctor region if it is empty.
+  Region *ctorRegion = &this->getCtorRegion();
+  if (ctorRegion->empty())
+    ctorRegion = nullptr;
+
+  // Don't consider the dtor region if it is empty.
+  Region *dtorRegion = &this->getDtorRegion();
+  if (dtorRegion->empty())
+    dtorRegion = nullptr;
+
+  // Any non-empty region may be executed.
+  if (ctorRegion)
+    regions.push_back(RegionSuccessor(ctorRegion));
+  if (dtorRegion)
+    regions.push_back(RegionSuccessor(dtorRegion));
+}
+
+//===----------------------------------------------------------------------===//
+// GetGlobalOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+  // Verify that the pointee type of the result pointer matches the type of
+  // the referenced cir.global or cir.func op.
+  auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr());
+  if (!(isa<GlobalOp>(op) || isa<cir::FuncOp>(op)))
+    return emitOpError("'")
+           << getName()
+           << "' does not reference a valid cir.global or cir.func";
+
+  mlir::Type symTy;
+  if (auto g = dyn_cast<GlobalOp>(op)) {
+    symTy = g.getSymType();
+    // Verify that for thread local global access, the global needs to
+    // be marked with tls bits.
+    if (getTls() && !g.getTlsModel())
+      return emitOpError("access to global not marked thread local");
+  } else if (auto f = dyn_cast<cir::FuncOp>(op))
+    symTy = f.getFunctionType();
+  else
+    llvm_unreachable("shall not get here");
+
+  auto resultType = getAddr().getType().dyn_cast<mlir::cir::PointerType>();
+  if (!resultType || symTy != resultType.getPointee())
+    return emitOpError("result type pointee type '")
+           << resultType.getPointee() << "' does not match type " << symTy
+           << " of the global @" << getName();
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+  // vtable ptr is not coming from a symbol.
+  if (!getName())
+    return success();
+  auto name = *getName();
+
+  // Verify that the referenced symbol is a valid cir.global with a
+  // #cir.vtable initializer.
+  auto op = dyn_cast_or_null<GlobalOp>(
+      symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()));
+  if (!op)
+    return emitOpError("'")
+           << name << "' does not reference a valid cir.global";
+  auto init = op.getInitialValue();
+  if (!init)
+    return success();
+  if (!isa<mlir::cir::VTableAttr>(*init))
+    return emitOpError("Expected #cir.vtable in initializer for global '")
+           << name << "'";
+  return success();
+}
+
+LogicalResult cir::VTableAddrPointOp::verify() {
+  // The operation uses either a symbol or a value to operate, but not both.
+  if (getName() && getSymAddr())
+    return emitOpError("should use either a symbol or value, but not both");
+
+  // If not a symbol, stick with the concrete type used for getSymAddr.
+  if (getSymAddr())
+    return success();
+
+  auto resultType = getAddr().getType();
+  auto intTy = mlir::cir::IntType::get(getContext(), 32, /*isSigned=*/false);
+  auto fnTy = mlir::cir::FuncType::get({}, intTy);
+
+  auto resTy = mlir::cir::PointerType::get(
+      getContext(), mlir::cir::PointerType::get(getContext(), fnTy));
+
+  if (resultType != resTy)
+    return emitOpError("result type must be '")
+           << resTy << "', but provided result type is '" << resultType << "'";
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+/// Returns the name used for the linkage attribute. This *must* correspond
+/// to the name of the attribute in ODS.
+static StringRef getLinkageAttrNameString() { return "linkage"; }
+
+void cir::FuncOp::build(OpBuilder &builder, OperationState &result,
+                        StringRef name, cir::FuncType type,
+                        GlobalLinkageKind linkage,
+                        ArrayRef<NamedAttribute> attrs,
+                        ArrayRef<DictionaryAttr> argAttrs) {
+  result.addRegion();
+  result.addAttribute(SymbolTable::getSymbolAttrName(),
+                      builder.getStringAttr(name));
+  result.addAttribute(getFunctionTypeAttrName(result.name),
+                      TypeAttr::get(type));
+  result.addAttribute(
+      getLinkageAttrNameString(),
+      GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+  result.attributes.append(attrs.begin(), attrs.end());
+  if (argAttrs.empty())
+    return;
+
+  function_interface_impl::addArgAndResultAttrs(
+      builder, result, argAttrs,
+      /*resultAttrs=*/std::nullopt, getArgAttrsAttrName(result.name),
+      getResAttrsAttrName(result.name));
+}
+
+ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
+  llvm::SMLoc loc = parser.getCurrentLocation();
+
+  auto builtinNameAttr = getBuiltinAttrName(state.name);
+  auto coroutineNameAttr = getCoroutineAttrName(state.name);
+  auto lambdaNameAttr = getLambdaAttrName(state.name);
+  auto visNameAttr = getSymVisibilityAttrName(state.name);
+  auto noProtoNameAttr = getNoProtoAttrName(state.name);
+  if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref())))
+    state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr());
+  if (::mlir::succeeded(
+          parser.parseOptionalKeyword(coroutineNameAttr.strref())))
+    state.addAttribute(coroutineNameAttr, parser.getBuilder().getUnitAttr());
+  if (::mlir::succeeded(parser.parseOptionalKeyword(lambdaNameAttr.strref())))
+    state.addAttribute(lambdaNameAttr, parser.getBuilder().getUnitAttr());
+  if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded())
+    state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr());
+
+  // Default to external linkage if no keyword is provided.
+  state.addAttribute(getLinkageAttrNameString(),
+                     GlobalLinkageKindAttr::get(
+                         parser.getContext(),
+                         parseOptionalCIRKeyword<GlobalLinkageKind>(
+                             parser, GlobalLinkageKind::ExternalLinkage)));
+
+  ::llvm::StringRef visAttrStr;
+  if (parser.parseOptionalKeyword(&visAttrStr, {"private", "public", "nested"})
+          .succeeded()) {
+    state.addAttribute(visNameAttr,
+                       parser.getBuilder().getStringAttr(visAttrStr));
+  }
+
+  StringAttr nameAttr;
+  SmallVector<OpAsmParser::Argument> arguments;
+  SmallVector<DictionaryAttr> resultAttrs;
+  SmallVector<Type> argTypes;
+  SmallVector<Type> resultTypes;
+  auto &builder = parser.getBuilder();
+
+  // Parse the name as a symbol.
+  if (parser.parseSymbolName(nameAttr, SymbolTable::getSymbolAttrName(),
+                             state.attributes))
+    return failure();
+
+  // Parse the function signature.
+  bool isVariadic = false;
+  if (function_interface_impl::parseFunctionSignature(
+          parser, /*allowVariadic=*/true, arguments, isVariadic, resultTypes,
+          resultAttrs))
+    return failure();
+
+  for (auto &arg : arguments)
+    argTypes.push_back(arg.type);
+
+  if (resultTypes.size() > 1)
+    return parser.emitError(loc, "functions only support zero or one result");
+
+  // Fetch return type or set it to void if empty/omitted.
+  mlir::Type returnType =
+      (resultTypes.empty() ? mlir::cir::VoidType::get(builder.getContext())
+                           : resultTypes.front());
+
+  // Build the function type.
+  auto fnType = mlir::cir::FuncType::get(argTypes, returnType, isVariadic);
+  if (!fnType)
+    return failure();
+  state.addAttribute(getFunctionTypeAttrName(state.name),
+                     TypeAttr::get(fnType));
+
+  // If additional attributes are present, parse them.
+  if (parser.parseOptionalAttrDictWithKeyword(state.attributes))
+    return failure();
+
+  // Add the attributes to the function arguments.
+  assert(resultAttrs.size() == resultTypes.size());
+  function_interface_impl::addArgAndResultAttrs(
+      builder, state, arguments, resultAttrs, getArgAttrsAttrName(state.name),
+      getResAttrsAttrName(state.name));
+
+  bool hasAlias = false;
+  auto aliaseeNameAttr = getAliaseeAttrName(state.name);
+  if (::mlir::succeeded(parser.parseOptionalKeyword("alias"))) {
+    if (parser.parseLParen().failed())
+      return failure();
+    StringAttr aliaseeAttr;
+    if (parser.parseOptionalSymbolName(aliaseeAttr).failed())
+      return failure();
+    state.addAttribute(aliaseeNameAttr, FlatSymbolRefAttr::get(aliaseeAttr));
+    if (parser.parseRParen().failed())
+      return failure();
+    hasAlias = true;
+  }
+
+  auto parseGlobalDtorCtor =
+      [&](StringRef keyword,
+          llvm::function_ref<void(std::optional<int> prio)> createAttr)
+      -> mlir::LogicalResult {
+    if (::mlir::succeeded(parser.parseOptionalKeyword(keyword))) {
+      std::optional<int> prio;
+      if (mlir::succeeded(parser.parseOptionalLParen())) {
+        auto parsedPrio = mlir::FieldParser<int>::parse(parser);
+        if (mlir::failed(parsedPrio))
+          return parser.emitError(parser.getCurrentLocation(),
+                                  "failed to parse 'priority', of type 'int'");
+        prio = parsedPrio.value_or(int());
+        // Parse literal ')'
+        if (parser.parseRParen())
+          return failure();
+      }
+      createAttr(prio);
+    }
+    return success();
+  };
+
+  if (parseGlobalDtorCtor("global_ctor", [&](std::optional<int> prio) {
+        mlir::cir::GlobalCtorAttr globalCtorAttr =
+            prio ? mlir::cir::GlobalCtorAttr::get(builder.getContext(),
+                                                  nameAttr, *prio)
+                 : mlir::cir::GlobalCtorAttr::get(builder.getContext(),
+                                                  nameAttr);
+        state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr);
+      }).failed())
+    return failure();
+
+  if (parseGlobalDtorCtor("global_dtor", [&](std::optional<int> prio) {
+        mlir::cir::GlobalDtorAttr globalDtorAttr =
+            prio ? mlir::cir::GlobalDtorAttr::get(builder.getContext(),
+                                                  nameAttr, *prio)
+                 : mlir::cir::GlobalDtorAttr::get(builder.getContext(),
+                                                  nameAttr);
+        state.addAttribute(getGlobalDtorAttrName(state.name), globalDtorAttr);
+      }).failed())
+    return failure();
+
+  Attribute extraAttrs;
+  if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) {
+    if (parser.parseLParen().failed())
+      return failure();
+    if (parser.parseAttribute(extraAttrs).failed())
+      return failure();
+    if (parser.parseRParen().failed())
+      return failure();
+  } else {
+    NamedAttrList empty;
+    extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get(
+        builder.getContext(), empty.getDictionary(builder.getContext()));
+  }
+  state.addAttribute(getExtraAttrsAttrName(state.name), extraAttrs);
+
+  // Parse the optional function body.
+  auto *body = state.addRegion();
+  OptionalParseResult parseResult = parser.parseOptionalRegion(
+      *body, arguments, /*enableNameShadowing=*/false);
+  if (parseResult.has_value()) {
+    if (hasAlias)
+      parser.emitError(loc, "function alias shall not have a body");
+    if (failed(*parseResult))
+      return failure();
+    // Function body was parsed, make sure it's not empty.
+    if (body->empty())
+      return parser.emitError(loc, "expected non-empty function body");
+  }
+  return success();
+}
+
+bool cir::FuncOp::isDeclaration() {
+  auto aliasee = getAliasee();
+  if (!aliasee)
+    return isExternal();
+
+  auto *modOp = getOperation()->getParentOp();
+  auto targetFn = dyn_cast_or_null<cir::FuncOp>(
+      mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee));
+  assert(targetFn && "expected aliasee to exist");
+  return targetFn.isDeclaration();
+}
+
+::mlir::Region *cir::FuncOp::getCallableRegion() {
+  auto aliasee = getAliasee();
+  if (!aliasee)
+    return isExternal() ? nullptr : &getBody();
+
+  // Note that we forward the region from the original aliasee
+  // function.
+  auto *modOp = getOperation()->getParentOp();
+  auto targetFn = dyn_cast_or_null<cir::FuncOp>(
+      mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee));
+  assert(targetFn && "expected aliasee to exist");
+  return targetFn.getCallableRegion();
+}
+
+void cir::FuncOp::print(OpAsmPrinter &p) {
+  p << ' ';
+
+  if (getBuiltin())
+    p << "builtin ";
+
+  if (getCoroutine())
+    p << "coroutine ";
+
+  if (getLambda())
+    p << "lambda ";
+
+  if (getNoProto())
+    p << "no_proto ";
+
+  if (getLinkage() != GlobalLinkageKind::ExternalLinkage)
+    p << stringifyGlobalLinkageKind(getLinkage()) << ' ';
+
+  auto vis = getVisibility();
+  if (vis != mlir::SymbolTable::Visibility::Public)
+    p << vis << " ";
+
+  // Print function name, signature, and control.
+  p.printSymbolName(getSymName());
+  auto fnType = getFunctionType();
+  SmallVector<Type> resultTypes;
+  if (!fnType.isVoid())
+    function_interface_impl::printFunctionSignature(
+        p, *this, fnType.getInputs(), fnType.isVarArg(),
+        fnType.getReturnTypes());
+  else
+    function_interface_impl::printFunctionSignature(
+        p, *this, fnType.getInputs(), fnType.isVarArg(), {});
+  function_interface_impl::printFunctionAttributes(
+      p, *this,
+      // These are all omitted since they are custom printed already.
+      {getSymVisibilityAttrName(), getAliaseeAttrName(),
+       getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(),
+       getNoProtoAttrName(), getGlobalCtorAttrName(), getGlobalDtorAttrName(),
+       getExtraAttrsAttrName()});
+
+  if (auto aliaseeName = getAliasee()) {
+    p << " alias(";
+    p.printSymbolName(*aliaseeName);
+    p << ")";
+  }
+
+  if (auto globalCtor = getGlobalCtorAttr()) {
+    p << " global_ctor";
+    if (!globalCtor.isDefaultPriority())
+      p << "(" << globalCtor.getPriority() << ")";
+  }
+
+  if (auto globalDtor = getGlobalDtorAttr()) {
+    p << " global_dtor";
+    if (!globalDtor.isDefaultPriority())
+      p << "(" << globalDtor.getPriority() << ")";
+  }
+
+  if (!getExtraAttrs().getElements().empty()) {
+    p << " extra(";
+    p.printAttributeWithoutType(getExtraAttrs());
+    p << ")";
+  }
+
+  // Print the body if this is not an external function.
+  Region &body = getOperation()->getRegion(0);
+  if (!body.empty()) {
+    p << ' ';
+    p.printRegion(body, /*printEntryBlockArgs=*/false,
+                  /*printBlockTerminators=*/true);
+  }
+}
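+// As an illustration (syntax approximated), forms the parser and printer
+// above round-trip:
+//   cir.func @add(%a: !s32i, %b: !s32i) -> !s32i { ... }
+//   cir.func private @add_alias(!s32i, !s32i) -> !s32i alias(@add)
+//   cir.func @init() global_ctor(777) { ... }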
+// Hook for OpTrait::FunctionLike, called after verifying that the 'type'
+// attribute is present. This can check for preconditions of the
+// getNumArguments hook not failing.
+LogicalResult cir::FuncOp::verifyType() {
+  auto type = getFunctionType();
+  if (!type.isa<cir::FuncType>())
+    return emitOpError("requires '" + getFunctionTypeAttrName().str() +
+                       "' attribute of function type");
+  if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0)
+    return emitError()
+           << "prototyped function must have at least one non-variadic input";
+  return success();
+}
+
+// Verifies linkage types, e.g.:
+// - functions don't have 'common' linkage
+// - external functions have 'external' or 'extern_weak' linkage
+// - a coroutine body must use at least one cir.await operation
+LogicalResult cir::FuncOp::verify() {
+  if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage)
+    return emitOpError() << "functions cannot have '"
+                         << stringifyGlobalLinkageKind(
+                                cir::GlobalLinkageKind::CommonLinkage)
+                         << "' linkage";
+
+  if (isExternal()) {
+    if (getLinkage() != cir::GlobalLinkageKind::ExternalLinkage &&
+        getLinkage() != cir::GlobalLinkageKind::ExternalWeakLinkage)
+      return emitOpError() << "external functions must have '"
+                           << stringifyGlobalLinkageKind(
+                                  cir::GlobalLinkageKind::ExternalLinkage)
+                           << "' or '"
+                           << stringifyGlobalLinkageKind(
+                                  cir::GlobalLinkageKind::ExternalWeakLinkage)
+                           << "' linkage";
+    return success();
+  }
+
+  if (!isDeclaration() && getCoroutine()) {
+    bool foundAwait = false;
+    this->walk([&](Operation *op) {
+      if (auto await = dyn_cast<AwaitOp>(op)) {
+        foundAwait = true;
+        return;
+      }
+    });
+    if (!foundAwait)
+      return emitOpError()
+             << "coroutine body must use at least one cir.await op";
+  }
+
+  // A function alias should have an empty body.
+  if (auto fn = getAliasee()) {
+    if (fn && !getBody().empty())
+      return emitOpError() << "a function alias '" << *fn
+                           << "' must have empty body";
+  }
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CallOp
+//===----------------------------------------------------------------------===//
+
+mlir::Value cir::CallOp::getIndirectCall() {
+  assert(isIndirect());
+  return getOperand(0);
+}
+
+mlir::Operation::operand_iterator cir::CallOp::arg_operand_begin() {
+  auto arg_begin = operand_begin();
+  if (isIndirect())
+    arg_begin++;
+  return arg_begin;
+}
+mlir::Operation::operand_iterator cir::CallOp::arg_operand_end() {
+  return operand_end();
+}
+
+/// Return the operand at index 'i'; accounts for indirect calls.
+Value cir::CallOp::getArgOperand(unsigned i) {
+  if (isIndirect())
+    i++;
+  return getOperand(i);
+}
+/// Return the number of operands; accounts for indirect calls.
+unsigned cir::CallOp::getNumArgOperands() {
+  if (isIndirect())
+    return this->getOperation()->getNumOperands() - 1;
+  return this->getOperation()->getNumOperands();
+}
+
+static LogicalResult
+verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) {
+  // The callee attribute is only present on direct calls; there is nothing
+  // to check for indirect ones.
+  auto fnAttr = op->getAttrOfType<FlatSymbolRefAttr>("callee");
+  if (!fnAttr)
+    return success();
+
+  FuncOp fn =
+      symbolTable.lookupNearestSymbolFrom<mlir::cir::FuncOp>(op, fnAttr);
+  if (!fn)
+    return op->emitOpError() << "'" << fnAttr.getValue()
+                             << "' does not reference a valid function";
+  auto callIf = dyn_cast<mlir::cir::CIRCallOpInterface>(op);
+  assert(callIf && "expected CIR call interface to be always available");
+
+  // Verify that the operand and result types match the callee. Note that
+  // argument-checking is disabled for functions without a prototype.
+  auto fnType = fn.getFunctionType();
+  if (!fn.getNoProto()) {
+    unsigned numCallOperands = callIf.getNumArgOperands();
+    unsigned numFnOpOperands = fnType.getNumInputs();
+
+    if (!fnType.isVarArg() && numCallOperands != numFnOpOperands)
+      return op->emitOpError("incorrect number of operands for callee");
+
+    if (fnType.isVarArg() && numCallOperands < numFnOpOperands)
+      return op->emitOpError("too few operands for callee");
+
+    for (unsigned i = 0, e = numFnOpOperands; i != e; ++i)
+      if (callIf.getArgOperand(i).getType() != fnType.getInput(i))
+        return op->emitOpError("operand type mismatch: expected operand type ")
+               << fnType.getInput(i) << ", but provided "
+               << op->getOperand(i).getType() << " for operand number " << i;
+  }
+
+  // Void functions must not return any results.
+  if (fnType.isVoid() && op->getNumResults() != 0)
+    return op->emitOpError("callee returns void but call has results");
+
+  // Non-void function calls must return exactly one result.
+  if (!fnType.isVoid() && op->getNumResults() != 1)
+    return op->emitOpError("incorrect number of results for callee");
+
+  // The callee's return type and the call's result type must match.
+  if (!fnType.isVoid() &&
+      op->getResultTypes().front() != fnType.getReturnType()) {
+    return op->emitOpError("result type mismatch: expected ")
+           << fnType.getReturnType() << ", but provided "
+           << op->getResult(0).getType();
+  }
+
+  return success();
+}
+
+static ::mlir::ParseResult parseCallCommon(
+    ::mlir::OpAsmParser &parser, ::mlir::OperationState &result,
+    llvm::function_ref<::mlir::ParseResult(::mlir::OpAsmParser &,
+                                           ::mlir::OperationState &)>
+        customOpHandler =
+            [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result) {
+              return mlir::success();
+            }) {
+  mlir::FlatSymbolRefAttr calleeAttr;
+  llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops;
+  llvm::SMLoc opsLoc;
+  (void)opsLoc;
+  llvm::ArrayRef<::mlir::Type> operandsTypes;
+  llvm::ArrayRef<::mlir::Type> allResultTypes;
+
+  if (customOpHandler(parser, result))
+    return ::mlir::failure();
+
+  // If we cannot parse a string callee, it means this is an indirect call.
+  if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes)
+           .has_value()) {
+    OpAsmParser::UnresolvedOperand indirectVal;
+    // Do not resolve right now, since we need to figure out the type.
+    if (parser.parseOperand(indirectVal).failed())
+      return failure();
+    ops.push_back(indirectVal);
+  }
+
+  if (parser.parseLParen())
+    return ::mlir::failure();
+
+  opsLoc = parser.getCurrentLocation();
+  if (parser.parseOperandList(ops))
+    return ::mlir::failure();
+  if (parser.parseRParen())
+    return ::mlir::failure();
+  if (parser.parseOptionalAttrDict(result.attributes))
+    return ::mlir::failure();
+  if (parser.parseColon())
+    return ::mlir::failure();
+
+  ::mlir::FunctionType opsFnTy;
+  if (parser.parseType(opsFnTy))
+    return ::mlir::failure();
+  operandsTypes = opsFnTy.getInputs();
+  allResultTypes = opsFnTy.getResults();
+  result.addTypes(allResultTypes);
+
+  if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands))
+    return ::mlir::failure();
+  return ::mlir::success();
+}
+
+void printCallCommon(
+    Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym,
+    ::mlir::OpAsmPrinter &state,
+    llvm::function_ref<void()> customOpHandler = []() {}) {
+  state << ' ';
+
+  auto callLikeOp = mlir::cast<mlir::cir::CIRCallOpInterface>(op);
+  auto ops = callLikeOp.getArgOperands();
+
+  if (flatSym) { // Direct calls
+    state.printAttributeWithoutType(flatSym);
+  } else { // Indirect calls
+    assert(indirectCallee);
+    state << indirectCallee;
+  }
+  state << "(";
+  state << ops;
+  state << ")";
+  llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs;
+  elidedAttrs.push_back("callee");
+  elidedAttrs.push_back("ast");
+  state.printOptionalAttrDict(op->getAttrs(), elidedAttrs);
+  state << ' ' << ":";
+  state << ' ';
+  state.printFunctionalType(op->getOperands().getTypes(),
+                            op->getResultTypes());
+}
+
+LogicalResult
+cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+  return verifyCallCommInSymbolUses(*this, symbolTable);
+}
+
+::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser,
+                                  ::mlir::OperationState &result) {
+  return parseCallCommon(parser, result);
+}
+
+void CallOp::print(::mlir::OpAsmPrinter &state) {
+  mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr;
+  printCallCommon(*this, indirectCallee, getCalleeAttr(), state);
+}
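+// As an illustration (syntax approximated), direct and indirect calls as
+// printed above; note the indirect callee is the leading operand and its
+// type leads the functional type:
+//   %r = cir.call @add(%a, %b) : (!s32i, !s32i) -> !s32i
+//   %r = cir.call %fn(%a) : (!cir.ptr<!cir.func<!s32i (!s32i)>>, !s32i) -> !s32i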
+unsigned cir::TryCallOp::getNumArgOperands() { + unsigned numOperands = this->getOperation()->getNumOperands(); + // First operand is the exception pointer, skip it. + numOperands--; + if (isIndirect()) + numOperands--; + return numOperands; +} + +LogicalResult +cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + return verifyCallCommInSymbolUses(*this, symbolTable); +} + +LogicalResult cir::TryCallOp::verify() { return mlir::success(); } + +::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + return parseCallCommon( + parser, result, + [](::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) -> ::mlir::ParseResult { + ::mlir::OpAsmParser::UnresolvedOperand exceptionRawOperands[1]; + ::llvm::ArrayRef<::mlir::OpAsmParser::UnresolvedOperand> + exceptionOperands(exceptionRawOperands); + ::llvm::SMLoc exceptionOperandsLoc; + (void)exceptionOperandsLoc; + + if (parser.parseKeyword("exception").failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'exception' keyword here"); + + if (parser.parseLParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected '('"); + + exceptionOperandsLoc = parser.getCurrentLocation(); + if (parser.parseOperand(exceptionRawOperands[0])) + return ::mlir::failure(); + + if (parser.parseRParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected ')'"); + + auto &builder = parser.getBuilder(); + auto exceptionPtrPtrTy = cir::PointerType::get( + builder.getContext(), + cir::PointerType::get( + builder.getContext(), + builder.getType<::mlir::cir::ExceptionInfoType>())); + if (parser.resolveOperands(exceptionOperands, exceptionPtrPtrTy, + exceptionOperandsLoc, result.operands)) + return ::mlir::failure(); + + return ::mlir::success(); + }); +} + +void TryCallOp::print(::mlir::OpAsmPrinter &state) { + state << " exception("; + state << getExceptionInfo(); + state << ")"; + mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + printCallCommon(*this, indirectCallee, getCalleeAttr(), state); +} + +//===----------------------------------------------------------------------===// +// UnaryOp +//===----------------------------------------------------------------------===// + +LogicalResult UnaryOp::verify() { + switch (getKind()) { + case cir::UnaryOpKind::Inc: + case cir::UnaryOpKind::Dec: + case cir::UnaryOpKind::Plus: + case cir::UnaryOpKind::Minus: + case cir::UnaryOpKind::Not: + // Nothing to verify. 
+    return success();
+  }
+
+  llvm_unreachable("Unknown UnaryOp kind?");
+}
+
+//===----------------------------------------------------------------------===//
+// AwaitOp
+//===----------------------------------------------------------------------===//
+
+void AwaitOp::build(OpBuilder &builder, OperationState &result,
+                    mlir::cir::AwaitKind kind,
+                    function_ref<void(OpBuilder &, Location)> readyBuilder,
+                    function_ref<void(OpBuilder &, Location)> suspendBuilder,
+                    function_ref<void(OpBuilder &, Location)> resumeBuilder) {
+  result.addAttribute(getKindAttrName(result.name),
+                      cir::AwaitKindAttr::get(builder.getContext(), kind));
+  {
+    OpBuilder::InsertionGuard guard(builder);
+    Region *readyRegion = result.addRegion();
+    builder.createBlock(readyRegion);
+    readyBuilder(builder, result.location);
+  }
+
+  {
+    OpBuilder::InsertionGuard guard(builder);
+    Region *suspendRegion = result.addRegion();
+    builder.createBlock(suspendRegion);
+    suspendBuilder(builder, result.location);
+  }
+
+  {
+    OpBuilder::InsertionGuard guard(builder);
+    Region *resumeRegion = result.addRegion();
+    builder.createBlock(resumeRegion);
+    resumeBuilder(builder, result.location);
+  }
+}
+
+/// Given the region at `index`, or the parent operation if `index` is None,
+/// return the successor regions. These are the regions that may be selected
+/// during the flow of control. `operands` is a set of optional attributes
+/// that correspond to a constant value for each operand, or null if that
+/// operand is not a constant.
+void AwaitOp::getSuccessorRegions(mlir::RegionBranchPoint point,
+                                  SmallVectorImpl<RegionSuccessor> &regions) {
+  // From any of the regions, control branches back to the parent operation.
+  if (!point.isParent()) {
+    regions.push_back(RegionSuccessor());
+    return;
+  }
+
+  // FIXME: we should look at the ready region to get more accurate results,
+  // i.e. whether the other regions will get a chance to execute.
+  regions.push_back(RegionSuccessor(&this->getReady()));
+  regions.push_back(RegionSuccessor(&this->getSuspend()));
+  regions.push_back(RegionSuccessor(&this->getResume()));
+}
+
+LogicalResult AwaitOp::verify() {
+  if (!isa<ConditionOp>(this->getReady().back().getTerminator()))
+    return emitOpError("ready region must end with cir.condition");
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CIR defined traits
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) {
+  if (failed(verifyAtLeastNOperands(op, 1)) || failed(verifyOneResult(op)))
+    return failure();
+
+  auto type = op->getResult(0).getType();
+  auto opType = op->getOperand(0).getType();
+
+  if (type != opType)
+    return op->emitOpError()
+           << "requires the same type for first operand and result";
+
+  return success();
+}
+
+LogicalResult
+mlir::OpTrait::impl::verifySameSecondOperandAndResultType(Operation *op) {
+  if (failed(verifyAtLeastNOperands(op, 2)) || failed(verifyOneResult(op)))
+    return failure();
+
+  auto type = op->getResult(0).getType();
+  auto opType = op->getOperand(1).getType();
+
+  if (type != opType)
+    return op->emitOpError()
+           << "requires the same type for second operand and result";
+
+  return success();
+}
+
+LogicalResult
+mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) {
+  if (failed(verifyAtLeastNOperands(op, 3)) || failed(verifyOneResult(op)))
+    return failure();
+
+  auto checkType = op->getResult(0).getType();
+  if (checkType != op->getOperand(0).getType() &&
+      checkType != op->getOperand(1).getType())
+    return op->emitOpError()
+           << "requires the same type for first, second operand and result";
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// CIR attributes
+// FIXME: move all of these to CIRAttrs.cpp
+//===----------------------------------------------------------------------===//
+
+LogicalResult mlir::cir::ConstArrayAttr::verify(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    ::mlir::Type type, Attribute attr, int trailingZerosNum) {
+
+  if (!(attr.isa<ArrayAttr>() || attr.isa<StringAttr>()))
+    return emitError() << "constant array expects ArrayAttr or StringAttr";
+
+  if (auto strAttr = attr.dyn_cast<StringAttr>()) {
+    mlir::cir::ArrayType at = type.cast<mlir::cir::ArrayType>();
+    auto intTy = at.getEltType().dyn_cast<mlir::cir::IntType>();
+
+    // TODO: add CIR type for char.
+    if (!intTy || intTy.getWidth() != 8) {
+      emitError() << "constant array element for string literals expects "
+                     "!cir.int<u, 8> element type";
+      return failure();
+    }
+    return success();
+  }
+
+  assert(attr.isa<ArrayAttr>());
+  auto arrayAttr = attr.cast<ArrayAttr>();
+  auto at = type.cast<ArrayType>();
+
+  // Make sure both number of elements and subelement types match type.
+  if (at.getSize() != arrayAttr.size() + trailingZerosNum)
+    return emitError() << "constant array size should match type size";
+  LogicalResult eltTypeCheck = success();
+  arrayAttr.walkImmediateSubElements(
+      [&](Attribute attr) {
+        // Once we find a mismatch, stop there.
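+        // (walkImmediateSubElements offers no early exit, so the callback
+        // simply becomes a no-op once a failure has been recorded).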
+        if (eltTypeCheck.failed())
+          return;
+        auto typedAttr = attr.dyn_cast<TypedAttr>();
+        if (!typedAttr || typedAttr.getType() != at.getEltType()) {
+          eltTypeCheck = failure();
+          emitError()
+              << "constant array element should match array element type";
+        }
+      },
+      [&](Type type) {});
+  return eltTypeCheck;
+}
+
+::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser,
+                                        ::mlir::Type type) {
+  ::mlir::FailureOr<::mlir::Type> resultTy;
+  ::mlir::FailureOr<Attribute> resultVal;
+  ::llvm::SMLoc loc = parser.getCurrentLocation();
+  (void)loc;
+  // Parse literal '<'
+  if (parser.parseLess())
+    return {};
+
+  // Parse variable 'value'
+  resultVal = ::mlir::FieldParser<Attribute>::parse(parser);
+  if (failed(resultVal)) {
+    parser.emitError(
+        parser.getCurrentLocation(),
+        "failed to parse ConstArrayAttr parameter 'value' which is "
+        "to be a `Attribute`");
+    return {};
+  }
+
+  // ArrayAttrs have per-element type, not the type of the array...
+  if (resultVal->dyn_cast<ArrayAttr>()) {
+    // Array has implicit type: infer from const array type.
+    if (parser.parseOptionalColon().failed()) {
+      resultTy = type;
+    } else { // Array has explicit type: parse it.
+      resultTy = ::mlir::FieldParser<::mlir::Type>::parse(parser);
+      if (failed(resultTy)) {
+        parser.emitError(
+            parser.getCurrentLocation(),
+            "failed to parse ConstArrayAttr parameter 'type' which is "
+            "to be a `::mlir::Type`");
+        return {};
+      }
+    }
+  } else {
+    assert(resultVal->isa<TypedAttr>() && "expected typed attribute");
+    auto ta = resultVal->cast<TypedAttr>();
+    resultTy = ta.getType();
+    if (resultTy->isa<mlir::NoneType>()) {
+      parser.emitError(parser.getCurrentLocation(),
+                       "expected type declaration for string literal");
+      return {};
+    }
+  }
+
+  auto zeros = 0;
+  if (parser.parseOptionalComma().succeeded()) {
+    if (parser.parseOptionalKeyword("trailing_zeros").succeeded()) {
+      auto typeSize = resultTy.value().cast<mlir::cir::ArrayType>().getSize();
+      auto elts = resultVal.value();
+      if (auto str = elts.dyn_cast<StringAttr>())
+        zeros = typeSize - str.size();
+      else
+        zeros = typeSize - elts.cast<ArrayAttr>().size();
+    } else {
+      return {};
+    }
+  }
+
+  // Parse literal '>'
+  if (parser.parseGreater())
+    return {};
+
+  return parser.getChecked<ConstArrayAttr>(
+      loc, parser.getContext(), resultTy.value(), resultVal.value(), zeros);
+}
+
+void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const {
+  printer << "<";
+  printer.printStrippedAttrOrType(getElts());
+  if (auto zeros = getTrailingZerosNum())
+    printer << ", trailing_zeros";
+  printer << ">";
+}
+
+::mlir::Attribute SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser,
+                                                    ::mlir::Type type) {
+  if (parser.parseLess())
+    return {};
+  auto behavior = parseOptionalCIRKeyword(
+      parser, mlir::cir::sob::SignedOverflowBehavior::undefined);
+  if (parser.parseGreater())
+    return {};
+
+  return SignedOverflowBehaviorAttr::get(parser.getContext(), behavior);
+}
+
+void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const {
+  printer << "<";
+  switch (getBehavior()) {
+  case sob::SignedOverflowBehavior::undefined:
+    printer << "undefined";
+    break;
+  case sob::SignedOverflowBehavior::defined:
+    printer << "defined";
+    break;
+  case sob::SignedOverflowBehavior::trapping:
+    printer << "trapping";
+    break;
+  }
+  printer << ">";
+}
+
+LogicalResult TypeInfoAttr::verify(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    ::mlir::Type type, ::mlir::ArrayAttr typeinfoData) {
+
+  if (mlir::cir::ConstStructAttr::verify(emitError, type, typeinfoData)
+          .failed())
+    return failure();
+
+  for (auto &member : typeinfoData) {
+    if (llvm::isa<GlobalViewAttr, IntAttr>(member))
+      continue;
+    emitError() << "expected GlobalViewAttr or IntAttr attribute";
IntAttr attribute"; + return failure(); + } + + return success(); +} + +LogicalResult +VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ::mlir::ArrayAttr vtableData) { + auto sTy = type.dyn_cast_or_null(); + if (!sTy) { + emitError() << "expected !cir.struct type result"; + return failure(); + } + if (sTy.getMembers().size() != 1 || vtableData.size() != 1) { + emitError() << "expected struct type with only one subtype"; + return failure(); + } + + auto arrayTy = sTy.getMembers()[0].dyn_cast(); + auto constArrayAttr = vtableData[0].dyn_cast(); + if (!arrayTy || !constArrayAttr) { + emitError() << "expected struct type with one array element"; + return failure(); + } + + if (mlir::cir::ConstStructAttr::verify(emitError, type, vtableData).failed()) + return failure(); + + LogicalResult eltTypeCheck = success(); + if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { + arrayElts.walkImmediateSubElements( + [&](Attribute attr) { + if (attr.isa() || attr.isa()) + return; + emitError() << "expected GlobalViewAttr attribute"; + eltTypeCheck = failure(); + }, + [&](Type type) {}); + return eltTypeCheck; + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// CopyOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult CopyOp::verify() { + + // A data layout is required for us to know the number of bytes to be copied. + if (!getType().getPointee().hasTrait()) + return emitError() << "missing data layout for pointee type"; + + if (getSrc() == getDst()) + return emitError() << "source and destination are the same"; + + return mlir::success(); +} + +//===----------------------------------------------------------------------===// +// MemCpyOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult MemCpyOp::verify() { + auto voidPtr = + cir::PointerType::get(getContext(), cir::VoidType::get(getContext())); + + if (!getLenTy().isUnsigned()) + return emitError() << "memcpy length must be an unsigned integer"; + + if (getSrcTy() != voidPtr || getDstTy() != voidPtr) + return emitError() << "memcpy src and dst must be void pointers"; + + return mlir::success(); +} + +//===----------------------------------------------------------------------===// +// GetMemberOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult GetMemberOp::verify() { + + const auto recordTy = getAddrTy().getPointee().dyn_cast(); + if (!recordTy) + return emitError() << "expected pointer to a record type"; + + if (recordTy.getMembers().size() <= getIndex()) + return emitError() << "member index out of bounds"; + + // FIXME(cir): member type check is disabled for classes as the codegen for + // these still need to be patched. 
+  if (!recordTy.isClass() &&
+      recordTy.getMembers()[getIndex()] != getResultTy().getPointee())
+    return emitError() << "member type mismatch";
+
+  return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// GetRuntimeMemberOp Definitions
+//===----------------------------------------------------------------------===//
+
+LogicalResult GetRuntimeMemberOp::verify() {
+  auto recordTy = getAddr()
+                      .getType()
+                      .cast<mlir::cir::PointerType>()
+                      .getPointee()
+                      .cast<mlir::cir::StructType>();
+  auto memberPtrTy = getMember().getType();
+
+  if (recordTy != memberPtrTy.getClsTy()) {
+    emitError() << "record type does not match the member pointer type";
+    return mlir::failure();
+  }
+
+  if (getType().getPointee() != memberPtrTy.getMemberTy()) {
+    emitError() << "result type does not match the member pointer type";
+    return mlir::failure();
+  }
+
+  return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// InlineAsmOp Definitions
+//===----------------------------------------------------------------------===//
+
+void cir::InlineAsmOp::print(OpAsmPrinter &p) {
+  p << '(' << getAsmFlavor() << ", ";
+  p.increaseIndent();
+  p.printNewline();
+
+  llvm::SmallVector<llvm::StringRef, 3> names{"out", "in", "in_out"};
+  auto nameIt = names.begin();
+  auto attrIt = getOperandAttrs().begin();
+
+  for (auto ops : getOperands()) {
+    p << *nameIt << " = ";
+
+    p << '[';
+    llvm::interleaveComma(llvm::make_range(ops.begin(), ops.end()), p,
+                          [&](Value value) {
+                            p.printOperand(value);
+                            p << " : " << value.getType();
+                            if (*attrIt)
+                              p << " (maybe_memory)";
+                            attrIt++;
+                          });
+    p << "],";
+    p.printNewline();
+    ++nameIt;
+  }
+
+  p << "{";
+  p.printString(getAsmString());
+  p << " ";
+  p.printString(getConstraints());
+  p << "}";
+  p.decreaseIndent();
+  p << ')';
+  if (getSideEffects())
+    p << " side_effects";
+
+  llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs;
+  elidedAttrs.push_back("asm_flavor");
+  elidedAttrs.push_back("asm_string");
+  elidedAttrs.push_back("constraints");
+  elidedAttrs.push_back("operand_attrs");
+  elidedAttrs.push_back("operands_segments");
+  elidedAttrs.push_back("side_effects");
+  p.printOptionalAttrDict(getOperation()->getAttrs(), elidedAttrs);
+
+  if (auto v = getRes())
+    p << " -> " << v.getType();
+}
+
+ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser,
+                                    OperationState &result) {
+  llvm::SmallVector<mlir::Attribute> operand_attrs;
+  llvm::SmallVector<int32_t> operandsGroupSizes;
+  std::string asm_string, constraints;
+  Type resType;
+  auto *ctxt = parser.getBuilder().getContext();
+
+  auto error = [&](const Twine &msg) {
+    parser.emitError(parser.getCurrentLocation(), msg);
+    return mlir::failure();
+  };
+
+  auto expected = [&](const std::string &c) {
+    return error("expected '" + c + "'");
+  };
+
+  if (parser.parseLParen().failed())
+    return expected("(");
+
+  auto flavor = mlir::FieldParser<AsmFlavor>::parse(parser);
+  if (failed(flavor))
+    return error("Unknown AsmFlavor");
+
+  if (parser.parseComma().failed())
+    return expected(",");
+
+  auto parseValue = [&](Value &v) {
+    OpAsmParser::UnresolvedOperand op;
+
+    if (parser.parseOperand(op) || parser.parseColon())
+      return mlir::failure();
+
+    Type typ;
+    if (parser.parseType(typ).failed())
+      return error("can't parse operand type");
+    llvm::SmallVector<mlir::Value> tmp;
+    if (parser.resolveOperand(op, typ, tmp))
+      return error("can't resolve operand");
+    v = tmp[0];
+    return mlir::success();
+  };
+
+  auto parseOperands = [&](llvm::StringRef name) {
+    if (parser.parseKeyword(name).failed())
+      return error("expected " + name + " operands here");
operands here"); + if (parser.parseEqual().failed()) + return expected("="); + if (parser.parseLSquare().failed()) + return expected("["); + + int size = 0; + if (parser.parseOptionalRSquare().succeeded()) { + operandsGroupSizes.push_back(size); + if (parser.parseComma()) + return expected(","); + return mlir::success(); + } + + if (parser.parseCommaSeparatedList([&]() { + Value val; + if (parseValue(val).succeeded()) { + result.operands.push_back(val); + size++; + + if (parser.parseOptionalLParen().failed()) { + operand_attrs.push_back(mlir::Attribute()); + return mlir::success(); + } + + if (parser.parseKeyword("maybe_memory").succeeded()) { + operand_attrs.push_back(mlir::UnitAttr::get(ctxt)); + if (parser.parseRParen()) + return expected(")"); + return mlir::success(); + } + } + return mlir::failure(); + })) + return mlir::failure(); + + if (parser.parseRSquare().failed() || parser.parseComma().failed()) + return expected("]"); + operandsGroupSizes.push_back(size); + return mlir::success(); + }; + + if (parseOperands("out").failed() || parseOperands("in").failed() || + parseOperands("in_out").failed()) + return error("failed to parse operands"); + + if (parser.parseLBrace()) + return expected("{"); + if (parser.parseString(&asm_string)) + return error("asm string parsing failed"); + if (parser.parseString(&constraints)) + return error("constraints string parsing failed"); + if (parser.parseRBrace()) + return expected("}"); + if (parser.parseRParen()) + return expected(")"); + + if (parser.parseOptionalKeyword("side_effects").succeeded()) + result.attributes.set("side_effects", UnitAttr::get(ctxt)); + + if (parser.parseOptionalArrow().failed()) + return mlir::failure(); + + if (parser.parseType(resType).failed()) + return mlir::failure(); + + if (parser.parseOptionalAttrDict(result.attributes)) + return mlir::failure(); + + result.attributes.set("asm_flavor", AsmFlavorAttr::get(ctxt, *flavor)); + result.attributes.set("asm_string", StringAttr::get(ctxt, asm_string)); + result.attributes.set("constraints", StringAttr::get(ctxt, constraints)); + result.attributes.set("operand_attrs", ArrayAttr::get(ctxt, operand_attrs)); + result.getOrAddProperties().operands_segments = + parser.getBuilder().getDenseI32ArrayAttr(operandsGroupSizes); + if (resType) + result.addTypes(TypeRange{resType}); + + return mlir::success(); +} + +//===----------------------------------------------------------------------===// +// Atomic Definitions +//===----------------------------------------------------------------------===// + +LogicalResult AtomicFetch::verify() { + if (getBinop() == mlir::cir::AtomicFetchKind::Add || + getBinop() == mlir::cir::AtomicFetchKind::Sub) + return mlir::success(); + + if (!getVal().getType().isa()) + return emitError() << "only operates on integer values"; + + return mlir::success(); +} + +LogicalResult BinOp::verify() { + bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); + + if (!isa(getType()) && noWrap) + return emitError() + << "only operations on integer values may have nsw/nuw flags"; + + bool noWrapOps = getKind() == mlir::cir::BinOpKind::Add || + getKind() == mlir::cir::BinOpKind::Sub || + getKind() == mlir::cir::BinOpKind::Mul; + + if (noWrap && !noWrapOps) + return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " + "'sub' and 'mul'"; + + return mlir::success(); +} + +//===----------------------------------------------------------------------===// +// TableGen'd op method definitions 
+//===----------------------------------------------------------------------===//
+
+#define GET_OP_CLASSES
+#include "clang/CIR/Dialect/IR/CIROps.cpp.inc"
diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp
new file mode 100644
index 000000000000..c924c1e0bc78
--- /dev/null
+++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp
@@ -0,0 +1,845 @@
+//===- CIRTypes.cpp - MLIR CIR Types --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types in the CIR dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MissingFeatures.h"
+
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIRTypesDetails.h"
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/DialectImplementation.h"
+#include "mlir/Interfaces/DataLayoutInterfaces.h"
+#include "mlir/Support/LLVM.h"
+#include "mlir/Support/LogicalResult.h"
+
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <optional>
+
+using cir::MissingFeatures;
+
+//===----------------------------------------------------------------------===//
+// CIR Custom Parser/Printer Signatures
+//===----------------------------------------------------------------------===//
+
+static mlir::ParseResult
+parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector<mlir::Type> &params,
+                  bool &isVarArg);
+static void printFuncTypeArgs(mlir::AsmPrinter &p,
+                              mlir::ArrayRef<mlir::Type> params,
+                              bool isVarArg);
+
+//===----------------------------------------------------------------------===//
+// Get autogenerated stuff
+//===----------------------------------------------------------------------===//
+
+#define GET_TYPEDEF_CLASSES
+#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc"
+
+using namespace mlir;
+using namespace mlir::cir;
+
+//===----------------------------------------------------------------------===//
+// General CIR parsing / printing
+//===----------------------------------------------------------------------===//
+
+Type CIRDialect::parseType(DialectAsmParser &parser) const {
+  llvm::SMLoc typeLoc = parser.getCurrentLocation();
+  StringRef mnemonic;
+  Type genType;
+
+  // Try to parse as a tablegen'd type.
+  OptionalParseResult parseResult =
+      generatedTypeParser(parser, &mnemonic, genType);
+  if (parseResult.has_value())
+    return genType;
+
+  // Type is not tablegen'd: try to parse as a raw C++ type.
+  return StringSwitch<function_ref<Type()>>(mnemonic)
+      .Case("struct", [&] { return StructType::parse(parser); })
+      .Default([&] {
+        parser.emitError(typeLoc) << "unknown CIR type: " << mnemonic;
+        return Type();
+      })();
+}
+
+void CIRDialect::printType(Type type, DialectAsmPrinter &os) const {
+  // Try to print as a tablegen'd type.
+  if (generatedTypePrinter(type, os).succeeded())
+    return;
+
+  // Type is not tablegen'd: try printing as a raw C++ type.
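+  // (Only !cir.struct currently takes this fallback path.)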
+  TypeSwitch<Type>(type)
+      .Case<StructType>([&](StructType type) {
+        os << type.getMnemonic();
+        type.print(os);
+      })
+      .Default([](Type) {
+        llvm::report_fatal_error("printer is missing a handler for this type");
+      });
+}
+
+Type PointerType::parse(mlir::AsmParser &parser) {
+  if (parser.parseLess())
+    return Type();
+  Type pointeeType;
+  if (parser.parseType(pointeeType))
+    return Type();
+  if (parser.parseGreater())
+    return Type();
+  return get(parser.getContext(), pointeeType);
+}
+
+void PointerType::print(mlir::AsmPrinter &printer) const {
+  printer << "<";
+  printer.printType(getPointee());
+  printer << '>';
+}
+
+Type BoolType::parse(mlir::AsmParser &parser) {
+  return get(parser.getContext());
+}
+
+void BoolType::print(mlir::AsmPrinter &printer) const {}
+
+//===----------------------------------------------------------------------===//
+// StructType Definitions
+//===----------------------------------------------------------------------===//
+
+/// Return the largest member in the type.
+///
+/// Recurses into union members, never returning a union as the largest member.
+Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const {
+  if (!layoutInfo)
+    computeSizeAndAlignment(dataLayout);
+  return layoutInfo.cast<mlir::cir::StructLayoutAttr>().getLargestMember();
+}
+
+Type StructType::parse(mlir::AsmParser &parser) {
+  FailureOr<AsmParser::CyclicParseReset> cyclicParseGuard;
+  const auto loc = parser.getCurrentLocation();
+  const auto eLoc = parser.getEncodedSourceLoc(loc);
+  bool packed = false;
+  RecordKind kind;
+  auto *context = parser.getContext();
+
+  if (parser.parseLess())
+    return {};
+
+  // TODO(cir): in the future we should probably separate types for different
+  // source language declarations such as cir.class, cir.union, and cir.struct
+  if (parser.parseOptionalKeyword("struct").succeeded())
+    kind = RecordKind::Struct;
+  else if (parser.parseOptionalKeyword("union").succeeded())
+    kind = RecordKind::Union;
+  else if (parser.parseOptionalKeyword("class").succeeded())
+    kind = RecordKind::Class;
+  else {
+    parser.emitError(loc, "unknown struct type");
+    return {};
+  }
+
+  mlir::StringAttr name;
+  parser.parseOptionalAttribute(name);
+
+  // Is a self reference: ensure referenced type was parsed.
+  if (name && parser.parseOptionalGreater().succeeded()) {
+    auto type = getChecked(eLoc, context, name, kind);
+    if (succeeded(parser.tryStartCyclicParse(type))) {
+      parser.emitError(loc, "invalid self-reference within record");
+      return {};
+    }
+    return type;
+  }
+
+  // Is a named record definition: ensure name has not been parsed yet.
+  if (name) {
+    auto type = getChecked(eLoc, context, name, kind);
+    cyclicParseGuard = parser.tryStartCyclicParse(type);
+    if (failed(cyclicParseGuard)) {
+      parser.emitError(loc, "record already defined");
+      return {};
+    }
+  }
+
+  if (parser.parseOptionalKeyword("packed").succeeded())
+    packed = true;
+
+  // Parse record members or lack thereof.
+  bool incomplete = true;
+  llvm::SmallVector<mlir::Type> members;
+  if (parser.parseOptionalKeyword("incomplete").failed()) {
+    incomplete = false;
+    const auto delimiter = AsmParser::Delimiter::Braces;
+    const auto parseElementFn = [&parser, &members]() {
+      return parser.parseType(members.emplace_back());
+    };
+    if (parser.parseCommaSeparatedList(delimiter, parseElementFn).failed())
+      return {};
+  }
+
+  // Parse optional AST attribute. This is just a formality for now, since CIR
+  // cannot yet read serialized AST.
+  mlir::cir::ASTRecordDeclAttr ast = nullptr;
+  parser.parseOptionalAttribute(ast);
+
+  if (parser.parseGreater())
+    return {};
+
+  // Try to create the proper record type.
+  ArrayRef<mlir::Type> membersRef(members); // Needed for template deduction.
+  mlir::Type type = {};
+  if (name && incomplete) { // Identified & incomplete
+    type = getChecked(eLoc, context, name, kind);
+  } else if (name && !incomplete) { // Identified & complete
+    type = getChecked(eLoc, context, membersRef, name, packed, kind);
+    // If the record has a self-reference, its type already exists in an
+    // incomplete state. In this case, we must complete it.
+    if (type.cast<StructType>().isIncomplete())
+      type.cast<StructType>().complete(membersRef, packed, ast);
+  } else if (!name && !incomplete) { // anonymous & complete
+    type = getChecked(eLoc, context, membersRef, packed, kind);
+  } else { // anonymous & incomplete
+    parser.emitError(loc, "anonymous structs must be complete");
+    return {};
+  }
+
+  return type;
+}
+
+void StructType::print(mlir::AsmPrinter &printer) const {
+  FailureOr<AsmPrinter::CyclicPrintReset> cyclicPrintGuard;
+  printer << '<';
+
+  switch (getKind()) {
+  case RecordKind::Struct:
+    printer << "struct ";
+    break;
+  case RecordKind::Union:
+    printer << "union ";
+    break;
+  case RecordKind::Class:
+    printer << "class ";
+    break;
+  }
+
+  if (getName())
+    printer << getName();
+
+  // Current type has already been printed: print as self reference.
+  cyclicPrintGuard = printer.tryStartCyclicPrint(*this);
+  if (failed(cyclicPrintGuard)) {
+    printer << '>';
+    return;
+  }
+
+  // Type not yet printed: continue printing the entire record.
+  printer << ' ';
+
+  if (getPacked())
+    printer << "packed ";
+
+  if (isIncomplete()) {
+    printer << "incomplete";
+  } else {
+    printer << "{";
+    llvm::interleaveComma(getMembers(), printer);
+    printer << "}";
+  }
+
+  if (getAst()) {
+    printer << " ";
+    printer.printAttribute(getAst());
+  }
+
+  printer << '>';
+}
+
+mlir::LogicalResult
+StructType::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
+                   llvm::ArrayRef<mlir::Type> members, mlir::StringAttr name,
+                   bool incomplete, bool packed,
+                   mlir::cir::StructType::RecordKind kind,
+                   ASTRecordDeclInterface ast) {
+  if (name && name.getValue().empty()) {
+    emitError() << "identified structs cannot have an empty name";
+    return mlir::failure();
+  }
+  return mlir::success();
+}
+
+void StructType::dropAst() { getImpl()->ast = nullptr; }
+
+StructType StructType::get(::mlir::MLIRContext *context,
+                           ArrayRef<mlir::Type> members, StringAttr name,
+                           bool packed, RecordKind kind,
+                           ASTRecordDeclInterface ast) {
+  return Base::get(context, members, name, /*incomplete=*/false, packed, kind,
+                   ast);
+}
+
+StructType StructType::getChecked(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    ::mlir::MLIRContext *context, ArrayRef<mlir::Type> members,
+    StringAttr name, bool packed, RecordKind kind,
+    ASTRecordDeclInterface ast) {
+  return Base::getChecked(emitError, context, members, name,
+                          /*incomplete=*/false, packed, kind, ast);
+}
+
+StructType StructType::get(::mlir::MLIRContext *context, StringAttr name,
+                           RecordKind kind) {
+  return Base::get(context, /*members=*/ArrayRef<mlir::Type>{}, name,
+                   /*incomplete=*/true, /*packed=*/false, kind,
+                   /*ast=*/ASTRecordDeclInterface{});
+}
+
+StructType StructType::getChecked(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    ::mlir::MLIRContext *context, StringAttr name, RecordKind kind) {
+  return Base::getChecked(emitError, context, ArrayRef<mlir::Type>{}, name,
+                          /*incomplete=*/true, /*packed=*/false, kind,
+                          ASTRecordDeclInterface{});
+}
+
+StructType StructType::get(::mlir::MLIRContext *context,
+                           ArrayRef<mlir::Type> members, bool packed,
+                           RecordKind kind, ASTRecordDeclInterface ast) {
+  return Base::get(context, members, StringAttr{}, /*incomplete=*/false,
+                   packed, kind, ast);
+}
+
+StructType StructType::getChecked(
+    ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
+    ::mlir::MLIRContext *context, ArrayRef<mlir::Type> members, bool packed,
+    RecordKind kind, ASTRecordDeclInterface ast) {
+  return Base::getChecked(emitError, context, members, StringAttr{},
+                          /*incomplete=*/false, packed, kind, ast);
+}
+
+::llvm::ArrayRef<mlir::Type> StructType::getMembers() const {
+  return getImpl()->members;
+}
+
+bool StructType::isIncomplete() const { return getImpl()->incomplete; }
+
+mlir::StringAttr StructType::getName() const { return getImpl()->name; }
+
+bool StructType::getIncomplete() const { return getImpl()->incomplete; }
+
+bool StructType::getPacked() const { return getImpl()->packed; }
+
+mlir::cir::StructType::RecordKind StructType::getKind() const {
+  return getImpl()->kind;
+}
+
+ASTRecordDeclInterface StructType::getAst() const { return getImpl()->ast; }
+
+void StructType::complete(ArrayRef<mlir::Type> members, bool packed,
+                          ASTRecordDeclInterface ast) {
+  if (mutate(members, packed, ast).failed())
+    llvm_unreachable("failed to complete struct");
+}
+
+bool StructType::isLayoutIdentical(const StructType &other) {
+  if (getImpl() == other.getImpl())
+    return true;
+
+  if (getPacked() != other.getPacked())
+    return false;
+
+  return getMembers() == other.getMembers();
+}
+
+//===----------------------------------------------------------------------===//
+// Data Layout information for types
+//===----------------------------------------------------------------------===//
+
+llvm::TypeSize
+BoolType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout,
+                            ::mlir::DataLayoutEntryListRef params) const {
+  return llvm::TypeSize::getFixed(8);
+}
+
+uint64_t
+BoolType::getABIAlignment(const ::mlir::DataLayout &dataLayout,
+                          ::mlir::DataLayoutEntryListRef params) const {
+  return 1;
+}
+
+uint64_t
+BoolType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                ::mlir::DataLayoutEntryListRef params) const {
+  return 1;
+}
+
+llvm::TypeSize
+PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout,
+                               ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: improve this in face of address spaces
+  return llvm::TypeSize::getFixed(64);
+}
+
+uint64_t
+PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout,
+                             ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: improve this in face of address spaces
+  return 8;
+}
+
+uint64_t PointerType::getPreferredAlignment(
+    const ::mlir::DataLayout &dataLayout,
+    ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: improve this in face of address spaces
+  return 8;
+}
+
+llvm::TypeSize
+DataMemberType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout,
+                                  ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: consider size differences under different ABIs
+  assert(!MissingFeatures::cxxABI());
+  return llvm::TypeSize::getFixed(64);
+}
+
+uint64_t
+DataMemberType::getABIAlignment(const ::mlir::DataLayout &dataLayout,
+                                ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: consider alignment differences under different ABIs
+  assert(!MissingFeatures::cxxABI());
+  return 8;
+}
+
+uint64_t DataMemberType::getPreferredAlignment(
+    const ::mlir::DataLayout &dataLayout,
+    ::mlir::DataLayoutEntryListRef params) const {
+  // FIXME: consider alignment differences under different ABIs
+  assert(!MissingFeatures::cxxABI());
+  return 8;
+}
+
+llvm::TypeSize
+ArrayType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout,
+                             ::mlir::DataLayoutEntryListRef params) const {
+  return getSize() * dataLayout.getTypeSizeInBits(getEltType());
+}
+
+uint64_t
+ArrayType::getABIAlignment(const ::mlir::DataLayout &dataLayout,
+                           ::mlir::DataLayoutEntryListRef params) const {
+  return dataLayout.getTypeABIAlignment(getEltType());
+}
+
+uint64_t
+ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                 ::mlir::DataLayoutEntryListRef params) const {
+  return dataLayout.getTypePreferredAlignment(getEltType());
+}
+
+llvm::TypeSize mlir::cir::VectorType::getTypeSizeInBits(
+    const ::mlir::DataLayout &dataLayout,
+    ::mlir::DataLayoutEntryListRef params) const {
+  return llvm::TypeSize::getFixed(getSize() *
+                                  dataLayout.getTypeSizeInBits(getEltType()));
+}
+
+uint64_t mlir::cir::VectorType::getABIAlignment(
+    const ::mlir::DataLayout &dataLayout,
+    ::mlir::DataLayoutEntryListRef params) const {
+  return getSize() * dataLayout.getTypeABIAlignment(getEltType());
+}
+
+uint64_t mlir::cir::VectorType::getPreferredAlignment(
+    const ::mlir::DataLayout &dataLayout,
+    ::mlir::DataLayoutEntryListRef params) const {
+  return getSize() * dataLayout.getTypePreferredAlignment(getEltType());
+}
+
+llvm::TypeSize
+StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout,
+                              ::mlir::DataLayoutEntryListRef params) const {
+  if (!layoutInfo)
+    computeSizeAndAlignment(dataLayout);
+  return llvm::TypeSize::getFixed(
+      layoutInfo.cast<mlir::cir::StructLayoutAttr>().getSize() * 8);
+}
+
+uint64_t
+StructType::getABIAlignment(const ::mlir::DataLayout &dataLayout,
+                            ::mlir::DataLayoutEntryListRef params) const {
+  if (!layoutInfo)
+    computeSizeAndAlignment(dataLayout);
+  return layoutInfo.cast<mlir::cir::StructLayoutAttr>().getAlignment();
+}
+
+uint64_t
+StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                  ::mlir::DataLayoutEntryListRef params) const {
+  llvm_unreachable("NYI");
+}
+
+bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const {
+  if (!layoutInfo)
+    computeSizeAndAlignment(dataLayout);
+  return layoutInfo.cast<mlir::cir::StructLayoutAttr>().getPadded();
+}
+
+uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout,
+                                      unsigned idx) const {
+  assert(idx < getMembers().size() && "access not valid");
+  if (!layoutInfo)
+    computeSizeAndAlignment(dataLayout);
+  auto offsets = layoutInfo.cast<mlir::cir::StructLayoutAttr>().getOffsets();
+  auto intAttr = offsets[idx].cast<mlir::IntegerAttr>();
+  return intAttr.getInt();
+}
+
+void StructType::computeSizeAndAlignment(
+    const ::mlir::DataLayout &dataLayout) const {
+  assert(isComplete() && "Cannot get layout of incomplete structs");
+  // Do not recompute.
+  if (layoutInfo)
+    return;
+
+  // This is a similar algorithm to LLVM's StructLayout.
+  unsigned structSize = 0;
+  llvm::Align structAlignment{1};
+  bool isPadded = false;
+  unsigned numElements = getNumElements();
+  auto members = getMembers();
+  mlir::Type largestMember;
+  unsigned largestMemberSize = 0;
+  SmallVector<mlir::Attribute, 4> memberOffsets;
+
+  // Loop over each of the elements, placing them in memory.
+  memberOffsets.reserve(numElements);
+  for (unsigned i = 0, e = numElements; i != e; ++i) {
+    auto ty = members[i];
+
+    // Found a nested union: recurse into it to fetch its largest member.
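+    // (a union's storage is that of its largest member, so a union type
+    // itself is never recorded as the largest member).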
+    auto structMember = ty.dyn_cast<mlir::cir::StructType>();
+    if (structMember && structMember.isUnion()) {
+      auto candidate = structMember.getLargestMember(dataLayout);
+      if (dataLayout.getTypeSize(candidate) > largestMemberSize) {
+        largestMember = candidate;
+        largestMemberSize = dataLayout.getTypeSize(largestMember);
+      }
+    } else if (dataLayout.getTypeSize(ty) > largestMemberSize) {
+      largestMember = ty;
+      largestMemberSize = dataLayout.getTypeSize(largestMember);
+    }
+
+    // This matches LLVM since it uses the ABI instead of preferred alignment.
+    const llvm::Align tyAlign =
+        llvm::Align(getPacked() ? 1 : dataLayout.getTypeABIAlignment(ty));
+
+    // Add padding if necessary to align the data element properly.
+    if (!llvm::isAligned(tyAlign, structSize)) {
+      isPadded = true;
+      structSize = llvm::alignTo(structSize, tyAlign);
+    }
+
+    // Keep track of maximum alignment constraint.
+    structAlignment = std::max(tyAlign, structAlignment);
+
+    // Struct size up to each element is the element offset.
+    memberOffsets.push_back(mlir::IntegerAttr::get(
+        mlir::IntegerType::get(getContext(), 32), structSize));
+
+    // Consume space for this data item.
+    structSize += dataLayout.getTypeSize(ty);
+  }
+
+  // For unions, the size and alignment are those of the largest element.
+  if (isUnion()) {
+    structSize = largestMemberSize;
+    isPadded = false;
+  } else {
+    // Add padding to the end of the struct so that it could be put in an array
+    // and all array elements would be aligned correctly.
+    if (!llvm::isAligned(structAlignment, structSize)) {
+      isPadded = true;
+      structSize = llvm::alignTo(structSize, structAlignment);
+    }
+  }
+
+  auto offsets = mlir::ArrayAttr::get(getContext(), memberOffsets);
+  layoutInfo = mlir::cir::StructLayoutAttr::get(
+      getContext(), structSize, structAlignment.value(), isPadded,
+      largestMember, offsets);
+}
+
+//===----------------------------------------------------------------------===//
+// IntType Definitions
+//===----------------------------------------------------------------------===//
+
+Type IntType::parse(mlir::AsmParser &parser) {
+  auto *context = parser.getBuilder().getContext();
+  auto loc = parser.getCurrentLocation();
+  bool isSigned;
+  unsigned width;
+
+  if (parser.parseLess())
+    return {};
+
+  // Fetch integer sign.
+  llvm::StringRef sign;
+  if (parser.parseKeyword(&sign))
+    return {};
+  if (sign.equals("s"))
+    isSigned = true;
+  else if (sign.equals("u"))
+    isSigned = false;
+  else {
+    parser.emitError(loc, "expected 's' or 'u'");
+    return {};
+  }
+
+  if (parser.parseComma())
+    return {};
+
+  // Fetch integer size.
+  if (parser.parseInteger(width))
+    return {};
+  if (width < 1 || width > 64) {
+    parser.emitError(loc, "expected integer width to be from 1 up to 64");
+    return {};
+  }
+
+  if (parser.parseGreater())
+    return {};
+
+  return IntType::get(context, width, isSigned);
+}
+
+void IntType::print(mlir::AsmPrinter &printer) const {
+  auto sign = isSigned() ? 's' : 'u';
+  printer << '<' << sign << ", " << getWidth() << '>';
+}
+
+llvm::TypeSize
+IntType::getTypeSizeInBits(const mlir::DataLayout &dataLayout,
+                           mlir::DataLayoutEntryListRef params) const {
+  return llvm::TypeSize::getFixed(getWidth());
+}
+
+uint64_t IntType::getABIAlignment(const mlir::DataLayout &dataLayout,
+                                  mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+uint64_t
+IntType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                               ::mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+mlir::LogicalResult
+IntType::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
+                unsigned width, bool isSigned) {
+
+  if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) {
+    emitError() << "IntType only supports widths from "
+                << IntType::minBitwidth() << " up to "
+                << IntType::maxBitwidth();
+    return mlir::failure();
+  }
+
+  return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// Floating-point type definitions
+//===----------------------------------------------------------------------===//
+
+const llvm::fltSemantics &SingleType::getFloatSemantics() const {
+  return llvm::APFloat::IEEEsingle();
+}
+
+llvm::TypeSize
+SingleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout,
+                              mlir::DataLayoutEntryListRef params) const {
+  return llvm::TypeSize::getFixed(getWidth());
+}
+
+uint64_t
+SingleType::getABIAlignment(const mlir::DataLayout &dataLayout,
+                            mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+uint64_t
+SingleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                  ::mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+const llvm::fltSemantics &DoubleType::getFloatSemantics() const {
+  return llvm::APFloat::IEEEdouble();
+}
+
+llvm::TypeSize
+DoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout,
+                              mlir::DataLayoutEntryListRef params) const {
+  return llvm::TypeSize::getFixed(getWidth());
+}
+
+uint64_t
+DoubleType::getABIAlignment(const mlir::DataLayout &dataLayout,
+                            mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+uint64_t
+DoubleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                  ::mlir::DataLayoutEntryListRef params) const {
+  return (uint64_t)(getWidth() / 8);
+}
+
+const llvm::fltSemantics &FP80Type::getFloatSemantics() const {
+  return llvm::APFloat::x87DoubleExtended();
+}
+
+llvm::TypeSize
+FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout,
+                            mlir::DataLayoutEntryListRef params) const {
+  // The x87 80-bit format occupies 16 bytes (128 bits) in memory.
+  return llvm::TypeSize::getFixed(128);
+}
+
+uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout,
+                                   mlir::DataLayoutEntryListRef params) const {
+  return 16;
+}
+
+uint64_t
+FP80Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout,
+                                ::mlir::DataLayoutEntryListRef params) const {
+  return 16;
+}
+
+const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const {
+  return getUnderlying()
+      .cast<mlir::cir::CIRFPTypeInterface>()
+      .getFloatSemantics();
+}
+
+llvm::TypeSize
+LongDoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout,
+                                  mlir::DataLayoutEntryListRef params) const {
+  return getUnderlying()
+      .cast<mlir::DataLayoutTypeInterface>()
+      .getTypeSizeInBits(dataLayout, params);
+}
+
+uint64_t
+LongDoubleType::getABIAlignment(const mlir::DataLayout &dataLayout,
+                                mlir::DataLayoutEntryListRef params) const {
+  return getUnderlying().cast<mlir::DataLayoutTypeInterface>().getABIAlignment(
+      dataLayout, params);
+}
+
+uint64_t LongDoubleType::getPreferredAlignment(
+    const ::mlir::DataLayout &dataLayout,
+    mlir::DataLayoutEntryListRef params) const {
+  return getUnderlying()
+      .cast<mlir::DataLayoutTypeInterface>()
+      .getPreferredAlignment(dataLayout, params);
+}
+
+LogicalResult
+LongDoubleType::verify(function_ref<InFlightDiagnostic()> emitError,
+                       mlir::Type underlying) {
+  if (!underlying.isa<DoubleType, FP80Type>()) {
+    emitError() << "invalid underlying type for long double";
+    return failure();
+  }
+
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// FuncType Definitions
+//===----------------------------------------------------------------------===//
+
+FuncType FuncType::clone(TypeRange inputs, TypeRange results) const {
+  assert(results.size() == 1 && "expected exactly one result type");
+  return get(llvm::to_vector(inputs), results[0], isVarArg());
+}
+
+mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p,
+                                    llvm::SmallVector<mlir::Type> &params,
+                                    bool &isVarArg) {
+  isVarArg = false;
+  // `(` `)`
+  if (succeeded(p.parseOptionalRParen()))
+    return mlir::success();
+
+  // `(` `...` `)`
+  if (succeeded(p.parseOptionalEllipsis())) {
+    isVarArg = true;
+    return p.parseRParen();
+  }
+
+  // type (`,` type)* (`,` `...`)?
+  mlir::Type type;
+  if (p.parseType(type))
+    return mlir::failure();
+  params.push_back(type);
+  while (succeeded(p.parseOptionalComma())) {
+    if (succeeded(p.parseOptionalEllipsis())) {
+      isVarArg = true;
+      return p.parseRParen();
+    }
+    if (p.parseType(type))
+      return mlir::failure();
+    params.push_back(type);
+  }
+
+  return p.parseRParen();
+}
+
+void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef<mlir::Type> params,
+                       bool isVarArg) {
+  llvm::interleaveComma(params, p,
+                        [&p](mlir::Type type) { p.printType(type); });
+  if (isVarArg) {
+    if (!params.empty())
+      p << ", ";
+    p << "...";
+  }
+  p << ')';
+}
+
+llvm::ArrayRef<mlir::Type> FuncType::getReturnTypes() const {
+  return static_cast<detail::FuncTypeStorage *>(getImpl())->returnType;
+}
+
+bool FuncType::isVoid() const { return getReturnType().isa<VoidType>(); }
+
+//===----------------------------------------------------------------------===//
+// CIR Dialect
+//===----------------------------------------------------------------------===//
+
+void CIRDialect::registerTypes() {
+  // Register tablegen'd types.
+  addTypes<
+#define GET_TYPEDEF_LIST
+#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc"
+      >();
+
+  // Register raw C++ types.
+  addTypes<StructType>();
+}
diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt
index 0d7476b55570..27d826e84489 100644
--- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt
+++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt
@@ -1,3 +1,25 @@
 add_clang_library(MLIRCIR
+  CIRAttrs.cpp
   CIRDialect.cpp
+  CIRTypes.cpp
+  FPEnv.cpp
+
+  DEPENDS
+  MLIRBuiltinLocationAttributesIncGen
+  MLIRCIROpsIncGen
+  MLIRCIREnumsGen
+  MLIRSymbolInterfacesIncGen
+  MLIRCIRASTAttrInterfacesIncGen
+  MLIRCIROpInterfacesIncGen
+  MLIRCIRLoopOpInterfaceIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRIR
+  MLIRCIRInterfaces
+  MLIRDataLayoutInterfaces
+  MLIRFuncDialect
+  MLIRLoopLikeInterface
+  MLIRLLVMDialect
+  MLIRSideEffectInterfaces
+  clangAST
   )
diff --git a/clang/lib/CIR/Dialect/IR/FPEnv.cpp b/clang/lib/CIR/Dialect/IR/FPEnv.cpp
new file mode 100644
index 000000000000..01dfe1e92640
--- /dev/null
+++ b/clang/lib/CIR/Dialect/IR/FPEnv.cpp
@@ -0,0 +1,64 @@
+//===-- FPEnv.cpp ---- FP Environment -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the implementations of entities that describe floating
+/// point environment.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CIR/Dialect/IR/FPEnv.h"
+
+namespace cir {
+
+std::optional<llvm::StringRef>
+convertRoundingModeToStr(llvm::RoundingMode UseRounding) {
+  std::optional<llvm::StringRef> RoundingStr;
+  switch (UseRounding) {
+  case llvm::RoundingMode::Dynamic:
+    RoundingStr = "round.dynamic";
+    break;
+  case llvm::RoundingMode::NearestTiesToEven:
+    RoundingStr = "round.tonearest";
+    break;
+  case llvm::RoundingMode::NearestTiesToAway:
+    RoundingStr = "round.tonearestaway";
+    break;
+  case llvm::RoundingMode::TowardNegative:
+    RoundingStr = "round.downward";
+    break;
+  case llvm::RoundingMode::TowardPositive:
+    RoundingStr = "round.upward";
+    break;
+  case llvm::RoundingMode::TowardZero:
+    RoundingStr = "round.towardZero";
+    break;
+  default:
+    break;
+  }
+  return RoundingStr;
+}
+
+std::optional<llvm::StringRef>
+convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) {
+  std::optional<llvm::StringRef> ExceptStr;
+  switch (UseExcept) {
+  case fp::ebStrict:
+    ExceptStr = "fpexcept.strict";
+    break;
+  case fp::ebIgnore:
+    ExceptStr = "fpexcept.ignore";
+    break;
+  case fp::ebMayTrap:
+    ExceptStr = "fpexcept.maytrap";
+    break;
+  }
+  return ExceptStr;
+}
+
+} // namespace cir
diff --git a/clang/lib/CIR/Dialect/IR/MissingFeatures.h b/clang/lib/CIR/Dialect/IR/MissingFeatures.h
new file mode 100644
index 000000000000..e21fc0e0b191
--- /dev/null
+++ b/clang/lib/CIR/Dialect/IR/MissingFeatures.h
@@ -0,0 +1,34 @@
+//===---- MissingFeatures.h - Checks against NYI ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file introduces helper classes to guard against features that the CIR
+// dialect supports but that are not yet implemented and cannot easily be
+// asserted against.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG
+#define LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG
+
+namespace cir {
+
+struct MissingFeatures {
+  // C++ ABI support
+  static bool cxxABI() { return false; }
+  static bool setCallingConv() { return false; }
+
+  // Address space related
+  static bool addressSpace() { return false; }
+
+  // Sanitizers
+  static bool buildTypeCheck() { return false; }
+};
+
+} // namespace cir
+
+#endif // LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG
diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
new file mode 100644
index 000000000000..8bd6a06b7c4e
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
@@ -0,0 +1,26 @@
+add_clang_library(MLIRCIRTransforms
+  LifetimeCheck.cpp
+  LoweringPrepare.cpp
+  LoweringPrepareItaniumCXXABI.cpp
+  MergeCleanups.cpp
+  DropAST.cpp
+  IdiomRecognizer.cpp
+  LibOpt.cpp
+  StdHelpers.cpp
+  FlattenCFG.cpp
+
+  DEPENDS
+  MLIRCIRPassIncGen
+
+  LINK_LIBS PUBLIC
+  clangAST
+  clangBasic
+
+  MLIRAnalysis
+  MLIRIR
+  MLIRPass
+  MLIRTransformUtils
+
+  MLIRCIR
+  MLIRCIRInterfaces
+)
diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp
new file mode 100644
index 000000000000..b72e7a686788
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp
@@ -0,0 +1,50 @@
+//===- DropAST.cpp - drop AST attribute references from CIR ops ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CIR/Dialect/Passes.h"
+
+#include "PassDetail.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallSet.h"
+
+using namespace mlir;
+using namespace cir;
+
+namespace {
+struct DropASTPass : public DropASTBase<DropASTPass> {
+  DropASTPass() = default;
+  void runOnOperation() override;
+};
+} // namespace
+
+void DropASTPass::runOnOperation() {
+  Operation *op = getOperation();
+  // This needs to be updated with operations that start
+  // carrying AST around.
+  op->walk([&](Operation *op) {
+    if (auto alloca = dyn_cast<AllocaOp>(op)) {
+      alloca.removeAstAttr();
+      auto ty = alloca.getAllocaType().dyn_cast<StructType>();
+      if (!ty)
+        return;
+      ty.dropAst();
+      return;
+    }
+
+    if (auto funcOp = dyn_cast<FuncOp>(op))
+      funcOp.removeAstAttr();
+  });
+}
+
+std::unique_ptr<Pass> mlir::createDropASTPass() {
+  return std::make_unique<DropASTPass>();
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
new file mode 100644
index 000000000000..ea1b413fc685
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -0,0 +1,426 @@
+//====- FlattenCFG.cpp - Flatten CIR CFG ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass that inlines the regions of CIR operations into
+// the parent function region.
+//
+//===----------------------------------------------------------------------===//
+#include "PassDetail.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+
+using namespace mlir;
+using namespace mlir::cir;
+
+namespace {
+
+/// Lowers operations with the terminator trait that have a single successor.
+void lowerTerminator(mlir::Operation *op, mlir::Block *dest,
+                     mlir::PatternRewriter &rewriter) {
+  assert(op->hasTrait<mlir::OpTrait::IsTerminator>() && "not a terminator");
+  mlir::OpBuilder::InsertionGuard guard(rewriter);
+  rewriter.setInsertionPoint(op);
+  rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(op, dest);
+}
+
+/// Walks a region while skipping operations of type `Ops`. This ensures the
+/// callback is not applied to said operations and their children.
+template <typename... Ops>
+void walkRegionSkipping(mlir::Region &region,
+                        mlir::function_ref<void(mlir::Operation *)> callback) {
+  region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
+    if (isa<Ops...>(op))
+      return mlir::WalkResult::skip();
+    callback(op);
+    return mlir::WalkResult::advance();
+  });
+}
+
+struct FlattenCFGPass : public FlattenCFGBase<FlattenCFGPass> {
+
+  FlattenCFGPass() = default;
+  void runOnOperation() override;
+};
+
+struct CIRIfFlattening : public OpRewritePattern<mlir::cir::IfOp> {
+  using OpRewritePattern<mlir::cir::IfOp>::OpRewritePattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::IfOp ifOp,
+                  mlir::PatternRewriter &rewriter) const override {
+    mlir::OpBuilder::InsertionGuard guard(rewriter);
+    auto loc = ifOp.getLoc();
+    auto emptyElse = ifOp.getElseRegion().empty();
+
+    auto *currentBlock = rewriter.getInsertionBlock();
+    auto *remainingOpsBlock =
+        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
+    mlir::Block *continueBlock;
+    if (ifOp->getResults().size() == 0)
+      continueBlock = remainingOpsBlock;
+    else
+      llvm_unreachable("NYI");
+
+    // Inline then region
+    auto *thenBeforeBody = &ifOp.getThenRegion().front();
+    auto *thenAfterBody = &ifOp.getThenRegion().back();
+    rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock);
+
+    rewriter.setInsertionPointToEnd(thenAfterBody);
+    if (auto thenYieldOp =
+            dyn_cast<mlir::cir::YieldOp>(thenAfterBody->getTerminator())) {
+      rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(
+          thenYieldOp, thenYieldOp.getArgs(), continueBlock);
+    }
+
+    rewriter.setInsertionPointToEnd(continueBlock);
+
+    // Has else region: inline it.
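+    // (otherwise both else markers below point at the continue block, so the
+    // conditional branch simply falls through).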
+    mlir::Block *elseBeforeBody = nullptr;
+    mlir::Block *elseAfterBody = nullptr;
+    if (!emptyElse) {
+      elseBeforeBody = &ifOp.getElseRegion().front();
+      elseAfterBody = &ifOp.getElseRegion().back();
+      rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody);
+    } else {
+      elseBeforeBody = elseAfterBody = continueBlock;
+    }
+
+    rewriter.setInsertionPointToEnd(currentBlock);
+    rewriter.create<mlir::cir::BrCondOp>(loc, ifOp.getCondition(),
+                                         thenBeforeBody, elseBeforeBody);
+
+    if (!emptyElse) {
+      rewriter.setInsertionPointToEnd(elseAfterBody);
+      if (auto elseYieldOp =
+              dyn_cast<mlir::cir::YieldOp>(elseAfterBody->getTerminator())) {
+        rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(
+            elseYieldOp, elseYieldOp.getArgs(), continueBlock);
+      }
+    }
+
+    rewriter.replaceOp(ifOp, continueBlock->getArguments());
+    return mlir::success();
+  }
+};
+
+class CIRScopeOpFlattening
+    : public mlir::OpRewritePattern<mlir::cir::ScopeOp> {
+public:
+  using OpRewritePattern<mlir::cir::ScopeOp>::OpRewritePattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::ScopeOp scopeOp,
+                  mlir::PatternRewriter &rewriter) const override {
+    mlir::OpBuilder::InsertionGuard guard(rewriter);
+    auto loc = scopeOp.getLoc();
+
+    // Empty scope: just remove it.
+    if (scopeOp.getRegion().empty()) {
+      rewriter.eraseOp(scopeOp);
+      return mlir::success();
+    }
+
+    // Split the current block before the ScopeOp to create the inlining
+    // point.
+    auto *currentBlock = rewriter.getInsertionBlock();
+    auto *remainingOpsBlock =
+        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
+    mlir::Block *continueBlock;
+    if (scopeOp.getNumResults() == 0)
+      continueBlock = remainingOpsBlock;
+    else
+      llvm_unreachable("NYI");
+
+    // Inline body region.
+    auto *beforeBody = &scopeOp.getRegion().front();
+    auto *afterBody = &scopeOp.getRegion().back();
+    rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock);
+
+    // Save stack and then branch into the body of the region.
+    rewriter.setInsertionPointToEnd(currentBlock);
+    // TODO(CIR): stackSaveOp
+    // auto stackSaveOp = rewriter.create<mlir::LLVM::StackSaveOp>(
+    //     loc, mlir::LLVM::LLVMPointerType::get(
+    //              mlir::IntegerType::get(scopeOp.getContext(), 8)));
+    rewriter.create<mlir::cir::BrOp>(loc, mlir::ValueRange(), beforeBody);
+
+    // Replace the ScopeOp return with a branch that jumps out of the body.
+    // Stack restore before leaving the body region.
+    rewriter.setInsertionPointToEnd(afterBody);
+    if (auto yieldOp =
+            dyn_cast<mlir::cir::YieldOp>(afterBody->getTerminator())) {
+      rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(yieldOp, yieldOp.getArgs(),
+                                                   continueBlock);
+    }
+
+    // TODO(cir): stackrestore?
+
+    // Replace the op with the values returned from the body region.
+    rewriter.replaceOp(scopeOp, continueBlock->getArguments());
+
+    return mlir::success();
+  }
+};
+
+class CIRLoopOpInterfaceFlattening
+    : public mlir::OpInterfaceRewritePattern<mlir::cir::LoopOpInterface> {
+public:
+  using mlir::OpInterfaceRewritePattern<
+      mlir::cir::LoopOpInterface>::OpInterfaceRewritePattern;
+
+  inline void lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body,
+                               mlir::Block *exit,
+                               mlir::PatternRewriter &rewriter) const {
+    mlir::OpBuilder::InsertionGuard guard(rewriter);
+    rewriter.setInsertionPoint(op);
+    rewriter.replaceOpWithNewOp<mlir::cir::BrCondOp>(op, op.getCondition(),
+                                                     body, exit);
+  }
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::LoopOpInterface op,
+                  mlir::PatternRewriter &rewriter) const final {
+    // Setup CFG blocks.
+    auto *entry = rewriter.getInsertionBlock();
+    auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint());
+    auto *cond = &op.getCond().front();
+    auto *body = &op.getBody().front();
+    auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr);
+
+    // Setup the loop entry branch.
+    rewriter.setInsertionPointToEnd(entry);
+    rewriter.create<mlir::cir::BrOp>(op.getLoc(), &op.getEntry().front());
+
+    // Branch from the condition region to the body or exit.
+    auto conditionOp = cast<mlir::cir::ConditionOp>(cond->getTerminator());
+    lowerConditionOp(conditionOp, body, exit, rewriter);
+
+    // TODO(cir): Remove the walks below. They visit operations unnecessarily;
+    // however, to solve this we would likely need a custom DialectConversion
+    // driver to customize the order in which operations are visited.
+
+    // Lower continue statements.
+    mlir::Block *dest = (step ? step : cond);
+    op.walkBodySkippingNestedLoops([&](mlir::Operation *op) {
+      if (isa<mlir::cir::ContinueOp>(op))
+        lowerTerminator(op, dest, rewriter);
+    });
+
+    // Lower break statements.
+    walkRegionSkipping<mlir::cir::LoopOpInterface, mlir::cir::SwitchOp>(
+        op.getBody(), [&](mlir::Operation *op) {
+          if (isa<mlir::cir::BreakOp>(op))
+            lowerTerminator(op, exit, rewriter);
+        });
+
+    // Lower the optional body region yield.
+    auto bodyYield = dyn_cast<mlir::cir::YieldOp>(body->getTerminator());
+    if (bodyYield)
+      lowerTerminator(bodyYield, (step ? step : cond), rewriter);
+
+    // Lower the mandatory step region yield.
+    if (step)
+      lowerTerminator(cast<mlir::cir::YieldOp>(step->getTerminator()), cond,
+                      rewriter);
+
+    // Move region contents out of the loop op.
+    rewriter.inlineRegionBefore(op.getCond(), exit);
+    rewriter.inlineRegionBefore(op.getBody(), exit);
+    if (step)
+      rewriter.inlineRegionBefore(*op.maybeGetStep(), exit);
+
+    rewriter.eraseOp(op);
+    return mlir::success();
+  }
+};
+
+class CIRSwitchOpFlattening
+    : public mlir::OpRewritePattern<mlir::cir::SwitchOp> {
+public:
+  using OpRewritePattern<mlir::cir::SwitchOp>::OpRewritePattern;
+
+  inline void rewriteYieldOp(mlir::PatternRewriter &rewriter,
+                             mlir::cir::YieldOp yieldOp,
+                             mlir::Block *destination) const {
+    rewriter.setInsertionPoint(yieldOp);
+    rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(
+        yieldOp, yieldOp.getOperands(), destination);
+  }
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::SwitchOp op,
+                  mlir::PatternRewriter &rewriter) const override {
+    // Empty switch statement: just erase it.
+    if (!op.getCases().has_value() || op.getCases()->empty()) {
+      rewriter.eraseOp(op);
+      return mlir::success();
+    }
+
+    // Create the exit block.
+    rewriter.setInsertionPointAfter(op);
+    auto *exitBlock =
+        rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint());
+
+    // Allocate the required data structures (the default case does not get
+    // an entry in these vectors).
+    llvm::SmallVector<mlir::APInt, 8> caseValues;
+    llvm::SmallVector<mlir::Block *, 8> caseDestinations;
+    llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
+
+    // Initialize the default case as optional.
+    mlir::Block *defaultDestination = exitBlock;
+    mlir::ValueRange defaultOperands = exitBlock->getArguments();
+
+    // Track fallthrough between cases.
+    mlir::cir::YieldOp fallthroughYieldOp = nullptr;
+
+    // Digest the case statement values and bodies.
+    for (size_t i = 0; i < op.getCases()->size(); ++i) {
+      auto &region = op.getRegion(i);
+      auto caseAttr = op.getCases()->getValue()[i].cast<mlir::cir::CaseAttr>();
+
+      // Found the default case: save its destination and operands.
+      if (caseAttr.getKind().getValue() == mlir::cir::CaseOpKind::Default) {
+        defaultDestination = &region.front();
+        defaultOperands = region.getArguments();
+      } else {
+        // The AnyOf case kind can hold multiple values, hence the loop below.
+        for (auto &value : caseAttr.getValue()) {
+          caseValues.push_back(value.cast<mlir::cir::IntAttr>().getValue());
+          caseOperands.push_back(region.getArguments());
+          caseDestinations.push_back(&region.front());
+        }
+      }
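+      // Illustrative note (editorial, not from the original patch): in C++
+      // like
+      //   switch (x) { case 0: a(); case 1: b(); break; }
+      // case 0's region ends in a cir.yield "fallthrough" terminator, which
+      // must be rewired below to branch into case 1's first block.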
+      // Previous case is a fallthrough: branch it to this case.
+      if (fallthroughYieldOp) {
+        rewriteYieldOp(rewriter, fallthroughYieldOp, &region.front());
+        fallthroughYieldOp = nullptr;
+      }
+
+      for (auto &blk : region.getBlocks()) {
+        if (blk.getNumSuccessors())
+          continue;
+
+        // Handle switch-case yields.
+        if (auto yieldOp = dyn_cast<mlir::cir::YieldOp>(blk.getTerminator()))
+          fallthroughYieldOp = yieldOp;
+      }
+
+      // Handle break statements.
+      walkRegionSkipping<mlir::cir::LoopOpInterface, mlir::cir::SwitchOp>(
+          region, [&](mlir::Operation *op) {
+            if (isa<mlir::cir::BreakOp>(op))
+              lowerTerminator(op, exitBlock, rewriter);
+          });
+
+      // Extract region contents before erasing the switch op.
+      rewriter.inlineRegionBefore(region, exitBlock);
+    }
+
+    // Last case is a fallthrough: branch it to exit.
+    if (fallthroughYieldOp) {
+      rewriteYieldOp(rewriter, fallthroughYieldOp, exitBlock);
+      fallthroughYieldOp = nullptr;
+    }
+
+    // Set switch op to branch to the newly created blocks.
+    rewriter.setInsertionPoint(op);
+    rewriter.replaceOpWithNewOp<mlir::cir::SwitchFlatOp>(
+        op, op.getCondition(), defaultDestination, defaultOperands, caseValues,
+        caseDestinations, caseOperands);
+
+    return mlir::success();
+  }
+};
+
+class CIRTernaryOpFlattening
+    : public mlir::OpRewritePattern<mlir::cir::TernaryOp> {
+public:
+  using OpRewritePattern<mlir::cir::TernaryOp>::OpRewritePattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::TernaryOp op,
+                  mlir::PatternRewriter &rewriter) const override {
+    auto loc = op->getLoc();
+    auto *condBlock = rewriter.getInsertionBlock();
+    auto opPosition = rewriter.getInsertionPoint();
+    auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition);
+    auto *continueBlock = rewriter.createBlock(
+        remainingOpsBlock, op->getResultTypes(),
+        SmallVector<mlir::Location>(/* result number always 1 */ 1, loc));
+    rewriter.create<mlir::cir::BrOp>(loc, remainingOpsBlock);
+
+    auto &trueRegion = op.getTrueRegion();
+    auto *trueBlock = &trueRegion.front();
+    mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
+    rewriter.setInsertionPointToEnd(&trueRegion.back());
+    auto trueYieldOp = dyn_cast<mlir::cir::YieldOp>(trueTerminator);
+
+    rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(
+        trueYieldOp, trueYieldOp.getArgs(), continueBlock);
+    rewriter.inlineRegionBefore(trueRegion, continueBlock);
+
+    auto *falseBlock = continueBlock;
+    auto &falseRegion = op.getFalseRegion();
+
+    falseBlock = &falseRegion.front();
+    mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
+    rewriter.setInsertionPointToEnd(&falseRegion.back());
+    auto falseYieldOp = dyn_cast<mlir::cir::YieldOp>(falseTerminator);
+    rewriter.replaceOpWithNewOp<mlir::cir::BrOp>(
+        falseYieldOp, falseYieldOp.getArgs(), continueBlock);
+    rewriter.inlineRegionBefore(falseRegion, continueBlock);
+
+    rewriter.setInsertionPointToEnd(condBlock);
+    rewriter.create<mlir::cir::BrCondOp>(loc, op.getCond(), trueBlock,
+                                         falseBlock);
+
+    rewriter.replaceOp(op, continueBlock->getArguments());
+
+    // Ok, we're done!
+    return mlir::success();
+  }
+};
+
+void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
+  patterns
+      .add<CIRIfFlattening, CIRLoopOpInterfaceFlattening,
+           CIRScopeOpFlattening, CIRSwitchOpFlattening,
+           CIRTernaryOpFlattening>(patterns.getContext());
+}
+
+void FlattenCFGPass::runOnOperation() {
+  RewritePatternSet patterns(&getContext());
+  populateFlattenCFGPatterns(patterns);
+
+  // Collect operations to apply patterns.
+  SmallVector<Operation *, 16> ops;
+  getOperation()->walk([&](Operation *op) {
+    if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp>(op))
+      ops.push_back(op);
+  });
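+  // Editorial note: the ops are collected up front because the rewrites
+  // above mutate the IR; applyOpPatternsAndFold is then handed just this
+  // list instead of pattern-matching while the walk is still in progress.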
+  // Apply patterns.
+  if (applyOpPatternsAndFold(ops, std::move(patterns)).failed())
+    signalPassFailure();
+}
+
+} // namespace
+
+namespace mlir {
+
+std::unique_ptr<Pass> createFlattenCFGPass() {
+  return std::make_unique<FlattenCFGPass>();
+}
+
+} // namespace mlir
diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp
new file mode 100644
index 000000000000..7b1218ad7c27
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp
@@ -0,0 +1,213 @@
+//===- IdiomRecognizer.cpp - Recognize and raise C/C++ library calls ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PassDetail.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Region.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Module.h"
+#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+#include "StdHelpers.h"
+
+using cir::CIRBaseBuilderTy;
+using namespace mlir;
+using namespace mlir::cir;
+
+namespace {
+
+struct IdiomRecognizerPass : public IdiomRecognizerBase<IdiomRecognizerPass> {
+  IdiomRecognizerPass() = default;
+  void runOnOperation() override;
+  void recognizeCall(CallOp call);
+  bool raiseStdFind(CallOp call);
+  bool raiseIteratorBeginEnd(CallOp call);
+
+  // Handle pass options
+  struct Options {
+    enum : unsigned {
+      None = 0,
+      RemarkFoundCalls = 1,
+      RemarkAll = 1 << 1,
+    };
+    unsigned val = None;
+    bool isOptionsParsed = false;
+
+    void parseOptions(ArrayRef<llvm::StringRef> remarks) {
+      if (isOptionsParsed)
+        return;
+
+      for (auto &remark : remarks) {
+        val |= StringSwitch<unsigned>(remark)
+                   .Case("found-calls", RemarkFoundCalls)
+                   .Case("all", RemarkAll)
+                   .Default(None);
+      }
+      isOptionsParsed = true;
+    }
+
+    void parseOptions(IdiomRecognizerPass &pass) {
+      SmallVector<llvm::StringRef, 4> remarks;
+
+      for (auto &r : pass.remarksList)
+        remarks.push_back(r);
+
+      parseOptions(remarks);
+    }
+
+    bool emitRemarkAll() { return val & RemarkAll; }
+    bool emitRemarkFoundCalls() {
+      return emitRemarkAll() || val & RemarkFoundCalls;
+    }
+  } opts;
+
+  ///
+  /// AST related
+  /// -----------
+  clang::ASTContext *astCtx;
+  void setASTContext(clang::ASTContext *c) { astCtx = c; }
+
+  /// Tracks current module.
+  ModuleOp theModule;
+};
+} // namespace
+
+bool IdiomRecognizerPass::raiseStdFind(CallOp call) {
+  // FIXME: tablegen all of this function.
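+  // Rough shape of the rewrite (editorial sketch, assembly syntax
+  // approximated): a CIR call such as
+  //   %r = cir.call @std::find(...)(%first, %last, %val)
+  // is raised into the dedicated operation
+  //   %r = cir.std.find(@std::find(...), %first, %last, %val)
+  // preserving the original callee so the idiom can be lowered back later.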
+  if (call.getNumOperands() != 3)
+    return false;
+
+  auto callExprAttr = call.getAstAttr();
+  if (!callExprAttr || !callExprAttr.isStdFunctionCall("find")) {
+    return false;
+  }
+
+  if (opts.emitRemarkFoundCalls())
+    emitRemark(call.getLoc()) << "found call to std::find()";
+
+  CIRBaseBuilderTy builder(getContext());
+  builder.setInsertionPointAfter(call.getOperation());
+  auto findOp = builder.create<mlir::cir::StdFindOp>(
+      call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(),
+      call.getOperand(0), call.getOperand(1), call.getOperand(2));
+
+  call.replaceAllUsesWith(findOp);
+  call.erase();
+  return true;
+}
+
+static bool isIteratorLikeType(mlir::Type t) {
+  // TODO: some iterators are going to be represented with structs,
+  // in which case we could look at ASTRecordDeclInterface for more
+  // information.
+  auto pTy = t.dyn_cast<PointerType>();
+  if (!pTy || !pTy.getPointee().isa<IntType>())
+    return false;
+  return true;
+}
+
+static bool isIteratorInStdContainer(mlir::Type t) {
+  // TODO: only std::array supported for now, generalize and
+  // use tablegen. CallDescription.cpp in the static analyzer
+  // could be a good inspiration source too.
+  return isStdArrayType(t);
+}
+
+bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) {
+  // FIXME: tablegen all of this function.
+  CIRBaseBuilderTy builder(getContext());
+
+  if (call.getNumOperands() != 1 || call.getNumResults() != 1)
+    return false;
+
+  auto callExprAttr = call.getAstAttr();
+  if (!callExprAttr)
+    return false;
+
+  if (!isIteratorLikeType(call.getResult(0).getType()))
+    return false;
+
+  // The first argument is the container's "this" pointer.
+  auto thisPtr = call.getOperand(0).getType().dyn_cast<PointerType>();
+  if (!thisPtr || !isIteratorInStdContainer(thisPtr.getPointee()))
+    return false;
+
+  builder.setInsertionPointAfter(call.getOperation());
+  mlir::Operation *iterOp;
+  if (callExprAttr.isIteratorBeginCall()) {
+    if (opts.emitRemarkFoundCalls())
+      emitRemark(call.getLoc()) << "found call to begin() iterator";
+    iterOp = builder.create<mlir::cir::IterBeginOp>(
+        call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(),
+        call.getOperand(0));
+  } else if (callExprAttr.isIteratorEndCall()) {
+    if (opts.emitRemarkFoundCalls())
+      emitRemark(call.getLoc()) << "found call to end() iterator";
+    iterOp = builder.create<mlir::cir::IterEndOp>(
+        call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(),
+        call.getOperand(0));
+  } else {
+    return false;
+  }
+
+  call.replaceAllUsesWith(iterOp);
+  call.erase();
+  return true;
+}
+
+void IdiomRecognizerPass::recognizeCall(CallOp call) {
+  if (raiseIteratorBeginEnd(call))
+    return;
+
+  if (raiseStdFind(call))
+    return;
+}
+
+void IdiomRecognizerPass::runOnOperation() {
+  assert(astCtx && "Missing ASTContext, please construct with the right ctor");
+  opts.parseOptions(*this);
+  auto *op = getOperation();
+  if (isa<::mlir::ModuleOp>(op))
+    theModule = cast<::mlir::ModuleOp>(op);
+
+  SmallVector<CallOp> callsToTransform;
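+  // Editorial note: calls are collected first and only rewritten after the
+  // walk completes; recognizeCall() erases and creates operations, which is
+  // not safe while the walk is still iterating over them.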
+  op->walk([&](CallOp callOp) {
+    // Process call operations.
+
+    // Skip indirect calls.
+    auto c = callOp.getCallee();
+    if (!c)
+      return;
+    callsToTransform.push_back(callOp);
+  });
+
+  for (auto c : callsToTransform)
+    recognizeCall(c);
+}
+
+std::unique_ptr<Pass> mlir::createIdiomRecognizerPass() {
+  return std::make_unique<IdiomRecognizerPass>();
+}
+
+std::unique_ptr<Pass>
+mlir::createIdiomRecognizerPass(clang::ASTContext *astCtx) {
+  auto pass = std::make_unique<IdiomRecognizerPass>();
+  pass->setASTContext(astCtx);
+  return std::move(pass);
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp
new file mode 100644
index 000000000000..762ee961bcba
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp
@@ -0,0 +1,247 @@
+//===- LibOpt.cpp - Optimize CIR raised C/C++ library idioms --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PassDetail.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Region.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Module.h"
+#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+#include "StdHelpers.h"
+
+using cir::CIRBaseBuilderTy;
+using namespace mlir;
+using namespace mlir::cir;
+
+namespace {
+
+struct LibOptPass : public LibOptBase<LibOptPass> {
+  LibOptPass() = default;
+  void runOnOperation() override;
+  void xformStdFindIntoMemchr(StdFindOp findOp);
+
+  // Handle pass options
+  struct Options {
+    enum : unsigned {
+      None = 0,
+      RemarkTransforms = 1,
+      RemarkAll = 1 << 1,
+    };
+    unsigned val = None;
+    bool isOptionsParsed = false;
+
+    void parseOptions(ArrayRef<llvm::StringRef> remarks) {
+      if (isOptionsParsed)
+        return;
+
+      for (auto &remark : remarks) {
+        val |= StringSwitch<unsigned>(remark)
+                   .Case("transforms", RemarkTransforms)
+                   .Case("all", RemarkAll)
+                   .Default(None);
+      }
+      isOptionsParsed = true;
+    }
+
+    void parseOptions(LibOptPass &pass) {
+      SmallVector<llvm::StringRef, 4> remarks;
+
+      for (auto &r : pass.remarksList)
+        remarks.push_back(r);
+
+      parseOptions(remarks);
+    }
+
+    bool emitRemarkAll() { return val & RemarkAll; }
+    bool emitRemarkTransforms() {
+      return emitRemarkAll() || val & RemarkTransforms;
+    }
+  } opts;
+
+  ///
+  /// AST related
+  /// -----------
+  clang::ASTContext *astCtx;
+  void setASTContext(clang::ASTContext *c) { astCtx = c; }
+
+  /// Tracks current module.
+  ModuleOp theModule;
+};
+} // namespace
+
+static bool isSequentialContainer(mlir::Type t) {
+  // TODO: other sequential ones: vector, deque, list, forward_list.
+  return isStdArrayType(t);
+}
+
+static bool getIntegralNTTPAt(StructType t, size_t pos, unsigned &size) {
+  auto *d = dyn_cast<clang::ClassTemplateSpecializationDecl>(
+      t.getAst().getRawDecl());
+  if (!d)
+    return false;
+
+  auto &templArgs = d->getTemplateArgs();
+  if (pos >= templArgs.size())
+    return false;
+
+  auto arraySizeTemplateArg = templArgs[pos];
+  if (arraySizeTemplateArg.getKind() != clang::TemplateArgument::Integral)
+    return false;
+
+  size = arraySizeTemplateArg.getAsIntegral().getSExtValue();
+  return true;
+}
+
+static bool containerHasStaticSize(StructType t, unsigned &size) {
+  // TODO: add others.
+  if (!isStdArrayType(t))
+    return false;
+
+  // Get "size" from std::array<T, size>.
+  unsigned sizeNTTPPos = 1;
+  return getIntegralNTTPAt(t, sizeNTTPPos, size);
+}
+
+void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) {
+  // template <class T>
+  //   requires (sizeof(T) == 1 && is_integral_v<T>)
+  // T* find(T* first, T* last, T value) {
+  //   if (auto result = __builtin_memchr(first, value, last - first))
+  //     return result;
+  //   return last;
+  // }
+
+  auto first = findOp.getOperand(0);
+  auto last = findOp.getOperand(1);
+  auto value = findOp->getOperand(2);
+  if (!first.getType().isa<PointerType>() ||
+      !last.getType().isa<PointerType>())
+    return;
+
+  // Transformation:
+  // - 1st arg: the data pointer.
+  //   - Assert the iterator is a pointer to a primitive type.
+  //   - Check that IterBeginOp is char sized. TODO: add other types that
+  //     map to char size.
+  auto iterResTy = findOp.getType().dyn_cast<PointerType>();
+  assert(iterResTy && "expected pointer type for iterator");
+  auto underlyingDataTy = iterResTy.getPointee().dyn_cast<IntType>();
+  if (!underlyingDataTy || underlyingDataTy.getWidth() != 8)
+    return;
+
+  // - 2nd arg: the pattern.
+  //   - Check it's a pointer type.
+  //   - Load the pattern from memory and cast it to `int`.
+  auto patternAddrTy = value.getType().dyn_cast<PointerType>();
+  if (!patternAddrTy || patternAddrTy.getPointee() != underlyingDataTy)
+    return;
+
+  // - 3rd arg: the size.
+  //   - Create and pass a cir.const with the NTTP value.
+
+  CIRBaseBuilderTy builder(getContext());
+  builder.setInsertionPointAfter(findOp.getOperation());
+  auto memchrOp0 =
+      builder.createBitcast(first.getLoc(), first, builder.getVoidPtrTy());
+
+  // FIXME: get datalayout based "int" instead of fixed size 4.
+  auto loadPattern = builder.create<mlir::cir::LoadOp>(
+      value.getLoc(), underlyingDataTy, value);
+  auto memchrOp1 = builder.createIntCast(
+      loadPattern, IntType::get(builder.getContext(), 32, true));
+
+  const auto uInt64Ty = IntType::get(builder.getContext(), 64, false);
+
+  // Build the memchr op:
+  //   void *memchr(const void *s, int c, size_t n);
+  auto memChr = [&] {
+    if (auto iterBegin = dyn_cast<IterBeginOp>(first.getDefiningOp());
+        iterBegin && isa<IterEndOp>(last.getDefiningOp())) {
+      // Both operands have the same type, use iterBegin.
+
+      // Look at the 'this' pointer to retrieve container information.
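+      // e.g. (illustrative) for std::array<char, 9> the size NTTP sits at
+      // template argument index 1 and yields 9, so `last - first` can be
+      // folded into a compile-time constant below.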
+      auto thisPtr =
+          iterBegin.getOperand().getType().cast<PointerType>().getPointee();
+      auto containerTy = dyn_cast<StructType>(thisPtr);
+
+      unsigned staticSize = 0;
+      if (containerTy && isSequentialContainer(containerTy) &&
+          containerHasStaticSize(containerTy, staticSize)) {
+        return builder.create<MemChrOp>(
+            findOp.getLoc(), memchrOp0, memchrOp1,
+            builder.create<ConstantOp>(
+                findOp.getLoc(), uInt64Ty,
+                mlir::cir::IntAttr::get(uInt64Ty, staticSize)));
+      }
+    }
+    return builder.create<MemChrOp>(
+        findOp.getLoc(), memchrOp0, memchrOp1,
+        builder.create<PtrDiffOp>(findOp.getLoc(), uInt64Ty, last, first));
+  }();
+
+  auto MemChrResult =
+      builder.createBitcast(findOp.getLoc(), memChr.getResult(), iterResTy);
+
+  // if (result)
+  //   return result;
+  // else
+  //   return last;
+  auto NullPtr = builder.create<ConstantOp>(
+      findOp.getLoc(), first.getType(), ConstPtrAttr::get(first.getType(), 0));
+  auto CmpResult = builder.create<CmpOp>(
+      findOp.getLoc(), BoolType::get(builder.getContext()), CmpOpKind::eq,
+      NullPtr.getRes(), MemChrResult);
+
+  auto result = builder.create<TernaryOp>(
+      findOp.getLoc(), CmpResult.getResult(),
+      [&](mlir::OpBuilder &ob, mlir::Location Loc) {
+        ob.create<YieldOp>(Loc, last);
+      },
+      [&](mlir::OpBuilder &ob, mlir::Location Loc) {
+        ob.create<YieldOp>(Loc, MemChrResult);
+      });
+
+  findOp.replaceAllUsesWith(result);
+  findOp.erase();
+}
+
+void LibOptPass::runOnOperation() {
+  assert(astCtx && "Missing ASTContext, please construct with the right ctor");
+  opts.parseOptions(*this);
+  auto *op = getOperation();
+  if (isa<::mlir::ModuleOp>(op))
+    theModule = cast<::mlir::ModuleOp>(op);
+
+  SmallVector<StdFindOp> stdFindToTransform;
+  op->walk([&](StdFindOp findOp) { stdFindToTransform.push_back(findOp); });
+
+  for (auto c : stdFindToTransform)
+    xformStdFindIntoMemchr(c);
+}
+
+std::unique_ptr<Pass> mlir::createLibOptPass() {
+  return std::make_unique<LibOptPass>();
+}
+
+std::unique_ptr<Pass> mlir::createLibOptPass(clang::ASTContext *astCtx) {
+  auto pass = std::make_unique<LibOptPass>();
+  pass->setASTContext(astCtx);
+  return std::move(pass);
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp
new file mode 100644
index 000000000000..e77a6bdf14b8
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp
@@ -0,0 +1,1961 @@
+//===- LifetimeCheck.cpp - emit diagnostic checks for lifetime violations -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PassDetail.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+
+#include "clang/CIR/Interfaces/CIRLoopOpInterface.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallSet.h"
+
+#include <memory>
+
+using namespace mlir;
+using namespace cir;
+
+namespace {
+
+struct LocOrdering {
+  bool operator()(mlir::Location L1, mlir::Location L2) const {
+    return std::less<const void *>()(L1.getAsOpaquePointer(),
+                                     L2.getAsOpaquePointer());
+  }
+};
+
+struct LifetimeCheckPass : public LifetimeCheckBase<LifetimeCheckPass> {
+  LifetimeCheckPass() = default;
+  void runOnOperation() override;
+
+  void checkOperation(Operation *op);
+  void checkFunc(cir::FuncOp fnOp);
+  void checkBlock(Block &block);
+
+  void checkRegionWithScope(Region &region);
+  void checkRegion(Region &region);
+
+  void checkIf(IfOp op);
+  void checkSwitch(SwitchOp op);
+  void checkLoop(LoopOpInterface op);
+  void checkAlloca(AllocaOp op);
+  void checkStore(StoreOp op);
+  void checkLoad(LoadOp op);
+  void checkCall(CallOp callOp);
+  void checkAwait(AwaitOp awaitOp);
+  void checkReturn(ReturnOp retOp);
+
+  void classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t,
+                                     mlir::Location loc, unsigned nestLevel);
+  void updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc);
+  void updatePointsToForConstStruct(mlir::Value addr,
+                                    mlir::cir::ConstStructAttr value,
+                                    mlir::Location loc);
+  void updatePointsToForZeroStruct(mlir::Value addr, StructType sTy,
+                                   mlir::Location loc);
+
+  enum DerefStyle {
+    Direct,
+    RetLambda,
+    CallParam,
+    IndirectCallParam,
+  };
+  void checkPointerDeref(mlir::Value addr, mlir::Location loc,
+                         DerefStyle derefStyle = DerefStyle::Direct);
+  void checkCoroTaskStore(StoreOp storeOp);
+  void checkLambdaCaptureStore(StoreOp storeOp);
+  void trackCallToCoroutine(CallOp callOp);
+
+  void checkCtor(CallOp callOp, ASTCXXConstructorDeclInterface ctor);
+  void checkMoveAssignment(CallOp callOp, ASTCXXMethodDeclInterface m);
+  void checkCopyAssignment(CallOp callOp, ASTCXXMethodDeclInterface m);
+  void checkNonConstUseOfOwner(mlir::Value ownerAddr, mlir::Location loc);
+  void checkOperators(CallOp callOp, ASTCXXMethodDeclInterface m);
+  void checkOtherMethodsAndFunctions(CallOp callOp,
+                                     ASTCXXMethodDeclInterface m);
+  void checkForOwnerAndPointerArguments(CallOp callOp, unsigned firstArgIdx);
+
+  // TODO: merge both methods below and pass down an enum.
+  //
+  // Check if a method's 'this' pointer (first arg) is tracked as
+  // a pointer category. Assumes the CallOp in question represents a method
+  // and returns the actual value associated with the tracked 'this' or an
+  // empty value if none is found.
+  mlir::Value getThisParamPointerCategory(CallOp callOp);
+  // Check if a method's 'this' pointer (first arg) is tracked as
+  // an owner category. Assumes the CallOp in question represents a method
+  // and returns the actual value associated with the tracked 'this' or an
+  // empty value if none is found.
+  mlir::Value getThisParamOwnerCategory(CallOp callOp);
+
+  // Tracks current module.
+  ModuleOp theModule;
+  // Tracks the current function under analysis.
+  std::optional<cir::FuncOp> currFunc;
+
+  // Common helpers.
+  bool isCtorInitPointerFromOwner(CallOp callOp);
+  mlir::Value getNonConstUseOfOwner(CallOp callOp,
+                                    ASTCXXMethodDeclInterface m);
+  bool isOwnerOrPointerClassMethod(CallOp callOp,
+                                   ASTCXXMethodDeclInterface m);
+
+  // Diagnostic helpers.
+  void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey,
+                          mlir::Location warningLoc,
+                          DerefStyle derefStyle = DerefStyle::Direct);
+
+  ///
+  /// Pass options handling
+  /// ---------------------
+
+  struct Options {
+    enum : unsigned {
+      None = 0,
+      // Emit pset remarks only when detecting invalid derefs.
+      RemarkPsetInvalid = 1,
+      // Emit pset remarks for all derefs.
+      RemarkPsetAlways = 1 << 1,
+      RemarkAll = 1 << 2,
+      HistoryNull = 1 << 3,
+      HistoryInvalid = 1 << 4,
+      HistoryAll = 1 << 5,
+    };
+    unsigned val = None;
+    unsigned histLimit = 1;
+    bool isOptionsParsed = false;
+
+    void parseOptions(ArrayRef<llvm::StringRef> remarks,
+                      ArrayRef<llvm::StringRef> hist, unsigned hist_limit) {
+      if (isOptionsParsed)
+        return;
+
+      for (auto &remark : remarks) {
+        val |= StringSwitch<unsigned>(remark)
+                   .Case("pset-invalid", RemarkPsetInvalid)
+                   .Case("pset-always", RemarkPsetAlways)
+                   .Case("all", RemarkAll)
+                   .Default(None);
+      }
+      for (auto &h : hist) {
+        val |= StringSwitch<unsigned>(h)
+                   .Case("invalid", HistoryInvalid)
+                   .Case("null", HistoryNull)
+                   .Case("all", HistoryAll)
+                   .Default(None);
+      }
+      histLimit = hist_limit;
+      isOptionsParsed = true;
+    }
+
+    void parseOptions(LifetimeCheckPass &pass) {
+      SmallVector<llvm::StringRef, 4> remarks;
+      SmallVector<llvm::StringRef, 4> hists;
+
+      for (auto &r : pass.remarksList)
+        remarks.push_back(r);
+
+      for (auto &h : pass.historyList)
+        hists.push_back(h);
+
+      parseOptions(remarks, hists, pass.historyLimit);
+    }
+
+    bool emitRemarkAll() { return val & RemarkAll; }
+    bool emitRemarkPsetInvalid() {
+      return emitRemarkAll() || val & RemarkPsetInvalid;
+    }
+    bool emitRemarkPsetAlways() {
+      return emitRemarkAll() || val & RemarkPsetAlways;
+    }
+
+    bool emitHistoryAll() { return val & HistoryAll; }
+    bool emitHistoryNull() { return emitHistoryAll() || val & HistoryNull; }
+    bool emitHistoryInvalid() {
+      return emitHistoryAll() || val & HistoryInvalid;
+    }
+  } opts;
+
+  ///
+  /// State
+  /// -----
+
+  // Represents the state of an element in a pointer set (pset).
+  struct State {
+    using DataTy = enum {
+      Invalid,
+      NullPtr,
+      Global,
+      // FIXME: currently only supports one level of OwnedBy!
+      OwnedBy,
+      LocalValue,
+      NumKindsMinusOne = LocalValue
+    };
+    State() { val.setInt(Invalid); }
+    State(DataTy d) { val.setInt(d); }
+    State(mlir::Value v, DataTy d = LocalValue) {
+      assert((d == LocalValue || d == OwnedBy) && "expected value or owned");
+      val.setPointerAndInt(v, d);
+    }
+
+    static constexpr int KindBits = 3;
+    static_assert((1 << KindBits) > NumKindsMinusOne,
+                  "Not enough room for kind!");
+    llvm::PointerIntPair<mlir::Value, KindBits> val;
+
+    /// Provide less/equal than operator for sorting / set ops.
+    bool operator<(const State &RHS) const {
+      // FIXME: note that this makes the ordering non-deterministic, do
+      // we really care?
+      if (hasValue() && RHS.hasValue())
+        return val.getPointer().getAsOpaquePointer() <
+               RHS.val.getPointer().getAsOpaquePointer();
+      return val.getInt() < RHS.val.getInt();
+    }
+    bool operator==(const State &RHS) const {
+      if (hasValue() && RHS.hasValue())
+        return val.getPointer() == RHS.val.getPointer();
+      return val.getInt() == RHS.val.getInt();
+    }
+
+    bool isLocalValue() const { return val.getInt() == LocalValue; }
+    bool isOwnedBy() const { return val.getInt() == OwnedBy; }
+    bool hasValue() const { return isLocalValue() || isOwnedBy(); }
+
+    mlir::Value getData() const {
+      assert(hasValue() && "data type does not hold a mlir::Value");
+      return val.getPointer();
+    }
+
+    void dump(llvm::raw_ostream &OS = llvm::errs(), int ownedGen = 0);
+
+    static State getInvalid() { return {Invalid}; }
+    static State getNullPtr() { return {NullPtr}; }
+    static State getLocalValue(mlir::Value v) { return {v, LocalValue}; }
+    static State getOwnedBy(mlir::Value v) { return {v, State::OwnedBy}; }
+  };
+
+  ///
+  /// Invalid and null history tracking
+  /// ---------------------------------
+  enum InvalidStyle {
+    Unknown,
+    EndOfScope,
+    NotInitialized,
+    MovedFrom,
+    NonConstUseOfOwner,
+  };
+
+  struct InvalidHistEntry {
+    InvalidStyle style = Unknown;
+    std::optional<mlir::Location> loc;
+    std::optional<mlir::Value> val;
+    InvalidHistEntry() = default;
+    InvalidHistEntry(InvalidStyle s, std::optional<mlir::Location> l,
+                     std::optional<mlir::Value> v)
+        : style(s), loc(l), val(v) {}
+  };
+
+  struct InvalidHist {
+    llvm::SmallVector<InvalidHistEntry, 8> entries;
+    void add(mlir::Value ptr, InvalidStyle invalidStyle, mlir::Location loc,
+             std::optional<mlir::Value> val = {}) {
+      entries.emplace_back(InvalidHistEntry(invalidStyle, loc, val));
+    }
+  };
+
+  llvm::DenseMap<mlir::Value, InvalidHist> invalidHist;
+
+  using PMapNullHistType =
+      llvm::DenseMap<mlir::Value, std::optional<mlir::Location>>;
+  PMapNullHistType pmapNullHist;
+
+  // Track emitted diagnostics, and do not repeat them.
+  llvm::SmallSet<mlir::Location, 8, LocOrdering> emittedDiagnostics;
+
+  ///
+  /// Pointer Map and Pointer Set
+  /// ---------------------------
+
+  using PSetType = llvm::SmallSet<State, 8>;
+  // FIXME: this should be a ScopedHashTable for consistency.
+  using PMapType = llvm::DenseMap<mlir::Value, PSetType>;
+
+  // FIXME: we probably don't need to track it at this level, perhaps
+  // just tracking at the scope level should be enough?
+  PMapType *currPmap = nullptr;
+  PMapType &getPmap() { return *currPmap; }
+  void markPsetInvalid(mlir::Value ptr, InvalidStyle invalidStyle,
+                       mlir::Location loc,
+                       std::optional<mlir::Value> extraVal = {}) {
+    auto &pset = getPmap()[ptr];
+
+    // If pset is already invalid, don't bother.
+    if (pset.count(State::getInvalid()))
+      return;
+
+    // 2.3 - putting invalid into pset(x) is said to invalidate it.
+    pset.insert(State::getInvalid());
+    invalidHist[ptr].add(ptr, invalidStyle, loc, extraVal);
+  }
+
+  void markPsetNull(mlir::Value addr, mlir::Location loc) {
+    getPmap()[addr].clear();
+    getPmap()[addr].insert(State::getNullPtr());
+    pmapNullHist[addr] = loc;
+  }
+
+  void joinPmaps(SmallVectorImpl<PMapType> &pmaps);
+
+  // Provides p1179's 'KILL' functionality. See the implementation for more
+  // information.
+  void kill(const State &s, InvalidStyle invalidStyle, mlir::Location loc);
+  void killInPset(mlir::Value ptrKey, const State &s,
+                  InvalidStyle invalidStyle, mlir::Location loc,
+                  std::optional<mlir::Value> extraVal);
+
+  // Local pointers.
+  SmallPtrSet<mlir::Value, 8> ptrs;
+
+  // Local owners. We use a map instead of a set to track the current
+  // generation of this owner type's internal pointees. For instance, this
+  // allows tracking subsequent reuse of owner storage when a non-const use
+  // happens.
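+  // For example (illustrative): with `std::vector<int> v` as a tracked owner
+  // and `auto it = v.begin()` pointing into its current generation, a later
+  // non-const use such as `v.push_back(...)` bumps v's generation and
+  // invalidates pset entries still referring to the previous one.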
+  DenseMap<mlir::Value, unsigned> owners;
+  void addOwner(mlir::Value o) {
+    assert(!owners.count(o) && "already tracked");
+    owners[o] = 0;
+  }
+  void incOwner(mlir::Value o) {
+    assert(owners.count(o) && "entry expected");
+    owners[o]++;
+  }
+
+  // Aggregates and exploded fields.
+  using ExplodedFieldsTy = llvm::SmallVector<mlir::Value, 4>;
+  DenseMap<mlir::Value, ExplodedFieldsTy> aggregates;
+  void addAggregate(mlir::Value a, SmallVectorImpl<mlir::Value> &fields) {
+    assert(!aggregates.count(a) && "already tracked");
+    aggregates[a].swap(fields);
+  }
+
+  // Useful helpers for debugging.
+  void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs());
+  LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap);
+  LLVM_DUMP_METHOD void dumpCurrentPmap();
+
+  ///
+  /// Coroutine tasks (promise_type)
+  /// ------------------------------
+
+  // Track types we already know to be a coroutine task (promise_type).
+  llvm::DenseMap<mlir::Type, bool> IsTaskTyCache;
+  // Is the type associated with taskVal a coroutine task? Uses IsTaskTyCache
+  // or computes it from the associated AST node.
+  bool isTaskType(mlir::Value taskVal);
+  // Addresses of coroutine tasks found in the current function.
+  SmallPtrSet<mlir::Value, 8> tasks;
+
+  ///
+  /// Lambdas
+  /// -------
+
+  // Track types we already know to be a lambda.
+  llvm::DenseMap<mlir::Type, bool> IsLambdaTyCache;
+  // Check if a given cir type is a struct containing a lambda.
+  bool isLambdaType(mlir::Type ty);
+  // Get the lambda struct from a member access to it.
+  mlir::Value getLambdaFromMemberAccess(mlir::Value addr);
+
+  ///
+  /// Scope, context and guards
+  /// -------------------------
+
+  // Represents the scope context for IR operations (cir.scope, cir.if,
+  // then/else regions, etc). Tracks the declaration of variables in the
+  // current local scope.
+  struct LexicalScopeContext {
+    unsigned Depth = 0;
+    LexicalScopeContext() = delete;
+
+    llvm::PointerUnion<mlir::Region *, mlir::Operation *> parent;
+    LexicalScopeContext(mlir::Region *R) : parent(R) {}
+    LexicalScopeContext(mlir::Operation *Op) : parent(Op) {}
+    ~LexicalScopeContext() = default;
+
+    // Track all local values added in this scope.
+    SmallPtrSet<mlir::Value, 4> localValues;
+
+    // Track the result of temporaries with coroutine call results,
+    // they are used to initialize a task.
+    //
+    // The value must come directly out of a cir.call to a cir.func which
+    // is a coroutine.
+    SmallPtrSet<mlir::Value, 4> localTempTasks;
+
+    // Track seen lambdas that escape out of the current scope
+    // (e.g. lambdas returned out of functions).
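+    // A classic instance of such an escape (illustrative):
+    //   auto make() {
+    //     int x = 0;
+    //     return [&x] { return x; }; // dangling: x dies with make()
+    //   }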
+    DenseMap<mlir::Value, mlir::Location> localRetLambdas;
+
+    LLVM_DUMP_METHOD void dumpLocalValues();
+  };
+
+  class LexicalScopeGuard {
+    LifetimeCheckPass &Pass;
+    LexicalScopeContext *OldVal = nullptr;
+
+  public:
+    LexicalScopeGuard(LifetimeCheckPass &p, LexicalScopeContext *L) : Pass(p) {
+      if (Pass.currScope) {
+        OldVal = Pass.currScope;
+        L->Depth++;
+      }
+      Pass.currScope = L;
+    }
+
+    LexicalScopeGuard(const LexicalScopeGuard &) = delete;
+    LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete;
+    LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete;
+
+    void cleanup();
+    void restore() { Pass.currScope = OldVal; }
+    ~LexicalScopeGuard() {
+      cleanup();
+      restore();
+    }
+  };
+
+  class PmapGuard {
+    LifetimeCheckPass &Pass;
+    PMapType *OldVal = nullptr;
+
+  public:
+    PmapGuard(LifetimeCheckPass &lcp, PMapType *L) : Pass(lcp) {
+      if (Pass.currPmap) {
+        OldVal = Pass.currPmap;
+      }
+      Pass.currPmap = L;
+    }
+
+    PmapGuard(const PmapGuard &) = delete;
+    PmapGuard &operator=(const PmapGuard &) = delete;
+    PmapGuard &operator=(PmapGuard &&other) = delete;
+
+    void restore() { Pass.currPmap = OldVal; }
+    ~PmapGuard() { restore(); }
+  };
+
+  LexicalScopeContext *currScope = nullptr;
+
+  ///
+  /// AST related
+  /// -----------
+
+  std::optional<clang::ASTContext *> astCtx;
+  void setASTContext(clang::ASTContext *c) { astCtx = c; }
+};
+} // namespace
+
+static std::string getVarNameFromValue(mlir::Value v) {
+
+  auto srcOp = v.getDefiningOp();
+  if (!srcOp) {
+    auto blockArg = cast<mlir::BlockArgument>(v);
+    assert(blockArg.getOwner()->isEntryBlock() && "random block args NYI");
+    llvm::SmallString<128> finalName;
+    llvm::raw_svector_ostream Out(finalName);
+    Out << "fn_arg:" << blockArg.getArgNumber();
+    return Out.str().str();
+  }
+
+  if (auto allocaOp = dyn_cast<AllocaOp>(srcOp))
+    return allocaOp.getName().str();
+  if (auto getElemOp = dyn_cast<GetMemberOp>(srcOp)) {
+    auto parent = dyn_cast<AllocaOp>(getElemOp.getAddr().getDefiningOp());
+    if (parent) {
+      llvm::SmallString<128> finalName;
+      llvm::raw_svector_ostream Out(finalName);
+      Out << parent.getName() << "." << getElemOp.getName();
+      return Out.str().str();
+    }
+  }
+  if (auto callOp = dyn_cast<CallOp>(srcOp)) {
+    if (callOp.getCallee()) {
+      llvm::SmallString<128> finalName;
+      llvm::raw_svector_ostream Out(finalName);
+      Out << "call:" << callOp.getCallee()->str();
+      return Out.str().str();
+    }
+  }
+  assert(0 && "how did it get here?");
+  return "";
+}
+
+static Location getEndLoc(Location loc, int idx = 1) {
+  auto fusedLoc = loc.dyn_cast<FusedLoc>();
+  if (!fusedLoc)
+    return loc;
+  return fusedLoc.getLocations()[idx];
+}
+
+static Location getEndLocForHist(Operation *Op) {
+  return getEndLoc(Op->getLoc());
+}
+
+static Location getEndLocIf(IfOp ifOp, Region *R) {
+  assert(ifOp && "what other regions create their own scope?");
+  if (&ifOp.getThenRegion() == R)
+    return getEndLoc(ifOp.getLoc());
+  return getEndLoc(ifOp.getLoc(), /*idx=*/3);
+}
+
+static Location getEndLocForHist(Region *R) {
+  auto parentOp = R->getParentOp();
+  if (isa<IfOp>(parentOp))
+    return getEndLocIf(cast<IfOp>(parentOp), R);
+  if (isa<ScopeOp>(parentOp))
+    return getEndLoc(parentOp->getLoc());
+  llvm_unreachable("what other regions create their own scope?");
+}
+
+static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) {
+  assert(!lsc.parent.isNull() && "shouldn't be null");
+  if (lsc.parent.is<mlir::Region *>())
+    return getEndLocForHist(lsc.parent.get<mlir::Region *>());
+  assert(lsc.parent.is<mlir::Operation *>() &&
+         "Only support operation beyond this point");
+  return getEndLocForHist(lsc.parent.get<mlir::Operation *>());
+}
+
+void LifetimeCheckPass::killInPset(mlir::Value ptrKey, const State &s,
+                                   InvalidStyle invalidStyle,
+                                   mlir::Location loc,
+                                   std::optional<mlir::Value> extraVal) {
+  auto &pset = getPmap()[ptrKey];
+  if (pset.contains(s)) {
+    pset.erase(s);
+    markPsetInvalid(ptrKey, invalidStyle, loc, extraVal);
+  }
+}
+
+// 2.3 - KILL(x) means to replace all occurrences of x and x' and x'' (etc.)
+// in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})},
+// KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and
+// p2.
+void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle,
+                             mlir::Location loc) {
+  assert(s.hasValue() && "does not know how to kill other data types");
+  mlir::Value v = s.getData();
+  std::optional<mlir::Value> extraVal;
+  if (invalidStyle == InvalidStyle::EndOfScope)
+    extraVal = v;
+
+  for (auto &mapEntry : getPmap()) {
+    auto ptr = mapEntry.first;
+
+    // We are deleting this entry anyways, nothing to do here.
+    if (v == ptr)
+      continue;
+
+    // ... replace all occurrences of x and x' and x''. Start with the primes
+    // so we first remove uses and then users.
+    //
+    // FIXME: add x'', x''', etc...
+    if (s.isLocalValue() && owners.count(v))
+      killInPset(ptr, State::getOwnedBy(v), invalidStyle, loc, extraVal);
+    killInPset(ptr, s, invalidStyle, loc, extraVal);
+  }
+
+  // Delete the local value from pmap, since its scope has ended.
+  if (invalidStyle == InvalidStyle::EndOfScope) {
+    owners.erase(v);
+    ptrs.erase(v);
+    tasks.erase(v);
+    aggregates.erase(v);
+  }
+}
+
+void LifetimeCheckPass::LexicalScopeGuard::cleanup() {
+  auto *localScope = Pass.currScope;
+  for (auto pointee : localScope->localValues)
+    Pass.kill(State::getLocalValue(pointee), InvalidStyle::EndOfScope,
+              getEndLocForHist(*localScope));
+
+  // Catch interesting dangling references out of returns.
+  for (auto l : localScope->localRetLambdas)
+    Pass.checkPointerDeref(l.first, l.second, DerefStyle::RetLambda);
+}
+
+void LifetimeCheckPass::checkBlock(Block &block) {
+  // A block's main role is to hold a list of operations.
+  for (Operation &op : block.getOperations())
+    checkOperation(&op);
+}
+
+void LifetimeCheckPass::checkRegion(Region &region) {
+  for (Block &block : region.getBlocks())
+    checkBlock(block);
+}
+
+void LifetimeCheckPass::checkRegionWithScope(Region &region) {
+  // Add a new scope. Note that as part of the scope cleanup process
+  // we apply section 2.3 KILL(x) functionality, turning relevant
+  // references invalid.
+  LexicalScopeContext lexScope{&region};
+  LexicalScopeGuard scopeGuard{*this, &lexScope};
+  for (Block &block : region.getBlocks())
+    checkBlock(block);
+}
+
+void LifetimeCheckPass::checkFunc(cir::FuncOp fnOp) {
+  currFunc = fnOp;
+  // FIXME: perhaps this should be a function pass, but for now make
+  // sure we reset the state before looking at other functions.
+  if (currPmap)
+    getPmap().clear();
+  pmapNullHist.clear();
+  invalidHist.clear();
+
+  // Create a new pmap for this function.
+  PMapType localPmap{};
+  PmapGuard pmapGuard{*this, &localPmap};
+
+  // Add a new scope. Note that as part of the scope cleanup process
+  // we apply section 2.3 KILL(x) functionality, turning relevant
+  // references invalid.
+  for (Region &region : fnOp->getRegions())
+    checkRegionWithScope(region);
+
+  // FIXME: store the pmap result for this function, we
+  // could do some interesting IPA stuff using this info.
+  currFunc.reset();
+}
+
+// The join operation between pmaps as described in section 2.3:
+//
+//   JOIN({pmap1,...,pmapN}) =>
+//   { (p, pset1(p) U ... U psetN(p)) | (p,*) in pmap1 U ... U pmapN }
+//
+void LifetimeCheckPass::joinPmaps(SmallVectorImpl<PMapType> &pmaps) {
+  for (auto &mapEntry : getPmap()) {
+    auto &val = mapEntry.first;
+
+    PSetType joinPset;
+    for (auto &pmapOp : pmaps)
+      llvm::set_union(joinPset, pmapOp[val]);
+
+    getPmap()[val] = joinPset;
+  }
+}
+
+void LifetimeCheckPass::checkLoop(LoopOpInterface loopOp) {
+  // 2.4.9. Loops
+  //
+  // A loop is treated as if it were the first two loop iterations unrolled
+  // using an if. For example:
+  //
+  //   for (/*init*/; /*cond*/; /*incr*/)
+  //     { /*body*/ }
+  //
+  // is treated as:
+  //
+  //   if (/*init*/; /*cond*/)
+  //     { /*body*/; /*incr*/ }
+  //   if (/*cond*/)
+  //     { /*body*/ }
+  //
+  // See checkIf for additional explanations.
+  SmallVector<PMapType, 4> pmapOps;
+  SmallVector<Region *, 4> regionsToCheck;
+
+  auto setupLoopRegionsToCheck = [&](bool isSubsequentTaken = false) {
+    regionsToCheck = loopOp.getRegionsInExecutionOrder();
+    // Drop the step region if it exists and we are not checking the
+    // subsequent-taken path.
+    if (loopOp.maybeGetStep() && !isSubsequentTaken)
+      regionsToCheck.pop_back();
+  };
+
+  // From 2.4.9 "Note":
+  //
+  // There are only three paths to analyze:
+  // (1) never taken (the loop body was not entered)
+  pmapOps.push_back(getPmap());
+
+  // (2) first taken (the first pass through the loop body, which begins
+  //     with the loop entry pmap)
+  PMapType loopExitPmap;
+  {
+    // Intentional copy from the loop entry map.
+    loopExitPmap = getPmap();
+    PmapGuard pmapGuard{*this, &loopExitPmap};
+    setupLoopRegionsToCheck();
+    for (auto *r : regionsToCheck)
+      checkRegion(*r);
+    pmapOps.push_back(loopExitPmap);
+  }
+
+  // (3) and subsequent taken (second or later iteration, which begins with
+  //     the loop body exit pmap and so takes into account any invalidations
+  //     performed in the loop body on any path that could affect the next
+  //     loop).
+  //
+  // This ensures that a subsequent loop iteration does not use a Pointer
+  // that was invalidated during a previous loop iteration.
+  //
+  // Because this analysis gives the same answer for each block of code
+  // (always converges), all loop iterations after the first get the same
+  // answer and so we only need to consider the second iteration, and so the
+  // analysis algorithm remains linear, single-pass. As an optimization, if
+  // the loop entry pmap is the same as the first loop body exit pmap, there
+  // is no need to perform the analysis on the second loop iteration; the
+  // answer will be the same.
+  if (getPmap() != loopExitPmap) {
+    // Intentional copy from the first-taken loop exit pmap.
+    PMapType otherTakenPmap = loopExitPmap;
+    PmapGuard pmapGuard{*this, &otherTakenPmap};
+    setupLoopRegionsToCheck(/*isSubsequentTaken=*/true);
+    for (auto *r : regionsToCheck)
+      checkRegion(*r);
+    pmapOps.push_back(otherTakenPmap);
+  }
+
+  joinPmaps(pmapOps);
+}
+
+void LifetimeCheckPass::checkAwait(AwaitOp awaitOp) {
+  // Pretty conservative: assume all regions execute
+  // sequentially.
+  //
+  // FIXME: use the branch interface here and only tackle
+  // the necessary regions.
+  SmallVector<PMapType, 4> pmapOps;
+
+  for (auto r : awaitOp.getRegions()) {
+    PMapType regionPmap = getPmap();
+    PmapGuard pmapGuard{*this, &regionPmap};
+    checkRegion(*r);
+    pmapOps.push_back(regionPmap);
+  }
+
+  joinPmaps(pmapOps);
+}
+
+void LifetimeCheckPass::checkReturn(ReturnOp retOp) {
+  // Upon return, invalidate all local values. Since some return
+  // values might depend on other local addresses, check for the
+  // dangling aspects of this.
+  if (retOp.getNumOperands() == 0)
+    return;
+
+  auto retTy = retOp.getOperand(0).getType();
+  // FIXME: this can be extended to cover more leaking/dangling
+  // semantics out of functions.
+  if (!isLambdaType(retTy))
+    return;
+
+  // The return value is loaded from the return slot before
+  // returning.
+  auto loadOp = dyn_cast<LoadOp>(retOp.getOperand(0).getDefiningOp());
+  assert(loadOp && "expected cir.load");
+  if (!isa<AllocaOp>(loadOp.getAddr().getDefiningOp()))
+    return;
+
+  // Keep track of the interesting lambda.
+  assert(!currScope->localRetLambdas.count(loadOp.getAddr()) &&
+         "lambda already returned?");
+  currScope->localRetLambdas.insert(
+      std::make_pair(loadOp.getAddr(), loadOp.getLoc()));
+}
+
+void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) {
+  // 2.4.7. A switch(cond) is treated as if it were an equivalent series of
+  // non-nested if statements with single evaluation of cond; for example:
+  //
+  //   switch (a) {
+  //     case 1:/*1*/
+  //     case 2:/*2*/ break;
+  //     default:/*3*/
+  //   }
+  //
+  // is treated as:
+  //
+  //   if (auto& a=a; a==1) {/*1*/}
+  //   else if (a==1 || a==2) {/*2*/}
+  //   else {/*3*/}.
+  //
+  // See checkIf for additional explanations.
+  SmallVector<PMapType, 4> pmapOps;
+
+  // If there are no regions, the pmap is unchanged.
+  if (switchOp.getRegions().empty())
+    return;
+
+  auto isCaseFallthroughTerminated = [&](Region &r) {
+    assert(r.getBlocks().size() == 1 && "cannot yet handle branches");
+    Block &block = r.back();
+    assert(!block.empty() && "case regions cannot be empty");
+
+    // FIXME: do something special about return terminated?
+    YieldOp y = dyn_cast<YieldOp>(block.back());
+    if (!y)
+      return false;
+    return true;
+  };
+
+  auto regions = switchOp.getRegions();
+  for (unsigned regionCurrent = 0, regionPastEnd = regions.size();
+       regionCurrent != regionPastEnd; ++regionCurrent) {
+    // Intentional pmap copy, the basis to start a new path.
+    PMapType localCasePmap = getPmap();
+    PmapGuard pmapGuard{*this, &localCasePmap};
+
+    // At any given point, fallthroughs (if not empty) will increase the
+    // number of control-flow possibilities.
+    // For each region ending up with a fallthrough, keep computing the
+    // pmap until we hit a region that has a non-fallthrough terminator.
+    unsigned idx = regionCurrent;
+    while (idx < regionPastEnd) {
+      // Note that for 'if' regions we use checkRegionWithScope, since
+      // there are lexical scopes associated with each region; this is
+      // not the case for switches.
+      checkRegion(regions[idx]);
+      if (!isCaseFallthroughTerminated(regions[idx]))
+        break;
+      idx++;
+    }
+    pmapOps.push_back(localCasePmap);
+  }
+
+  joinPmaps(pmapOps);
+}
+
+void LifetimeCheckPass::checkIf(IfOp ifOp) {
+  // Both then and else create their own lexical scopes; take that into
+  // account while checking then/else.
+  //
+  // This is also the moment where pmaps are joined, because flow forks:
+  //   pmap(ifOp) = JOIN( pmap(then), pmap(else) )
+  //
+  // To that intent the pmap is copied out before checking each region and
+  // pmap(ifOp) is computed after analysing both paths.
+  SmallVector<PMapType, 2> pmapOps;
+
+  {
+    PMapType localThenPmap = getPmap();
+    PmapGuard pmapGuard{*this, &localThenPmap};
+    checkRegionWithScope(ifOp.getThenRegion());
+    pmapOps.push_back(localThenPmap);
+  }
+
+  // In case there's no 'else' branch, the 'else' pmap is the same as
+  // prior to the if condition.
+  if (!ifOp.getElseRegion().empty()) {
+    PMapType localElsePmap = getPmap();
+    PmapGuard pmapGuard{*this, &localElsePmap};
+    checkRegionWithScope(ifOp.getElseRegion());
+    pmapOps.push_back(localElsePmap);
+  } else {
+    pmapOps.push_back(getPmap());
+  }
+
+  joinPmaps(pmapOps);
+}
+
+template <typename T> bool isStructAndHasAttr(mlir::Type ty) {
+  if (!ty.isa<StructType>())
+    return false;
+  return hasAttr<T>(ty.cast<StructType>().getAst());
+}
+
+static bool isOwnerType(mlir::Type ty) {
+  // From 2.1:
+  //
+  // An Owner uniquely owns another object (cannot dangle). An Owner type is
+  // expressed using the annotation [[gsl::Owner(DerefType)]] where DerefType
+  // is the owned type (and (DerefType) may be omitted and deduced as below).
+  // For example:
+  //
+  //   template <class T> class [[gsl::Owner(T)]] my_unique_smart_pointer;
+  //
+  // TODO: The following standard or other types are treated as-if annotated
+  // as Owners, if not otherwise annotated and if not SharedOwners:
+  //
+  // - Every type that satisfies the standard Container requirements and has
+  //   a user-provided destructor. (Example: vector.) DerefType is
+  //   ::value_type.
+  // - Every type that provides unary * and has a user-provided destructor.
+  //   (Example: unique_ptr.) DerefType is the ref-unqualified return type of
+  //   operator*.
+  // - Every type that has a data member or public base class of an Owner
+  //   type.
+  // Additionally, for convenient adoption without modifying existing
+  // standard library headers, the following well known standard types are
+  // treated as-if annotated as Owners: stack, queue, priority_queue,
+  // optional, variant, any, and regex.
+  return isStructAndHasAttr<clang::OwnerAttr>(ty);
+}
+
+static bool containsPointerElts(mlir::cir::StructType s) {
+  auto members = s.getMembers();
+  return std::any_of(members.begin(), members.end(), [](mlir::Type t) {
+    return t.isa<mlir::cir::PointerType>();
+  });
+}
+
+static bool isAggregateType(LifetimeCheckPass *pass, mlir::Type agg) {
+  auto t = agg.dyn_cast<mlir::cir::StructType>();
+  if (!t)
+    return false;
+  // Lambdas have their special handling, and shall not be considered as
+  // aggregate types.
+  if (pass->isLambdaType(agg))
+    return false;
+  // FIXME: For now we handle this in a more naive way: any pointer
+  // element we find is enough to consider this an aggregate.
+  // But in reality it should be as defined in 2.1:
+  //
+  // An Aggregate is a type that is not an Indirection and is a class type
+  // with public data members none of which are references (& or &&) and no
+  // user-provided copy or move operations, and no base class that is not
+  // also an Aggregate. The elements of an Aggregate are its public data
+  // members.
+  return containsPointerElts(t);
+}
+
+static bool isPointerType(mlir::Type t) {
+  // From 2.1:
+  //
+  // A Pointer is not an Owner and provides indirect access to an object it
+  // does not own (can dangle). A Pointer type is expressed using the
+  // annotation [[gsl::Pointer(DerefType)]] where DerefType is the pointed-to
+  // type (and (DerefType) may be omitted and deduced as below). For example:
+  //
+  //   template <class T> class [[gsl::Pointer(T)]] my_span;
+  //
+  // TODO: The following standard or other types are treated as-if annotated
+  // as Pointer, if not otherwise annotated and if not Owners:
+  //
+  // - Every type that satisfies the standard Iterator requirements.
+  //   (Example: regex_iterator.) DerefType is the ref-unqualified return
+  //   type of operator*.
+  // - Every type that satisfies the Ranges TS Range concept. (Example:
+  //   basic_string_view.) DerefType is the ref-unqualified type of *begin().
+  // - Every type that satisfies the following concept. DerefType is the
+  //   ref-unqualified return type of operator*.
+  //
+  //     template <class T> concept
+  //     TriviallyCopyableAndNonOwningAndDereferenceable =
+  //         std::is_trivially_copyable_v<T> &&
+  //         std::is_copy_constructible_v<T> &&
+  //         std::is_copy_assignable_v<T> && requires(T t) { *t; };
+  //
+  // - Every closure type of a lambda that captures by reference or captures
+  //   a Pointer by value. DerefType is void.
+  // - Every type that has a data member or public base class of a Pointer
+  //   type.
+  // Additionally, for convenient adoption without modifying existing
+  // standard library headers, the following well-known standard types are
+  // treated as-if annotated as Pointers, in addition to raw pointers and
+  // references: reference_wrapper, and vector<bool>::reference.
+  if (t.isa<mlir::cir::PointerType>())
+    return true;
+  return isStructAndHasAttr<clang::PointerAttr>(t);
+}
+
+void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr,
+                                                      mlir::Type t,
+                                                      mlir::Location loc,
+                                                      unsigned nestLevel) {
+  // The same alloca can be hit more than once when checking for dangling
+  // pointers out of subsequent loop iterations (e.g. the second iteration
+  // using a pointer invalidated in the first run). Since we copy the pmap
+  // out to start those subsequent checks, make sure we skip allocas that
+  // are already tracked.
+  if (getPmap().count(addr))
+    return;
+  getPmap()[addr] = {};
+
+  enum TypeCategory {
+    Unknown = 0,
+    SharedOwner = 1,
+    Owner = 1 << 2,
+    Pointer = 1 << 3,
+    Indirection = 1 << 4,
+    Aggregate = 1 << 5,
+    Value = 1 << 6,
+  };
+
+  auto localStyle = [&]() {
+    if (isPointerType(t))
+      return TypeCategory::Pointer;
+    if (isOwnerType(t))
+      return TypeCategory::Owner;
+    if (isAggregateType(this, t))
+      return TypeCategory::Aggregate;
+    return TypeCategory::Value;
+  }();
+
+  switch (localStyle) {
+  case TypeCategory::Pointer:
+    // 2.4.2 - When a non-parameter non-member Pointer p is declared, add
+    // (p, {invalid}) to pmap.
+    ptrs.insert(addr);
+    markPsetInvalid(addr, InvalidStyle::NotInitialized, loc);
+    break;
+  case TypeCategory::Owner:
+    // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap.
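+    // e.g. (illustrative) for a local `std::unique_ptr<int> p` this yields
+    //   pset(p) = { p__1' }
+    // in p1179 notation: p points at its own first-generation owned pointee.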
+    addOwner(addr);
+    getPmap()[addr].insert(State::getOwnedBy(addr));
+    currScope->localValues.insert(addr);
+    break;
+  case TypeCategory::Aggregate: {
+    // 2.1 - Aggregates are types we will “explode” (consider memberwise) at
+    // local scopes, because the function can operate on the members directly.
+
+    // TODO: only track the first level of aggregate subobjects for now; get
+    // some data before we increase this.
+    if (nestLevel > 1)
+      break;
+
+    // Map the value of each member to its index in the aggregate.
+    auto members = t.cast<StructType>().getMembers();
+    SmallVector<mlir::Value, 4> fieldVals;
+    fieldVals.assign(members.size(), {});
+
+    // Go through uses of the alloca via `cir.struct_element_addr`, and
+    // track only the fields that are actually used.
+    std::for_each(addr.use_begin(), addr.use_end(), [&](mlir::OpOperand &use) {
+      auto op = dyn_cast<GetMemberOp>(use.getOwner());
+      if (!op)
+        return;
+
+      auto eltAddr = op.getResult();
+      // If nothing is using this GetMemberOp, don't bother, since
+      // it could lead to even more noisy outcomes.
+      if (eltAddr.use_empty())
+        return;
+
+      auto eltTy = eltAddr.getType().cast<PointerType>().getPointee();
+
+      // Classify exploded types. Keep the alloca's original location.
+      classifyAndInitTypeCategories(eltAddr, eltTy, loc, ++nestLevel);
+      fieldVals[op.getIndex()] = eltAddr;
+    });
+
+    // In case this aggregate gets initialized at once, the fields need
+    // to be mapped to the element values.
+    addAggregate(addr, fieldVals);
+
+    // There might be pointers to this aggregate, so also make a value
+    // for it.
+    LLVM_FALLTHROUGH;
+  }
+  case TypeCategory::Value: {
+    // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap.
+    getPmap()[addr].insert(State::getLocalValue(addr));
+    currScope->localValues.insert(addr);
+    break;
+  }
+  default:
+    llvm_unreachable("NYI");
+  }
+}
+
+void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) {
+  classifyAndInitTypeCategories(allocaOp.getAddr(), allocaOp.getAllocaType(),
+                                allocaOp.getLoc(), /*nestLevel=*/0);
+}
+
+void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) {
+  // Given:
+  //   auto task = [init task];
+  // Extend pset(task) such that:
+  //   pset(task) = pset(task) U {any local values used to init task}
+  auto taskTmp = storeOp.getValue();
+  // FIXME: check its initialization 'init' attr.
+  auto taskAddr = storeOp.getAddr();
+
+  // Take the following coroutine creation pattern:
+  //
+  //   %task = cir.alloca ...
+  //   cir.scope {
+  //     %arg0 = cir.alloca ...
+  //     ...
+  //     %tmp_task = cir.call @coroutine_call(%arg0, %arg1, ...)
+  //     cir.store %tmp_task, %task
+  //     ...
+  //   }
+  //
+  // Bind values that are coming from allocas (like %arg0 above) to the
+  // pset of %task - this effectively leads to some invalidation of %task
+  // when %arg0 finishes its lifetime at the end of the enclosing cir.scope.
+  if (auto call = dyn_cast<CallOp>(taskTmp.getDefiningOp())) {
+    bool potentialTaintedTask = false;
+    for (auto arg : call.getArgOperands()) {
+      auto alloca = dyn_cast<AllocaOp>(arg.getDefiningOp());
+      if (alloca && currScope->localValues.count(alloca)) {
+        getPmap()[taskAddr].insert(State::getLocalValue(alloca));
+        potentialTaintedTask = true;
+      }
+    }
+
+    // Tasks are only interesting when there are local addresses leaking
+    // via the coroutine creation, so only track those.
+    if (potentialTaintedTask)
+      tasks.insert(taskAddr);
+    return;
+  }
+  llvm_unreachable("expecting cir.call defining op");
+}
+
+mlir::Value LifetimeCheckPass::getLambdaFromMemberAccess(mlir::Value addr) {
+  auto op = addr.getDefiningOp();
+  // FIXME: we likely want to consider more indirections here...
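+  // Matches shapes like (illustrative):
+  //   %lambda = cir.alloca !ty_lambda ...
+  //   %capture = cir.get_member %lambda[...] ...
+  // and returns the underlying %lambda alloca, if any.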
+  if (!isa<GetMemberOp>(op))
+    return nullptr;
+  auto allocaOp = dyn_cast<AllocaOp>(op->getOperand(0).getDefiningOp());
+  if (!allocaOp || !isLambdaType(allocaOp.getAllocaType()))
+    return nullptr;
+  return allocaOp;
+}
+
+void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) {
+  auto localByRefAddr = storeOp.getValue();
+  auto lambdaCaptureAddr = storeOp.getAddr();
+
+  if (!isa_and_nonnull<AllocaOp>(localByRefAddr.getDefiningOp()))
+    return;
+  auto lambdaAddr = getLambdaFromMemberAccess(lambdaCaptureAddr);
+  if (!lambdaAddr)
+    return;
+
+  if (currScope->localValues.count(localByRefAddr))
+    getPmap()[lambdaAddr].insert(State::getLocalValue(localByRefAddr));
+}
+
+void LifetimeCheckPass::updatePointsToForConstStruct(
+    mlir::Value addr, mlir::cir::ConstStructAttr value, mlir::Location loc) {
+  assert(aggregates.count(addr) && "expected association with aggregate");
+  int memberIdx = 0;
+  for (auto &attr : value.getMembers()) {
+    auto ta = attr.dyn_cast<mlir::TypedAttr>();
+    assert(ta && "expected typed attribute");
+    auto fieldAddr = aggregates[addr][memberIdx];
+    // Unseen fields are not tracked.
+    if (fieldAddr && ta.getType().isa<mlir::cir::PointerType>()) {
+      assert(ta.isa<mlir::cir::ConstPtrAttr>() &&
+             "other than null not implemented");
+      markPsetNull(fieldAddr, loc);
+    }
+    memberIdx++;
+  }
+}
+
+void LifetimeCheckPass::updatePointsToForZeroStruct(mlir::Value addr,
+                                                    StructType sTy,
+                                                    mlir::Location loc) {
+  assert(aggregates.count(addr) && "expected association with aggregate");
+  int memberIdx = 0;
+  for (auto &t : sTy.getMembers()) {
+    auto fieldAddr = aggregates[addr][memberIdx];
+    // Unseen fields are not tracked.
+    if (fieldAddr && t.isa<mlir::cir::PointerType>()) {
+      markPsetNull(fieldAddr, loc);
+    }
+    memberIdx++;
+  }
+}
+
+static mlir::Operation *ignoreBitcasts(mlir::Operation *op) {
+  while (auto bitcast = dyn_cast<CastOp>(op)) {
+    if (bitcast.getKind() != CastKind::bitcast)
+      return op;
+    auto b = bitcast.getSrc().getDefiningOp();
+    // Do not handle block arguments just yet.
+    if (!b)
+      return op;
+    op = b;
+  }
+  return op;
+}
+
+void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data,
+                                       mlir::Location loc) {
+
+  auto getArrayFromSubscript = [&](PtrStrideOp strideOp) -> mlir::Value {
+    auto castOp = dyn_cast<CastOp>(strideOp.getBase().getDefiningOp());
+    if (!castOp)
+      return {};
+    if (castOp.getKind() != cir::CastKind::array_to_ptrdecay)
+      return {};
+    return castOp.getSrc();
+  };
+
+  auto dataSrcOp = data.getDefiningOp();
+
+  // Handle function arguments, but not all block arguments just yet.
+  if (!dataSrcOp) {
+    auto blockArg = cast<mlir::BlockArgument>(data);
+    if (!blockArg.getOwner()->isEntryBlock())
+      return;
+    getPmap()[addr].clear();
+    getPmap()[addr].insert(State::getLocalValue(data));
+    return;
+  }
+
+  // Ignore chains of bitcasts and update the data source. Note that when
+  // dataSrcOp gets updated, `data` might not be the most updated resource
+  // to use, so avoid using it directly, and instead get things from the
+  // newer dataSrcOp.
+  dataSrcOp = ignoreBitcasts(dataSrcOp);
+
+  // 2.4.2 - If the declaration includes an initialization, the
+  // initialization is treated as a separate operation.
+  if (auto cstOp = dyn_cast<ConstantOp>(dataSrcOp)) {
+    // Aggregates can be bulk materialized in CIR; handle the proper update
+    // of the individual exploded fields.
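+    // e.g. (illustrative) `struct S { int *p; int v; } s = {nullptr, 42};`
+    // can reach here as a single cir.const #cir.const_struct initializer for
+    // s, in which case each tracked field's pset is updated individually.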
+    if (aggregates.count(addr)) {
+      if (auto constStruct =
+              cstOp.getValue().dyn_cast<mlir::cir::ConstStructAttr>()) {
+        updatePointsToForConstStruct(addr, constStruct, loc);
+        return;
+      }
+
+      if (auto zero = cstOp.getValue().dyn_cast<mlir::cir::ZeroAttr>()) {
+        if (auto zeroStructTy = zero.getType().dyn_cast<StructType>()) {
+          updatePointsToForZeroStruct(addr, zeroStructTy, loc);
+          return;
+        }
+      }
+      return;
+    }
+
+    assert(cstOp.isNullPtr() && "other than null not implemented");
+    assert(getPmap().count(addr) && "address should always be valid");
+    // 2.4.2 - If the initialization is default initialization or zero
+    // initialization, set pset(p) = {null}; for example:
+    //
+    //   int* p; => pset(p) == {invalid}
+    //   int* p{}; or string_view p; => pset(p) == {null}.
+    //   int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null}
+    markPsetNull(addr, loc);
+    return;
+  }
+
+  if (auto allocaOp = dyn_cast<AllocaOp>(dataSrcOp)) {
+    // p = &x;
+    getPmap()[addr].clear();
+    getPmap()[addr].insert(State::getLocalValue(allocaOp.getAddr()));
+    return;
+  }
+
+  if (auto ptrStrideOp = dyn_cast<PtrStrideOp>(dataSrcOp)) {
+    // p = &a[0];
+    auto array = getArrayFromSubscript(ptrStrideOp);
+    if (array) {
+      getPmap()[addr].clear();
+      getPmap()[addr].insert(State::getLocalValue(array));
+    }
+    return;
+  }
+
+  // Initializes ptr types out of known lib calls marked with pointer
+  // attributes. TODO: find a better way to tag this.
+  if (auto callOp = dyn_cast<CallOp>(dataSrcOp)) {
+    // iter = vector::begin()
+    getPmap()[addr].clear();
+    getPmap()[addr].insert(State::getLocalValue(callOp.getResult(0)));
+  }
+
+  if (auto loadOp = dyn_cast<LoadOp>(dataSrcOp)) {
+    // Handle indirections through a load; a common example is temporaries
+    // copying the 'this' param to a subsequent call.
+    updatePointsTo(addr, loadOp.getAddr(), loc);
+    return;
+  }
+
+  // What should we add next?
+}
+
+void LifetimeCheckPass::checkStore(StoreOp storeOp) {
+  auto addr = storeOp.getAddr();
+
+  // Decompose stores to aggregates into multiple updates to individual fields.
+  if (aggregates.count(addr)) {
+    auto data = storeOp.getValue();
+    auto dataSrcOp = data.getDefiningOp();
+    // Only interested in updating and tracking fields, anything besides
+    // constants isn't really relevant.
+    if (dataSrcOp && isa<ConstantOp>(dataSrcOp))
+      updatePointsTo(addr, data, data.getLoc());
+    return;
+  }
+
+  // The bulk of the check is done on top of stores to pointer categories,
+  // which usually represent the most common case.
+  //
+  // We handle some special local values, like coroutine tasks and lambdas,
+  // which could be holding references to things with dangling lifetime.
+  if (!ptrs.count(addr)) {
+    if (currScope->localTempTasks.count(storeOp.getValue()))
+      checkCoroTaskStore(storeOp);
+    else
+      checkLambdaCaptureStore(storeOp);
+    return;
+  }
+
+  // Only handle ptrs from here on.
+  updatePointsTo(addr, storeOp.getValue(), storeOp.getValue().getLoc());
+}
+
+void LifetimeCheckPass::checkLoad(LoadOp loadOp) {
+  auto addr = loadOp.getAddr();
+  // Only interested in checking dereferences on top of pointer types.
+  // Note that usually the use of the invalid address happens at the
+  // load or store using the result of this loadOp.
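+  // e.g. (illustrative annotation): in `int *p; int x = *p;` the bad access
+  // surfaces as a cir.load marked as a dereference of %p's value, which is
+  // exactly what is filtered for below.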
+  if (!getPmap().count(addr) || !ptrs.count(addr))
+    return;
+
+  if (!loadOp.getIsDeref())
+    return;
+
+  checkPointerDeref(addr, loadOp.getLoc());
+}
+
+void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D,
+                                           mlir::Value histKey,
+                                           mlir::Location warningLoc,
+                                           DerefStyle derefStyle) {
+  assert(invalidHist.count(histKey) && "expected invalid hist");
+  auto &hist = invalidHist[histKey];
+  unsigned limit = opts.histLimit;
+
+  for (int lastIdx = hist.entries.size() - 1; limit > 0 && lastIdx >= 0;
+       lastIdx--, limit--) {
+    auto &info = hist.entries[lastIdx];
+
+    switch (info.style) {
+    case InvalidStyle::NotInitialized: {
+      D.attachNote(info.loc) << "uninitialized here";
+      break;
+    }
+    case InvalidStyle::EndOfScope: {
+      if (tasks.count(histKey)) {
+        StringRef resource = "resource";
+        if (auto allocaOp = dyn_cast<AllocaOp>(info.val->getDefiningOp())) {
+          if (isLambdaType(allocaOp.getAllocaType()))
+            resource = "lambda";
+        }
+        D.attachNote((*info.val).getLoc())
+            << "coroutine bound to " << resource << " with expired lifetime";
+        D.attachNote(info.loc) << "at the end of scope or full-expression";
+      } else if (derefStyle == DerefStyle::RetLambda) {
+        assert(currFunc && "expected function");
+        StringRef parent = currFunc->getLambda() ? "lambda" : "function";
+        D.attachNote(info.val->getLoc())
+            << "declared here but invalid after enclosing " << parent
+            << " ends";
+      } else {
+        auto outOfScopeVarName = getVarNameFromValue(*info.val);
+        D.attachNote(info.loc) << "pointee '" << outOfScopeVarName
+                               << "' invalidated at end of scope";
+      }
+      break;
+    }
+    case InvalidStyle::NonConstUseOfOwner: {
+      D.attachNote(info.loc) << "invalidated by non-const use of owner type";
+      break;
+    }
+    default:
+      llvm_unreachable("unknown history style");
+    }
+  }
+}
+
+void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc,
+                                          DerefStyle derefStyle) {
+  bool hasInvalid = getPmap()[addr].count(State::getInvalid());
+  bool hasNullptr = getPmap()[addr].count(State::getNullPtr());
+
+  auto emitPsetRemark = [&] {
+    llvm::SmallString<128> psetStr;
+    llvm::raw_svector_ostream Out(psetStr);
+    printPset(getPmap()[addr], Out);
+    emitRemark(loc) << "pset => " << Out.str();
+  };
+
+  // Do not emit the same warning more than once.
+  if (emittedDiagnostics.count(loc))
+    return;
+
+  bool psetRemarkEmitted = false;
+  if (opts.emitRemarkPsetAlways()) {
+    emitPsetRemark();
+    psetRemarkEmitted = true;
+  }
+
+  // 2.4.2 - On every dereference of a Pointer p, enforce that p is valid.
+  if (!hasInvalid && !hasNullptr)
+    return;
+
+  // TODO: create verbosity/accuracy levels, for now use deref styles directly
+  // to decide when not to emit a warning.
+
+  // For indirect calls, do not rely on blunt nullptr passing; require some
+  // invalidation to have happened in a path.
+  if (derefStyle == DerefStyle::IndirectCallParam && !hasInvalid)
+    return;
+
+  // OK, we've filtered out questionable warnings; take the bad path leading
+  // to this dereference point and diagnose it.
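+  // The resulting diagnostic chain typically reads (illustrative):
+  //
+  //   warning: use of invalid pointer 'p'
+  //   note: pointee 'x' invalidated at end of scope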
+ auto varName = getVarNameFromValue(addr); + auto D = emitWarning(loc); + emittedDiagnostics.insert(loc); + + if (tasks.count(addr)) + D << "use of coroutine '" << varName << "' with dangling reference"; + else if (derefStyle == DerefStyle::RetLambda) + D << "returned lambda captures local variable"; + else if (derefStyle == DerefStyle::CallParam || + derefStyle == DerefStyle::IndirectCallParam) { + bool isAgg = isa_and_nonnull(addr.getDefiningOp()); + D << "passing "; + if (!isAgg) + D << "invalid pointer"; + else + D << "aggregate containing invalid pointer member"; + D << " '" << varName << "'"; + } else + D << "use of invalid pointer '" << varName << "'"; + + // TODO: add accuracy levels, different combinations of invalid and null + // could have different ratios of false positives. + if (hasInvalid && opts.emitHistoryInvalid()) + emitInvalidHistory(D, addr, loc, derefStyle); + + if (hasNullptr && opts.emitHistoryNull()) { + assert(pmapNullHist.count(addr) && "expected nullptr hist"); + auto ¬e = pmapNullHist[addr]; + D.attachNote(*note) << "'nullptr' invalidated here"; + } + + if (!psetRemarkEmitted && opts.emitRemarkPsetInvalid()) + emitPsetRemark(); +} + +static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { + auto global = mlir::SymbolTable::lookupSymbolIn(mod, name); + assert(global && "expected to find symbol for function"); + return dyn_cast(global); +} + +static const ASTCXXMethodDeclInterface getMethod(ModuleOp mod, CallOp callOp) { + if (!callOp.getCallee()) + return nullptr; + StringRef name = *callOp.getCallee(); + auto method = getCalleeFromSymbol(mod, name); + if (!method || method.getBuiltin()) + return nullptr; + return dyn_cast(method.getAstAttr()); +} + +mlir::Value LifetimeCheckPass::getThisParamPointerCategory(CallOp callOp) { + auto thisptr = callOp.getArgOperand(0); + if (ptrs.count(thisptr)) + return thisptr; + if (auto loadOp = dyn_cast_or_null(thisptr.getDefiningOp())) { + if (ptrs.count(loadOp.getAddr())) + return loadOp.getAddr(); + } + // TODO: add a remark to spot 'this' indirections we currently not track. + return {}; +} + +mlir::Value LifetimeCheckPass::getThisParamOwnerCategory(CallOp callOp) { + auto thisptr = callOp.getArgOperand(0); + if (owners.count(thisptr)) + return thisptr; + if (auto loadOp = dyn_cast_or_null(thisptr.getDefiningOp())) { + if (owners.count(loadOp.getAddr())) + return loadOp.getAddr(); + } + // TODO: add a remark to spot 'this' indirections we currently not track. + return {}; +} + +void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, + ASTCXXMethodDeclInterface m) { + // MyPointer::operator=(MyPointer&&)(%dst, %src) + // or + // MyOwner::operator=(MyOwner&&)(%dst, %src) + auto dst = getThisParamPointerCategory(callOp); + auto src = callOp.getArgOperand(1); + + // Move assignments between pointer categories. + if (dst && ptrs.count(src)) { + // Note that the current pattern here usually comes from a xvalue in src + // where all the initialization is done, and this move assignment is + // where we finally materialize it back to the original pointer category. + getPmap()[dst] = getPmap()[src]; + + // 2.4.2 - It is an error to use a moved-from object. + // To that intent we mark src's pset with invalid. + markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc()); + return; + } + + // Copy assignments between owner categories. + dst = getThisParamOwnerCategory(callOp); + if (dst && owners.count(src)) { + // Handle as a non const use of owner, invalidating pointers. 
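+    // e.g. (illustrative annotation): after `o2 = std::move(o1);` pointers
+    // whose pset referenced o2's owned storage get invalidated, and o1
+    // itself is marked moved-from right after.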
+    checkNonConstUseOfOwner(dst, callOp.getLoc());
+
+    // 2.4.2 - It is an error to use a moved-from object.
+    // To that intent we mark src's pset as invalid.
+    markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc());
+  }
+}
+
+void LifetimeCheckPass::checkCopyAssignment(CallOp callOp,
+                                            ASTCXXMethodDeclInterface m) {
+  // MyIntOwner::operator=(MyIntOwner&)(%dst, %src)
+  auto dst = getThisParamOwnerCategory(callOp);
+  auto src = callOp.getArgOperand(1);
+
+  // Copy assignment between owner categories.
+  if (dst && owners.count(src))
+    return checkNonConstUseOfOwner(dst, callOp.getLoc());
+
+  // Copy assignment between pointer categories.
+  dst = getThisParamPointerCategory(callOp);
+  if (dst && ptrs.count(src)) {
+    getPmap()[dst] = getPmap()[src];
+    return;
+  }
+}
+
+// User defined ctors that initialize from owner types are one
+// way of tracking owned pointers.
+//
+// Example:
+//   MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4)
+//
+bool LifetimeCheckPass::isCtorInitPointerFromOwner(CallOp callOp) {
+  if (callOp.getNumArgOperands() < 2)
+    return false;
+
+  // FIXME: should we scan all arguments past the first to look for an owner?
+  auto ptr = getThisParamPointerCategory(callOp);
+  auto owner = callOp.getArgOperand(1);
+
+  if (ptr && owners.count(owner))
+    return true;
+
+  return false;
+}
+
+void LifetimeCheckPass::checkCtor(CallOp callOp,
+                                  ASTCXXConstructorDeclInterface ctor) {
+  // TODO: zero init
+  // 2.4.2 - If the initialization is default initialization or zero
+  // initialization, example:
+  //
+  //   int* p{};
+  //   string_view p;
+  //
+  // both result in pset(p) == {null}
+  if (ctor.isDefaultConstructor()) {
+    // The first argument passed is always the alloca for the 'this' ptr.
+
+    // Currently two possible actions:
+    // 1. Skip Owner category initialization.
+    // 2. Initialize Pointer categories.
+    auto addr = getThisParamOwnerCategory(callOp);
+    if (addr)
+      return;
+
+    addr = getThisParamPointerCategory(callOp);
+    if (!addr)
+      return;
+
+    // Not interested in block/function arguments or any indirectly
+    // provided alloca address.
+    if (!dyn_cast_or_null<AllocaOp>(addr.getDefiningOp()))
+      return;
+
+    markPsetNull(addr, callOp.getLoc());
+    return;
+  }
+
+  // User defined copy ctor calls ...
+  if (ctor.isCopyConstructor()) {
+    llvm_unreachable("NYI");
+  }
+
+  if (isCtorInitPointerFromOwner(callOp)) {
+    auto addr = getThisParamPointerCategory(callOp);
+    assert(addr && "expected pointer category");
+    auto owner = callOp.getArgOperand(1);
+    getPmap()[addr].clear();
+    getPmap()[addr].insert(State::getOwnedBy(owner));
+    return;
+  }
+}
+
+void LifetimeCheckPass::checkOperators(CallOp callOp,
+                                       ASTCXXMethodDeclInterface m) {
+  auto addr = getThisParamOwnerCategory(callOp);
+  if (addr) {
+    // Const access to the owner is fine.
+    if (m.isConst())
+      return;
+    // TODO: this is a place where we can hook in some idiom recognition
+    // so we don't need actual source code annotations to make assumptions
+    // about methods we understand and know to behave nicely.
+    //
+    // In P1179, section 2.5.7.12, the use of [[gsl::lifetime_const]] is
+    // suggested, but it's not part of clang (will it ever be?).
+    return checkNonConstUseOfOwner(addr, callOp.getLoc());
+  }
+
+  addr = getThisParamPointerCategory(callOp);
+  if (addr) {
+    // The assumption is that method calls on pointer types should trigger
+    // deref checking.
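+    // e.g. (illustrative annotation): `*it` or `it->m` on an iterator whose
+    // owner was mutated resolves to operator*()/operator->() calls and
+    // lands here.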
+ checkPointerDeref(addr, callOp.getLoc()); + return; + } + + // FIXME: we also need to look at operators from non owner or pointer + // types that could be using Owner/Pointer types as parameters. +} + +mlir::Value +LifetimeCheckPass::getNonConstUseOfOwner(CallOp callOp, + ASTCXXMethodDeclInterface m) { + if (m.isConst()) + return {}; + return getThisParamOwnerCategory(callOp); +} + +void LifetimeCheckPass::checkNonConstUseOfOwner(mlir::Value ownerAddr, + mlir::Location loc) { + // 2.4.2 - On every non-const use of a local Owner o: + // + // - For each entry e in pset(s): Remove e from pset(s), and if no other + // Owner’s pset contains only e, then KILL(e). + kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, loc); + + // - Set pset(o) = {o__N'}, where N is one higher than the highest + // previously used suffix. For example, initially pset(o) is {o__1'}, on + // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const + // use pset(o) becomes {o__3'}, and so on. + incOwner(ownerAddr); + return; +} + +void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, + unsigned firstArgIdx) { + auto numOperands = callOp.getNumArgOperands(); + if (firstArgIdx >= numOperands) + return; + + llvm::SmallSetVector ownersToInvalidate, ptrsToDeref; + for (unsigned i = firstArgIdx, e = numOperands; i != e; ++i) { + auto arg = callOp.getArgOperand(i); + // FIXME: apply p1179 rules as described in 2.5. Very conservative for now: + // + // - Owners: always invalidate. + // - Pointers: always check for deref. + // - Coroutine tasks: check the task for deref when calling methods of + // the task, but also when the passing the task around to other functions. + // - Aggregates: check ptr subelements for deref. + // + // FIXME: even before 2.5 we should only invalidate non-const param types. + if (owners.count(arg)) + ownersToInvalidate.insert(arg); + if (ptrs.count(arg)) + ptrsToDeref.insert(arg); + if (tasks.count(arg)) + ptrsToDeref.insert(arg); + if (aggregates.count(arg)) { + int memberIdx = 0; + auto sTy = + arg.getType().cast().getPointee().dyn_cast(); + assert(sTy && "expected struct type"); + for (auto m : sTy.getMembers()) { + auto ptrMemberAddr = aggregates[arg][memberIdx]; + if (m.isa() && ptrMemberAddr) { + ptrsToDeref.insert(ptrMemberAddr); + } + memberIdx++; + } + } + } + + // FIXME: CIR should track source info on the passed args, so we can get + // accurate location for why the invalidation happens. + for (auto o : ownersToInvalidate) + checkNonConstUseOfOwner(o, callOp.getLoc()); + for (auto p : ptrsToDeref) + checkPointerDeref(p, callOp.getLoc(), + callOp.getCallee() ? DerefStyle::CallParam + : DerefStyle::IndirectCallParam); +} + +void LifetimeCheckPass::checkOtherMethodsAndFunctions( + CallOp callOp, ASTCXXMethodDeclInterface m) { + unsigned firstArgIdx = 0; + + // Looks at a method 'this' pointer: + // - If a method call to a class we consider interesting, like a method + // call on a coroutine task (promise_type). + // - Skip the 'this' for any other method. + if (m && !tasks.count(callOp.getArgOperand(firstArgIdx))) + firstArgIdx++; + checkForOwnerAndPointerArguments(callOp, firstArgIdx); +} + +bool LifetimeCheckPass::isOwnerOrPointerClassMethod( + CallOp callOp, ASTCXXMethodDeclInterface m) { + // For the sake of analysis, these behave like regular functions + if (!m || m.isStatic()) + return false; + // Check the object for owner/pointer by looking at the 'this' pointer. 
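+  // e.g. (illustrative annotation): `o.reset()` on an owner category or a
+  // call through operator*() on a pointer category qualifies; a static
+  // method never does.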
+  return getThisParamPointerCategory(callOp) ||
+         getThisParamOwnerCategory(callOp);
+}
+
+bool LifetimeCheckPass::isLambdaType(mlir::Type ty) {
+  if (IsLambdaTyCache.count(ty))
+    return IsLambdaTyCache[ty];
+
+  IsLambdaTyCache[ty] = false;
+  auto taskTy = ty.dyn_cast<StructType>();
+  if (!taskTy)
+    return false;
+  if (taskTy.getAst().isLambda())
+    IsLambdaTyCache[ty] = true;
+
+  return IsLambdaTyCache[ty];
+}
+
+bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) {
+  auto ty = taskVal.getType();
+  if (IsTaskTyCache.count(ty))
+    return IsTaskTyCache[ty];
+
+  bool result = [&] {
+    auto taskTy = taskVal.getType().dyn_cast<StructType>();
+    if (!taskTy)
+      return false;
+    return taskTy.getAst().hasPromiseType();
+  }();
+
+  IsTaskTyCache[ty] = result;
+  return result;
+}
+
+void LifetimeCheckPass::trackCallToCoroutine(CallOp callOp) {
+  if (auto fnName = callOp.getCallee()) {
+    auto calleeFuncOp = getCalleeFromSymbol(theModule, *fnName);
+    if (calleeFuncOp &&
+        (calleeFuncOp.getCoroutine() ||
+         (calleeFuncOp.isDeclaration() && callOp->getNumResults() > 0 &&
+          isTaskType(callOp->getResult(0))))) {
+      currScope->localTempTasks.insert(callOp->getResult(0));
+    }
+    return;
+  }
+  // Handle indirect calls to coroutines, for instance when
+  // lambda coroutines are involved with invokers.
+  if (callOp->getNumResults() > 0 && isTaskType(callOp->getResult(0))) {
+    // FIXME: get more guarantees to prevent false positives (perhaps
+    // apply some tracking analysis before this pass and check for lambda
+    // idioms).
+    currScope->localTempTasks.insert(callOp->getResult(0));
+  }
+}
+
+void LifetimeCheckPass::checkCall(CallOp callOp) {
+  if (callOp.getNumArgOperands() == 0)
+    return;
+
+  // Identify calls to coroutines and track returning temporary task types.
+  //
+  // Note that we can't reliably know if a function is a coroutine based
+  // only on its declaration.
+  trackCallToCoroutine(callOp);
+
+  auto methodDecl = getMethod(theModule, callOp);
+  if (!isOwnerOrPointerClassMethod(callOp, methodDecl))
+    return checkOtherMethodsAndFunctions(callOp, methodDecl);
+
+  // From this point on, only owner and pointer class methods are handled,
+  // starting with the special member functions.
+  if (auto ctor = dyn_cast<ASTCXXConstructorDeclInterface>(methodDecl))
+    return checkCtor(callOp, ctor);
+  if (methodDecl.isMoveAssignmentOperator())
+    return checkMoveAssignment(callOp, methodDecl);
+  if (methodDecl.isCopyAssignmentOperator())
+    return checkCopyAssignment(callOp, methodDecl);
+  if (methodDecl.isOverloadedOperator())
+    return checkOperators(callOp, methodDecl);
+
+  // For any other methods...
+
+  // A non-const member call to an Owner invalidates any of its users.
+  if (auto owner = getNonConstUseOfOwner(callOp, methodDecl)) {
+    return checkNonConstUseOfOwner(owner, callOp.getLoc());
+  }
+
+  // Given pset(Ptr) = { Owner' } where Owner got invalidated, using any of
+  // Ptr's methods becomes an invalid access to Ptr.
+  auto addr = getThisParamPointerCategory(callOp);
+  if (addr)
+    return checkPointerDeref(addr, callOp.getLoc());
+}
+
+void LifetimeCheckPass::checkOperation(Operation *op) {
+  if (isa<::mlir::ModuleOp>(op)) {
+    theModule = cast<::mlir::ModuleOp>(op);
+    for (Region &region : op->getRegions())
+      checkRegion(region);
+    return;
+  }
+
+  if (isa<ScopeOp>(op)) {
+    // Add a new scope. Note that as part of the scope cleanup process
+    // we apply section 2.3 KILL(x) functionality, turning relevant
+    // references invalid.
+ // + // No need to create a new pmap when entering a new scope since it + // doesn't cause control flow to diverge (as it does in presence + // of cir::IfOp or cir::SwitchOp). + // + // Also note that for dangling pointers coming from if init stmts + // should be caught just fine, given that a ScopeOp embraces a IfOp. + LexicalScopeContext lexScope{op}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Region ®ion : op->getRegions()) + checkRegion(region); + return; + } + + // FIXME: we can do better than sequence of dyn_casts. + if (auto fnOp = dyn_cast(op)) + return checkFunc(fnOp); + if (auto ifOp = dyn_cast(op)) + return checkIf(ifOp); + if (auto switchOp = dyn_cast(op)) + return checkSwitch(switchOp); + if (auto loopOp = dyn_cast(op)) + return checkLoop(loopOp); + if (auto allocaOp = dyn_cast(op)) + return checkAlloca(allocaOp); + if (auto storeOp = dyn_cast(op)) + return checkStore(storeOp); + if (auto loadOp = dyn_cast(op)) + return checkLoad(loadOp); + if (auto callOp = dyn_cast(op)) + return checkCall(callOp); + if (auto awaitOp = dyn_cast(op)) + return checkAwait(awaitOp); + if (auto returnOp = dyn_cast(op)) + return checkReturn(returnOp); +} + +void LifetimeCheckPass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + opts.parseOptions(*this); + Operation *op = getOperation(); + checkOperation(op); +} + +std::unique_ptr mlir::createLifetimeCheckPass() { + return std::make_unique(); +} + +std::unique_ptr mlir::createLifetimeCheckPass(clang::ASTContext *astCtx) { + auto lifetime = std::make_unique(); + lifetime->setASTContext(astCtx); + return std::move(lifetime); +} + +std::unique_ptr mlir::createLifetimeCheckPass(ArrayRef remark, + ArrayRef hist, + unsigned hist_limit, + clang::ASTContext *astCtx) { + auto lifetime = std::make_unique(); + lifetime->setASTContext(astCtx); + lifetime->opts.parseOptions(remark, hist, hist_limit); + return std::move(lifetime); +} + +//===----------------------------------------------------------------------===// +// Dump & print helpers +//===----------------------------------------------------------------------===// + +void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { + llvm::errs() << "Local values: { "; + for (auto value : localValues) { + llvm::errs() << getVarNameFromValue(value); + llvm::errs() << ", "; + } + llvm::errs() << "}\n"; +} + +void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS, int ownedGen) { + switch (val.getInt()) { + case Invalid: + OS << "invalid"; + break; + case NullPtr: + OS << "nullptr"; + break; + case Global: + OS << "global"; + break; + case LocalValue: + OS << getVarNameFromValue(val.getPointer()); + break; + case OwnedBy: + ownedGen++; // Start from 1. 
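+    // e.g. this prints "o__2'" for the second generation of owner 'o'
+    // (illustrative annotation).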
+    OS << getVarNameFromValue(val.getPointer()) << "__" << ownedGen << "'";
+    break;
+  default:
+    llvm_unreachable("Not handled");
+  }
+}
+
+void LifetimeCheckPass::printPset(PSetType &pset, llvm::raw_ostream &OS) {
+  OS << "{ ";
+  auto size = pset.size();
+  for (auto s : pset) {
+    int ownerGen = 0;
+    if (s.isOwnedBy())
+      ownerGen = owners[s.getData()];
+    s.dump(OS, ownerGen);
+    size--;
+    if (size > 0)
+      OS << ", ";
+  }
+  OS << " }";
+}
+
+void LifetimeCheckPass::dumpCurrentPmap() { dumpPmap(*currPmap); }
+
+void LifetimeCheckPass::dumpPmap(PMapType &pmap) {
+  llvm::errs() << "pmap {\n";
+  int entry = 0;
+  for (auto &mapEntry : pmap) {
+    llvm::errs() << " " << entry << ": " << getVarNameFromValue(mapEntry.first)
+                 << " "
+                 << "=> ";
+    printPset(mapEntry.second);
+    llvm::errs() << "\n";
+    entry++;
+  }
+  llvm::errs() << "}\n";
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
new file mode 100644
index 000000000000..e92e40b7ccd4
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -0,0 +1,657 @@
+//===- LoweringPrepare.cpp - preparation work for LLVM lowering -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "LoweringPrepareCXXABI.h"
+#include "PassDetail.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Region.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Module.h"
+#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+#include <memory>
+
+using cir::CIRBaseBuilderTy;
+using namespace mlir;
+using namespace mlir::cir;
+
+static SmallString<128> getTransformedFileName(ModuleOp theModule) {
+  SmallString<128> FileName;
+
+  if (theModule.getSymName()) {
+    FileName = llvm::sys::path::filename(theModule.getSymName()->str());
+  }
+
+  if (FileName.empty())
+    FileName = "<null>";
+
+  for (size_t i = 0; i < FileName.size(); ++i) {
+    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
+    // to be the set of C preprocessing numbers.
+    if (!clang::isPreprocessingNumberBody(FileName[i]))
+      FileName[i] = '_';
+  }
+
+  return FileName;
+}
+
+/// Return the FuncOp called by `callOp`.
+static FuncOp getCalledFunction(CallOp callOp) {
+  SymbolRefAttr sym =
+      llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
+  if (!sym)
+    return nullptr;
+  return dyn_cast_or_null<FuncOp>(
+      SymbolTable::lookupNearestSymbolFrom(callOp, sym));
+}
+
+namespace {
+
+struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
+  LoweringPreparePass() = default;
+  void runOnOperation() override;
+
+  void runOnOp(Operation *op);
+  void lowerThreeWayCmpOp(CmpThreeWayOp op);
+  void lowerGlobalOp(GlobalOp op);
+  void lowerDynamicCastOp(DynamicCastOp op);
+  void lowerStdFindOp(StdFindOp op);
+  void lowerIterBeginOp(IterBeginOp op);
+  void lowerIterEndOp(IterEndOp op);
+  void lowerArrayDtor(ArrayDtor op);
+  void lowerArrayCtor(ArrayCtor op);
+
+  /// Build the function that initializes the specified global.
+  FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op);
+
+  /// Build a module init function that calls all the dynamic initializers.
+  void buildCXXGlobalInitFunc();
+
+  /// Materialize the global ctor/dtor lists.
+  void buildGlobalCtorDtorList();
+
+  FuncOp
+  buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name,
+                       mlir::Location loc, mlir::cir::FuncType type,
+                       mlir::cir::GlobalLinkageKind linkage =
+                           mlir::cir::GlobalLinkageKind::ExternalLinkage);
+
+  GlobalOp
+  buildRuntimeVariable(mlir::OpBuilder &Builder, llvm::StringRef Name,
+                       mlir::Location Loc, mlir::Type type,
+                       mlir::cir::GlobalLinkageKind Linkage =
+                           mlir::cir::GlobalLinkageKind::ExternalLinkage);
+
+  ///
+  /// AST related
+  /// -----------
+
+  clang::ASTContext *astCtx;
+  std::shared_ptr<::cir::LoweringPrepareCXXABI> cxxABI;
+
+  void setASTContext(clang::ASTContext *c) {
+    astCtx = c;
+    switch (c->getCXXABIKind()) {
+    case clang::TargetCXXABI::GenericItanium:
+    case clang::TargetCXXABI::GenericAArch64:
+    case clang::TargetCXXABI::AppleARM64:
+      // TODO: this isn't quite right, clang uses AppleARM64CXXABI which
+      // inherits from ARMCXXABI. We'll have to follow suit.
+      cxxABI.reset(::cir::LoweringPrepareCXXABI::createItaniumABI());
+      break;
+
+    default:
+      llvm_unreachable("NYI");
+    }
+  }
+
+  /// Tracks the current module.
+  ModuleOp theModule;
+
+  /// Tracks existing dynamic initializers.
+  llvm::StringMap<uint32_t> dynamicInitializerNames;
+  llvm::SmallVector<FuncOp, 4> dynamicInitializers;
+
+  /// List of ctors to be called before main().
+  SmallVector<mlir::Attribute, 4> globalCtorList;
+  /// List of dtors to be called when unloading the module.
+  SmallVector<mlir::Attribute, 4> globalDtorList;
+};
+} // namespace
+
+GlobalOp LoweringPreparePass::buildRuntimeVariable(
+    mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+    mlir::Type type, mlir::cir::GlobalLinkageKind linkage) {
+  GlobalOp g = dyn_cast_or_null<GlobalOp>(SymbolTable::lookupNearestSymbolFrom(
+      theModule, StringAttr::get(theModule->getContext(), name)));
+  if (!g) {
+    g = builder.create<GlobalOp>(loc, name, type);
+    g.setLinkageAttr(
+        mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+    mlir::SymbolTable::setSymbolVisibility(
+        g, mlir::SymbolTable::Visibility::Private);
+  }
+  return g;
+}
+
+FuncOp LoweringPreparePass::buildRuntimeFunction(
+    mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+    mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage) {
+  FuncOp f = dyn_cast_or_null<FuncOp>(SymbolTable::lookupNearestSymbolFrom(
+      theModule, StringAttr::get(theModule->getContext(), name)));
+  if (!f) {
+    f = builder.create<FuncOp>(loc, name, type);
+    f.setLinkageAttr(
+        mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+    mlir::SymbolTable::setSymbolVisibility(
+        f, mlir::SymbolTable::Visibility::Private);
+    mlir::NamedAttrList attrs;
+    f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get(
+        builder.getContext(), attrs.getDictionary(builder.getContext())));
+  }
+  return f;
+}
+
+FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) {
+  SmallString<256> fnName;
+  {
+    llvm::raw_svector_ostream Out(fnName);
+    op.getAst()->mangleDynamicInitializer(Out);
+    // Name numbering.
+    uint32_t cnt = dynamicInitializerNames[fnName]++;
+    if (cnt)
+      fnName += "." + llvm::Twine(cnt).str();
+  }
+
+  // Create a variable initialization function.
+  mlir::OpBuilder builder(&getContext());
+  builder.setInsertionPointAfter(op);
+  auto voidTy = ::mlir::cir::VoidType::get(builder.getContext());
+  auto fnType = mlir::cir::FuncType::get({}, voidTy);
+  FuncOp f =
+      buildRuntimeFunction(builder, fnName, op.getLoc(), fnType,
+                           mlir::cir::GlobalLinkageKind::InternalLinkage);
+
+  // Move over the initialization code of the ctor region.
+  auto &block = op.getCtorRegion().front();
+  mlir::Block *entryBB = f.addEntryBlock();
+  entryBB->getOperations().splice(entryBB->begin(), block.getOperations(),
+                                  block.begin(), std::prev(block.end()));
+
+  // Register the destructor call with __cxa_atexit.
+  auto &dtorRegion = op.getDtorRegion();
+  if (!dtorRegion.empty()) {
+    assert(op.getAst() &&
+           op.getAst()->getTLSKind() == clang::VarDecl::TLS_None && " TLS NYI");
+    // Create a variable that binds the atexit to this shared object.
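+    // The rewrite below mirrors what classic clang CodeGen emits for static
+    // destructors (illustrative annotation, not part of the original patch):
+    //
+    //   __cxa_atexit(&S::~S, &var, &__dso_handle);
+    //
+    // so the destructor runs at unload time instead of staying inline in
+    // the global's dtor region.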
+ builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); + auto Handle = buildRuntimeVariable(builder, "__dso_handle", op.getLoc(), + builder.getI8Type()); + + // Look for the destructor call in dtorBlock + auto &dtorBlock = dtorRegion.front(); + mlir::cir::CallOp dtorCall; + for (auto op : reverse(dtorBlock.getOps())) { + dtorCall = op; + break; + } + assert(dtorCall && "Expected a dtor call"); + FuncOp dtorFunc = getCalledFunction(dtorCall); + assert(dtorFunc && + mlir::isa(*dtorFunc.getAst()) && + "Expected a dtor call"); + + // Create a runtime helper function: + // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); + auto voidPtrTy = + ::mlir::cir::PointerType::get(builder.getContext(), voidTy); + auto voidFnTy = mlir::cir::FuncType::get({voidPtrTy}, voidTy); + auto voidFnPtrTy = + ::mlir::cir::PointerType::get(builder.getContext(), voidFnTy); + auto HandlePtrTy = + mlir::cir::PointerType::get(builder.getContext(), Handle.getSymType()); + auto fnAtExitType = mlir::cir::FuncType::get( + {voidFnPtrTy, voidPtrTy, HandlePtrTy}, + mlir::cir::VoidType::get(builder.getContext())); + const char *nameAtExit = "__cxa_atexit"; + FuncOp fnAtExit = + buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); + + // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, + // &__dso_handle) + builder.setInsertionPointAfter(dtorCall); + mlir::Value args[3]; + auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), + dtorFunc.getFunctionType()); + // dtorPtrTy + args[0] = builder.create( + dtorCall.getLoc(), dtorPtrTy, dtorFunc.getSymName()); + args[0] = builder.create( + dtorCall.getLoc(), voidFnPtrTy, mlir::cir::CastKind::bitcast, args[0]); + args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, + mlir::cir::CastKind::bitcast, + dtorCall.getArgOperand(0)); + args[2] = builder.create( + Handle.getLoc(), HandlePtrTy, Handle.getSymName()); + builder.create(dtorCall.getLoc(), fnAtExit, args); + dtorCall->erase(); + entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), + dtorBlock.begin(), + std::prev(dtorBlock.end())); + } + + // Replace cir.yield with cir.return + builder.setInsertionPointToEnd(entryBB); + auto &yieldOp = block.getOperations().back(); + assert(isa(yieldOp)); + builder.create(yieldOp.getLoc()); + return f; +} + +static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, + CmpThreeWayOp op) { + auto loc = op->getLoc(); + auto cmpInfo = op.getInfo(); + + if (cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && cmpInfo.getGt() == 1) { + // The comparison is already in canonicalized form. + return; + } + + auto canonicalizedCmpInfo = + mlir::cir::CmpThreeWayInfoAttr::get(builder.getContext(), -1, 0, 1); + mlir::Value result = + builder + .create(loc, op.getType(), op.getLhs(), + op.getRhs(), canonicalizedCmpInfo) + .getResult(); + + auto compareAndYield = [&](mlir::Value input, int64_t test, + int64_t yield) -> mlir::Value { + // Create a conditional branch that tests whether `input` is equal to + // `test`. If `input` is equal to `test`, yield `yield`. Otherwise, yield + // `input` as is. 
+ auto testValue = builder.getConstant( + loc, mlir::cir::IntAttr::get(input.getType(), test)); + auto yieldValue = builder.getConstant( + loc, mlir::cir::IntAttr::get(input.getType(), yield)); + auto eqToTest = + builder.createCompare(loc, mlir::cir::CmpOpKind::eq, input, testValue); + return builder + .create( + loc, eqToTest, + [&](OpBuilder &, Location) { + builder.create(loc, + mlir::ValueRange{yieldValue}); + }, + [&](OpBuilder &, Location) { + builder.create(loc, mlir::ValueRange{input}); + }) + ->getResult(0); + }; + + if (cmpInfo.getLt() != -1) + result = compareAndYield(result, -1, cmpInfo.getLt()); + + if (cmpInfo.getEq() != 0) + result = compareAndYield(result, 0, cmpInfo.getEq()); + + if (cmpInfo.getGt() != 1) + result = compareAndYield(result, 1, cmpInfo.getGt()); + + op.replaceAllUsesWith(result); + op.erase(); +} + +void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + if (op.isIntegralComparison() && op.isStrongOrdering()) { + // For three-way comparisons on integral operands that produce strong + // ordering, we can generate potentially better code with the `llvm.scmp.*` + // and `llvm.ucmp.*` intrinsics. Thus we don't replace these comparisons + // here. They will be lowered directly to LLVMIR during the LLVM lowering + // pass. + // + // But we still need to take a step here. `llvm.scmp.*` and `llvm.ucmp.*` + // returns -1, 0, or 1 to represent lt, eq, and gt, which are the + // "canonicalized" result values of three-way comparisons. However, + // `cir.cmp3way` may not produce canonicalized result. We need to + // canonicalize the comparison if necessary. This is what we're doing in + // this special branch. + canonicalizeIntrinsicThreeWayCmp(builder, op); + return; + } + + auto loc = op->getLoc(); + auto cmpInfo = op.getInfo(); + + auto buildCmpRes = [&](int64_t value) -> mlir::Value { + return builder.create( + loc, op.getType(), mlir::cir::IntAttr::get(op.getType(), value)); + }; + auto ltRes = buildCmpRes(cmpInfo.getLt()); + auto eqRes = buildCmpRes(cmpInfo.getEq()); + auto gtRes = buildCmpRes(cmpInfo.getGt()); + + auto buildCmp = [&](CmpOpKind kind) -> mlir::Value { + auto ty = BoolType::get(&getContext()); + return builder.create(loc, ty, kind, op.getLhs(), + op.getRhs()); + }; + auto buildSelect = [&](mlir::Value condition, mlir::Value trueResult, + mlir::Value falseResult) -> mlir::Value { + return builder + .create( + loc, condition, + [&](OpBuilder &, Location) { + builder.create(loc, trueResult); + }, + [&](OpBuilder &, Location) { + builder.create(loc, falseResult); + }) + .getResult(); + }; + + mlir::Value transformedResult; + if (cmpInfo.getOrdering() == CmpOrdering::Strong) { + // Strong ordering. + auto lt = buildCmp(CmpOpKind::lt); + auto eq = buildCmp(CmpOpKind::eq); + auto selectOnEq = buildSelect(eq, eqRes, gtRes); + transformedResult = buildSelect(lt, ltRes, selectOnEq); + } else { + // Partial ordering. 
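+    // Expansion shape (illustrative annotation):
+    //
+    //   lt ? ltRes : (gt ? gtRes : (eq ? eqRes : unorderedRes))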
+    auto unorderedRes = buildCmpRes(cmpInfo.getUnordered().value());
+
+    auto lt = buildCmp(CmpOpKind::lt);
+    auto eq = buildCmp(CmpOpKind::eq);
+    auto gt = buildCmp(CmpOpKind::gt);
+    auto selectOnEq = buildSelect(eq, eqRes, unorderedRes);
+    auto selectOnGt = buildSelect(gt, gtRes, selectOnEq);
+    transformedResult = buildSelect(lt, ltRes, selectOnGt);
+  }
+
+  op.replaceAllUsesWith(transformedResult);
+  op.erase();
+}
+
+void LoweringPreparePass::lowerGlobalOp(GlobalOp op) {
+  auto &ctorRegion = op.getCtorRegion();
+  auto &dtorRegion = op.getDtorRegion();
+
+  if (!ctorRegion.empty() || !dtorRegion.empty()) {
+    // Build a variable initialization function and move the initialization
+    // code in the ctor region over.
+    auto f = buildCXXGlobalVarDeclInitFunc(op);
+
+    // Clear the ctor and dtor regions.
+    ctorRegion.getBlocks().clear();
+    dtorRegion.getBlocks().clear();
+
+    // Add a function call to the variable initialization function.
+    assert(!hasAttr<clang::InitPriorityAttr>(
+               mlir::cast<ASTDeclInterface>(*op.getAst())) &&
+           "custom initialization priority NYI");
+    dynamicInitializers.push_back(f);
+  }
+}
+
+void LoweringPreparePass::buildGlobalCtorDtorList() {
+  if (!globalCtorList.empty()) {
+    theModule->setAttr("cir.global_ctors",
+                       mlir::ArrayAttr::get(&getContext(), globalCtorList));
+  }
+  if (!globalDtorList.empty()) {
+    theModule->setAttr("cir.global_dtors",
+                       mlir::ArrayAttr::get(&getContext(), globalDtorList));
+  }
+}
+
+void LoweringPreparePass::buildCXXGlobalInitFunc() {
+  if (dynamicInitializers.empty())
+    return;
+
+  for (auto &f : dynamicInitializers) {
+    // TODO: handle globals with a user-specified initialization priority.
+    auto ctorAttr = mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName());
+    globalCtorList.push_back(ctorAttr);
+  }
+
+  SmallString<256> fnName;
+  // Include the filename in the symbol name. Including "sub_" matches gcc
+  // and makes sure these symbols appear lexicographically behind the symbols
+  // with priority emitted above. Module implementation units behave the same
+  // way as a non-modular TU with imports.
+  // TODO: check CXX20ModuleInits
+  if (astCtx->getCurrentNamedModule() &&
+      !astCtx->getCurrentNamedModule()->isModuleImplementation()) {
+    llvm::raw_svector_ostream Out(fnName);
+    std::unique_ptr<clang::MangleContext> MangleCtx(
+        astCtx->createMangleContext());
+    cast<clang::ItaniumMangleContext>(*MangleCtx)
+        .mangleModuleInitializer(astCtx->getCurrentNamedModule(), Out);
+  } else {
+    fnName += "_GLOBAL__sub_I_";
+    fnName += getTransformedFileName(theModule);
+  }
+
+  mlir::OpBuilder builder(&getContext());
+  builder.setInsertionPointToEnd(&theModule.getBodyRegion().back());
+  auto fnType = mlir::cir::FuncType::get(
+      {}, mlir::cir::VoidType::get(builder.getContext()));
+  FuncOp f =
+      buildRuntimeFunction(builder, fnName, theModule.getLoc(), fnType,
+                           mlir::cir::GlobalLinkageKind::ExternalLinkage);
+  builder.setInsertionPointToStart(f.addEntryBlock());
+  for (auto &f : dynamicInitializers) {
+    builder.create<CallOp>(f.getLoc(), f);
+  }
+
+  builder.create<ReturnOp>(f.getLoc());
+}
+
+void LoweringPreparePass::lowerDynamicCastOp(DynamicCastOp op) {
+  CIRBaseBuilderTy builder(getContext());
+  builder.setInsertionPointAfter(op);
+
+  auto loweredValue = cxxABI->lowerDynamicCast(builder, op);
+  op.replaceAllUsesWith(loweredValue);
+  op.erase();
+}
+
+static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder,
+                                       mlir::Operation *op, mlir::Type eltTy,
+                                       mlir::Value arrayAddr,
+                                       uint64_t arrayLen) {
+  // Generate loop to call into ctor/dtor for every element.
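+  // Rough shape of what gets emitted (illustrative annotation, not part of
+  // the original patch):
+  //
+  //   %idx = cir.alloca !cir.ptr<!T> ["__array_idx"]
+  //   cir.store (cir.cast array_to_ptrdecay %arr), %idx
+  //   cir.do {
+  //     cir.call @ctor_or_dtor(cir.load %idx)
+  //     cir.store (cir.ptr_stride(cir.load %idx, 1)), %idx
+  //     cir.yield
+  //   } while { cir.condition(cir.cmp(cir.load %idx, %end)) }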
+ auto loc = op->getLoc(); + + // TODO: instead of fixed integer size, create alias for PtrDiffTy and unify + // with CIRGen stuff. + auto ptrDiffTy = + mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); + auto numArrayElementsConst = builder.create( + loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, arrayLen)); + + auto begin = builder.create( + loc, eltTy, mlir::cir::CastKind::array_to_ptrdecay, arrayAddr); + mlir::Value end = builder.create( + loc, eltTy, begin, numArrayElementsConst); + + auto tmpAddr = builder.createAlloca( + loc, /*addr type*/ builder.getPointerTo(eltTy), + /*var type*/ eltTy, "__array_idx", clang::CharUnits::One()); + builder.createStore(loc, begin, tmpAddr); + + auto loop = builder.createDoWhile( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = b.create(loc, eltTy, tmpAddr); + mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext()); + auto cmp = builder.create( + loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = b.create(loc, eltTy, tmpAddr); + + CallOp ctorCall; + op->walk([&](CallOp c) { ctorCall = c; }); + assert(ctorCall && "expected ctor call"); + + auto one = builder.create( + loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, 1)); + + ctorCall->moveAfter(one); + ctorCall->setOperand(0, currentElement); + + // Advance pointer and store them to temporary variable + auto nextElement = builder.create( + loc, eltTy, currentElement, one); + builder.createStore(loc, nextElement, tmpAddr); + builder.createYield(loc); + }); + + op->replaceAllUsesWith(loop); + op->erase(); +} + +void LoweringPreparePass::lowerArrayDtor(ArrayDtor op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto eltTy = op->getRegion(0).getArgument(0).getType(); + auto arrayLen = op.getAddr() + .getType() + .cast() + .getPointee() + .cast() + .getSize(); + lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); +} + +void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto eltTy = op->getRegion(0).getArgument(0).getType(); + auto arrayLen = op.getAddr() + .getType() + .cast() + .getPointee() + .cast() + .getSize(); + lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); +} + +void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand(0), op.getOperand(1), op.getOperand(2)}); + + op.replaceAllUsesWith(call); + op.erase(); +} + +void LoweringPreparePass::lowerIterBeginOp(IterBeginOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); + + op.replaceAllUsesWith(call); + op.erase(); +} + +void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); + + op.replaceAllUsesWith(call); + 
op.erase(); +} + +void LoweringPreparePass::runOnOp(Operation *op) { + if (auto threeWayCmp = dyn_cast(op)) { + lowerThreeWayCmpOp(threeWayCmp); + } else if (auto getGlobal = dyn_cast(op)) { + lowerGlobalOp(getGlobal); + } else if (auto dynamicCast = dyn_cast(op)) { + lowerDynamicCastOp(dynamicCast); + } else if (auto stdFind = dyn_cast(op)) { + lowerStdFindOp(stdFind); + } else if (auto iterBegin = dyn_cast(op)) { + lowerIterBeginOp(iterBegin); + } else if (auto iterEnd = dyn_cast(op)) { + lowerIterEndOp(iterEnd); + } else if (auto arrayCtor = dyn_cast(op)) { + lowerArrayCtor(arrayCtor); + } else if (auto arrayDtor = dyn_cast(op)) { + lowerArrayDtor(arrayDtor); + } else if (auto fnOp = dyn_cast(op)) { + if (auto globalCtor = fnOp.getGlobalCtorAttr()) { + globalCtorList.push_back(globalCtor); + } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { + globalDtorList.push_back(globalDtor); + } + } +} + +void LoweringPreparePass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + auto *op = getOperation(); + if (isa<::mlir::ModuleOp>(op)) { + theModule = cast<::mlir::ModuleOp>(op); + } + + SmallVector opsToTransform; + op->walk([&](Operation *op) { + if (isa(op)) + opsToTransform.push_back(op); + }); + + for (auto *o : opsToTransform) + runOnOp(o); + + buildCXXGlobalInitFunc(); + buildGlobalCtorDtorList(); +} + +std::unique_ptr mlir::createLoweringPreparePass() { + return std::make_unique(); +} + +std::unique_ptr +mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { + auto pass = std::make_unique(); + pass->setASTContext(astCtx); + return std::move(pass); +} diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h new file mode 100644 index 000000000000..2a094bad8702 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -0,0 +1,36 @@ +//====- LoweringPrepareCXXABI.h -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides the LoweringPrepareCXXABI class, which is the base class +// for ABI specific functionalities that are required during LLVM lowering +// prepare. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H +#define LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H + +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace cir { + +class LoweringPrepareCXXABI { +public: + static LoweringPrepareCXXABI *createItaniumABI(); + + virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) = 0; + + virtual ~LoweringPrepareCXXABI() {} +}; + +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp new file mode 100644 index 000000000000..3619648056cc --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -0,0 +1,117 @@ +//====- LoweringPrepareItaniumCXXABI.h - Itanium ABI specific code --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides Itanium C++ ABI specific code that is used during LLVMIR +// lowering prepare. +// +//===----------------------------------------------------------------------===// + +#include "../IR/MissingFeatures.h" +#include "LoweringPrepareCXXABI.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Value.h" +#include "mlir/IR/ValueRange.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +using namespace cir; + +namespace { + +class LoweringPrepareItaniumCXXABI : public LoweringPrepareCXXABI { +public: + mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) override; +}; + +} // namespace + +LoweringPrepareCXXABI *LoweringPrepareCXXABI::createItaniumABI() { + return new LoweringPrepareItaniumCXXABI(); +} + +static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, + mlir::FlatSymbolRefAttr badCastFuncRef) { + // TODO(cir): set the calling convention to __cxa_bad_cast. + assert(!MissingFeatures::setCallingConv()); + + builder.create(loc, badCastFuncRef, mlir::ValueRange{}); + builder.create(loc); + builder.clearInsertionPoint(); +} + +static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) { + auto loc = op->getLoc(); + auto srcValue = op.getSrc(); + auto castInfo = op.getInfo().cast(); + + // TODO(cir): consider address space + assert(!MissingFeatures::addressSpace()); + + auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); + auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); + auto destRtti = builder.getConstant(loc, castInfo.getDestRtti()); + auto offsetHint = builder.getConstant(loc, castInfo.getOffsetHint()); + + auto dynCastFuncRef = castInfo.getRuntimeFunc(); + mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; + + // TODO(cir): set the calling convention for __dynamic_cast. 
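+  // For reference (annotation added in review), the Itanium C++ ABI runtime
+  // entry point matching the four arguments assembled above is:
+  //
+  //   extern "C" void *__dynamic_cast(const void *sub,
+  //                                   const abi::__class_type_info *src,
+  //                                   const abi::__class_type_info *dst,
+  //                                   std::ptrdiff_t src2dst_offset);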
+ assert(!MissingFeatures::setCallingConv()); + mlir::Value castedPtr = + builder + .create(loc, dynCastFuncRef, + builder.getVoidPtrTy(), dynCastFuncArgs) + .getResult(0); + + assert(castedPtr.getType().isa() && + "the return value of __dynamic_cast should be a ptr"); + + /// C++ [expr.dynamic.cast]p9: + /// A failed cast to reference type throws std::bad_cast + if (op.isRefcast()) { + // Emit a cir.if that checks the casted value. + mlir::Value castedValueIsNull = builder.createPtrIsNull(castedPtr); + builder.create( + loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { + buildBadCastCall(builder, loc, castInfo.getBadCastFunc()); + }); + } + + // Note that castedPtr is a void*. Cast it to a pointer to the destination + // type before return. + return builder.createBitcast(castedPtr, op.getType()); +} + +mlir::Value +LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) { + auto loc = op->getLoc(); + auto srcValue = op.getSrc(); + + assert(!MissingFeatures::buildTypeCheck()); + + if (op.isRefcast()) + return buildDynamicCastAfterNullCheck(builder, op); + + auto srcValueIsNull = builder.createPtrToBoolCast(srcValue); + return builder + .create( + loc, srcValueIsNull, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield( + loc, builder.getNullPtr(op.getType(), loc).getResult()); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, + buildDynamicCastAfterNullCheck(builder, op)); + }) + .getResult(); +} diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp new file mode 100644 index 000000000000..e4848a21d0bd --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -0,0 +1,132 @@ +//===- MergeCleanups.cpp - merge simple return/yield blocks ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace cir; + +//===----------------------------------------------------------------------===// +// Rewrite patterns +//===----------------------------------------------------------------------===// + +namespace { + +/// Removes branches between two blocks if it is the only branch. +/// +/// From: +/// ^bb0: +/// cir.br ^bb1 +/// ^bb1: // pred: ^bb0 +/// cir.return +/// +/// To: +/// ^bb0: +/// cir.return +struct RemoveRedudantBranches : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(BrOp op, + PatternRewriter &rewriter) const final { + Block *block = op.getOperation()->getBlock(); + Block *dest = op.getDest(); + + // Single edge between blocks: merge it. 
+ if (block->getNumSuccessors() == 1 && + dest->getSinglePredecessor() == block) { + rewriter.eraseOp(op); + rewriter.mergeBlocks(dest, block); + return success(); + } + + return failure(); + } +}; + +struct RemoveEmptyScope : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(ScopeOp op) const final { + return success(op.getRegion().empty() || + (op.getRegion().getBlocks().size() == 1 && + op.getRegion().front().empty())); + } + + void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); + } +}; + +struct RemoveEmptySwitch : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(SwitchOp op) const final { + return success(op.getRegions().empty()); + } + + void rewrite(SwitchOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); + } +}; + +//===----------------------------------------------------------------------===// +// MergeCleanupsPass +//===----------------------------------------------------------------------===// + +struct MergeCleanupsPass : public MergeCleanupsBase { + using MergeCleanupsBase::MergeCleanupsBase; + + // The same operation rewriting done here could have been performed + // by CanonicalizerPass (adding hasCanonicalizer for target Ops and + // implementing the same from above in CIRDialects.cpp). However, it's + // currently too aggressive for static analysis purposes, since it might + // remove things where a diagnostic can be generated. + // + // FIXME: perhaps we can add one more mode to GreedyRewriteConfig to + // disable this behavior. + void runOnOperation() override; +}; + +void populateMergeCleanupPatterns(RewritePatternSet &patterns) { + // clang-format off + patterns.add< + RemoveRedudantBranches, + RemoveEmptyScope, + RemoveEmptySwitch + >(patterns.getContext()); + // clang-format on +} + +void MergeCleanupsPass::runOnOperation() { + // Collect rewrite patterns. + RewritePatternSet patterns(&getContext()); + populateMergeCleanupPatterns(patterns); + + // Collect operations to apply patterns. + SmallVector ops; + getOperation()->walk([&](Operation *op) { + if (isa(op)) + ops.push_back(op); + }); + + // Apply patterns. + if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + signalPassFailure(); +} + +} // namespace + +std::unique_ptr mlir::createMergeCleanupsPass() { + return std::make_unique(); +} diff --git a/clang/lib/CIR/Dialect/Transforms/PassDetail.h b/clang/lib/CIR/Dialect/Transforms/PassDetail.h new file mode 100644 index 000000000000..2fdcfbda61e5 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/PassDetail.h @@ -0,0 +1,29 @@ +//===- PassDetail.h - CIR Pass class details --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ +#define DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ + +#include "mlir/IR/Dialect.h" +#include "mlir/Pass/Pass.h" + +namespace mlir { +// Forward declaration from Dialect.h +template +void registerDialect(DialectRegistry ®istry); + +namespace cir { +class CIRDialect; +} // namespace cir + +#define GEN_PASS_CLASSES +#include "clang/CIR/Dialect/Passes.h.inc" + +} // namespace mlir + +#endif // DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp new file mode 100644 index 000000000000..93e19294feec --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp @@ -0,0 +1,32 @@ +//===- StdHelpers.cpp - Implementation standard related helpers--*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "StdHelpers.h" + +namespace mlir { +namespace cir { + +bool isStdArrayType(mlir::Type t) { + auto sTy = t.dyn_cast(); + if (!sTy) + return false; + auto recordDecl = sTy.getAst(); + if (!recordDecl.isInStdNamespace()) + return false; + + // TODO: only std::array supported for now, generalize and + // use tablegen. CallDescription.cpp in the static analyzer + // could be a good inspiration source too. + if (recordDecl.getName().compare("array") != 0) + return false; + + return true; +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h new file mode 100644 index 000000000000..302272feb6bb --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h @@ -0,0 +1,36 @@ +//===- StdHelpers.h - Helpers for standard types/functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Region.h" +#include "clang/AST/ASTContext.h" +#include "clang/Basic/Module.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Path.h" + +#ifndef DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ +#define DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ + +namespace mlir { +namespace cir { + +bool isStdArrayType(mlir::Type t); + +} // namespace cir +} // namespace mlir + +#endif diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp new file mode 100644 index 000000000000..bc4f68655883 --- /dev/null +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -0,0 +1,471 @@ +//===--- CIRGenAction.cpp - LLVM Code generation Frontend Action ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIRFrontendAction/CIRGenAction.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/OperationSupport.h" +#include "mlir/Parser/Parser.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclGroup.h" +#include "clang/Basic/DiagnosticFrontend.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/LangStandard.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/CIRToCIRPasses.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/LowerToLLVM.h" +#include "clang/CIR/Passes.h" +#include "clang/CodeGen/BackendUtil.h" +#include "clang/CodeGen/ModuleBuilder.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/DiagnosticPrinter.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LLVMRemarkStreamer.h" +#include "llvm/IR/Module.h" +#include "llvm/IRReader/IRReader.h" +#include "llvm/LTO/LTOBackend.h" +#include "llvm/Linker/Linker.h" +#include "llvm/Pass.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/Signals.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/TimeProfiler.h" +#include "llvm/Support/Timer.h" +#include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/YAMLTraits.h" +#include "llvm/Transforms/IPO/Internalize.h" + +#include + +using namespace cir; +using namespace clang; + +static std::string sanitizePassOptions(llvm::StringRef o) { + if (o.empty()) + return ""; + std::string opts{o}; + // MLIR pass options are space 
separated, but we use ';' in clang since
+  // spaces aren't well supported; switch it back here.
+  for (unsigned i = 0, e = opts.size(); i < e; ++i)
+    if (opts[i] == ';')
+      opts[i] = ' ';
+  // If the arguments are surrounded with '"', trim them off.
+  return llvm::StringRef(opts).trim('"').str();
+}
+
+namespace cir {
+
+static std::unique_ptr<llvm::Module>
+lowerFromCIRToLLVMIR(const clang::FrontendOptions &feOptions,
+                     mlir::ModuleOp mlirMod,
+                     std::unique_ptr<mlir::MLIRContext> mlirCtx,
+                     llvm::LLVMContext &llvmCtx, bool disableVerifier = false) {
+  if (feOptions.ClangIRDirectLowering)
+    return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, llvmCtx,
+                                                disableVerifier);
+  else
+    return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx);
+}
+
+class CIRGenConsumer : public clang::ASTConsumer {
+
+  virtual void anchor();
+
+  CIRGenAction::OutputType action;
+
+  DiagnosticsEngine &diagnosticsEngine;
+  const HeaderSearchOptions &headerSearchOptions;
+  const CodeGenOptions &codeGenOptions;
+  const TargetOptions &targetOptions;
+  const LangOptions &langOptions;
+  const FrontendOptions &feOptions;
+
+  std::unique_ptr<raw_pwrite_stream> outputStream;
+
+  ASTContext *astContext{nullptr};
+  IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
+  std::unique_ptr<CIRGenerator> gen;
+
+public:
+  CIRGenConsumer(CIRGenAction::OutputType action,
+                 DiagnosticsEngine &diagnosticsEngine,
+                 IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+                 const HeaderSearchOptions &headerSearchOptions,
+                 const CodeGenOptions &codeGenOptions,
+                 const TargetOptions &targetOptions,
+                 const LangOptions &langOptions,
+                 const FrontendOptions &feOptions,
+                 std::unique_ptr<raw_pwrite_stream> os)
+      : action(action), diagnosticsEngine(diagnosticsEngine),
+        headerSearchOptions(headerSearchOptions),
+        codeGenOptions(codeGenOptions), targetOptions(targetOptions),
+        langOptions(langOptions), feOptions(feOptions),
+        outputStream(std::move(os)), FS(VFS),
+        gen(std::make_unique<CIRGenerator>(diagnosticsEngine, std::move(VFS),
+                                           codeGenOptions)) {}
+
+  void Initialize(ASTContext &ctx) override {
+    assert(!astContext && "initialized multiple times");
+
+    astContext = &ctx;
+
+    gen->Initialize(ctx);
+  }
+
+  bool HandleTopLevelDecl(DeclGroupRef D) override {
+    PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+                                   astContext->getSourceManager(),
+                                   "LLVM IR generation of declaration");
+    gen->HandleTopLevelDecl(D);
+    return true;
+  }
+
+  void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *VD) override {
+    gen->HandleCXXStaticMemberVarInstantiation(VD);
+  }
+
+  void HandleInlineFunctionDefinition(FunctionDecl *D) override {
+    gen->HandleInlineFunctionDefinition(D);
+  }
+
+  void HandleInterestingDecl(DeclGroupRef D) override {
+    llvm_unreachable("NYI");
+  }
+
+  void HandleTranslationUnit(ASTContext &C) override {
+    // Note that this method is called after `HandleTopLevelDecl` has already
+    // run over all the top-level decls. Here clang mostly wraps up deferred
+    // and global codegen, followed by running the CIR passes.
+    gen->HandleTranslationUnit(C);
+
+    if (!feOptions.ClangIRDisableCIRVerifier)
+      if (!gen->verifyModule()) {
+        llvm::report_fatal_error(
+            "CIR codegen: module verification error before running CIR passes");
+        return;
+      }
+
+    auto mlirMod = gen->getModule();
+    auto mlirCtx = gen->takeContext();
+
+    auto setupCIRPipelineAndExecute = [&] {
+      // Sanitize the pass options. MLIR uses spaces between pass options,
+      // and since those are hard to pass through on the clang command line,
+      // we currently use ';'.
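+      // For illustration (hypothetical flag values): an option string passed
+      // as "history=invocation;remarks=all" on the clang side is rewritten by
+      // sanitizePassOptions into "history=invocation remarks=all" before it
+      // reaches the MLIR pass option parser.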
+ std::string lifetimeOpts, idiomRecognizerOpts, libOptOpts; + if (feOptions.ClangIRLifetimeCheck) + lifetimeOpts = sanitizePassOptions(feOptions.ClangIRLifetimeCheckOpts); + if (feOptions.ClangIRIdiomRecognizer) + idiomRecognizerOpts = + sanitizePassOptions(feOptions.ClangIRIdiomRecognizerOpts); + if (feOptions.ClangIRLibOpt) + libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); + + // Setup and run CIR pipeline. + std::string passOptParsingFailure; + if (runCIRToCIRPasses( + mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, + feOptions.ClangIRLifetimeCheck, lifetimeOpts, + feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, + feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, + action == CIRGenAction::OutputType::EmitCIRFlat) + .failed()) { + if (!passOptParsingFailure.empty()) + diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) + << feOptions.ClangIRLifetimeCheckOpts; + else + llvm::report_fatal_error("CIR codegen: MLIR pass manager fails " + "when running CIR passes!"); + return; + } + }; + + if (!feOptions.ClangIRDisablePasses) { + // Handle source manager properly given that lifetime analysis + // might emit warnings and remarks. + auto &clangSourceMgr = C.getSourceManager(); + FileID MainFileID = clangSourceMgr.getMainFileID(); + + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer( + clangSourceMgr.getBufferOrFake(MainFileID)); + + llvm::SourceMgr mlirSourceMgr; + mlirSourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + if (feOptions.ClangIRVerifyDiags) { + mlir::SourceMgrDiagnosticVerifierHandler sourceMgrHandler( + mlirSourceMgr, mlirCtx.get()); + mlirCtx->printOpOnDiagnostic(false); + setupCIRPipelineAndExecute(); + + // Verify the diagnostic handler to make sure that each of the + // diagnostics matched. + if (sourceMgrHandler.verify().failed()) { + // FIXME: we fail ungracefully, there's probably a better way + // to communicate non-zero return so tests can actually fail. + llvm::sys::RunInterruptHandlers(); + exit(1); + } + } else { + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(mlirSourceMgr, + mlirCtx.get()); + setupCIRPipelineAndExecute(); + } + } + + switch (action) { + case CIRGenAction::OutputType::EmitCIR: + case CIRGenAction::OutputType::EmitCIRFlat: + if (outputStream && mlirMod) { + // Emit remaining defaulted C++ methods + if (!feOptions.ClangIRDisableEmitCXXDefault) + gen->buildDefaultMethods(); + + // FIXME: we cannot roundtrip prettyForm=true right now. + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); + mlirMod->print(*outputStream, flags); + } + break; + case CIRGenAction::OutputType::EmitMLIR: { + auto loweredMlirModule = lowerFromCIRToMLIR(mlirMod, mlirCtx.get()); + assert(outputStream && "Why are we here without an output stream?"); + // FIXME: we cannot roundtrip prettyForm=true right now. 
+ mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); + loweredMlirModule->print(*outputStream, flags); + break; + } + case CIRGenAction::OutputType::EmitLLVM: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); + + llvmModule->setTargetTriple(targetOptions.Triple); + + EmitBackendOutput(diagnosticsEngine, headerSearchOptions, codeGenOptions, + targetOptions, langOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitLL, FS, + std::move(outputStream)); + break; + } + case CIRGenAction::OutputType::EmitObj: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); + + llvmModule->setTargetTriple(targetOptions.Triple); + EmitBackendOutput(diagnosticsEngine, headerSearchOptions, codeGenOptions, + targetOptions, langOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitObj, FS, + std::move(outputStream)); + break; + } + case CIRGenAction::OutputType::EmitAssembly: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); + + llvmModule->setTargetTriple(targetOptions.Triple); + EmitBackendOutput(diagnosticsEngine, headerSearchOptions, codeGenOptions, + targetOptions, langOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitAssembly, + FS, std::move(outputStream)); + break; + } + case CIRGenAction::OutputType::None: + break; + } + } + + void HandleTagDeclDefinition(TagDecl *D) override { + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + astContext->getSourceManager(), + "CIR generation of declaration"); + gen->HandleTagDeclDefinition(D); + } + + void HandleTagDeclRequiredDefinition(const TagDecl *D) override { + gen->HandleTagDeclRequiredDefinition(D); + } + + void CompleteTentativeDefinition(VarDecl *D) override { + gen->CompleteTentativeDefinition(D); + } + + void CompleteExternalDeclaration(VarDecl *D) override { + llvm_unreachable("NYI"); + } + + void AssignInheritanceModel(CXXRecordDecl *RD) override { + llvm_unreachable("NYI"); + } + + void HandleVTable(CXXRecordDecl *RD) override { gen->HandleVTable(RD); } +}; +} // namespace cir + +void CIRGenConsumer::anchor() {} + +CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) + : mlirContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), + action(act) {} + +CIRGenAction::~CIRGenAction() { mlirModule.reset(); } + +void CIRGenAction::EndSourceFileAction() { + // If the consumer creation failed, do nothing. 
+  if (!getCompilerInstance().hasASTConsumer())
+    return;
+
+  // TODO: pass the module around
+  // module = cgConsumer->takeModule();
+}
+
+static std::unique_ptr<raw_pwrite_stream>
+getOutputStream(CompilerInstance &ci, StringRef inFile,
+                CIRGenAction::OutputType action) {
+  switch (action) {
+  case CIRGenAction::OutputType::EmitAssembly:
+    return ci.createDefaultOutputFile(false, inFile, "s");
+  case CIRGenAction::OutputType::EmitCIR:
+    return ci.createDefaultOutputFile(false, inFile, "cir");
+  case CIRGenAction::OutputType::EmitCIRFlat:
+    return ci.createDefaultOutputFile(false, inFile, "cir");
+  case CIRGenAction::OutputType::EmitMLIR:
+    return ci.createDefaultOutputFile(false, inFile, "mlir");
+  case CIRGenAction::OutputType::EmitLLVM:
+    return ci.createDefaultOutputFile(false, inFile, "llvm");
+  case CIRGenAction::OutputType::EmitObj:
+    return ci.createDefaultOutputFile(true, inFile, "o");
+  case CIRGenAction::OutputType::None:
+    return nullptr;
+  }
+
+  llvm_unreachable("Invalid action!");
+}
+
+std::unique_ptr<ASTConsumer>
+CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) {
+  auto out = ci.takeOutputStream();
+  if (!out)
+    out = getOutputStream(ci, inputFile, action);
+
+  auto Result = std::make_unique<cir::CIRGenConsumer>(
+      action, ci.getDiagnostics(), &ci.getVirtualFileSystem(),
+      ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), ci.getTargetOpts(),
+      ci.getLangOpts(), ci.getFrontendOpts(), std::move(out));
+  cgConsumer = Result.get();
+
+  // Enable generating macro debug info only when debug info is not disabled
+  // and macro debug info is enabled.
+  if (ci.getCodeGenOpts().getDebugInfo() != llvm::codegenoptions::NoDebugInfo &&
+      ci.getCodeGenOpts().MacroDebugInfo) {
+    llvm_unreachable("NYI");
+  }
+
+  return std::move(Result);
+}
+
+mlir::OwningOpRef<mlir::ModuleOp>
+CIRGenAction::loadModule(llvm::MemoryBufferRef mbRef) {
+  auto module =
+      mlir::parseSourceString<mlir::ModuleOp>(mbRef.getBuffer(), mlirContext);
+  assert(module && "Failed to parse ClangIR module");
+  return module;
+}
+
+void CIRGenAction::ExecuteAction() {
+  if (getCurrentFileKind().getLanguage() != Language::CIR) {
+    this->ASTFrontendAction::ExecuteAction();
+    return;
+  }
+
+  // If this is a CIR file we have to treat it specially.
+  // TODO: this could be done more cleanly. For the moment it is just modeled
+  // on CodeGenAction, which is clearly suboptimal.
+  auto &ci = getCompilerInstance();
+  std::unique_ptr<raw_pwrite_stream> outstream =
+      getOutputStream(ci, getCurrentFile(), action);
+  if (action != OutputType::None && !outstream)
+    return;
+
+  auto &sourceManager = ci.getSourceManager();
+  auto fileID = sourceManager.getMainFileID();
+  auto mainFile = sourceManager.getBufferOrNone(fileID);
+
+  if (!mainFile)
+    return;
+
+  mlirContext->getOrLoadDialect<mlir::cir::CIRDialect>();
+  mlirContext->getOrLoadDialect<mlir::func::FuncDialect>();
+  mlirContext->getOrLoadDialect<mlir::memref::MemRefDialect>();
+
+  // TODO: unwrap this -- this exists because including the `OwningModuleRef` in
+  // CIRGenAction's header would require linking the Frontend against MLIR.
+  // Let's avoid that for now.
+ auto mlirModule = loadModule(*mainFile); + if (!mlirModule) + return; + + llvm::LLVMContext llvmCtx; + auto llvmModule = lowerFromCIRToLLVMIR( + ci.getFrontendOpts(), mlirModule.release(), + std::unique_ptr(mlirContext), llvmCtx); + + if (outstream) + llvmModule->print(*outstream, nullptr); +} + +void EmitAssemblyAction::anchor() {} +EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitAssembly, _MLIRContext) {} + +void EmitCIRAction::anchor() {} +EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitCIR, _MLIRContext) {} + +void EmitCIRFlatAction::anchor() {} +EmitCIRFlatAction::EmitCIRFlatAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitCIRFlat, _MLIRContext) {} + +void EmitCIROnlyAction::anchor() {} +EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::None, _MLIRContext) {} + +void EmitMLIRAction::anchor() {} +EmitMLIRAction::EmitMLIRAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitMLIR, _MLIRContext) {} + +void EmitLLVMAction::anchor() {} +EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} + +void EmitObjAction::anchor() {} +EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt new file mode 100644 index 000000000000..077bd733cbd8 --- /dev/null +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -0,0 +1,38 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRFrontendAction + CIRGenAction.cpp + + DEPENDS + MLIRCIROpsIncGen + MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + clangCIRLoweringDirectToLLVM + clangCIRLoweringThroughMLIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) diff --git a/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp new file mode 100644 index 000000000000..a3f525dd65a3 --- /dev/null +++ b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp @@ -0,0 +1,15 @@ +//====- ASTAttrInterfaces.cpp - Interface to AST Attributes ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + +#include "llvm/ADT/SmallVector.h" + +using namespace mlir::cir; + +/// Include the generated type qualifiers interfaces. 
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp new file mode 100644 index 000000000000..6062a39be7fa --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp @@ -0,0 +1,14 @@ +//====- CIRFPTypeInterface.cpp - Interface for floating-point types -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" + +using namespace mlir::cir; + +/// Include the generated interfaces. +#include "clang/CIR/Interfaces/CIRFPTypeInterface.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp new file mode 100644 index 000000000000..8b1708fa815c --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -0,0 +1,57 @@ +//===- CIRLoopOpInterface.cpp - Interface for CIR loop-like ops *- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h" + +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Interfaces/CIRLoopOpInterface.cpp.inc" +#include "llvm/Support/ErrorHandling.h" + +namespace mlir { +namespace cir { + +void LoopOpInterface::getLoopOpSuccessorRegions( + LoopOpInterface op, RegionBranchPoint point, + SmallVectorImpl ®ions) { + assert(point.isParent() || point.getRegionOrNull()); + + // Branching to first region: go to condition or body (do-while). + if (point.isParent()) { + regions.emplace_back(&op.getEntry(), op.getEntry().getArguments()); + } + // Branching from condition: go to body or exit. + else if (&op.getCond() == point.getRegionOrNull()) { + regions.emplace_back(RegionSuccessor(op->getResults())); + regions.emplace_back(&op.getBody(), op.getBody().getArguments()); + } + // Branching from body: go to step (for) or condition. + else if (&op.getBody() == point.getRegionOrNull()) { + // FIXME(cir): Should we consider break/continue statements here? + auto *afterBody = (op.maybeGetStep() ? op.maybeGetStep() : &op.getCond()); + regions.emplace_back(afterBody, afterBody->getArguments()); + } + // Branching from step: go to condition. + else if (op.maybeGetStep() == point.getRegionOrNull()) { + regions.emplace_back(&op.getCond(), op.getCond().getArguments()); + } else { + llvm_unreachable("unexpected branch origin"); + } +} + +/// Verify invariants of the LoopOpInterface. 
+LogicalResult detail::verifyLoopOpInterface(Operation *op) { + // FIXME: fix this so the conditionop isn't requiring MLIRCIR + // auto loopOp = cast(op); + // if (!isa(loopOp.getCond().back().getTerminator())) + // return op->emitOpError( + // "expected condition region to terminate with 'cir.condition'"); + return success(); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp new file mode 100644 index 000000000000..38211effb79c --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp @@ -0,0 +1,15 @@ +//====- CIROpInterfaces.cpp - Interface to AST Attributes ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "clang/CIR/Interfaces/CIROpInterfaces.h" + +#include "llvm/ADT/SmallVector.h" + +using namespace mlir::cir; + +/// Include the generated type qualifiers interfaces. +#include "clang/CIR/Interfaces/CIROpInterfaces.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt new file mode 100644 index 000000000000..dee0a1408250 --- /dev/null +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -0,0 +1,21 @@ +add_clang_library(MLIRCIRInterfaces + ASTAttrInterfaces.cpp + CIROpInterfaces.cpp + CIRLoopOpInterface.cpp + CIRFPTypeInterface.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces + + DEPENDS + MLIRCIRASTAttrInterfacesIncGen + MLIRCIREnumsGen + MLIRCIRFPTypeInterfaceIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRCIROpInterfacesIncGen + + LINK_LIBS + ${dialect_libs} + MLIRIR + MLIRSupport + ) diff --git a/clang/lib/CIR/Lowering/CMakeLists.txt b/clang/lib/CIR/Lowering/CMakeLists.txt new file mode 100644 index 000000000000..f720e597ecb0 --- /dev/null +++ b/clang/lib/CIR/Lowering/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(DirectToLLVM) +add_subdirectory(ThroughMLIR) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt new file mode 100644 index 000000000000..edabbaabec13 --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -0,0 +1,41 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRLoweringDirectToLLVM + LowerToLLVMIR.cpp + LowerToLLVM.cpp + + DEPENDS + MLIRCIREnumsGen + MLIRCIROpsIncGen + MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRCIRTransforms + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + MLIROpenMPDialect + MLIROpenMPToLLVMIRTranslation + ) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp new file mode 100644 index 000000000000..b8e226a80b6c --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -0,0 +1,3308 @@ +//====- LowerToLLVM.cpp - Lowering from CIR to LLVMIR ---------------------===// +// +// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR operations to LLVMIR. +// +//===----------------------------------------------------------------------===// +#include "LoweringHelpers.h" +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/DLTI/DLTI.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" +#include "mlir/Dialect/LLVMIR/Transforms/Passes.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/IR/Operation.h" +#include "mlir/IR/Types.h" +#include "mlir/IR/Value.h" +#include "mlir/IR/ValueRange.h" +#include "mlir/IR/Visitors.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Support/LLVM.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include +#include +#include + +using namespace cir; +using namespace llvm; + +namespace cir { +namespace direct { + +//===----------------------------------------------------------------------===// +// Helper Methods +//===----------------------------------------------------------------------===// + +namespace { + +/// Walks a region while skipping operations of type `Ops`. This ensures the +/// callback is not applied to said operations and its children. +template +void walkRegionSkipping(mlir::Region ®ion, + mlir::function_ref callback) { + region.walk([&](mlir::Operation *op) { + if (isa(op)) + return mlir::WalkResult::skip(); + callback(op); + return mlir::WalkResult::advance(); + }); +} + +/// Convert from a CIR comparison kind to an LLVM IR integral comparison kind. 
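+/// For example, `lt` lowers to `slt` when the operands are signed and to
+/// `ult` when they are unsigned; `eq` and `ne` are signedness-agnostic.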
+mlir::LLVM::ICmpPredicate +convertCmpKindToICmpPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { + using CIR = mlir::cir::CmpOpKind; + using LLVMICmp = mlir::LLVM::ICmpPredicate; + switch (kind) { + case CIR::eq: + return LLVMICmp::eq; + case CIR::ne: + return LLVMICmp::ne; + case CIR::lt: + return (isSigned ? LLVMICmp::slt : LLVMICmp::ult); + case CIR::le: + return (isSigned ? LLVMICmp::sle : LLVMICmp::ule); + case CIR::gt: + return (isSigned ? LLVMICmp::sgt : LLVMICmp::ugt); + case CIR::ge: + return (isSigned ? LLVMICmp::sge : LLVMICmp::uge); + } + llvm_unreachable("Unknown CmpOpKind"); +} + +/// Convert from a CIR comparison kind to an LLVM IR floating-point comparison +/// kind. +mlir::LLVM::FCmpPredicate +convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { + using CIR = mlir::cir::CmpOpKind; + using LLVMFCmp = mlir::LLVM::FCmpPredicate; + switch (kind) { + case CIR::eq: + return LLVMFCmp::oeq; + case CIR::ne: + return LLVMFCmp::une; + case CIR::lt: + return LLVMFCmp::olt; + case CIR::le: + return LLVMFCmp::ole; + case CIR::gt: + return LLVMFCmp::ogt; + case CIR::ge: + return LLVMFCmp::oge; + } + llvm_unreachable("Unknown CmpOpKind"); +} + +/// If the given type is a vector type, return the vector's element type. +/// Otherwise return the given type unchanged. +mlir::Type elementTypeIfVector(mlir::Type type) { + if (auto VecType = type.dyn_cast()) { + return VecType.getEltType(); + } + return type; +} + +} // namespace + +//===----------------------------------------------------------------------===// +// Visitors for Lowering CIR Const Attributes +//===----------------------------------------------------------------------===// + +/// Switches on the type of attribute and calls the appropriate conversion. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter); + +/// IntAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(intAttr.getType()), intAttr.getValue()); +} + +/// BoolAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::BoolAttr boolAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(boolAttr.getType()), boolAttr.getValue()); +} + +/// ConstPtrAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + if (ptrAttr.isNullValue()) { + return rewriter.create( + loc, converter->convertType(ptrAttr.getType())); + } + mlir::Value ptrVal = rewriter.create( + loc, rewriter.getI64Type(), ptrAttr.getValue()); + return rewriter.create( + loc, converter->convertType(ptrAttr.getType()), ptrVal); +} + +/// FPAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::FPAttr fltAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); +} + +/// ZeroAttr visitor. 
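+/// A `#cir.zero` aggregate initializer lowers to a single zero-initialized
+/// LLVM constant of the converted type, e.g. (sketch) `#cir.zero` on a
+/// `!cir.array<!s32i x 4>` yields a zeroed `!llvm.array<4 x i32>` value.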
+inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(zeroAttr.getType())); +} + +/// ConstStruct visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::ConstStructAttr constStruct, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(constStruct.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + + // Iteratively lower each constant element of the struct. + for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +// VTableAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::VTableAttr vtableArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(vtableArr.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + + for (auto [idx, elt] : llvm::enumerate(vtableArr.getVtableData())) { + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +// TypeInfoAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::TypeInfoAttr typeinfoArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(typeinfoArr.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + + for (auto [idx, elt] : llvm::enumerate(typeinfoArr.getData())) { + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +// ConstArrayAttr visitor +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::ConstArrayAttr constArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(constArr.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result; + + if (auto zeros = constArr.getTrailingZerosNum()) { + auto arrayTy = constArr.getType(); + result = rewriter.create( + loc, converter->convertType(arrayTy)); + } else { + result = rewriter.create(loc, llvmTy); + } + + // Iteratively lower each constant element of the array. + if (auto arrayAttr = constArr.getElts().dyn_cast()) { + for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { + mlir::Value init = + lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = + rewriter.create(loc, result, init, idx); + } + } + // TODO(cir): this diverges from traditional lowering. Normally the string + // would be a global constant that is memcopied. 
+ else if (auto strAttr = constArr.getElts().dyn_cast()) { + auto arrayTy = strAttr.getType().dyn_cast(); + assert(arrayTy && "String attribute must have an array type"); + auto eltTy = arrayTy.getEltType(); + for (auto [idx, elt] : llvm::enumerate(strAttr)) { + auto init = rewriter.create( + loc, converter->convertType(eltTy), elt); + result = + rewriter.create(loc, result, init, idx); + } + } else { + llvm_unreachable("unexpected ConstArrayAttr elements"); + } + + return result; +} + +// GlobalViewAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::GlobalViewAttr globalAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto module = parentOp->getParentOfType(); + mlir::Type sourceType; + llvm::StringRef symName; + auto *sourceSymbol = + mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol()); + if (auto llvmSymbol = dyn_cast(sourceSymbol)) { + sourceType = llvmSymbol.getType(); + symName = llvmSymbol.getSymName(); + } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { + sourceType = converter->convertType(cirSymbol.getSymType()); + symName = cirSymbol.getSymName(); + } else if (auto llvmFun = dyn_cast(sourceSymbol)) { + sourceType = llvmFun.getFunctionType(); + symName = llvmFun.getSymName(); + } else if (auto fun = dyn_cast(sourceSymbol)) { + sourceType = converter->convertType(fun.getFunctionType()); + symName = fun.getSymName(); + } else { + llvm_unreachable("Unexpected GlobalOp type"); + } + + auto loc = parentOp->getLoc(); + mlir::Value addrOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symName); + + if (globalAttr.getIndices()) { + llvm::SmallVector indices; + for (auto idx : globalAttr.getIndices()) { + auto intAttr = dyn_cast(idx); + assert(intAttr && "index must be integers"); + indices.push_back(intAttr.getValue().getSExtValue()); + } + auto resTy = addrOp.getType(); + auto eltTy = converter->convertType(sourceType); + addrOp = rewriter.create(loc, resTy, eltTy, addrOp, + indices, true); + } + + auto ptrTy = globalAttr.getType().dyn_cast(); + assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); + auto llvmEltTy = converter->convertType(ptrTy.getPointee()); + + if (llvmEltTy == sourceType) + return addrOp; + + auto llvmDstTy = converter->convertType(globalAttr.getType()); + return rewriter.create(parentOp->getLoc(), llvmDstTy, + addrOp); +} + +/// Switches on the type of attribute and calls the appropriate conversion. 
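+/// Typical use from a lowering pattern (sketch, mirroring the constant
+/// lowering below):
+///   mlir::Value init =
+///       lowerCirAttrAsValue(op, op.getValue(), rewriter, getTypeConverter());
+///   rewriter.replaceOp(op, init);
+/// Attribute kinds without a visitor here fail loudly instead of silently
+/// miscompiling.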
+inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + if (const auto intAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); + if (const auto fltAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); + if (const auto ptrAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); + if (const auto constStruct = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); + if (const auto constArr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); + if (const auto boolAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); + if (const auto zeroAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); + if (const auto globalAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); + if (const auto vtableAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter); + if (const auto typeinfoAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter); + + llvm_unreachable("unhandled attribute type"); +} + +//===----------------------------------------------------------------------===// + +mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { + using CIR = mlir::cir::GlobalLinkageKind; + using LLVM = mlir::LLVM::Linkage; + + switch (linkage) { + case CIR::AvailableExternallyLinkage: + return LLVM::AvailableExternally; + case CIR::CommonLinkage: + return LLVM::Common; + case CIR::ExternalLinkage: + return LLVM::External; + case CIR::ExternalWeakLinkage: + return LLVM::ExternWeak; + case CIR::InternalLinkage: + return LLVM::Internal; + case CIR::LinkOnceAnyLinkage: + return LLVM::Linkonce; + case CIR::LinkOnceODRLinkage: + return LLVM::LinkonceODR; + case CIR::PrivateLinkage: + return LLVM::Private; + case CIR::WeakAnyLinkage: + return LLVM::Weak; + case CIR::WeakODRLinkage: + return LLVM::WeakODR; + }; +} + +class CIRCopyOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const mlir::Value length = rewriter.create( + op.getLoc(), rewriter.getI32Type(), op.getLength()); + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false); + return mlir::success(); + } +}; + +class CIRMemCpyOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::MemCpyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); + } +}; + +class CIRPtrStrideOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto *tc = getTypeConverter(); + const auto resultTy = 
tc->convertType(ptrStrideOp.getType()); + const auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); + rewriter.replaceOpWithNewOp(ptrStrideOp, resultTy, + elementTy, adaptor.getBase(), + adaptor.getStride()); + return mlir::success(); + } +}; + +class CIRBrCondOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Value i1Condition; + + if (auto defOp = adaptor.getCond().getDefiningOp()) { + if (auto zext = dyn_cast(defOp)) { + if (zext->use_empty() && + zext->getOperand(0).getType() == rewriter.getI1Type()) { + i1Condition = zext->getOperand(0); + rewriter.eraseOp(zext); + } + } + } + + if (!i1Condition) + i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), adaptor.getCond()); + + rewriter.replaceOpWithNewOp( + brOp, i1Condition, brOp.getDestTrue(), adaptor.getDestOperandsTrue(), + brOp.getDestFalse(), adaptor.getDestOperandsFalse()); + + return mlir::success(); + } +}; + +class CIRCastOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + inline mlir::Type convertTy(mlir::Type ty) const { + return getTypeConverter()->convertType(ty); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // For arithmetic conversions, LLVM IR uses the same instruction to convert + // both individual scalars and entire vectors. This lowering pass handles + // both situations. + + auto src = adaptor.getSrc(); + + switch (castOp.getKind()) { + case mlir::cir::CastKind::array_to_ptrdecay: { + const auto ptrTy = castOp.getType().cast(); + auto sourceValue = adaptor.getOperands().front(); + auto targetType = convertTy(ptrTy); + auto elementTy = convertTy(ptrTy.getPointee()); + auto offset = llvm::SmallVector{0}; + rewriter.replaceOpWithNewOp( + castOp, targetType, elementTy, sourceValue, offset); + break; + } + case mlir::cir::CastKind::int_to_bool: { + auto zero = rewriter.create( + src.getLoc(), castOp.getSrc().getType(), + mlir::cir::IntAttr::get(castOp.getSrc().getType(), 0)); + rewriter.replaceOpWithNewOp( + castOp, mlir::cir::BoolType::get(getContext()), + mlir::cir::CmpOpKind::ne, castOp.getSrc(), zero); + break; + } + case mlir::cir::CastKind::integral: { + auto srcType = castOp.getSrc().getType(); + auto dstType = castOp.getResult().getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstType = getTypeConverter()->convertType(dstType); + mlir::cir::IntType srcIntType = + elementTypeIfVector(srcType).cast(); + mlir::cir::IntType dstIntType = + elementTypeIfVector(dstType).cast(); + + if (dstIntType.getWidth() < srcIntType.getWidth()) { + // Bigger to smaller. Truncate. + rewriter.replaceOpWithNewOp(castOp, llvmDstType, + llvmSrcVal); + } else if (dstIntType.getWidth() > srcIntType.getWidth()) { + // Smaller to bigger. Zero extend or sign extend based on signedness. + if (srcIntType.isUnsigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstType, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstType, + llvmSrcVal); + } else { + // Same size. Signedness changes doesn't matter to LLVM. Do nothing. 
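+        // E.g. a u32 -> s32 cast is a no-op at this level: both sides lower
+        // to the signless i32, since LLVM integer types carry no signedness.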
+ rewriter.replaceOp(castOp, llvmSrcVal); + } + break; + } + case mlir::cir::CastKind::floating: { + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = + getTypeConverter()->convertType(castOp.getResult().getType()); + + auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); + auto dstTy = elementTypeIfVector(castOp.getResult().getType()); + + if (!dstTy.isa() || + !srcTy.isa()) + return castOp.emitError() + << "NYI cast from " << srcTy << " to " << dstTy; + + auto getFloatWidth = [](mlir::Type ty) -> unsigned { + return ty.cast().getWidth(); + }; + + if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::int_to_ptr: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::ptr_to_int: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::float_to_bool: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + auto kind = mlir::LLVM::FCmpPredicate::une; + + // Check if float is not equal to zero. + auto zeroFloat = rewriter.create( + castOp.getLoc(), llvmSrcVal.getType(), + mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); + + // Extend comparison result to either bool (C++) or int (C). 
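+      // Note that `une` ("unordered or not equal") makes NaN compare as
+      // true, matching C/C++ truthiness: (bool)NaN is true because NaN != 0.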
+ mlir::Value cmpResult = rewriter.create( + castOp.getLoc(), kind, llvmSrcVal, zeroFloat); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + cmpResult); + return mlir::success(); + } + case mlir::cir::CastKind::bool_to_int: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmSrcTy = llvmSrcVal.getType().cast(); + auto llvmDstTy = + getTypeConverter()->convertType(dstTy).cast(); + if (llvmSrcTy.getWidth() == llvmDstTy.getWidth()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::bool_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::int_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (elementTypeIfVector(castOp.getSrc().getType()) + .cast() + .isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::float_to_int: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (elementTypeIfVector(castOp.getResult().getType()) + .cast() + .isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::bitcast: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::ptr_to_bool: { + auto null = rewriter.create( + src.getLoc(), castOp.getSrc().getType(), + mlir::cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), + 0)); + rewriter.replaceOpWithNewOp( + castOp, mlir::cir::BoolType::get(getContext()), + mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); + break; + } + } + + return mlir::success(); + } +}; + +class CIRReturnLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, + adaptor.getOperands()); + return mlir::LogicalResult::success(); + } +}; + +struct ConvertCIRToLLVMPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } +}; + +class CIRCallLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + llvm::SmallVector llvmResults; + auto cirResults = op.getResultTypes(); + auto *converter = getTypeConverter(); + + if (converter->convertTypes(cirResults, 
llvmResults).failed()) + return mlir::failure(); + + if (auto callee = op.getCalleeAttr()) { // direct call + rewriter.replaceOpWithNewOp( + op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); + } else { // indirect call + assert(op.getOperands().size() && + "operands list must no be empty for the indirect call"); + auto typ = op.getOperands().front().getType(); + assert(isa(typ) && "expected pointer type"); + auto ptyp = dyn_cast(typ); + auto ftyp = dyn_cast(ptyp.getPointee()); + assert(ftyp && "expected a pointer to a function as the first operand"); + + rewriter.replaceOpWithNewOp( + op, + dyn_cast(converter->convertType(ftyp)), + adaptor.getOperands()); + } + return mlir::success(); + } +}; + +class CIRAllocaLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Value size = + op.isDynamic() + ? adaptor.getDynAllocSize() + : rewriter.create( + op.getLoc(), + typeConverter->convertType(rewriter.getIndexType()), + rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); + auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); + auto resultTy = mlir::LLVM::LLVMPointerType::get(getContext()); + rewriter.replaceOpWithNewOp( + op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); + return mlir::success(); + } +}; + +static mlir::LLVM::AtomicOrdering +getLLVMMemOrder(std::optional &memorder) { + if (!memorder) + return mlir::LLVM::AtomicOrdering::not_atomic; + switch (*memorder) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("unknown memory order"); +} + +class CIRLoadLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto llvmTy = + getTypeConverter()->convertType(op.getResult().getType()); + unsigned alignment = 0; + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + + // FIXME: right now we only pass in the alignment when the memory access + // is atomic, we should always pass it instead. + if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } + + // TODO: nontemporal, invariant, syncscope. 
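+    // A plain (non-atomic) load therefore lowers to a sketch like
+    //   %v = llvm.load %addr : !llvm.ptr -> i32
+    // with the volatile bit, and the ordering for atomics, carried over from
+    // the CIR op.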
+ rewriter.replaceOpWithNewOp( + op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, + op.getIsVolatile(), /* nontemporal */ false, + /* invariant */ false, ordering); + return mlir::LogicalResult::success(); + } +}; + +class CIRStoreLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + unsigned alignment = 0; + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + + // FIXME: right now we only pass in the alignment when the memory access + // is atomic, we should always pass it instead. + if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { + const auto llvmTy = + getTypeConverter()->convertType(op.getValue().getType()); + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } + + // TODO: nontemporal, syncscope. + rewriter.replaceOpWithNewOp( + op, adaptor.getValue(), adaptor.getAddr(), alignment, + op.getIsVolatile(), /* nontemporal */ false, ordering); + return mlir::LogicalResult::success(); + } +}; + +mlir::DenseElementsAttr +convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + mlir::Type type) { + auto values = llvm::SmallVector{}; + auto stringAttr = attr.getElts().dyn_cast(); + assert(stringAttr && "expected string attribute here"); + for (auto element : stringAttr) + values.push_back({8, (uint64_t)element}); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get({(int64_t)values.size()}, type), + llvm::ArrayRef(values)); +} + +template StorageTy getZeroInitFromType(mlir::Type Ty); + +template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { + assert(Ty.isa() && "expected int type"); + auto IntTy = Ty.cast(); + return mlir::APInt::getZero(IntTy.getWidth()); +} + +template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { + assert((Ty.isa()) && + "only float and double supported"); + if (Ty.isF32() || Ty.isa()) + return mlir::APFloat(0.f); + if (Ty.isF64() || Ty.isa()) + return mlir::APFloat(0.0); + llvm_unreachable("NYI"); +} + +// return the nested type and quantity of elements for cir.array type. +// e.g: for !cir.array x 1> +// it returns !s32i as return value and stores 3 to elemQuantity. +mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { + assert(Ty.isa() && "expected ArrayType"); + + elemQuantity = 1; + mlir::Type nestTy = Ty; + while (auto ArrTy = nestTy.dyn_cast()) { + nestTy = ArrTy.getEltType(); + elemQuantity *= ArrTy.getSize(); + } + + return nestTy; +} + +template +void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, + llvm::SmallVectorImpl &values) { + auto arrayAttr = attr.getElts().cast(); + for (auto eltAttr : arrayAttr) { + if (auto valueAttr = eltAttr.dyn_cast()) { + values.push_back(valueAttr.getValue()); + } else if (auto subArrayAttr = + eltAttr.dyn_cast()) { + convertToDenseElementsAttrImpl(subArrayAttr, values); + } else if (auto zeroAttr = eltAttr.dyn_cast()) { + unsigned numStoredZeros = 0; + auto nestTy = + getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); + values.insert(values.end(), numStoredZeros, + getZeroInitFromType(nestTy)); + } else { + llvm_unreachable("unknown element in ConstArrayAttr"); + } + } + + // Only fill in trailing zeros at the local cir.array level where the element + // type isn't another array (for the mult-dim case). 
+ auto numTrailingZeros = attr.getTrailingZerosNum(); + if (numTrailingZeros) { + auto localArrayTy = attr.getType().dyn_cast(); + assert(localArrayTy && "expected !cir.array"); + + auto nestTy = localArrayTy.getEltType(); + if (!nestTy.isa()) + values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, + getZeroInitFromType(nestTy)); + } +} + +template +mlir::DenseElementsAttr +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + const llvm::SmallVectorImpl &dims, + mlir::Type type) { + auto values = llvm::SmallVector{}; + convertToDenseElementsAttrImpl(attr, values); + return mlir::DenseElementsAttr::get(mlir::RankedTensorType::get(dims, type), + llvm::ArrayRef(values)); +} + +std::optional +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, + const mlir::TypeConverter *converter) { + + // Ensure ConstArrayAttr has a type. + auto typedConstArr = constArr.dyn_cast(); + assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); + + // Ensure ConstArrayAttr type is a ArrayType. + auto cirArrayType = typedConstArr.getType().dyn_cast(); + assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); + + // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. + mlir::Type type = cirArrayType; + auto dims = llvm::SmallVector{}; + while (auto arrayType = type.dyn_cast()) { + dims.push_back(arrayType.getSize()); + type = arrayType.getEltType(); + } + + // Convert array attr to LLVM compatible dense elements attr. + if (constArr.getElts().isa()) + return convertStringAttrToDenseElementsAttr(constArr, + converter->convertType(type)); + if (type.isa()) + return convertToDenseElementsAttr( + constArr, dims, converter->convertType(type)); + if (type.isa()) + return convertToDenseElementsAttr( + constArr, dims, converter->convertType(type)); + + return std::nullopt; +} + +bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { + auto array = attr.getElts().dyn_cast(); + return attr.hasTrailingZeros() || + (array && std::count_if(array.begin(), array.end(), [](auto elt) { + auto ar = dyn_cast(elt); + return ar && hasTrailingZeros(ar); + })); +} + +class CIRConstantLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Attribute attr = op.getValue(); + + if (op.getType().isa()) { + int value = + (op.getValue() == + mlir::cir::BoolAttr::get( + getContext(), ::mlir::cir::BoolType::get(getContext()), true)); + attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), + value); + } else if (op.getType().isa()) { + attr = rewriter.getIntegerAttr( + typeConverter->convertType(op.getType()), + op.getValue().cast().getValue()); + } else if (op.getType().isa()) { + attr = rewriter.getFloatAttr( + typeConverter->convertType(op.getType()), + op.getValue().cast().getValue()); + } else if (op.getType().isa()) { + // Optimize with dedicated LLVM op for null pointers. 
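+      // That is, a null `#cir.ptr` constant is emitted directly as LLVM's
+      // dedicated null-pointer constant op rather than as an integer zero
+      // followed by an inttoptr.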
+      if (op.getValue().isa<mlir::cir::ConstPtrAttr>()) {
+        if (op.getValue().cast<mlir::cir::ConstPtrAttr>().isNullValue()) {
+          rewriter.replaceOpWithNewOp<mlir::LLVM::ZeroOp>(
+              op, typeConverter->convertType(op.getType()));
+          return mlir::success();
+        }
+      }
+      // Lower GlobalViewAttr to llvm.mlir.addressof
+      if (auto gv = op.getValue().dyn_cast<mlir::cir::GlobalViewAttr>()) {
+        auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter());
+        rewriter.replaceOp(op, newOp);
+        return mlir::success();
+      }
+      attr = op.getValue();
+    }
+    // TODO(cir): constant arrays are currently just pushed into the stack
+    // using the store instruction, instead of being stored as global
+    // variables and then memcopied into the stack (as done in Clang).
+    else if (auto arrTy = op.getType().dyn_cast<mlir::cir::ArrayType>()) {
+      // Fetch operation constant array initializer.
+
+      auto constArr = op.getValue().dyn_cast<mlir::cir::ConstArrayAttr>();
+      if (!constArr && !isa<mlir::cir::ZeroAttr, mlir::cir::UndefAttr>(
+                           op.getValue()))
+        return op.emitError() << "array does not have a constant initializer";
+
+      std::optional<mlir::Attribute> denseAttr;
+      if (constArr && hasTrailingZeros(constArr)) {
+        auto newOp =
+            lowerCirAttrAsValue(op, constArr, rewriter, getTypeConverter());
+        rewriter.replaceOp(op, newOp);
+        return mlir::success();
+      } else if (constArr &&
+                 (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) {
+        attr = denseAttr.value();
+      } else {
+        auto initVal =
+            lowerCirAttrAsValue(op, op.getValue(), rewriter, typeConverter);
+        rewriter.replaceAllUsesWith(op, initVal);
+        rewriter.eraseOp(op);
+        return mlir::success();
+      }
+    } else if (const auto structAttr =
+                   op.getValue().dyn_cast<mlir::cir::ConstStructAttr>()) {
+      // TODO(cir): this diverges from traditional lowering. Normally the
+      // initializer would be a global constant that is memcopied. Here we
+      // just define a local constant with llvm.undef that will be stored
+      // into the stack.
+      auto initVal =
+          lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter);
+      rewriter.replaceAllUsesWith(op, initVal);
+      rewriter.eraseOp(op);
+      return mlir::success();
+    } else if (auto strTy = op.getType().dyn_cast<mlir::cir::StructType>()) {
+      if (auto zero = op.getValue().dyn_cast<mlir::cir::ZeroAttr>()) {
+        auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter);
+        rewriter.replaceAllUsesWith(op, initVal);
+        rewriter.eraseOp(op);
+        return mlir::success();
+      }
+
+      return op.emitError() << "unsupported lowering for struct constant type "
+                            << op.getType();
+    } else
+      return op.emitError() << "unsupported constant type " << op.getType();
+
+    rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
+        op, getTypeConverter()->convertType(op.getType()), attr);
+
+    return mlir::success();
+  }
+};
+
+class CIRVectorCreateLowering
+    : public mlir::OpConversionPattern<mlir::cir::VecCreateOp> {
+public:
+  using OpConversionPattern<mlir::cir::VecCreateOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    // Start with an 'undef' value for the vector. Then 'insertelement' for
+    // each of the vector elements.
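+    // E.g. a two-element create roughly lowers to:
+    //   %u  = llvm.mlir.undef : vector<2xi32>
+    //   %v0 = llvm.insertelement %e0, %u[%c0 : i64] : vector<2xi32>
+    //   %v1 = llvm.insertelement %e1, %v0[%c1 : i64] : vector<2xi32>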
+ auto vecTy = op.getType().dyn_cast(); + assert(vecTy && "result type of cir.vec.create op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + assert(vecTy.getSize() == op.getElements().size() && + "cir.vec.create op count doesn't match vector type elements count"); + for (uint64_t i = 0; i < vecTy.getSize(); ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + result = rewriter.create( + loc, result, adaptor.getElements()[i], indexValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); + } +}; + +class CIRVectorInsertLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getValue(), adaptor.getIndex()); + return mlir::success(); + } +}; + +class CIRVectorExtractLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecExtractOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getIndex()); + return mlir::success(); + } +}; + +class CIRVectorCmpOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(op.getType().isa() && + op.getLhs().getType().isa() && + op.getRhs().getType().isa() && + "Vector compare with non-vector type"); + // LLVM IR vector comparison returns a vector of i1. This one-bit vector + // must be sign-extended to the correct result type. + auto elementType = elementTypeIfVector(op.getLhs().getType()); + mlir::Value bitResult; + if (auto intType = elementType.dyn_cast()) { + bitResult = rewriter.create( + op.getLoc(), + convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), + adaptor.getLhs(), adaptor.getRhs()); + } else if (elementType.isa()) { + bitResult = rewriter.create( + op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), + adaptor.getLhs(), adaptor.getRhs()); + } else { + return op.emitError() << "unsupported type for VecCmpOp: " << elementType; + } + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), bitResult); + return mlir::success(); + } +}; + +class CIRVectorSplatLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecSplatOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Vector splat can be implemented with an `insertelement` and a + // `shufflevector`, which is better than an `insertelement` for each + // element in the vector. Start with an undef vector. Insert the value into + // the first element. Then use a `shufflevector` with a mask of all 0 to + // fill out the entire vector with that value. 
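+    // E.g. splatting a single i32 into four lanes roughly lowers to:
+    //   %u = llvm.mlir.undef : vector<4xi32>
+    //   %e = llvm.insertelement %val, %u[%c0 : i64] : vector<4xi32>
+    //   %s = llvm.shufflevector %e, %u [0, 0, 0, 0] : vector<4xi32>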
+ auto vecTy = op.getType().dyn_cast(); + assert(vecTy && "result type of cir.vec.splat op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value undef = rewriter.create(loc, llvmTy); + mlir::Value indexValue = + rewriter.create(loc, rewriter.getI64Type(), 0); + mlir::Value elementValue = adaptor.getValue(); + mlir::Value oneElement = rewriter.create( + loc, undef, elementValue, indexValue); + SmallVector zeroValues(vecTy.getSize(), 0); + mlir::Value shuffled = rewriter.create( + loc, oneElement, undef, zeroValues); + rewriter.replaceOp(op, shuffled); + return mlir::success(); + } +}; + +class CIRVectorTernaryLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecTernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(op.getType().isa() && + op.getCond().getType().isa() && + op.getVec1().getType().isa() && + op.getVec2().getType().isa() && + "Vector ternary op with non-vector type"); + // Convert `cond` into a vector of i1, then use that in a `select` op. + mlir::Value bitVec = rewriter.create( + op.getLoc(), mlir::LLVM::ICmpPredicate::ne, adaptor.getCond(), + rewriter.create( + op.getCond().getLoc(), + typeConverter->convertType(op.getCond().getType()))); + rewriter.replaceOpWithNewOp( + op, bitVec, adaptor.getVec1(), adaptor.getVec2()); + return mlir::success(); + } +}; + +class CIRVectorShuffleIntsLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecShuffleOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // LLVM::ShuffleVectorOp takes an ArrayRef of int for the list of indices. + // Convert the ClangIR ArrayAttr of IntAttr constants into a + // SmallVector. + SmallVector indices; + std::transform( + op.getIndices().begin(), op.getIndices().end(), + std::back_inserter(indices), [](mlir::Attribute intAttr) { + return intAttr.cast().getValue().getSExtValue(); + }); + rewriter.replaceOpWithNewOp( + op, adaptor.getVec1(), adaptor.getVec2(), indices); + return mlir::success(); + } +}; + +class CIRVectorShuffleVecLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern< + mlir::cir::VecShuffleDynamicOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecShuffleDynamicOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // LLVM IR does not have an operation that corresponds to this form of + // the built-in. 
+ // __builtin_shufflevector(V, I) + // is implemented as this pseudocode, where the for loop is unrolled + // and N is the number of elements: + // masked = I & (N-1) + // for (i in 0 <= i < N) + // result[i] = V[masked[i]] + auto loc = op.getLoc(); + mlir::Value input = adaptor.getVec(); + mlir::Type llvmIndexVecType = + getTypeConverter()->convertType(op.getIndices().getType()); + mlir::Type llvmIndexType = getTypeConverter()->convertType( + elementTypeIfVector(op.getIndices().getType())); + uint64_t numElements = + op.getVec().getType().cast().getSize(); + mlir::Value maskValue = rewriter.create( + loc, llvmIndexType, + mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); + mlir::Value maskVector = + rewriter.create(loc, llvmIndexVecType); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = rewriter.create( + loc, rewriter.getI64Type(), i); + maskVector = rewriter.create( + loc, maskVector, maskValue, iValue); + } + mlir::Value maskedIndices = rewriter.create( + loc, llvmIndexVecType, adaptor.getIndices(), maskVector); + mlir::Value result = rewriter.create( + loc, getTypeConverter()->convertType(op.getVec().getType())); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = rewriter.create( + loc, rewriter.getI64Type(), i); + mlir::Value indexValue = rewriter.create( + loc, maskedIndices, iValue); + mlir::Value valueAtIndex = + rewriter.create(loc, input, indexValue); + result = rewriter.create( + loc, result, valueAtIndex, iValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); + } +}; + +class CIRVAStartLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAStartOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); + } +}; + +class CIRVAEndLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAEndOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); + } +}; + +class CIRVACopyLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VACopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto dstList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + auto srcList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().back()); + rewriter.replaceOpWithNewOp(op, dstList, srcList); + return mlir::success(); + } +}; + +class CIRVAArgLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAArgOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + return op.emitError("cir.vaarg lowering is NYI"); + } +}; + +class CIRFuncLowering : public 
mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + /// Returns the name used for the linkage attribute. This *must* correspond + /// to the name of the attribute in ODS. + static StringRef getLinkageAttrNameString() { return "linkage"; } + + /// Only retain those attributes that are not constructed by + /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out + /// argument attributes. + void + filterFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, + SmallVectorImpl &result) const { + for (auto attr : func->getAttrs()) { + if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || + attr.getName() == func.getFunctionTypeAttrName() || + attr.getName() == getLinkageAttrNameString() || + (filterArgAndResAttrs && + (attr.getName() == func.getArgAttrsAttrName() || + attr.getName() == func.getResAttrsAttrName()))) + continue; + + // `CIRDialectLLVMIRTranslationInterface` requires "cir." prefix for + // dialect specific attributes, rename them. + if (attr.getName() == func.getExtraAttrsAttrName()) { + std::string cirName = "cir." + func.getExtraAttrsAttrName().str(); + attr.setName(mlir::StringAttr::get(getContext(), cirName)); + } + result.push_back(attr); + } + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + auto fnType = op.getFunctionType(); + mlir::TypeConverter::SignatureConversion signatureConversion( + fnType.getNumInputs()); + + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = typeConverter->convertType(argType.value()); + if (!convertedType) + return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType = + getTypeConverter()->convertType(fnType.getReturnType()); + + // Create the LLVM function operation. + auto llvmFnTy = mlir::LLVM::LLVMFunctionType::get( + resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()), + signatureConversion.getConvertedTypes(), + /*isVarArg=*/fnType.isVarArg()); + // LLVMFuncOp expects a single FileLine Location instead of a fused + // location. + auto Loc = op.getLoc(); + if (Loc.isa()) { + auto FusedLoc = Loc.cast(); + Loc = FusedLoc.getLocations()[0]; + } + assert((Loc.isa() || Loc.isa()) && + "expected single location or unknown location here"); + + auto linkage = convertLinkage(op.getLinkage()); + SmallVector attributes; + filterFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); + + auto fn = rewriter.create( + Loc, op.getName(), llvmFnTy, linkage, false, mlir::LLVM::CConv::C, + mlir::SymbolRefAttr(), attributes); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); + if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + &signatureConversion))) + return mlir::failure(); + + rewriter.eraseOp(op); + + return mlir::LogicalResult::success(); + } +}; + +class CIRGetGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. + // CIRGen should mitigate this and not emit the get_global. 
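+    // Erasing an unused get_global here is only sound because the op is a
+    // pure address computation with no side effects.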
+    if (op->getUses().empty()) {
+      rewriter.eraseOp(op);
+      return mlir::success();
+    }
+
+    auto type = getTypeConverter()->convertType(op.getType());
+    auto symbol = op.getName();
+    mlir::Operation *newop =
+        rewriter.create<mlir::LLVM::AddressOfOp>(op.getLoc(), type, symbol);
+
+    if (op.getTls()) {
+      // Handle access to TLS via intrinsic.
+      newop = rewriter.create<mlir::LLVM::ThreadlocalAddressOp>(
+          op.getLoc(), type, newop->getResult(0));
+    }
+
+    rewriter.replaceOp(op, newop);
+    return mlir::success();
+  }
+};
+
+class CIRSwitchFlatOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::SwitchFlatOp> {
+public:
+  using OpConversionPattern<mlir::cir::SwitchFlatOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::SwitchFlatOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+
+    llvm::SmallVector<mlir::APInt, 8> caseValues;
+    if (op.getCaseValues()) {
+      for (auto val : op.getCaseValues()) {
+        auto intAttr = dyn_cast<mlir::cir::IntAttr>(val);
+        caseValues.push_back(intAttr.getValue());
+      }
+    }
+
+    llvm::SmallVector<mlir::Block *, 8> caseDestinations;
+    llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
+
+    for (auto x : op.getCaseDestinations()) {
+      caseDestinations.push_back(x);
+    }
+
+    for (auto x : op.getCaseOperands()) {
+      caseOperands.push_back(x);
+    }
+
+    // Set switch op to branch to the newly created blocks.
+    rewriter.setInsertionPoint(op);
+    rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
+        op, adaptor.getCondition(), op.getDefaultDestination(),
+        op.getDefaultOperands(), caseValues, caseDestinations, caseOperands);
+    return mlir::success();
+  }
+};
+
+class CIRGlobalOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::GlobalOp> {
+public:
+  using OpConversionPattern<mlir::cir::GlobalOp>::OpConversionPattern;
+
+  /// Replace the CIR global with a region-initialized LLVM global, and update
+  /// the insertion point to the end of the initializer block.
+  inline void setupRegionInitializedLLVMGlobalOp(
+      mlir::cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const {
+    const auto llvmType = getTypeConverter()->convertType(op.getSymType());
+    SmallVector<mlir::NamedAttribute> attributes;
+    auto newGlobalOp = rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
+        op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()),
+        op.getSymName(), nullptr, /*alignment*/ 0, /*addrSpace*/ 0,
+        /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(),
+        /*comdat*/ mlir::SymbolRefAttr(), attributes);
+    newGlobalOp.getRegion().push_back(new mlir::Block());
+    rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock());
+  }
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+
+    // Fetch required values to create LLVM op.
+    const auto llvmType = getTypeConverter()->convertType(op.getSymType());
+    const auto isConst = op.getConstant();
+    const auto linkage = convertLinkage(op.getLinkage());
+    const auto symbol = op.getSymName();
+    const auto loc = op.getLoc();
+    std::optional<llvm::StringRef> section = op.getSection();
+    std::optional<mlir::Attribute> init = op.getInitialValue();
+
+    SmallVector<mlir::NamedAttribute> attributes;
+    if (section.has_value())
+      attributes.push_back(rewriter.getNamedAttr(
+          "section", rewriter.getStringAttr(section.value())));
+
+    // Check for missing functionalities.
+    if (!init.has_value()) {
+      rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
+          op, llvmType, isConst, linkage, symbol, mlir::Attribute(),
+          /*alignment*/ 0, /*addrSpace*/ 0,
+          /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(),
+          /*comdat*/ mlir::SymbolRefAttr(), attributes);
+      return mlir::success();
+    }
+
+    // Initializer is a constant array: convert it to a compatible llvm init.
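+    // E.g. a char array initialized from the string literal "abc" arrives
+    // here as a #cir.const_array wrapping a string attribute and is lowered
+    // below to a plain string initializer on the llvm.mlir.global.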
+    if (auto constArr = init.value().dyn_cast<mlir::cir::ConstArrayAttr>()) {
+      if (auto attr = constArr.getElts().dyn_cast<mlir::StringAttr>()) {
+        init = rewriter.getStringAttr(attr.getValue());
+      } else if (auto attr = constArr.getElts().dyn_cast<mlir::ArrayAttr>()) {
+        // Failed to use a compact attribute as an initializer:
+        // initialize elements individually.
+        if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) {
+          setupRegionInitializedLLVMGlobalOp(op, rewriter);
+          rewriter.create<mlir::LLVM::ReturnOp>(
+              op->getLoc(),
+              lowerCirAttrAsValue(op, constArr, rewriter, typeConverter));
+          return mlir::success();
+        }
+      } else {
+        op.emitError()
+            << "unsupported lowering for #cir.const_array with value "
+            << constArr.getElts();
+        return mlir::failure();
+      }
+    } else if (auto fltAttr = init.value().dyn_cast<mlir::cir::FPAttr>()) {
+      // Initializer is a constant floating-point number: convert to MLIR
+      // builtin constant.
+      init = rewriter.getFloatAttr(llvmType, fltAttr.getValue());
+    }
+    // Initializer is a constant integer: convert to MLIR builtin constant.
+    else if (auto intAttr = init.value().dyn_cast<mlir::cir::IntAttr>()) {
+      init = rewriter.getIntegerAttr(llvmType, intAttr.getValue());
+    } else if (auto boolAttr = init.value().dyn_cast<mlir::cir::BoolAttr>()) {
+      init = rewriter.getBoolAttr(boolAttr.getValue());
+    } else if (isa<mlir::cir::ZeroAttr, mlir::cir::ConstPtrAttr>(
+                   init.value())) {
+      // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute
+      // this should be updated. For now, we use a custom op to initialize
+      // globals to zero.
+      setupRegionInitializedLLVMGlobalOp(op, rewriter);
+      auto value =
+          lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter);
+      rewriter.create<mlir::LLVM::ReturnOp>(loc, value);
+      return mlir::success();
+    } else if (const auto structAttr =
+                   init.value().dyn_cast<mlir::cir::ConstStructAttr>()) {
+      setupRegionInitializedLLVMGlobalOp(op, rewriter);
+      rewriter.create<mlir::LLVM::ReturnOp>(
+          op->getLoc(),
+          lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter));
+      return mlir::success();
+    } else if (auto attr =
+                   init.value().dyn_cast<mlir::cir::GlobalViewAttr>()) {
+      setupRegionInitializedLLVMGlobalOp(op, rewriter);
+      rewriter.create<mlir::LLVM::ReturnOp>(
+          loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter));
+      return mlir::success();
+    } else if (const auto vtableAttr =
+                   init.value().dyn_cast<mlir::cir::VTableAttr>()) {
+      setupRegionInitializedLLVMGlobalOp(op, rewriter);
+      rewriter.create<mlir::LLVM::ReturnOp>(
+          op->getLoc(),
+          lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter));
+      return mlir::success();
+    } else if (const auto typeinfoAttr =
+                   init.value().dyn_cast<mlir::cir::TypeInfoAttr>()) {
+      setupRegionInitializedLLVMGlobalOp(op, rewriter);
+      rewriter.create<mlir::LLVM::ReturnOp>(
+          op->getLoc(),
+          lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter));
+      return mlir::success();
+    } else {
+      op.emitError() << "unsupported initializer '" << init.value() << "'";
+      return mlir::failure();
+    }
+
+    // Rewrite op.
+    rewriter.replaceOpWithNewOp<mlir::LLVM::GlobalOp>(
+        op, llvmType, isConst, linkage, symbol, init.value(),
+        /*alignment*/ 0, /*addrSpace*/ 0,
+        /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(),
+        /*comdat*/ mlir::SymbolRefAttr(), attributes);
+    return mlir::success();
+  }
+};
+
+class CIRUnaryOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::UnaryOp> {
+public:
+  using OpConversionPattern<mlir::cir::UnaryOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    assert(op.getType() == op.getInput().getType() &&
+           "Unary operation's operand type and result type are different");
+    mlir::Type type = op.getType();
+    mlir::Type elementType = elementTypeIfVector(type);
+    bool IsVector = type.isa<mlir::cir::VectorType>();
+    auto llvmType = getTypeConverter()->convertType(type);
+    auto loc = op.getLoc();
+
+    // Integer unary operations: + - ~ ++ --
+    if (elementType.isa<mlir::cir::IntType>()) {
+      switch (op.getKind()) {
+      case mlir::cir::UnaryOpKind::Inc: {
+        assert(!IsVector && "++ not allowed on vector types");
+        auto One = rewriter.create<mlir::LLVM::ConstantOp>(
+            loc, llvmType, mlir::IntegerAttr::get(llvmType, 1));
+        rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(
+            op, llvmType, adaptor.getInput(), One);
+        return mlir::success();
+      }
+      case mlir::cir::UnaryOpKind::Dec: {
+        assert(!IsVector && "-- not allowed on vector types");
+        auto One = rewriter.create<mlir::LLVM::ConstantOp>(
+            loc, llvmType, mlir::IntegerAttr::get(llvmType, 1));
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
+            op, llvmType, adaptor.getInput(), One);
+        return mlir::success();
+      }
+      case mlir::cir::UnaryOpKind::Plus: {
+        rewriter.replaceOp(op, adaptor.getInput());
+        return mlir::success();
+      }
+      case mlir::cir::UnaryOpKind::Minus: {
+        mlir::Value Zero;
+        if (IsVector)
+          Zero = rewriter.create<mlir::LLVM::ZeroOp>(loc, llvmType);
+        else
+          Zero = rewriter.create<mlir::LLVM::ConstantOp>(
+              loc, llvmType, mlir::IntegerAttr::get(llvmType, 0));
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
+            op, llvmType, Zero, adaptor.getInput());
+        return mlir::success();
+      }
+      case mlir::cir::UnaryOpKind::Not: {
+        // Bit-wise complement operator, implemented as an XOR with -1.
+        mlir::Value MinusOne;
+        if (IsVector) {
+          // Creating a vector object with all -1 values is easier said than
+          // done. It requires a series of insertelement ops.
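+          // E.g. for a 4-lane vector this roughly emits:
+          //   %m  = llvm.mlir.constant(-1 : i32) : i32
+          //   %u  = llvm.mlir.undef : vector<4xi32>
+          //   %v0 = llvm.insertelement %m, %u[%c0 : i64] : vector<4xi32>
+          //   ... repeated once per lane ...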
+ mlir::Type llvmElementType = + getTypeConverter()->convertType(elementType); + auto MinusOneInt = rewriter.create( + loc, llvmElementType, + mlir::IntegerAttr::get(llvmElementType, -1)); + MinusOne = rewriter.create(loc, llvmType); + auto NumElements = type.dyn_cast().getSize(); + for (uint64_t i = 0; i < NumElements; ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + MinusOne = rewriter.create( + loc, MinusOne, MinusOneInt, indexValue); + } + } else { + MinusOne = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); + } + rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, + adaptor.getInput()); + return mlir::success(); + } + } + } + + // Floating point unary operations: + - ++ -- + if (elementType.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + assert(!IsVector && "++ not allowed on vector types"); + auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); + auto oneConst = + rewriter.create(loc, llvmType, oneAttr); + rewriter.replaceOpWithNewOp(op, llvmType, oneConst, + adaptor.getInput()); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Dec: { + assert(!IsVector && "-- not allowed on vector types"); + auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); + auto negOneConst = + rewriter.create(loc, llvmType, negOneAttr); + rewriter.replaceOpWithNewOp( + op, llvmType, negOneConst, adaptor.getInput()); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + case mlir::cir::UnaryOpKind::Minus: { + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput()); + return mlir::success(); + } + default: + return op.emitError() + << "Unknown floating-point unary operation during CIR lowering"; + } + } + + // Boolean unary operations: ! only. (For all others, the operand has + // already been promoted to int.) + if (elementType.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Not: + assert(!IsVector && "NYI: op! on vector mask"); + rewriter.replaceOpWithNewOp( + op, llvmType, adaptor.getInput(), + rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1))); + return mlir::success(); + default: + return op.emitError() + << "Unknown boolean unary operation during CIR lowering"; + } + } + + // Pointer unary operations: + only. (++ and -- of pointers are implemented + // with cir.ptr_stride, not cir.unary.) 
+ if (elementType.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + default: + op.emitError() << "Unknown pointer unary operation during CIR lowering"; + return mlir::failure(); + } + } + + return op.emitError() << "Unary operation has unsupported type: " + << elementType; + } +}; + +class CIRBinOpLowering : public mlir::OpConversionPattern { + + mlir::LLVM::IntegerOverflowFlags + getIntOverflowFlag(mlir::cir::BinOp op) const { + if (op.getNoUnsignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nuw; + + if (op.getNoSignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nsw; + + return mlir::LLVM::IntegerOverflowFlags::none; + } + +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert((op.getLhs().getType() == op.getRhs().getType()) && + "inconsistent operands' types not supported yet"); + mlir::Type type = op.getRhs().getType(); + assert((type.isa()) && + "operand type not supported yet"); + + auto llvmTy = getTypeConverter()->convertType(op.getType()); + auto rhs = adaptor.getRhs(); + auto lhs = adaptor.getLhs(); + + type = elementTypeIfVector(type); + + switch (op.getKind()) { + case mlir::cir::BinOpKind::Add: + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Sub: + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Mul: + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Div: + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Rem: + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::And: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case mlir::cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + } + + return mlir::LogicalResult::success(); + } +}; + +class CIRShiftOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto cirAmtTy = op.getAmount().getType().dyn_cast(); + auto cirValTy = op.getValue().getType().dyn_cast(); + auto llvmTy = getTypeConverter()->convertType(op.getType()); + auto loc = op.getLoc(); + mlir::Value amt = adaptor.getAmount(); + mlir::Value val = adaptor.getValue(); + + assert(cirValTy && cirAmtTy && "non-integer shift is NYI"); + assert(cirValTy == op.getType() && "inconsistent operands' types NYI"); + + // Ensure 
shift amount is the same type as the value. Some undefined + // behavior might occur in the casts below as per [C99 6.5.7.3]. + if (cirAmtTy.getWidth() > cirValTy.getWidth()) { + amt = rewriter.create(loc, llvmTy, amt); + } else if (cirAmtTy.getWidth() < cirValTy.getWidth()) { + if (cirAmtTy.isSigned()) + amt = rewriter.create(loc, llvmTy, amt); + else + amt = rewriter.create(loc, llvmTy, amt); + } + + // Lower to the proper LLVM shift operation. + if (op.getIsShiftleft()) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else { + if (cirValTy.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + } + + return mlir::success(); + } +}; + +class CIRCmpOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = cmpOp.getLhs().getType(); + mlir::Value llResult; + + // Lower to LLVM comparison op. + if (auto intTy = type.dyn_cast()) { + auto kind = + convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (auto ptrTy = type.dyn_cast()) { + auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), + /* isSigned=*/false); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else { + return cmpOp.emitError() << "unsupported type for CmpOp: " << type; + } + + // LLVM comparison ops return i1, but cir::CmpOp returns the same type as + // the LHS value. Since this return value can be used later, we need to + // restore the type with the extension below. + auto llResultTy = getTypeConverter()->convertType(cmpOp.getType()); + rewriter.replaceOpWithNewOp(cmpOp, llResultTy, + llResult); + + return mlir::success(); + } +}; + +static mlir::LLVM::CallIntrinsicOp +createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, + mlir::Location loc, const llvm::Twine &intrinsicName, + mlir::Type resultTy, mlir::ValueRange operands) { + auto intrinsicNameAttr = + mlir::StringAttr::get(rewriter.getContext(), intrinsicName); + return rewriter.create( + loc, resultTy, intrinsicNameAttr, operands); +} + +static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( + mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op, + const llvm::Twine &intrinsicName, mlir::Type resultTy, + mlir::ValueRange operands) { + auto callIntrinOp = createCallLLVMIntrinsicOp( + rewriter, op->getLoc(), intrinsicName, resultTy, operands); + rewriter.replaceOp(op, callIntrinOp.getOperation()); + return callIntrinOp; +} + +static mlir::Value createLLVMBitOp(mlir::Location loc, + const llvm::Twine &llvmIntrinBaseName, + mlir::Type resultTy, mlir::Value operand, + std::optional poisonZeroInputFlag, + mlir::ConversionPatternRewriter &rewriter) { + auto operandIntTy = operand.getType().cast(); + auto resultIntTy = resultTy.cast(); + + std::string llvmIntrinName = + llvmIntrinBaseName.concat(".i") + .concat(std::to_string(operandIntTy.getWidth())) + .str(); + + // Note that LLVM intrinsic calls to bit intrinsics have the same type as the + // operand. 
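+  // E.g. for a 32-bit operand the intrinsic name is built up as
+  // "llvm.ctlz" + ".i32" and emitted via llvm.call_intrinsic; the extra i1
+  // flag (poison-on-zero) is appended when requested, and the result is
+  // truncated or extended below to match the CIR result width.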
+ mlir::LLVM::CallIntrinsicOp op; + if (poisonZeroInputFlag.has_value()) { + auto poisonZeroInputValue = rewriter.create( + loc, rewriter.getI1Type(), static_cast(*poisonZeroInputFlag)); + op = createCallLLVMIntrinsicOp(rewriter, loc, llvmIntrinName, + operand.getType(), + {operand, poisonZeroInputValue}); + } else { + op = createCallLLVMIntrinsicOp(rewriter, loc, llvmIntrinName, + operand.getType(), operand); + } + + mlir::Value result = op->getResult(0); + if (operandIntTy.getWidth() > resultIntTy.getWidth()) { + result = rewriter.create(loc, resultTy, result); + } else if (operandIntTy.getWidth() < resultIntTy.getWidth()) { + result = rewriter.create(loc, resultTy, result); + } + + return result; +} + +class CIRBitClrsbOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitClrsbOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto zero = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isNeg = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::slt), + adaptor.getInput(), zero); + + auto negOne = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), -1); + auto flipped = rewriter.create( + op.getLoc(), adaptor.getInput(), negOne); + + auto select = rewriter.create( + op.getLoc(), isNeg, flipped, adaptor.getInput()); + + auto resTy = getTypeConverter()->convertType(op.getType()); + auto clz = createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, select, + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto res = rewriter.create(op.getLoc(), clz, one); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRObjSizeOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ObjSizeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto loc = op->getLoc(); + + mlir::cir::SizeInfoType kindInfo = op.getKind(); + auto falseValue = rewriter.create( + loc, rewriter.getI1Type(), false); + auto trueValue = rewriter.create( + loc, rewriter.getI1Type(), true); + + replaceOpWithCallLLVMIntrinsicOp( + rewriter, op, "llvm.objectsize", llvmResTy, + mlir::ValueRange{adaptor.getPtr(), + kindInfo == mlir::cir::SizeInfoType::max ? falseValue + : trueValue, + trueValue, op.getDynamic() ? 
trueValue : falseValue}); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitClzOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitClzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + +class CIRBitCtzOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitCtzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + +class CIRBitFfsOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitFfsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto ctz = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto ctzAddOne = rewriter.create(op.getLoc(), ctz, one); + + auto zeroInputTy = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isZero = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::eq), + adaptor.getInput(), zeroInputTy); + + auto zero = rewriter.create(op.getLoc(), resTy, 0); + auto res = rewriter.create(op.getLoc(), isZero, zero, + ctzAddOne); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitParityOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitParityOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto popcnt = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto popcntMod2 = + rewriter.create(op.getLoc(), popcnt, one); + rewriter.replaceOp(op, popcntMod2); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitPopcountOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitPopcountOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + +static mlir::LLVM::AtomicOrdering 
getLLVMAtomicOrder(mlir::cir::MemOrder memo) { + switch (memo) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("shouldn't get here"); +} + +class CIRAtomicCmpXchgLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicCmpXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto expected = adaptor.getExpected(); + auto desired = adaptor.getDesired(); + + // FIXME: add syncscope. + auto cmpxchg = rewriter.create( + op.getLoc(), adaptor.getPtr(), expected, desired, + getLLVMAtomicOrder(adaptor.getSuccOrder()), + getLLVMAtomicOrder(adaptor.getFailOrder())); + cmpxchg.setWeak(adaptor.getWeak()); + cmpxchg.setVolatile_(adaptor.getIsVolatile()); + + // Check result and apply stores accordingly. + auto old = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 0); + auto cmp = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 1); + + auto extCmp = rewriter.create( + op.getLoc(), rewriter.getI8Type(), cmp); + rewriter.replaceOp(op, {old, extCmp}); + return mlir::success(); + } +}; + +class CIRAtomicXchgLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + rewriter.replaceOpWithNewOp( + op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(), + llvmOrder); + return mlir::success(); + } +}; + +class CIRAtomicFetchLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::Value buildPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isInt) const { + SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; + SmallVector atomicResTys = {rmwVal.getType()}; + return rewriter + .create(op.getLoc(), + rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)), + atomicOperands, atomicResTys, {}) + ->getResult(0); + } + + mlir::Value buildMinMaxPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isSigned) const { + auto loc = op.getLoc(); + mlir::LLVM::ICmpPredicate pred; + if (op.getBinop() == mlir::cir::AtomicFetchKind::Max) { + pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt + : mlir::LLVM::ICmpPredicate::ugt; + } else { // Min + pred = isSigned ? mlir::LLVM::ICmpPredicate::slt + : mlir::LLVM::ICmpPredicate::ult; + } + + auto cmp = rewriter.create( + loc, mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), + rmwVal, adaptor.getVal()); + return rewriter.create(loc, cmp, rmwVal, + adaptor.getVal()); + } + + llvm::StringLiteral getLLVMBinop(mlir::cir::AtomicFetchKind k, + bool isInt) const { + switch (k) { + case mlir::cir::AtomicFetchKind::Add: + return isInt ? 
mlir::LLVM::AddOp::getOperationName() + : mlir::LLVM::FAddOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::SubOp::getOperationName() + : mlir::LLVM::FSubOp::getOperationName(); + case mlir::cir::AtomicFetchKind::And: + return mlir::LLVM::AndOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Xor: + return mlir::LLVM::XOrOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Or: + return mlir::LLVM::OrOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Nand: + // There's no nand binop in LLVM, this is later fixed with a not. + return mlir::LLVM::AndOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Max: + case mlir::cir::AtomicFetchKind::Min: + llvm_unreachable("handled in buildMinMaxPostOp"); + } + llvm_unreachable("Unknown atomic fetch opcode"); + } + + mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(mlir::cir::AtomicFetchKind k, + bool isInt, + bool isSignedInt) const { + switch (k) { + case mlir::cir::AtomicFetchKind::Add: + return isInt ? mlir::LLVM::AtomicBinOp::add + : mlir::LLVM::AtomicBinOp::fadd; + case mlir::cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::AtomicBinOp::sub + : mlir::LLVM::AtomicBinOp::fsub; + case mlir::cir::AtomicFetchKind::And: + return mlir::LLVM::AtomicBinOp::_and; + case mlir::cir::AtomicFetchKind::Xor: + return mlir::LLVM::AtomicBinOp::_xor; + case mlir::cir::AtomicFetchKind::Or: + return mlir::LLVM::AtomicBinOp::_or; + case mlir::cir::AtomicFetchKind::Nand: + return mlir::LLVM::AtomicBinOp::nand; + case mlir::cir::AtomicFetchKind::Max: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmax; + return isSignedInt ? mlir::LLVM::AtomicBinOp::max + : mlir::LLVM::AtomicBinOp::umax; + } + case mlir::cir::AtomicFetchKind::Min: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmin; + return isSignedInt ? mlir::LLVM::AtomicBinOp::min + : mlir::LLVM::AtomicBinOp::umin; + } + } + llvm_unreachable("Unknown atomic fetch opcode"); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + bool isInt, isSignedInt = false; // otherwise it's float. + if (auto intTy = op.getVal().getType().dyn_cast()) { + isInt = true; + isSignedInt = intTy.isSigned(); + } else if (op.getVal() + .getType() + .isa()) + isInt = false; + else { + return op.emitError() + << "Unsupported type: " << adaptor.getVal().getType(); + } + + // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt); + auto rmwVal = rewriter.create( + op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); + + mlir::Value result = rmwVal.getRes(); + if (!op.getFetchFirst()) { + if (op.getBinop() == mlir::cir::AtomicFetchKind::Max || + op.getBinop() == mlir::cir::AtomicFetchKind::Min) + result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), + isSignedInt); + else + result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); + + // Compensate lack of nand binop in LLVM IR. 
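+      // I.e. buildPostOp used a plain 'and' for Nand, so the fetch_nand
+      // result is completed here as xor(and(old, val), -1), which is
+      // not(old & val).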
+      if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) {
+        auto negOne = rewriter.create<mlir::LLVM::ConstantOp>(
+            op.getLoc(), result.getType(), -1);
+        result =
+            rewriter.create<mlir::LLVM::XOrOp>(op.getLoc(), result, negOne);
+      }
+    }
+
+    rewriter.replaceOp(op, result);
+    return mlir::success();
+  }
+};
+
+class CIRByteswapOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::ByteswapOp> {
+public:
+  using OpConversionPattern<mlir::cir::ByteswapOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::ByteswapOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as
+    // the operand.
+
+    auto resTy = getTypeConverter()
+                     ->convertType(op.getType())
+                     .cast<mlir::IntegerType>();
+
+    std::string llvmIntrinName = "llvm.bswap.i";
+    llvmIntrinName.append(std::to_string(resTy.getWidth()));
+
+    rewriter.replaceOpWithNewOp<mlir::LLVM::ByteSwapOp>(op, adaptor.getInput());
+
+    return mlir::LogicalResult::success();
+  }
+};
+
+class CIRBrOpLowering : public mlir::OpConversionPattern<mlir::cir::BrOp> {
+public:
+  using OpConversionPattern<mlir::cir::BrOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::BrOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(op, adaptor.getOperands(),
+                                                  op.getDest());
+    return mlir::LogicalResult::success();
+  }
+};
+
+class CIRGetMemberOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::GetMemberOp> {
+public:
+  using mlir::OpConversionPattern<mlir::cir::GetMemberOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::GetMemberOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    auto llResTy = getTypeConverter()->convertType(op.getType());
+    const auto structTy =
+        op.getAddrTy().getPointee().cast<mlir::cir::StructType>();
+    assert(structTy && "expected struct type");
+
+    switch (structTy.getKind()) {
+    case mlir::cir::StructType::Struct:
+    case mlir::cir::StructType::Class: {
+      // Since the base address is a pointer to an aggregate, the first offset
+      // is always zero. The second offset tells us which member it will
+      // access.
+      llvm::SmallVector<mlir::LLVM::GEPArg, 2> offset{0, op.getIndex()};
+      const auto elementTy = getTypeConverter()->convertType(structTy);
+      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
+          op, llResTy, elementTy, adaptor.getAddr(), offset);
+      return mlir::success();
+    }
+    case mlir::cir::StructType::Union:
+      // Union members share the address space, so we just need a bitcast to
+      // conform to type-checking.
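+      // (Every union member starts at offset zero, so no GEP is required
+      // here; the cast only reconciles the pointer types.)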
+ rewriter.replaceOpWithNewOp(op, llResTy, + adaptor.getAddr()); + return mlir::success(); + } + } +}; + +class CIRPtrDiffOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const { + mlir::DataLayout layout(op.getParentOfType()); + return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PtrDiffOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto dstTy = op.getType().cast(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + + auto lhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getLhs()); + auto rhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getRhs()); + + auto diff = + rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); + + auto ptrTy = op.getLhs().getType().cast(); + auto typeSize = getTypeSize(ptrTy.getPointee(), *op); + auto typeSizeVal = rewriter.create( + op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); + + if (dstTy.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, + typeSizeVal); + else + rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, + typeSizeVal); + + return mlir::success(); + } +}; + +class CIRFAbsOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FAbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getOperands().front()); + return mlir::success(); + } +}; + +class CIRExpectOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ExpectOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + std::optional prob = op.getProb(); + if (!prob) + rewriter.replaceOpWithNewOp(op, adaptor.getVal(), + adaptor.getExpected()); + else + rewriter.replaceOpWithNewOp( + op, adaptor.getVal(), adaptor.getExpected(), prob.value()); + return mlir::success(); + } +}; + +class CIRVTableAddrPointOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VTableAddrPointOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto *converter = getTypeConverter(); + auto targetType = converter->convertType(op.getType()); + mlir::Value symAddr = op.getSymAddr(); + + mlir::Type eltType; + if (!symAddr) { + auto module = op->getParentOfType(); + auto symbol = dyn_cast( + mlir::SymbolTable::lookupSymbolIn(module, op.getNameAttr())); + symAddr = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), + *op.getName()); + eltType = converter->convertType(symbol.getType()); + } + + auto offsets = llvm::SmallVector{ + 0, op.getVtableIndex(), op.getAddressPointIndex()}; + if (eltType) + rewriter.replaceOpWithNewOp(op, targetType, eltType, + symAddr, offsets, true); + else + llvm_unreachable("Shouldn't ever be missing an eltType here"); + + return mlir::success(); + } +}; + +class CIRStackSaveLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StackSaveOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { 
+    auto ptrTy = getTypeConverter()->convertType(op.getType());
+    rewriter.replaceOpWithNewOp<mlir::LLVM::StackSaveOp>(op, ptrTy);
+    return mlir::success();
+  }
+};
+
+class CIRStackRestoreLowering
+    : public mlir::OpConversionPattern<mlir::cir::StackRestoreOp> {
+public:
+  using OpConversionPattern<mlir::cir::StackRestoreOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::StackRestoreOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOpWithNewOp<mlir::LLVM::StackRestoreOp>(op,
+                                                            adaptor.getPtr());
+    return mlir::success();
+  }
+};
+
+class CIRUnreachableLowering
+    : public mlir::OpConversionPattern<mlir::cir::UnreachableOp> {
+public:
+  using OpConversionPattern<mlir::cir::UnreachableOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::UnreachableOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(op);
+    return mlir::success();
+  }
+};
+
+class CIRTrapLowering : public mlir::OpConversionPattern<mlir::cir::TrapOp> {
+public:
+  using OpConversionPattern<mlir::cir::TrapOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::TrapOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    auto loc = op->getLoc();
+    rewriter.eraseOp(op);
+
+    rewriter.create<mlir::LLVM::Trap>(loc);
+
+    // Note that the call to llvm.trap is not a terminator in LLVM dialect.
+    // So we must emit an additional llvm.unreachable to terminate the current
+    // block.
+    rewriter.create<mlir::LLVM::UnreachableOp>(loc);
+
+    return mlir::success();
+  }
+};
+
+class CIRInlineAsmOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::InlineAsmOp> {
+
+  using mlir::OpConversionPattern<mlir::cir::InlineAsmOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::InlineAsmOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    mlir::Type llResTy;
+    if (op.getNumResults())
+      llResTy = getTypeConverter()->convertType(op.getType(0));
+
+    auto dialect = op.getAsmFlavor();
+    auto llDialect = dialect == mlir::cir::AsmFlavor::x86_att
+                         ? mlir::LLVM::AsmDialect::AD_ATT
+                         : mlir::LLVM::AsmDialect::AD_Intel;
+
+    std::vector<mlir::Attribute> opAttrs;
+    auto llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName();
+
+    // This is for the lowering from the LLVM dialect to LLVM IR. Otherwise,
+    // if the operation has no result (i.e. its result type is void), the
+    // element type attribute would be attached to the whole instruction
+    // instead of the operand.
+    if (!op.getNumResults())
+      opAttrs.push_back(mlir::Attribute());
+
+    llvm::SmallVector<mlir::Value> llvmOperands;
+    llvm::SmallVector<mlir::Value> cirOperands;
+    for (size_t i = 0; i < op.getOperands().size(); ++i) {
+      auto llvmOps = adaptor.getOperands()[i];
+      auto cirOps = op.getOperands()[i];
+      llvmOperands.insert(llvmOperands.end(), llvmOps.begin(), llvmOps.end());
+      cirOperands.insert(cirOperands.end(), cirOps.begin(), cirOps.end());
+    }
+
+    // So far we infer the LLVM dialect element type attr from the CIR
+    // operand type.
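+    // E.g. for an operand of type !cir.ptr<!s32i> this attaches
+    // elementtype(i32) to the corresponding llvm.inline_asm operand below.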
+ for (std::size_t i = 0; i < op.getOperandAttrs().size(); ++i) { + if (!op.getOperandAttrs()[i]) { + opAttrs.push_back(mlir::Attribute()); + continue; + } + + std::vector attrs; + auto typ = cast(cirOperands[i].getType()); + auto typAttr = mlir::TypeAttr::get( + getTypeConverter()->convertType(typ.getPointee())); + + attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); + auto newDict = rewriter.getDictionaryAttr(attrs); + opAttrs.push_back(newDict); + } + + rewriter.replaceOpWithNewOp( + op, llResTy, llvmOperands, op.getAsmStringAttr(), + op.getConstraintsAttr(), op.getSideEffectsAttr(), + /*is_align_stack*/ mlir::UnitAttr(), + mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), + rewriter.getArrayAttr(opAttrs)); + + return mlir::success(); + } +}; + +class CIRPrefetchLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PrefetchOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(), + /*DataCache*/ 1); + return mlir::success(); + } +}; + +class CIRSetBitfieldLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); + + unsigned storageSize = 0; + + if (auto arTy = storageType.dyn_cast()) + storageSize = arTy.getSize() * 8; + else if (auto intTy = storageType.dyn_cast()) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); + + auto intType = mlir::IntegerType::get(context, storageSize); + auto srcVal = createIntCast(rewriter, adaptor.getSrc(), intType); + auto srcWidth = storageSize; + auto resultVal = srcVal; + + if (storageSize != size) { + assert(storageSize > size && "Invalid bitfield size."); + + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0, + op.getIsVolatile()); + + srcVal = createAnd(rewriter, srcVal, + llvm::APInt::getLowBitsSet(srcWidth, size)); + resultVal = srcVal; + srcVal = createShL(rewriter, srcVal, offset); + + // Mask out the original value. + val = + createAnd(rewriter, val, + ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); + + // Or together the unchanged values and the source value. 
+ srcVal = rewriter.create(op.getLoc(), val, srcVal); + } + + rewriter.create(op.getLoc(), srcVal, adaptor.getAddr(), + /* alignment */ 0, op.getIsVolatile()); + + auto resultTy = getTypeConverter()->convertType(op.getType()); + + resultVal = + createIntCast(rewriter, resultVal, resultTy.cast()); + + if (info.getIsSigned()) { + assert(size <= storageSize); + unsigned highBits = storageSize - size; + + if (highBits) { + resultVal = createShL(rewriter, resultVal, highBits); + resultVal = createAShR(rewriter, resultVal, highBits); + } + } + + rewriter.replaceOp(op, resultVal); + return mlir::success(); + } +}; + +class CIRGetBitfieldLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); + unsigned storageSize = 0; + + if (auto arTy = storageType.dyn_cast()) + storageSize = arTy.getSize() * 8; + else if (auto intTy = storageType.dyn_cast()) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); + + auto intType = mlir::IntegerType::get(context, storageSize); + + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile()); + val = rewriter.create(op.getLoc(), intType, val); + + if (info.getIsSigned()) { + assert(static_cast(offset + size) <= storageSize); + unsigned highBits = storageSize - offset - size; + val = createShL(rewriter, val, highBits); + val = createAShR(rewriter, val, offset + highBits); + } else { + val = createLShR(rewriter, val, offset); + + if (static_cast(offset) + size < storageSize) + val = createAnd(rewriter, val, + llvm::APInt::getLowBitsSet(storageSize, size)); + } + + auto resTy = getTypeConverter()->convertType(op.getType()); + auto newOp = createIntCast(rewriter, val, resTy.cast(), + info.getIsSigned()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } +}; + +class CIRIsConstantOpLowering + : public mlir::OpConversionPattern { + + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IsConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering + // expects that cir.bool type will be lowered as i8 type. + // So we have to insert zext here. 
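+    // Roughly, assuming an i32 operand (sketch of the emitted pattern):
+    //   %c = llvm.intr.is.constant %val : i32 -> i1
+    //   %r = llvm.zext %c : i1 to i8   // i8 is the lowered cir.bool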
+    auto isConstantOP = rewriter.create<mlir::LLVM::IsConstantOp>(
+        op.getLoc(), adaptor.getVal());
+    rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(op, rewriter.getI8Type(),
+                                                    isConstantOP);
+    return mlir::success();
+  }
+};
+
+class CIRCmpThreeWayOpLowering
+    : public mlir::OpConversionPattern<mlir::cir::CmpThreeWayOp> {
+public:
+  using mlir::OpConversionPattern<
+      mlir::cir::CmpThreeWayOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::CmpThreeWayOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    if (!op.isIntegralComparison() || !op.isStrongOrdering()) {
+      op.emitError() << "unsupported three-way comparison type";
+      return mlir::failure();
+    }
+
+    auto cmpInfo = op.getInfo();
+    assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 &&
+           cmpInfo.getGt() == 1);
+
+    auto operandTy = op.getLhs().getType().cast<mlir::cir::IntType>();
+    auto resultTy = op.getType();
+    auto llvmIntrinsicName = getLLVMIntrinsicName(
+        operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth());
+
+    rewriter.setInsertionPoint(op);
+
+    auto llvmLhs = adaptor.getLhs();
+    auto llvmRhs = adaptor.getRhs();
+    auto llvmResultTy = getTypeConverter()->convertType(resultTy);
+    auto callIntrinsicOp =
+        createCallLLVMIntrinsicOp(rewriter, op.getLoc(), llvmIntrinsicName,
+                                  llvmResultTy, {llvmLhs, llvmRhs});
+
+    rewriter.replaceOp(op, callIntrinsicOp);
+    return mlir::success();
+  }
+
+private:
+  static std::string getLLVMIntrinsicName(bool signedCmp,
+                                          unsigned operandWidth,
+                                          unsigned resultWidth) {
+    // The intrinsic's name takes the form:
+    //   `llvm.<scmp|ucmp>.i<resultWidth>.i<operandWidth>`
+
+    std::string result = "llvm.";
+
+    if (signedCmp)
+      result.append("scmp.");
+    else
+      result.append("ucmp.");
+
+    // Result type part.
+    result.push_back('i');
+    result.append(std::to_string(resultWidth));
+    result.push_back('.');
+
+    // Operand type part.
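+    // (E.g. a signed comparison of 32-bit operands with an 8-bit result
+    // yields "llvm.scmp.i8.i32".)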
+ result.push_back('i'); + result.append(std::to_string(operandWidth)); + + return result; + } +}; + +void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter) { + patterns.add(patterns.getContext()); + patterns.add< + CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, + CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, + CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, + CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRBrCondOpLowering, + CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorInsertLowering, + CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, + CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, + CIRVectorShuffleVecLowering, CIRStackSaveLowering, + CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, + CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, + CIRCmpThreeWayOpLowering>(converter, patterns.getContext()); +} + +namespace { +void prepareTypeConverter(mlir::LLVMTypeConverter &converter, + mlir::DataLayout &dataLayout) { + converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + // Drop pointee type since LLVM dialect only allows opaque pointers. + return mlir::LLVM::LLVMPointerType::get(type.getContext()); + }); + converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + auto ty = converter.convertType(type.getEltType()); + return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); + }); + converter.addConversion([&](mlir::cir::VectorType type) -> mlir::Type { + auto ty = converter.convertType(type.getEltType()); + return mlir::LLVM::getFixedVectorType(ty, type.getSize()); + }); + converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + return mlir::IntegerType::get(type.getContext(), 8, + mlir::IntegerType::Signless); + }); + converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + // LLVM doesn't work with signed types, so we drop the CIR signs here. 
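+    // E.g. !s32i and !u32i (signed and unsigned 32-bit !cir.int) both lower
+    // to the signless i32; signedness is instead encoded in the operations
+    // that consume the value, such as sdiv vs. udiv.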
+ return mlir::IntegerType::get(type.getContext(), type.getWidth()); + }); + converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + return mlir::FloatType::getF32(type.getContext()); + }); + converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + return mlir::FloatType::getF64(type.getContext()); + }); + converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + return mlir::FloatType::getF80(type.getContext()); + }); + converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + return converter.convertType(type.getUnderlying()); + }); + converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { + auto result = converter.convertType(type.getReturnType()); + llvm::SmallVector arguments; + if (converter.convertTypes(type.getInputs(), arguments).failed()) + llvm_unreachable("Failed to convert function type parameters"); + auto varArg = type.isVarArg(); + return mlir::LLVM::LLVMFunctionType::get(result, arguments, varArg); + }); + converter.addConversion([&](mlir::cir::StructType type) -> mlir::Type { + // FIXME(cir): create separate unions, struct, and classes types. + // Convert struct members. + llvm::SmallVector llvmMembers; + switch (type.getKind()) { + case mlir::cir::StructType::Class: + // TODO(cir): This should be properly validated. + case mlir::cir::StructType::Struct: + for (auto ty : type.getMembers()) + llvmMembers.push_back(converter.convertType(ty)); + break; + // Unions are lowered as only the largest member. + case mlir::cir::StructType::Union: { + auto largestMember = type.getLargestMember(dataLayout); + if (largestMember) + llvmMembers.push_back(converter.convertType(largestMember)); + break; + } + } + + // Struct has a name: lower as an identified struct. + mlir::LLVM::LLVMStructType llvmStruct; + if (type.getName()) { + llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( + type.getContext(), type.getPrefixedName()); + if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()) + .failed()) + llvm_unreachable("Failed to set body of struct"); + } else { // Struct has no name: lower as literal struct. 
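+      // Sketch of the two shapes (member types assumed for illustration):
+      // a named !cir.struct<struct "S" {!s32i, !s8i}> lowers to the
+      // identified !llvm.struct<"S", (i32, i8)> above, while the anonymous
+      // equivalent lowers to the literal !llvm.struct<(i32, i8)> here.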
+ llvmStruct = mlir::LLVM::LLVMStructType::getLiteral( + type.getContext(), llvmMembers, /*isPacked=*/type.getPacked()); + } + + return llvmStruct; + }); + converter.addConversion([&](mlir::cir::VoidType type) -> mlir::Type { + return mlir::LLVM::LLVMVoidType::get(type.getContext()); + }); +} +} // namespace + +static void buildCtorDtorList( + mlir::ModuleOp module, StringRef globalXtorName, StringRef llvmXtorName, + llvm::function_ref(mlir::Attribute)> createXtor) { + llvm::SmallVector, 2> globalXtors; + for (auto namedAttr : module->getAttrs()) { + if (namedAttr.getName() == globalXtorName) { + for (auto attr : namedAttr.getValue().cast()) + globalXtors.emplace_back(createXtor(attr)); + break; + } + } + + if (globalXtors.empty()) + return; + + mlir::OpBuilder builder(module.getContext()); + builder.setInsertionPointToEnd(&module.getBodyRegion().back()); + + // Create a global array llvm.global_ctors with element type of + // struct { i32, ptr, ptr } + auto CtorPFTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + llvm::SmallVector CtorStructFields; + CtorStructFields.push_back(builder.getI32Type()); + CtorStructFields.push_back(CtorPFTy); + CtorStructFields.push_back(CtorPFTy); + + auto CtorStructTy = mlir::LLVM::LLVMStructType::getLiteral( + builder.getContext(), CtorStructFields); + auto CtorStructArrayTy = + mlir::LLVM::LLVMArrayType::get(CtorStructTy, globalXtors.size()); + + auto loc = module.getLoc(); + auto newGlobalOp = builder.create( + loc, CtorStructArrayTy, true, mlir::LLVM::Linkage::Appending, + llvmXtorName, mlir::Attribute()); + + newGlobalOp.getRegion().push_back(new mlir::Block()); + builder.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); + + mlir::Value result = + builder.create(loc, CtorStructArrayTy); + + for (uint64_t I = 0; I < globalXtors.size(); I++) { + auto fn = globalXtors[I]; + mlir::Value structInit = + builder.create(loc, CtorStructTy); + mlir::Value initPriority = builder.create( + loc, CtorStructFields[0], fn.second); + mlir::Value initFuncAddr = builder.create( + loc, CtorStructFields[1], fn.first); + mlir::Value initAssociate = + builder.create(loc, CtorStructFields[2]); + structInit = builder.create(loc, structInit, + initPriority, 0); + structInit = builder.create(loc, structInit, + initFuncAddr, 1); + // TODO: handle associated data for initializers. + structInit = builder.create(loc, structInit, + initAssociate, 2); + result = + builder.create(loc, result, structInit, I); + } + + builder.create(loc, result); +} + +// The unreachable code is not lowered by applyPartialConversion function +// since it traverses blocks in the dominance order. At the same time we +// do need to lower such code - otherwise verification errors occur. +// For instance, the next CIR code: +// +// cir.func @foo(%arg0: !s32i) -> !s32i { +// %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// cir.if %4 { +// %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// cir.return %5 : !s32i +// } else { +// %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// cir.return %5 : !s32i +// } +// cir.return %arg0 : !s32i +// } +// +// contains an unreachable return operation (the last one). After the flattening +// pass it will be placed into the unreachable block. And the possible error +// after the lowering pass is: error: 'cir.return' op expects parent op to be +// one of 'cir.func, cir.scope, cir.if ... The reason that this operation was +// not lowered and the new parent is llvm.func. 
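+// (That is, the trailing cir.return is never rewritten even though its
+// parent has already become an llvm.func. collect_unreachable below is the
+// workaround: it walks blocks that have no predecessors and hands their
+// operations to applyPartialConversion explicitly.)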
+//
+// In the future we may want to get rid of this function and use a DCE pass or
+// something similar. But for now we need to guarantee the absence of dialect
+// verification errors.
+void collect_unreachable(mlir::Operation *parent,
+                         llvm::SmallVector<mlir::Operation *> &ops) {
+
+  llvm::SmallVector<mlir::Block *> unreachable_blocks;
+  parent->walk([&](mlir::Block *blk) {
+    if (blk->hasNoPredecessors() && !blk->isEntryBlock())
+      unreachable_blocks.push_back(blk);
+  });
+
+  std::set<mlir::Block *> visited;
+  for (auto *root : unreachable_blocks) {
+    // We create a work list for each unreachable block so that we traverse
+    // operations in a predictable order.
+    std::deque<mlir::Block *> workList;
+    workList.push_back(root);
+
+    while (!workList.empty()) {
+      auto *blk = workList.back();
+      workList.pop_back();
+      if (visited.count(blk))
+        continue;
+      visited.emplace(blk);
+
+      for (auto &op : *blk)
+        ops.push_back(&op);
+
+      for (auto it = blk->succ_begin(); it != blk->succ_end(); ++it)
+        workList.push_back(*it);
+    }
+  }
+}
+
+void ConvertCIRToLLVMPass::runOnOperation() {
+  auto module = getOperation();
+  mlir::DataLayout dataLayout(module);
+  mlir::LLVMTypeConverter converter(&getContext());
+  prepareTypeConverter(converter, dataLayout);
+
+  mlir::RewritePatternSet patterns(&getContext());
+
+  populateCIRToLLVMConversionPatterns(patterns, converter);
+  mlir::populateFuncToLLVMConversionPatterns(converter, patterns);
+
+  mlir::ConversionTarget target(getContext());
+  using namespace mlir::cir;
+  // clang-format off
+  target.addLegalOp<mlir::ModuleOp>();
+  // clang-format on
+  target.addLegalDialect<mlir::LLVM::LLVMDialect>();
+  target.addIllegalDialect<mlir::cir::CIRDialect>();
+
+  // Allow operations that will be lowered directly to LLVM IR.
+  target.addLegalOp();
+
+  getOperation()->removeAttr("cir.sob");
+  getOperation()->removeAttr("cir.lang");
+
+  llvm::SmallVector<mlir::Operation *> ops;
+  ops.push_back(module);
+  collect_unreachable(module, ops);
+
+  if (failed(applyPartialConversion(ops, target, std::move(patterns))))
+    signalPassFailure();
+
+  // Emit the llvm.global_ctors array.
+  buildCtorDtorList(module, "cir.global_ctors", "llvm.global_ctors",
+                    [](mlir::Attribute attr) {
+                      assert(attr.isa<mlir::cir::GlobalCtorAttr>() &&
+                             "must be a GlobalCtorAttr");
+                      auto ctorAttr = attr.cast<mlir::cir::GlobalCtorAttr>();
+                      return std::make_pair(ctorAttr.getName(),
+                                            ctorAttr.getPriority());
+                    });
+  // Emit the llvm.global_dtors array.
+  buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors",
+                    [](mlir::Attribute attr) {
+                      assert(attr.isa<mlir::cir::GlobalDtorAttr>() &&
+                             "must be a GlobalDtorAttr");
+                      auto dtorAttr = attr.cast<mlir::cir::GlobalDtorAttr>();
+                      return std::make_pair(dtorAttr.getName(),
+                                            dtorAttr.getPriority());
+                    });
+}
+
+std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
+  return std::make_unique<ConvertCIRToLLVMPass>();
+}
+
+void populateCIRToLLVMPasses(mlir::OpPassManager &pm) {
+  populateCIRPreLoweringPasses(pm);
+  pm.addPass(createConvertCIRToLLVMPass());
+}
+
+extern void registerCIRDialectTranslation(mlir::MLIRContext &context);
+
+std::unique_ptr<llvm::Module>
+lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx,
+                             bool disableVerifier) {
+  mlir::MLIRContext *mlirCtx = theModule.getContext();
+  mlir::PassManager pm(mlirCtx);
+  populateCIRToLLVMPasses(pm);
+
+  // This is necessary to have line tables emitted and a basic debugger
+  // working. In the future we will add proper debug information emission
+  // directly from our frontend.
+  pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass());
+
+  // FIXME(cir): this shouldn't be necessary. It's meant to be a temporary
+  // workaround until we understand why some unrealized casts are being
+  // emitted and how to properly avoid them.
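+  // For instance (illustrative sketch, types assumed): a leftover
+  //   %1 = builtin.unrealized_conversion_cast %0 : i32 to i32
+  // produced at a type-converter boundary is folded away by the pass below
+  // when the source and result types agree.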
+  pm.addPass(mlir::createReconcileUnrealizedCastsPass());
+
+  pm.enableVerifier(!disableVerifier);
+  (void)mlir::applyPassManagerCLOptions(pm);
+
+  auto result = !mlir::failed(pm.run(theModule));
+  if (!result)
+    report_fatal_error(
+        "The pass manager failed to lower CIR to LLVMIR dialect!");
+
+  // Now that we ran all the lowering passes, verify the final output.
+  if (theModule.verify().failed())
+    report_fatal_error("Verification of the final LLVMIR dialect failed!");
+
+  mlir::registerBuiltinDialectTranslation(*mlirCtx);
+  mlir::registerLLVMDialectTranslation(*mlirCtx);
+  mlir::registerOpenMPDialectTranslation(*mlirCtx);
+  registerCIRDialectTranslation(*mlirCtx);
+
+  auto ModuleName = theModule.getName();
+  auto llvmModule = mlir::translateModuleToLLVMIR(
+      theModule, llvmCtx, ModuleName ? *ModuleName : "CIRToLLVMModule");
+
+  if (!llvmModule)
+    report_fatal_error("Lowering from LLVMIR dialect to LLVM IR failed!");
+
+  return llvmModule;
+}
+} // namespace direct
+} // namespace cir
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp
new file mode 100644
index 000000000000..dac44ca4d8d0
--- /dev/null
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp
@@ -0,0 +1,102 @@
+//====- LowerToLLVMIR.cpp - Lowering CIR attributes to LLVMIR -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements lowering of CIR attributes and operations directly to
+// LLVMIR.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/IR/DialectRegistry.h"
+#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/GlobalVariable.h"
+
+using namespace llvm;
+
+namespace cir {
+namespace direct {
+
+/// Implementation of the dialect interface that converts CIR attributes to
+/// LLVM IR metadata.
+class CIRDialectLLVMIRTranslationInterface
+    : public mlir::LLVMTranslationDialectInterface {
+public:
+  using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;
+
+  /// Any named attribute in the CIR dialect, i.e., with a name starting with
+  /// "cir.", is handled here.
+  virtual mlir::LogicalResult amendOperation(
+      mlir::Operation *op, llvm::ArrayRef<llvm::Instruction *> instructions,
+      mlir::NamedAttribute attribute,
+      mlir::LLVM::ModuleTranslation &moduleTranslation) const override {
+    // Translate CIR's extra function attributes to LLVM's function attributes.
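+    // For example (sketch; the exact attribute spelling is assumed): a
+    // function whose cir.extra attributes carry #cir.inline<no> and
+    // #cir.nothrow ends up as an LLVM IR function marked
+    // `noinline nounwind` via the mapping below.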
+    auto func = dyn_cast<mlir::LLVM::LLVMFuncOp>(op);
+    if (!func)
+      return mlir::success();
+    llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName());
+    if (auto extraAttr = attribute.getValue()
+                             .dyn_cast<mlir::cir::ExtraFuncAttributesAttr>()) {
+      for (auto attr : extraAttr.getElements()) {
+        if (auto inlineAttr =
+                attr.getValue().dyn_cast<mlir::cir::InlineAttr>()) {
+          if (inlineAttr.isNoInline())
+            llvmFunc->addFnAttr(llvm::Attribute::NoInline);
+          else if (inlineAttr.isAlwaysInline())
+            llvmFunc->addFnAttr(llvm::Attribute::AlwaysInline);
+          else if (inlineAttr.isInlineHint())
+            llvmFunc->addFnAttr(llvm::Attribute::InlineHint);
+          else
+            llvm_unreachable("Unknown inline kind");
+        } else if (attr.getValue().dyn_cast<mlir::cir::OptNoneAttr>()) {
+          llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone);
+        } else if (attr.getValue().dyn_cast<mlir::cir::NoThrowAttr>()) {
+          llvmFunc->addFnAttr(llvm::Attribute::NoUnwind);
+        }
+      }
+    }
+
+    // Drop the amended CIR attribute from the LLVM op.
+    op->removeAttr(attribute.getName());
+    return mlir::success();
+  }
+
+  /// Translates the given operation to LLVM IR using the provided IR builder
+  /// and saves the state in `moduleTranslation`.
+  mlir::LogicalResult convertOperation(
+      mlir::Operation *op, llvm::IRBuilderBase &builder,
+      mlir::LLVM::ModuleTranslation &moduleTranslation) const final {
+
+    if (auto cirOp = llvm::dyn_cast(op))
+      moduleTranslation.mapValue(cirOp.getResult()) =
+          llvm::Constant::getNullValue(
+              moduleTranslation.convertType(cirOp.getType()));
+
+    return mlir::success();
+  }
+};
+
+void registerCIRDialectTranslation(mlir::DialectRegistry &registry) {
+  registry.insert<mlir::cir::CIRDialect>();
+  registry.addExtension(
+      +[](mlir::MLIRContext *ctx, mlir::cir::CIRDialect *dialect) {
+        dialect->addInterfaces<CIRDialectLLVMIRTranslationInterface>();
+      });
+}
+
+void registerCIRDialectTranslation(mlir::MLIRContext &context) {
+  mlir::DialectRegistry registry;
+  registerCIRDialectTranslation(registry);
+  context.appendDialectRegistry(registry);
+}
+} // namespace direct
+} // namespace cir
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h
new file mode 100644
index 000000000000..c9ee75a06352
--- /dev/null
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h
@@ -0,0 +1,74 @@
+#ifndef LLVM_CLANG_LIB_LOWERINGHELPERS_H
+#define LLVM_CLANG_LIB_LOWERINGHELPERS_H
+
+#include "mlir/IR/Types.h"
+#include "mlir/IR/Value.h"
+
+#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+
+using namespace llvm;
+
+mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src,
+                          mlir::IntegerType dstTy, bool isSigned = false) {
+  auto srcTy = src.getType();
+  assert(isa<mlir::IntegerType>(srcTy));
+
+  auto srcWidth = srcTy.cast<mlir::IntegerType>().getWidth();
+  auto dstWidth = dstTy.cast<mlir::IntegerType>().getWidth();
+  auto loc = src.getLoc();
+
+  if (dstWidth > srcWidth && isSigned)
+    return bld.create<mlir::LLVM::SExtOp>(loc, dstTy, src);
+  else if (dstWidth > srcWidth)
+    return bld.create<mlir::LLVM::ZExtOp>(loc, dstTy, src);
+  else if (dstWidth < srcWidth)
+    return bld.create<mlir::LLVM::TruncOp>(loc, dstTy, src);
+  else
+    return bld.create<mlir::LLVM::BitcastOp>(loc, dstTy, src);
+}
+
+mlir::Value getConstAPInt(mlir::OpBuilder &bld, mlir::Location loc,
+                          mlir::Type typ, const llvm::APInt &val) {
+  return bld.create<mlir::LLVM::ConstantOp>(loc, typ, val);
+}
+
+mlir::Value getConst(mlir::OpBuilder &bld, mlir::Location loc, mlir::Type typ,
+                     unsigned val) {
+  return bld.create<mlir::LLVM::ConstantOp>(loc, typ, val);
+}
+
+mlir::Value createShL(mlir::OpBuilder &bld, mlir::Value
lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createLShR(mlir::OpBuilder &bld, mlir::Value lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createAShR(mlir::OpBuilder &bld, mlir::Value lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createAnd(mlir::OpBuilder &bld, mlir::Value lhs, + const llvm::APInt &rhs) { + auto rhsVal = getConstAPInt(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +#endif // LLVM_CLANG_LIB_LOWERINGHELPERS_H \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt new file mode 100644 index 000000000000..e3f125aed84a --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -0,0 +1,40 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRLoweringThroughMLIR + LowerCIRToMLIR.cpp + LowerMLIRToLLVM.cpp + + DEPENDS + MLIRCIROpsIncGen + MLIRCIREnumsGen + MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + MLIROpenMPDialect + MLIROpenMPToLLVMIRTranslation + ) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp new file mode 100644 index 000000000000..9f30541dc3f4 --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -0,0 +1,852 @@ +//====- LowerCIRToMLIR.cpp - Lowering from CIR to MLIR --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR operations to MLIR. 
+// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/Math/IR/Math.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/Sequence.h" +#include "llvm/ADT/TypeSwitch.h" + +using namespace cir; +using namespace llvm; + +namespace cir { + +class CIRReturnLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, + adaptor.getOperands()); + return mlir::LogicalResult::success(); + } +}; + +struct ConvertCIRToMLIRPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-mlir"; } +}; + +class CIRCallOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SmallVector types; + if (mlir::failed( + getTypeConverter()->convertTypes(op.getResultTypes(), types))) + return mlir::failure(); + rewriter.replaceOpWithNewOp( + op, mlir::SymbolRefAttr::get(op), types, adaptor.getOperands()); + return mlir::LogicalResult::success(); + } +}; + +class CIRAllocaOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = adaptor.getAllocaType(); + auto mlirType = getTypeConverter()->convertType(type); + + // FIXME: Some types can not be converted yet (e.g. 
struct) + if (!mlirType) + return mlir::LogicalResult::failure(); + + auto memreftype = mlir::MemRefType::get({}, mlirType); + rewriter.replaceOpWithNewOp(op, memreftype, + op.getAlignmentAttr()); + return mlir::LogicalResult::success(); + } +}; + +class CIRLoadOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); + return mlir::LogicalResult::success(); + } +}; + +class CIRStoreOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + adaptor.getAddr()); + return mlir::LogicalResult::success(); + } +}; + +class CIRCosOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CosOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRConstantOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto ty = getTypeConverter()->convertType(op.getType()); + mlir::TypedAttr value; + if (mlir::isa(op.getType())) { + auto boolValue = mlir::cast(op.getValue()); + value = rewriter.getIntegerAttr(ty, boolValue.getValue()); + } else if (op.getType().isa()) { + value = rewriter.getFloatAttr( + ty, op.getValue().cast().getValue()); + } else { + auto cirIntAttr = mlir::dyn_cast(op.getValue()); + assert(cirIntAttr && "NYI non cir.int attr"); + value = rewriter.getIntegerAttr( + ty, cast(op.getValue()).getValue()); + } + rewriter.replaceOpWithNewOp(op, ty, value); + return mlir::LogicalResult::success(); + } +}; + +class CIRFuncOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + auto fnType = op.getFunctionType(); + mlir::TypeConverter::SignatureConversion signatureConversion( + fnType.getNumInputs()); + + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = typeConverter->convertType(argType.value()); + if (!convertedType) + return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType = + getTypeConverter()->convertType(fnType.getReturnType()); + auto fn = rewriter.create( + op.getLoc(), op.getName(), + rewriter.getFunctionType(signatureConversion.getConvertedTypes(), + resultType ? 
mlir::TypeRange(resultType) + : mlir::TypeRange())); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); + if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + &signatureConversion))) + return mlir::failure(); + + rewriter.eraseOp(op); + + return mlir::LogicalResult::success(); + } +}; + +class CIRUnaryOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto input = adaptor.getInput(); + auto type = getTypeConverter()->convertType(op.getType()); + + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, type, input, One); + break; + } + case mlir::cir::UnaryOpKind::Dec: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, type, input, One); + break; + } + case mlir::cir::UnaryOpKind::Plus: { + rewriter.replaceOp(op, op.getInput()); + break; + } + case mlir::cir::UnaryOpKind::Minus: { + auto Zero = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); + rewriter.replaceOpWithNewOp(op, type, Zero, input); + break; + } + case mlir::cir::UnaryOpKind::Not: { + auto MinusOne = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); + rewriter.replaceOpWithNewOp(op, type, MinusOne, + input); + break; + } + } + + return mlir::LogicalResult::success(); + } +}; + +class CIRBinOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert((adaptor.getLhs().getType() == adaptor.getRhs().getType()) && + "inconsistent operands' types not supported yet"); + mlir::Type mlirType = getTypeConverter()->convertType(op.getType()); + assert((mlirType.isa() || + mlirType.isa()) && + "operand type not supported yet"); + + switch (op.getKind()) { + case mlir::cir::BinOpKind::Add: + if (mlirType.isa()) + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Sub: + if (mlirType.isa()) + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Mul: + if (mlirType.isa()) + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Div: + if (mlirType.isa()) { + if (mlirType.isSignlessInteger()) + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + else + llvm_unreachable("integer mlirType not supported in CIR yet"); + } else + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Rem: + if (mlirType.isa()) { + if (mlirType.isSignlessInteger()) + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + else + llvm_unreachable("integer mlirType not supported in CIR yet"); + } else + 
rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::And: + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + case mlir::cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); + break; + } + + return mlir::LogicalResult::success(); + } +}; + +class CIRCmpOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = adaptor.getLhs().getType(); + auto integerType = + mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + + mlir::Value mlirResult; + switch (op.getKind()) { + case mlir::cir::CmpOpKind::gt: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ugt; + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UGT), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ge: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::uge; + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UGE), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::lt: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ult; + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::ULT), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::le: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ule; + mlirResult = 
rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::ULE), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::eq: { + if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), + mlir::arith::CmpIPredicate::eq), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UEQ), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ne: { + if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), + mlir::arith::CmpIPredicate::ne), + adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + mlirResult = rewriter.create( + op.getLoc(), integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UNE), + adaptor.getLhs(), adaptor.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + } + + // MLIR comparison ops return i1, but cir::CmpOp returns the same type as + // the LHS value. Since this return value can be used later, we need to + // restore the type with the extension below. + auto mlirResultTy = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, mlirResultTy, + mlirResult); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBrOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrOp op, + mlir::PatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, op.getDest()); + return mlir::LogicalResult::success(); + } +}; + +class CIRScopeOpLowering + : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Empty scope: just remove it. 
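+    // (Sketch of the non-empty case handled below, assuming the
+    // memref.alloca_scope lowering used by this file: the cir.scope body is
+    // inlined into a memref.alloca_scope region and each cir.yield is
+    // rewritten into the scope's return terminator, so scope-local
+    // allocations are released when the region exits.)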
+ if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + + for (auto &block : scopeOp.getRegion()) { + rewriter.setInsertionPointToEnd(&block); + auto *terminator = block.getTerminator(); + rewriter.replaceOpWithNewOp( + terminator, terminator->getOperands()); + } + + SmallVector mlirResultTypes; + if (mlir::failed(getTypeConverter()->convertTypes(scopeOp->getResultTypes(), + mlirResultTypes))) + return mlir::LogicalResult::failure(); + + rewriter.setInsertionPoint(scopeOp); + auto newScopeOp = rewriter.create( + scopeOp.getLoc(), mlirResultTypes); + rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), + newScopeOp.getBodyRegion(), + newScopeOp.getBodyRegion().end()); + rewriter.replaceOp(scopeOp, newScopeOp); + + return mlir::LogicalResult::success(); + } +}; + +struct CIRBrCondOpLowering + : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), condition); + rewriter.replaceOpWithNewOp( + brOp, i1Condition.getResult(), brOp.getDestTrue(), + adaptor.getDestOperandsTrue(), brOp.getDestFalse(), + adaptor.getDestOperandsFalse()); + + return mlir::success(); + } +}; + +class CIRTernaryOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.setInsertionPoint(op); + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + op.getLoc(), rewriter.getI1Type(), condition); + SmallVector resultTypes; + if (mlir::failed(getTypeConverter()->convertTypes(op->getResultTypes(), + resultTypes))) + return mlir::failure(); + + auto ifOp = rewriter.create(op.getLoc(), resultTypes, + i1Condition.getResult(), true); + auto *thenBlock = &ifOp.getThenRegion().front(); + auto *elseBlock = &ifOp.getElseRegion().front(); + rewriter.inlineBlockBefore(&op.getTrueRegion().front(), thenBlock, + thenBlock->end()); + rewriter.inlineBlockBefore(&op.getFalseRegion().front(), elseBlock, + elseBlock->end()); + + rewriter.replaceOp(op, ifOp); + return mlir::success(); + } +}; + +class CIRYieldOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::YieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto *parentOp = op->getParentOp(); + return llvm::TypeSwitch(parentOp) + .Case([&](auto) { + rewriter.replaceOpWithNewOp( + op, adaptor.getOperands()); + return mlir::success(); + }) + .Default([](auto) { return mlir::failure(); }); + } +}; + +class CIRGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto moduleOp = op->getParentOfType(); + if (!moduleOp) + return mlir::failure(); + + mlir::OpBuilder b(moduleOp.getContext()); + + const auto CIRSymType = op.getSymType(); + auto convertedType = getTypeConverter()->convertType(CIRSymType); + if (!convertedType) + return mlir::failure(); + auto memrefType = 
dyn_cast(convertedType); + if (!memrefType) + memrefType = mlir::MemRefType::get({}, convertedType); + // Add an optional alignment to the global memref. + mlir::IntegerAttr memrefAlignment = + op.getAlignment() + ? mlir::IntegerAttr::get(b.getI64Type(), op.getAlignment().value()) + : mlir::IntegerAttr(); + // Add an optional initial value to the global memref. + mlir::Attribute initialValue = mlir::Attribute(); + std::optional init = op.getInitialValue(); + if (init.has_value()) { + if (auto constArr = init.value().dyn_cast()) { + if (memrefType.getShape().size()) { + auto rtt = mlir::RankedTensorType::get(memrefType.getShape(), + memrefType.getElementType()); + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + } else { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + } + } else if (auto intAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseIntElementsAttr::get(rtt, intAttr.getValue()); + } else if (auto fltAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseFPElementsAttr::get(rtt, fltAttr.getValue()); + } else if (auto boolAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = + mlir::DenseIntElementsAttr::get(rtt, (char)boolAttr.getValue()); + } else + llvm_unreachable( + "GlobalOp lowering with initial value is not fully supported yet"); + } + + // Add symbol visibility + std::string sym_visibility = op.isPrivate() ? "private" : "public"; + + rewriter.replaceOpWithNewOp( + op, b.getStringAttr(op.getSymName()), + /*sym_visibility=*/b.getStringAttr(sym_visibility), + /*type=*/memrefType, initialValue, + /*constant=*/op.getConstant(), + /*alignment=*/memrefAlignment); + + return mlir::success(); + } +}; + +class CIRGetGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. + // CIRGen should mitigate this and not emit the get_global. + if (op->getUses().empty()) { + rewriter.eraseOp(op); + return mlir::success(); + } + + auto type = getTypeConverter()->convertType(op.getType()); + auto symbol = op.getName(); + rewriter.replaceOpWithNewOp(op, type, symbol); + return mlir::success(); + } +}; + +void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter) { + patterns.add(patterns.getContext()); + + patterns.add(converter, patterns.getContext()); +} + +static mlir::TypeConverter prepareTypeConverter() { + mlir::TypeConverter converter; + converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + auto ty = converter.convertType(type.getPointee()); + // FIXME: The pointee type might not be converted (e.g. 
struct) + if (!ty) + return nullptr; + if (isa(type.getPointee())) + return ty; + return mlir::MemRefType::get({}, ty); + }); + converter.addConversion( + [&](mlir::IntegerType type) -> mlir::Type { return type; }); + converter.addConversion( + [&](mlir::FloatType type) -> mlir::Type { return type; }); + converter.addConversion( + [&](mlir::cir::VoidType type) -> mlir::Type { return {}; }); + converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + // arith dialect ops doesn't take signed integer -- drop cir sign here + return mlir::IntegerType::get( + type.getContext(), type.getWidth(), + mlir::IntegerType::SignednessSemantics::Signless); + }); + converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + return mlir::IntegerType::get(type.getContext(), 8); + }); + converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + return mlir::FloatType::getF32(type.getContext()); + }); + converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + return mlir::FloatType::getF64(type.getContext()); + }); + converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + return mlir::FloatType::getF80(type.getContext()); + }); + converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + return converter.convertType(type.getUnderlying()); + }); + converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + SmallVector shape; + mlir::Type curType = type; + while (auto arrayType = dyn_cast(curType)) { + shape.push_back(arrayType.getSize()); + curType = arrayType.getEltType(); + } + auto elementType = converter.convertType(curType); + // FIXME: The element type might not be converted (e.g. struct) + if (!elementType) + return nullptr; + return mlir::MemRefType::get(shape, elementType); + }); + + return converter; +} + +void ConvertCIRToMLIRPass::runOnOperation() { + auto module = getOperation(); + + auto converter = prepareTypeConverter(); + + mlir::RewritePatternSet patterns(&getContext()); + + populateCIRToMLIRConversionPatterns(patterns, converter); + + mlir::ConversionTarget target(getContext()); + target.addLegalOp(); + target.addLegalDialect(); + target.addIllegalDialect(); + + if (failed(applyPartialConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + +std::unique_ptr +lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + LLVMContext &llvmCtx) { + mlir::PassManager pm(mlirCtx.get()); + + pm.addPass(createConvertCIRToMLIRPass()); + pm.addPass(createConvertMLIRToLLVMPass()); + + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + report_fatal_error( + "The pass manager failed to lower CIR to LLVMIR dialect!"); + + // Now that we ran all the lowering passes, verify the final output. 
+  if (theModule.verify().failed())
+    report_fatal_error("Verification of the final LLVMIR dialect failed!");
+
+  mlir::registerBuiltinDialectTranslation(*mlirCtx);
+  mlir::registerLLVMDialectTranslation(*mlirCtx);
+  mlir::registerOpenMPDialectTranslation(*mlirCtx);
+
+  auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx);
+
+  if (!llvmModule)
+    report_fatal_error("Lowering from LLVMIR dialect to LLVM IR failed!");
+
+  return llvmModule;
+}
+
+std::unique_ptr<mlir::Pass> createConvertCIRToMLIRPass() {
+  return std::make_unique<ConvertCIRToMLIRPass>();
+}
+
+mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule,
+                                  mlir::MLIRContext *mlirCtx) {
+  mlir::PassManager pm(mlirCtx);
+
+  pm.addPass(createConvertCIRToMLIRPass());
+
+  auto result = !mlir::failed(pm.run(theModule));
+  if (!result)
+    report_fatal_error(
+        "The pass manager failed to lower CIR to MLIR standard dialects!");
+
+  // Now that we ran all the lowering passes, verify the final output.
+  if (theModule.verify().failed())
+    report_fatal_error("Verification of the final MLIR output failed!");
+
+  return theModule;
+}
+
+} // namespace cir
diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp
new file mode 100644
index 000000000000..930ce1c12f68
--- /dev/null
+++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp
@@ -0,0 +1,79 @@
+//====- LowerMLIRToLLVM.cpp - Lowering from MLIR to LLVM ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements lowering of CIR-lowered MLIR operations to LLVMIR.
+// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/Sequence.h" + +using namespace cir; +using namespace llvm; + +namespace cir { +struct ConvertMLIRToLLVMPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } +}; + +void ConvertMLIRToLLVMPass::runOnOperation() { + mlir::LLVMConversionTarget target(getContext()); + target.addLegalOp(); + + mlir::LLVMTypeConverter typeConverter(&getContext()); + + mlir::RewritePatternSet patterns(&getContext()); + populateAffineToStdConversionPatterns(patterns); + mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); + populateSCFToControlFlowConversionPatterns(patterns); + mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, + patterns); + populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); + populateFuncToLLVMConversionPatterns(typeConverter, patterns); + + auto module = getOperation(); + if (failed(applyFullConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + +std::unique_ptr createConvertMLIRToLLVMPass() { + return std::make_unique(); +} + +} // namespace cir diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 76b7b9fdfb4f..368a30c46102 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -361,6 +361,8 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL, (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) || (PhaseArg = DAL.getLastArg(options::OPT__migrate)) || (PhaseArg = DAL.getLastArg(options::OPT__analyze)) || + (PhaseArg = DAL.getLastArg(options::OPT_emit_cir)) || + (PhaseArg = DAL.getLastArg(options::OPT_emit_cir_flat)) || (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) { FinalPhase = phases::Compile; @@ -4799,6 +4801,10 @@ Action *Driver::ConstructPhaseAction( return C.MakeAction(Input, types::TY_Remap); if (Args.hasArg(options::OPT_emit_ast)) return C.MakeAction(Input, types::TY_AST); + if (Args.hasArg(options::OPT_emit_cir)) + return C.MakeAction(Input, types::TY_CIR); + if (Args.hasArg(options::OPT_emit_cir_flat)) + 
return C.MakeAction(Input, types::TY_CIR_FLAT); if (Args.hasArg(options::OPT_module_file_info)) return C.MakeAction(Input, types::TY_ModuleFile); if (Args.hasArg(options::OPT_verify_pch)) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 651a2b5aac36..258cf1f707aa 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -4960,6 +4960,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } + if (Args.hasArg(options::OPT_fclangir) || + Args.hasArg(options::OPT_emit_cir) || + Args.hasArg(options::OPT_emit_cir_flat)) + CmdArgs.push_back("-fclangir"); + + if (Args.hasArg(options::OPT_fclangir_direct_lowering)) + CmdArgs.push_back("-fclangir-direct-lowering"); + + if (Args.hasArg(options::OPT_clangir_disable_passes)) + CmdArgs.push_back("-clangir-disable-passes"); + + // ClangIR lib opt requires idiom recognizer. + if (Args.hasArg(options::OPT_fclangir_lib_opt, + options::OPT_fclangir_lib_opt_EQ)) { + if (!Args.hasArg(options::OPT_fclangir_idiom_recognizer, + options::OPT_fclangir_idiom_recognizer_EQ)) + CmdArgs.push_back("-fclangir-idiom-recognizer"); + } + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = @@ -5098,6 +5117,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } else if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR) { CmdArgs.push_back("-emit-llvm"); + } else if (JA.getType() == types::TY_CIR) { + CmdArgs.push_back("-emit-cir"); + } else if (JA.getType() == types::TY_CIR_FLAT) { + CmdArgs.push_back("-emit-cir-flat"); } else if (JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC) { // Emit textual llvm IR for AMDGPU offloading for -emit-llvm -S @@ -7538,6 +7561,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } + for (const Arg *A : Args.filtered(options::OPT_mmlir)) { + A->claim(); + A->render(Args, CmdArgs); + } + // With -save-temps, we want to save the unoptimized bitcode output from the // CompileJobAction, use -disable-llvm-passes to get pristine IR generated // by the frontend. 
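With the driver changes above in place, an invocation along the lines of `clang -fclangir -emit-cir foo.c` (an illustrative example; `-emit-cir` also implies `-fclangir` per the ConstructJob hunk) stops at the compile phase and emits CIR rather than an object file, and any `-mmlir` options are claimed and forwarded to the CC1 job.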
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 8236051e30c4..fc14faa8d51a 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1530,6 +1530,13 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts, if (Opts.NewStructPathTBAA) GenerateArg(Consumer, OPT_new_struct_path_tbaa); + if (Opts.ClangIRBuildDeferredThreshold) + GenerateArg(Consumer, OPT_fclangir_disable_deferred_EQ, + Twine(Opts.ClangIRBuildDeferredThreshold)); + + if (Opts.ClangIRSkipFunctionsFromSystemHeaders) + GenerateArg(Consumer, OPT_fclangir_skip_system_headers); + if (Opts.OptimizeSize == 1) GenerateArg(Consumer, OPT_O, "s"); else if (Opts.OptimizeSize == 2) @@ -2549,6 +2556,10 @@ static const auto &getFrontendActionTable() { {frontend::DumpTokens, OPT_dump_tokens}, {frontend::EmitAssembly, OPT_S}, {frontend::EmitBC, OPT_emit_llvm_bc}, + {frontend::EmitCIR, OPT_emit_cir}, + {frontend::EmitCIRFlat, OPT_emit_cir_flat}, + {frontend::EmitCIROnly, OPT_emit_cir_only}, + {frontend::EmitMLIR, OPT_emit_mlir}, {frontend::EmitHTML, OPT_emit_html}, {frontend::EmitLLVM, OPT_emit_llvm}, {frontend::EmitLLVMOnly, OPT_emit_llvm_only}, @@ -2692,6 +2703,17 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts, for (const auto &ModuleFile : Opts.ModuleFiles) GenerateArg(Consumer, OPT_fmodule_file, ModuleFile); + if (Opts.ClangIRLifetimeCheck) + GenerateArg(Consumer, OPT_fclangir_lifetime_check_EQ, + Opts.ClangIRLifetimeCheckOpts); + + if (Opts.ClangIRIdiomRecognizer) + GenerateArg(Consumer, OPT_fclangir_idiom_recognizer_EQ, + Opts.ClangIRIdiomRecognizerOpts); + + if (Opts.ClangIRLibOpt) + GenerateArg(Consumer, OPT_fclangir_lib_opt_EQ, Opts.ClangIRLibOptOpts); + if (Opts.AuxTargetCPU) GenerateArg(Consumer, OPT_aux_target_cpu, *Opts.AuxTargetCPU); @@ -2891,6 +2913,42 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule) Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; + if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir) || + Args.hasArg(OPT_emit_cir_flat)) + Opts.UseClangIRPipeline = true; + + if (Args.hasArg(OPT_fclangir_direct_lowering)) + Opts.ClangIRDirectLowering = true; + + if (Args.hasArg(OPT_clangir_disable_passes)) + Opts.ClangIRDisablePasses = true; + + if (Args.hasArg(OPT_clangir_disable_verifier)) + Opts.ClangIRDisableCIRVerifier = true; + + if (Args.hasArg(OPT_clangir_disable_emit_cxx_default)) + Opts.ClangIRDisableEmitCXXDefault = true; + + if (Args.hasArg(OPT_clangir_verify_diagnostics)) + Opts.ClangIRVerifyDiags = true; + + if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, + OPT_fclangir_lifetime_check_EQ)) { + Opts.ClangIRLifetimeCheck = true; + Opts.ClangIRLifetimeCheckOpts = A->getValue(); + } + + if (const Arg *A = Args.getLastArg(OPT_fclangir_idiom_recognizer, + OPT_fclangir_idiom_recognizer_EQ)) { + Opts.ClangIRIdiomRecognizer = true; + Opts.ClangIRIdiomRecognizerOpts = A->getValue(); + } + + if (const Arg *A = + Args.getLastArg(OPT_fclangir_lib_opt, OPT_fclangir_lib_opt_EQ)) { + Opts.ClangIRLibOpt = true; + Opts.ClangIRLibOptOpts = A->getValue(); + } if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); @@ -4337,6 +4395,10 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) { case frontend::ASTView: case frontend::EmitAssembly: case 
frontend::EmitBC: + case frontend::EmitCIR: + case frontend::EmitCIRFlat: + case frontend::EmitCIROnly: + case frontend::EmitMLIR: case frontend::EmitHTML: case frontend::EmitLLVM: case frontend::EmitLLVMOnly: diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp index 9ae7664b4b49..0e9a7d2867bb 100644 --- a/clang/lib/Frontend/FrontendAction.cpp +++ b/clang/lib/Frontend/FrontendAction.cpp @@ -779,6 +779,26 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, return true; } + // TODO: blindly duplicating for now + if (Input.getKind().getLanguage() == Language::CIR) { + assert(hasCIRSupport() && "This action does not have CIR file support!"); + + // Inform the diagnostic client we are processing a source file. + CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr); + HasBegunSourceFile = true; + + // Initialize the action. + if (!BeginSourceFileAction(CI)) + return false; + + // Initialize the main file entry. + if (!CI.InitializeSourceManager(CurrentInput)) + return false; + + FailureCleanup.release(); + return true; + } + // If the implicit PCH include is actually a directory, rather than // a single file, search for a suitable PCH file in that directory. if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) { diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index 51c379ade270..6dae1455010c 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -12,6 +12,24 @@ set(link_libs clangRewriteFrontend ) +set(deps) + +if(CLANG_ENABLE_CIR) + list(APPEND link_libs + clangCIRFrontendAction + MLIRCIRTransforms + MLIRIR + MLIRPass + ) + list(APPEND deps + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + ) + + include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) + include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) +endif() + if(CLANG_ENABLE_ARCMT) list(APPEND link_libs clangARCMigrate @@ -29,6 +47,7 @@ add_clang_library(clangFrontendTool DEPENDS ClangDriverOptions + ${deps} LINK_LIBS ${link_libs} diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index f85f0365616f..1e1f6b34012f 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -31,6 +31,15 @@ #include "llvm/Support/BuryPointer.h" #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/ErrorHandling.h" + +#if CLANG_ENABLE_CIR +#include "mlir/IR/AsmState.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/PassManager.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIRFrontendAction/CIRGenAction.h" +#endif + using namespace clang; using namespace llvm::opt; @@ -42,6 +51,18 @@ CreateFrontendBaseAction(CompilerInstance &CI) { StringRef Action("unknown"); (void)Action; + auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; + auto Act = CI.getFrontendOpts().ProgramAction; + auto EmitsCIR = Act == EmitCIR || Act == EmitCIRFlat || Act == EmitCIROnly; + + if (!UseCIR && EmitsCIR) + llvm::report_fatal_error( + "-emit-cir and -emit-cir-only only valid when using -fclangir"); + + if (CI.getFrontendOpts().ClangIRDirectLowering && Act == EmitMLIR) + llvm::report_fatal_error( + "ClangIR direct lowering is incompatible with -emit-mlir"); + switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); case ASTDump: return std::make_unique(); @@ -51,13 +72,42 @@ 
CreateFrontendBaseAction(CompilerInstance &CI) {
     return std::make_unique<DumpCompilerOptionsAction>();
   case DumpRawTokens: return std::make_unique<DumpRawTokensAction>();
   case DumpTokens: return std::make_unique<DumpTokensAction>();
-  case EmitAssembly: return std::make_unique<EmitAssemblyAction>();
+  case EmitAssembly:
+#if CLANG_ENABLE_CIR
+    if (UseCIR)
+      return std::make_unique<::cir::EmitAssemblyAction>();
+#endif
+    return std::make_unique<EmitAssemblyAction>();
   case EmitBC: return std::make_unique<EmitBCAction>();
+#if CLANG_ENABLE_CIR
+  case EmitCIR: return std::make_unique<::cir::EmitCIRAction>();
+  case EmitCIRFlat:
+    return std::make_unique<::cir::EmitCIRFlatAction>();
+  case EmitCIROnly: return std::make_unique<::cir::EmitCIROnlyAction>();
+  case EmitMLIR: return std::make_unique<::cir::EmitMLIRAction>();
+#else
+  case EmitCIR:
+  case EmitCIRFlat:
+  case EmitCIROnly:
+    llvm_unreachable("CIR support not built into clang");
+#endif
   case EmitHTML: return std::make_unique<HTMLPrintAction>();
-  case EmitLLVM: return std::make_unique<EmitLLVMAction>();
+  case EmitLLVM: {
+#if CLANG_ENABLE_CIR
+    if (UseCIR)
+      return std::make_unique<::cir::EmitLLVMAction>();
+#endif
+    return std::make_unique<EmitLLVMAction>();
+  }
   case EmitLLVMOnly: return std::make_unique<EmitLLVMOnlyAction>();
   case EmitCodeGenOnly: return std::make_unique<EmitCodeGenOnlyAction>();
-  case EmitObj: return std::make_unique<EmitObjAction>();
+  case EmitObj: {
+#if CLANG_ENABLE_CIR
+    if (UseCIR)
+      return std::make_unique<::cir::EmitObjAction>();
+#endif
+    return std::make_unique<EmitObjAction>();
+  }
   case ExtractAPI:
     return std::make_unique<ExtractAPIAction>();
   case FixIt: return std::make_unique<FixItAction>();
@@ -267,7 +317,21 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
     return true;
   }
 #endif
-
+#if CLANG_ENABLE_CIR
+  if (!Clang->getFrontendOpts().MLIRArgs.empty()) {
+    mlir::registerCIRPasses();
+    mlir::registerMLIRContextCLOptions();
+    mlir::registerPassManagerCLOptions();
+    mlir::registerAsmPrinterCLOptions();
+    unsigned NumArgs = Clang->getFrontendOpts().MLIRArgs.size();
+    auto Args = std::make_unique<const char *[]>(NumArgs + 2);
+    Args[0] = "clang (MLIR option parsing)";
+    for (unsigned i = 0; i != NumArgs; ++i)
+      Args[i + 1] = Clang->getFrontendOpts().MLIRArgs[i].c_str();
+    Args[NumArgs + 1] = nullptr;
+    llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args.get());
+  }
+#endif
   // If there were errors in processing arguments, don't do anything else.
if (Clang->getDiagnostics().hasErrorOccurred()) return false; diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index a96439df6642..885a2d788a77 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -13,6 +13,14 @@ clang_tablegen(OpenCLBuiltins.inc -gen-clang-opencl-builtins TARGET ClangOpenCLBuiltinsImpl ) +if(CLANG_ENABLE_CIR) + set(CIR_DEPS + MLIRCIROpsIncGen + MLIRCIR + ) +endif() + + add_clang_library(clangSema AnalysisBasedWarnings.cpp CodeCompleteConsumer.cpp @@ -76,6 +84,7 @@ add_clang_library(clangSema ClangOpenCLBuiltinsImpl omp_gen ClangDriverOptions + ${CIR_DEPS} LINK_LIBS clangAPINotes diff --git a/clang/test/CIR/CodeGen/Inputs/std-compare.h b/clang/test/CIR/CodeGen/Inputs/std-compare.h new file mode 100644 index 000000000000..f7f0c9b06db6 --- /dev/null +++ b/clang/test/CIR/CodeGen/Inputs/std-compare.h @@ -0,0 +1,324 @@ +#ifndef STD_COMPARE_H +#define STD_COMPARE_H + +namespace std { +inline namespace __1 { + +#ifdef NON_CANONICAL_CMP_RESULTS + +// exposition only +enum class _EqResult : unsigned char { + __equal = 2, + __equiv = __equal, +}; + +enum class _OrdResult : signed char { + __less = 1, + __greater = 3 +}; + +#else + +// exposition only +enum class _EqResult : unsigned char { + __equal = 0, + __equiv = __equal, +}; + +enum class _OrdResult : signed char { + __less = -1, + __greater = 1 +}; + +#endif + +enum class _NCmpResult : signed char { + __unordered = -127 +}; + +struct _CmpUnspecifiedType; +using _CmpUnspecifiedParam = void (_CmpUnspecifiedType::*)(); + +class partial_ordering { + using _ValueT = signed char; + explicit constexpr partial_ordering(_EqResult __v) noexcept + : __value_(_ValueT(__v)) {} + explicit constexpr partial_ordering(_OrdResult __v) noexcept + : __value_(_ValueT(__v)) {} + explicit constexpr partial_ordering(_NCmpResult __v) noexcept + : __value_(_ValueT(__v)) {} + + constexpr bool __is_ordered() const noexcept { + return __value_ != _ValueT(_NCmpResult::__unordered); + } + +public: + // valid values + static const partial_ordering less; + static const partial_ordering equivalent; + static const partial_ordering greater; + static const partial_ordering unordered; + + // comparisons + friend constexpr bool operator==(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator!=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator==(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator!=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator<(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator<=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator>(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator>=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + + friend constexpr partial_ordering operator<=>(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr partial_ordering operator<=>(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + + // test helper + constexpr bool test_eq(partial_ordering 
const &other) const noexcept { + return __value_ == other.__value_; + } + +private: + _ValueT __value_; +}; + +inline constexpr partial_ordering partial_ordering::less(_OrdResult::__less); +inline constexpr partial_ordering partial_ordering::equivalent(_EqResult::__equiv); +inline constexpr partial_ordering partial_ordering::greater(_OrdResult::__greater); +inline constexpr partial_ordering partial_ordering::unordered(_NCmpResult ::__unordered); +constexpr bool operator==(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ == 0; +} +constexpr bool operator<(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ < 0; +} +constexpr bool operator<=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ <= 0; +} +constexpr bool operator>(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ > 0; +} +constexpr bool operator>=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ >= 0; +} +constexpr bool operator==(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 == __v.__value_; +} +constexpr bool operator<(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 >= __v.__value_; +} +constexpr bool operator!=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return !__v.__is_ordered() || __v.__value_ != 0; +} +constexpr bool operator!=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return !__v.__is_ordered() || __v.__value_ != 0; +} + +constexpr partial_ordering operator<=>(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr partial_ordering operator<=>(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v < 0 ? partial_ordering::greater : (__v > 0 ? partial_ordering::less : __v); +} + +class weak_ordering { + using _ValueT = signed char; + explicit constexpr weak_ordering(_EqResult __v) noexcept : __value_(_ValueT(__v)) {} + explicit constexpr weak_ordering(_OrdResult __v) noexcept : __value_(_ValueT(__v)) {} + +public: + static const weak_ordering less; + static const weak_ordering equivalent; + static const weak_ordering greater; + + // conversions + constexpr operator partial_ordering() const noexcept { + return __value_ == 0 ? partial_ordering::equivalent + : (__value_ < 0 ? 
partial_ordering::less : partial_ordering::greater); + } + + // comparisons + friend constexpr bool operator==(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator!=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator==(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator!=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator<(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator<=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator>(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator>=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + + friend constexpr weak_ordering operator<=>(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr weak_ordering operator<=>(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + + // test helper + constexpr bool test_eq(weak_ordering const &other) const noexcept { + return __value_ == other.__value_; + } + +private: + _ValueT __value_; +}; + +inline constexpr weak_ordering weak_ordering::less(_OrdResult::__less); +inline constexpr weak_ordering weak_ordering::equivalent(_EqResult::__equiv); +inline constexpr weak_ordering weak_ordering::greater(_OrdResult::__greater); +constexpr bool operator==(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ == 0; +} +constexpr bool operator!=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ != 0; +} +constexpr bool operator<(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ < 0; +} +constexpr bool operator<=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ <= 0; +} +constexpr bool operator>(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ > 0; +} +constexpr bool operator>=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ >= 0; +} +constexpr bool operator==(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 == __v.__value_; +} +constexpr bool operator!=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 != __v.__value_; +} +constexpr bool operator<(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 >= __v.__value_; +} + +constexpr weak_ordering operator<=>(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr weak_ordering operator<=>(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return __v < 0 ? weak_ordering::greater : (__v > 0 ? 
weak_ordering::less : __v);
+}
+
+class strong_ordering {
+  using _ValueT = signed char;
+  explicit constexpr strong_ordering(_EqResult __v) noexcept : __value_(static_cast<_ValueT>(__v)) {}
+  explicit constexpr strong_ordering(_OrdResult __v) noexcept : __value_(static_cast<_ValueT>(__v)) {}
+
+public:
+  static const strong_ordering less;
+  static const strong_ordering equal;
+  static const strong_ordering equivalent;
+  static const strong_ordering greater;
+
+  // conversions
+  constexpr operator partial_ordering() const noexcept {
+    return __value_ == 0 ? partial_ordering::equivalent
+                         : (__value_ < 0 ? partial_ordering::less : partial_ordering::greater);
+  }
+  constexpr operator weak_ordering() const noexcept {
+    return __value_ == 0 ? weak_ordering::equivalent
+                         : (__value_ < 0 ? weak_ordering::less : weak_ordering::greater);
+  }
+
+  // comparisons
+  friend constexpr bool operator==(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator!=(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator<(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator<=(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator>(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator>=(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr bool operator==(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+  friend constexpr bool operator!=(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+  friend constexpr bool operator<(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+  friend constexpr bool operator<=(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+  friend constexpr bool operator>(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+  friend constexpr bool operator>=(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+
+  friend constexpr strong_ordering operator<=>(strong_ordering __v, _CmpUnspecifiedParam) noexcept;
+  friend constexpr strong_ordering operator<=>(_CmpUnspecifiedParam, strong_ordering __v) noexcept;
+
+  // test helper
+  constexpr bool test_eq(strong_ordering const &other) const noexcept {
+    return __value_ == other.__value_;
+  }
+
+private:
+  _ValueT __value_;
+};
+
+inline constexpr strong_ordering strong_ordering::less(_OrdResult::__less);
+inline constexpr strong_ordering strong_ordering::equal(_EqResult::__equal);
+inline constexpr strong_ordering strong_ordering::equivalent(_EqResult::__equiv);
+inline constexpr strong_ordering strong_ordering::greater(_OrdResult::__greater);
+
+constexpr bool operator==(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ == 0;
+}
+constexpr bool operator!=(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ != 0;
+}
+constexpr bool operator<(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ < 0;
+}
+constexpr bool operator<=(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ <= 0;
+}
+constexpr bool operator>(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ > 0;
+}
+constexpr bool operator>=(strong_ordering __v, _CmpUnspecifiedParam) noexcept {
+  return __v.__value_ >= 0;
+}
+constexpr bool operator==(_CmpUnspecifiedParam, strong_ordering __v) noexcept {
+  return 0 == __v.__value_;
+}
+constexpr bool operator!=(_CmpUnspecifiedParam, strong_ordering __v) noexcept {
+  return 0 != __v.__value_;
+}
+constexpr bool operator<(_CmpUnspecifiedParam, strong_ordering __v)
noexcept { + return 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 >= __v.__value_; +} + +constexpr strong_ordering operator<=>(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr strong_ordering operator<=>(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return __v < 0 ? strong_ordering::greater : (__v > 0 ? strong_ordering::less : __v); +} + +} // namespace __1 +} // end namespace std + +#endif // STD_COMPARE_H diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp new file mode 100644 index 000000000000..576eed964761 --- /dev/null +++ b/clang/test/CIR/CodeGen/String.cpp @@ -0,0 +1,73 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +class String { + char *storage{nullptr}; + long size; + long capacity; + +public: + String() : size{0} {} + String(int size) : size{size} {} + String(const char *s) {} +}; + +void test() { + String s1{}; + String s2{1}; + String s3{"abcdefghijklmnop"}; +} + +// CHECK: cir.func linkonce_odr @_ZN6StringC2Ev +// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: cir.store %arg0, %0 +// CHECK-NEXT: %1 = cir.load %0 +// CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} +// CHECK-NEXT: %3 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i +// CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK: cir.func linkonce_odr @_ZN6StringC2Ei +// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["size", init] +// CHECK-NEXT: cir.store %arg0, %0 +// CHECK-NEXT: cir.store %arg1, %1 +// CHECK-NEXT: %2 = cir.load %0 +// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} +// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) +// CHECK-NEXT: cir.store %4, %3 +// CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i +// CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.return + +// CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, 
cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.return + +// CHECK: cir.func @_Z4testv() +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/StringExample.cpp b/clang/test/CIR/CodeGen/StringExample.cpp new file mode 100644 index 000000000000..a2c0ef374f1c --- /dev/null +++ b/clang/test/CIR/CodeGen/StringExample.cpp @@ -0,0 +1,34 @@ +// RUN: true + +int strlen(char const *); +void puts(char const *); + +struct String { + long size; + long capacity; + char *storage; + + String() : size{0}, capacity{0}, storage{nullptr} {} + String(char const *s) : size{strlen(s)}, capacity{size}, + storage{new char[capacity]} {} +}; + +struct StringView { + long size; + char *storage; + + StringView(const String &s) : size{s.size}, storage{s.storage} {} + StringView() : size{0}, storage{nullptr} {} +}; + +int main() { + StringView sv; + { + String s = "Hi"; + sv = s; + + puts(sv.storage); + } + + puts(sv.storage); +} diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c new file mode 100644 index 000000000000..0132bdb1132f --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -0,0 +1,85 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct {} S; + +typedef struct { + int a; + int b; + S s; +} A; + +// CHECK: cir.func @foo1 +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr +// CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr +void foo1(A* a1, A* a2) { + a1[1] = a2[1]; +} + +// CHECK: cir.func @foo2 +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.get_member [[TMP4]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: cir.copy [[TMP5]] to [[TMP3]] : !cir.ptr +void foo2(A* a1, A* a2) { + a1->s = a2->s; +} + +// CHECK: cir.global external @a = #cir.zero : !ty_22A22 +// CHECK: cir.func @foo3 +// CHECK: [[TMP0]] = cir.alloca !ty_22A22, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: [[TMP1]] = cir.get_global @a : cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +// CHECK: 
[[TMP2]] = cir.load [[TMP0]] : cir.ptr , !ty_22A22 +// CHECK: cir.return [[TMP2]] : !ty_22A22 +A a; +A foo3(void) { + return a; +} + +// CHECK: cir.func @foo4 +// CHECK: [[TMP0]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr +void foo4(A* a1) { + A a2 = *a1; +} + +A create() { A a; return a; } + +// CHECK: cir.func {{.*@foo5}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22A22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["tmp"] {alignment = 4 : i64} +// CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_22A22 +// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +void foo5() { + A a; + a = create(); +} + +void foo6(A* a1) { + A a2 = (*a1); +// CHECK: cir.func {{.*@foo6}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp new file mode 100644 index 000000000000..40b584adf009 --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: !ty_22yep_22 = !cir.struct, !cir.int}> + +typedef enum xxy_ { + xxy_Low = 0, + xxy_High = 0x3f800000, + xxy_EnumSize = 0x7fffffff +} xxy; + +typedef struct yep_ { + unsigned int Status; + xxy HC; +} yop; + +void use() { yop{}; } + +// CHECK: cir.func @_Z3usev() +// CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp.ensured"] {alignment = 4 : i64} +// CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr +// CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %2, %1 : !u32i, cir.ptr +// CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: cir.return +// CHECK: } + +typedef unsigned long long Flags; + +typedef enum XType { + A = 0, + Y = 1000066001, + X = 1000070000 +} XType; + +typedef struct Yo { + XType type; + const void* __attribute__((__may_alias__)) next; + Flags createFlags; +} Yo; + +void yo() { + Yo ext = {X}; + Yo ext2 = {Y, &ext}; +} + +// CHECK: cir.func @_Z2yov() +// CHECK: %0 = cir.alloca !ty_22Yo22, cir.ptr , ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22Yo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 +// CHECK: cir.store %2, %0 : !ty_22Yo22, cir.ptr +// CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: 
cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr +// CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp new file mode 100644 index 000000000000..2662bd181936 --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: !ty_22Zero22 = !cir.struct}> + +struct Zero { + void yolo(); +}; + +void f() { + Zero z0 = Zero(); + // {} no element init. + Zero z1 = Zero{}; +} + +// CHECK: cir.func @_Z1fv() +// CHECK: %0 = cir.alloca !ty_22Zero22, cir.ptr , ["z0", init] +// CHECK: %1 = cir.alloca !ty_22Zero22, cir.ptr , ["z1"] +// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp new file mode 100644 index 000000000000..bb464a687bf2 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s + +void foo() noexcept; + +class xpto { +public: + xpto() { + foo(); + } + int i; + float f; + ~xpto() { + foo(); + } +}; + +void x() { + xpto array[2]; +} + +// BEFORE: cir.func @_Z1xv() +// BEFORE: %[[ArrayAddr:.*]] = cir.alloca !cir.array + +// BEFORE: cir.array.ctor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoC1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.yield +// BEFORE: } + +// BEFORE: cir.array.dtor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoD1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.yield +// BEFORE: } + +// AFTER: cir.func @_Z1xv() +// AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array +// AFTER: %[[ConstTwo:.*]] = cir.const(#cir.int<2> : !u64i) : !u64i +// AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr +// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr +// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, cir.ptr > +// AFTER: cir.do { +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ConstOne:.*]] = cir.const(#cir.int<1> : !u64i) : !u64i +// AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () +// AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr +// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, cir.ptr > +// AFTER: cir.yield +// AFTER: } while { +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ExitCond:.*]] = cir.cmp(eq, %[[ArrayElt]], %[[ArrayPastEnd]]) : !cir.ptr, !cir.bool +// AFTER: cir.condition(%[[ExitCond]]) +// AFTER: } + +// AFTER: cir.do { +// AFTER: cir.call @_ZN4xptoD1Ev({{.*}}) : (!cir.ptr) -> () +// AFTER: } while { +// AFTER: } + +// AFTER: cir.return \ No 
newline at end of file diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c new file mode 100644 index 000000000000..cdba1e30cb4b --- /dev/null +++ b/clang/test/CIR/CodeGen/array-init.c @@ -0,0 +1,86 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +typedef struct { + int a; + long b; +} T; + +void buz(int x) { + T arr[] = { {0, x}, {0, 0} }; +} +// CHECK: cir.func @buz +// CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 16 : i64} +// CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, cir.ptr +// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const(#cir.zero : !cir.array) : !cir.array +// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, cir.ptr > +// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : cir.ptr , !s32i +// CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i +// CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, cir.ptr +// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return + +void foo() { + double bar[] = {9,8,7}; +} + +// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["bar"] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array) : !cir.array +// CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > +void bar(int a, int b, int c) { + int arr[] = {a,b,c}; +} + +// CHECK: cir.func @bar +// CHECK: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%7 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, cir.ptr + +void zero_init(int x) { + int arr[3] = {x}; +} + +// CHECK: cir.func @zero_init +// CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} +// CHECK: [[TEMP:%.*]] = cir.alloca !cir.ptr, 
cir.ptr >, ["arrayinit.temp", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[VAR_ALLOC]] : !s32i, cir.ptr +// CHECK: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : cir.ptr , !s32i +// CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, cir.ptr +// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: [[SIZE:%.*]] = cir.const(#cir.int<3> : !s64i) : !s64i +// CHECK: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr +// CHECK: cir.do { +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[FILLER:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, cir.ptr +// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: cir.yield +// CHECK: } while { +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool +// CHECK: cir.condition([[CMP]]) +// CHECK: } +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/array-unknown-bound.cpp b/clang/test/CIR/CodeGen/array-unknown-bound.cpp new file mode 100644 index 000000000000..82948bef34e2 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-unknown-bound.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +extern int table[]; +// CHECK: cir.global external @table = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array + +int *table_ptr = table; +// CHECK: cir.global external @table_ptr = #cir.global_view<@table> : !cir.ptr + +int test() { return table[1]; } +// CHECK: cir.func @_Z4testv() +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @table : cir.ptr > + +int table[3] {1, 2, 3}; diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c new file mode 100644 index 000000000000..c98e97961602 --- /dev/null +++ b/clang/test/CIR/CodeGen/array.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should implicitly zero-initialize global array elements. +struct S { + int i; +} arr[3] = {{1}}; +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array + +int a[4]; +// CHECK: cir.global external @a = #cir.zero : !cir.array + +// Should create a pointer to a complete array. +int (*complete_ptr_a)[4] = &a; +// CHECK: cir.global external @complete_ptr_a = #cir.global_view<@a> : !cir.ptr> + +// Should create a pointer to an incomplete array. +int (*incomplete_ptr_a)[] = &a; +// CHECK: cir.global external @incomplete_ptr_a = #cir.global_view<@a> : !cir.ptr> + +// Should access incomplete array if external. 
+extern int foo[]; +// CHECK: cir.global "private" external @foo : !cir.array +void useFoo(int i) { + foo[i] = 42; +} +// CHECK: @useFoo +// CHECK: %[[#V2:]] = cir.get_global @foo : cir.ptr > +// CHECK: %[[#V3:]] = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %[[#V4:]] = cir.cast(array_to_ptrdecay, %[[#V2]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V5:]] = cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V3]] : !s32i), !cir.ptr +// CHECK: cir.store %{{.+}}, %[[#V5]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp new file mode 100644 index 000000000000..61bde35e261c --- /dev/null +++ b/clang/test/CIR/CodeGen/array.cpp @@ -0,0 +1,92 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void a0() { + int a[10]; +} + +// CHECK: cir.func @_Z2a0v() +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + +void a1() { + int a[10]; + a[0] = 1; +} + +// CHECK: cir.func @_Z2a1v() +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr +// CHECK-NEXT: cir.store %1, %4 : !s32i, cir.ptr + +int *a2() { + int a[4]; + return &a[0]; +} + +// CHECK: cir.func @_Z2a2v() -> !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr +// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %5 : !cir.ptr + +void local_stringlit() { + const char *s = "whatnow"; +} + +// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.func @_Z15local_stringlitv() +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > + +int multidim(int i, int j) { + int arr[2][2]; + return arr[i][j]; +} + +// CHECK: %3 = cir.alloca !cir.array x 2>, cir.ptr x 2>> +// Stride first dimension (stride = 2) +// CHECK: %4 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr x 2>>), !cir.ptr> +// CHECK: %6 = cir.ptr_stride(%5 : !cir.ptr>, %4 : !s32i), !cir.ptr> +// Stride second dimension (stride = 1) +// CHECK: %7 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr + +// Should globally zero-initialize null arrays. +int globalNullArr[] = {0, 0}; +// CHECK: cir.global external @globalNullArr = #cir.zero : !cir.array + +// Should implicitly zero-initialize global array elements. 
+struct S { + int i; +} arr[3] = {{1}}; +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array + +void testPointerDecaySubscriptAccess(int arr[]) { +// CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess + arr[1]; + // CHECK: %[[#BASE:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#DIM1:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#BASE]] : !cir.ptr, %[[#DIM1]] : !s32i), !cir.ptr +} + +void testPointerDecayedArrayMultiDimSubscriptAccess(int arr[][3]) { +// CHECK: cir.func @{{.+}}testPointerDecayedArrayMultiDimSubscriptAccess + arr[1][2]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> + // CHECK: %[[#V4:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#V5:]] = cir.cast(array_to_ptrdecay, %[[#V3]] : !cir.ptr>), !cir.ptr + // CHECK: cir.ptr_stride(%[[#V5]] : !cir.ptr, %[[#V4]] : !s32i), !cir.ptr +} diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c new file mode 100644 index 000000000000..56d8cf2bf57c --- /dev/null +++ b/clang/test/CIR/CodeGen/asm.c @@ -0,0 +1,349 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + + +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty1() { + __asm__ volatile("" : : : ); +} + +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty2() { + __asm__ volatile("xyz" : : : ); +} + +// CHECK: cir.asm(x86_att, +// CHECK: out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty3(int x) { + __asm__ volatile("" : "+m"(x)); +} + +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in_out = [], +// CHECK: {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty4(int x) { + __asm__ volatile("" : : "m"(x)); +} + +// CHECK: cir.asm(x86_att, +// CHECK: out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty5(int x) { + __asm__ volatile("" : "=m"(x)); +} + +// CHECK: %3 = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !s32i], +// CHECK: {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !ty_22anon2E022 +void empty6(int x) { + __asm__ volatile("" : "=&r"(x), "+&r"(x)); +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["a"] +// CHECK: [[TMP1:%.*]] = cir.load %0 : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%3 : !u32i], +// CHECK: in_out = [], +// CHECK: {"addl $$42, $1" "=r,r,~{dirflag},~{fpsr},~{flags}"}) -> !s32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr loc(#loc42) +unsigned add1(unsigned int x) { + int a; + __asm__("addl $42, %[val]" + : "=r" (a) + : [val] "r" (x) + ); + + return a; +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, 
[[TMP0]] : !u32i, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !u32i], +// CHECK: {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +unsigned add2(unsigned int x) { + __asm__("addl $42, %[val]" + : [val] "+r" (x) + ); + return x; +} + + +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !u32i], +// CHECK: {"addl $$42, $0 \0A\09 subl $$1, $0 \0A\09 imul $$2, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +unsigned add3(unsigned int x) { // ((42 + x) - 1) * 2 + __asm__("addl $42, %[val] \n\t\ + subl $1, %[val] \n\t\ + imul $2, %[val]" + : [val] "+r" (x) + ); + return x; +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["x", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP1:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.asm(x86_att, +// CHECK: out = [%1 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"addl $$42, $0" "=*m,~{dirflag},~{fpsr},~{flags}"}) +// CHECK-NEXT: cir.return +void add4(int *x) { + __asm__("addl $42, %[addr]" : [addr] "=m" (*x)); +} + + +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.float, cir.ptr , ["x", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.float, cir.ptr , ["y", init] +// CHECK: [[TMP2:%.*]] = cir.alloca !cir.float, cir.ptr , ["r"] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.float, cir.ptr +// CHECK: cir.store %arg1, [[TMP1]] : !cir.float, cir.ptr +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr , !cir.float +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr , !cir.float +// CHECK: [[TMP5:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%4 : !cir.float, %5 : !cir.float], +// CHECK: in_out = [], +// CHECK: {"flds $1; flds $2; faddp" "=&{st},imr,imr,~{dirflag},~{fpsr},~{flags}"}) -> !cir.float +// CHECK: cir.store [[TMP5]], [[TMP2]] : !cir.float, cir.ptr +float add5(float x, float y) { + float r; + __asm__("flds %[x]; flds %[y]; faddp" + : "=&t" (r) + : [x] "g" (x), [y] "g" (y)); + return r; +} + +/* +There are tests from clang/test/CodeGen/asm.c. 
No checks for now - we just make +sure no crashes happen +*/ + + +void t1(int len) { + __asm__ volatile("" : "=&r"(len), "+&r"(len)); +} + +void t2(unsigned long long t) { + __asm__ volatile("" : "+m"(t)); +} + +void t3(unsigned char *src, unsigned long long temp) { + __asm__ volatile("" : "+m"(temp), "+r"(src)); +} + +void t4(void) { + unsigned long long a; + struct reg { unsigned long long a, b; } b; + + __asm__ volatile ("":: "m"(a), "m"(b)); +} + +void t5(int i) { + asm("nop" : "=r"(i) : "0"(t5)); +} + +void t6(void) { + __asm__ volatile("" : : "i" (t6)); +} + +void t7(int a) { + __asm__ volatile("T7 NAMED: %[input]" : "+r"(a): [input] "i" (4)); +} + +void t8(void) { + __asm__ volatile("T8 NAMED MODIFIER: %c[input]" :: [input] "i" (4)); +} + +unsigned t9(unsigned int a) { + asm("bswap %0 %1" : "+r" (a)); + return a; +} + +void t10(int r) { + __asm__("PR3908 %[lf] %[xx] %[li] %[r]" : [r] "+r" (r) : [lf] "mx" (0), [li] "mr" (0), [xx] "x" ((double)(0))); +} + +unsigned t11(signed char input) { + unsigned output; + __asm__("xyz" + : "=a" (output) + : "0" (input)); + return output; +} + +unsigned char t12(unsigned input) { + unsigned char output; + __asm__("xyz" + : "=a" (output) + : "0" (input)); + return output; +} + +unsigned char t13(unsigned input) { + unsigned char output; + __asm__("xyz %1" + : "=a" (output) + : "0" (input)); + return output; +} + +struct large { + int x[1000]; +}; + +unsigned long t15(int x, struct large *P) { + __asm__("xyz " + : "=r" (x) + : "m" (*P), "0" (x)); + return x; +} + +// bitfield destination of an asm. +struct S { + int a : 4; +}; + +void t14(struct S *P) { + __asm__("abc %0" : "=r"(P->a) ); +} + +int t16(void) { + int a,b; + asm ( "nop;" + :"=%c" (a) + : "r" (b) + ); + return 0; +} + +void t17(void) { + int i; + __asm__ ( "nop": "=m"(i)); +} + +int t18(unsigned data) { + int a, b; + + asm("xyz" :"=a"(a), "=d"(b) : "a"(data)); + return a + b; +} + +int t19(unsigned data) { + int a, b; + + asm("x{abc|def|ghi}z" :"=r"(a): "r"(data)); + return a + b; +} + +// skip t20 and t21: long double is not supported + +// accept 'l' constraint +unsigned char t22(unsigned char a, unsigned char b) { + unsigned int la = a; + unsigned int lb = b; + unsigned int bigres; + unsigned char res; + __asm__ ("0:\n1:\n" : [bigres] "=la"(bigres) : [la] "0"(la), [lb] "c"(lb) : + "edx", "cc"); + res = bigres; + return res; +} + +// accept 'l' constraint +unsigned char t23(unsigned char a, unsigned char b) { + unsigned int la = a; + unsigned int lb = b; + unsigned char res; + __asm__ ("0:\n1:\n" : [res] "=la"(res) : [la] "0"(la), [lb] "c"(lb) : + "edx", "cc"); + return res; +} + +void *t24(char c) { + void *addr; + __asm__ ("foobar" : "=a" (addr) : "0" (c)); + return addr; +} + +void t25(void) +{ + __asm__ __volatile__( \ + "finit" \ + : \ + : \ + :"st","st(1)","st(2)","st(3)", \ + "st(4)","st(5)","st(6)","st(7)", \ + "fpsr","fpcr" \ + ); +} + +//t26 skipped - no vector type support + +// Check to make sure the inline asm non-standard dialect attribute _not_ is +// emitted. +void t27(void) { + asm volatile("nop"); +} + +// Check handling of '*' and '#' constraint modifiers. 
+void t28(void) +{ + asm volatile ("/* %0 */" : : "i#*X,*r" (1)); +} + +static unsigned t29_var[1]; + +void t29(void) { + asm volatile("movl %%eax, %0" + : + : "m"(t29_var)); +} + +void t30(int len) { + __asm__ volatile("" + : "+&&rm"(len)); +} + +void t31(int len) { + __asm__ volatile("" + : "+%%rm"(len), "+rm"(len)); +} + +//t32 skipped: no goto + +void *t33(void *ptr) +{ + void *ret; + asm ("lea %1, %0" : "=r" (ret) : "p" (ptr)); + return ret; +} diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp new file mode 100644 index 000000000000..a7adf5f11502 --- /dev/null +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -0,0 +1,101 @@ +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -clangir-disable-emit-cxx-default %s -o %t-disable.cir +// RUN: FileCheck --input-file=%t-disable.cir %s --check-prefix=DISABLE + +int strlen(char const *); + +struct String { + long size; + long capacity; + + String() : size{0}, capacity{0} {} + String(char const *s) : size{strlen(s)}, capacity{size} {} + // StringView::StringView(String const&) + // + // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %2 = cir.load %0 : cir.ptr > + + // Get address of `this->size` + + // CHECK: %3 = cir.get_member %2[0] {name = "size"} + + // Get address of `s` + + // CHECK: %4 = cir.load %1 : cir.ptr > + + // Get the address of s.size + + // CHECK: %5 = cir.get_member %4[0] {name = "size"} + + // Load value from s.size and store in this->size + + // CHECK: %6 = cir.load %5 : cir.ptr , !s64i + // CHECK: cir.store %6, %3 : !s64i, cir.ptr + // CHECK: cir.return + // CHECK: } + + // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + + // StringView::operator=(StringView&&) + // + // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %3 = cir.load deref %0 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %5 = cir.get_member %4[0] {name = "size"} + // CHECK: %6 = cir.load %5 : cir.ptr , !s64i + // CHECK: %7 = cir.get_member %3[0] {name = "size"} + // CHECK: cir.store %6, %7 : !s64i, cir.ptr + // CHECK: cir.store %3, %2 : !cir.ptr + // CHECK: %8 = cir.load %2 : cir.ptr > + // CHECK: cir.return %8 : !cir.ptr + // CHECK: } + + // DISABLE: cir.func private @_ZN10StringViewaSEOS_ + // DISABLE-NEXT: cir.func @main() +}; + +struct StringView { + long size; + + StringView(const String &s) : size{s.size} {} + StringView() : size{0} {} +}; + +int main() { + StringView sv; + { + String s = "Hi"; + sv = s; + } +} + +// CHECK: cir.func @main() -> !s32i +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = 
cir.alloca !ty_22StringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %3 = cir.alloca !ty_22String22, cir.ptr , ["s", init] {alignment = 8 : i64} +// CHECK: %4 = cir.get_global @".str" : cir.ptr > +// CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %6 = cir.alloca !ty_22StringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: } +// CHECK: } +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.return %2 : !s32i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c new file mode 100644 index 000000000000..8e9276a8f42e --- /dev/null +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -0,0 +1,86 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +typedef struct __Base { + unsigned long id; + unsigned int a; + unsigned int n; + unsigned char x; + unsigned short u; +} Base; + +struct w { + Base _base; + const void * ref; +}; + +typedef struct w *wPtr; + +void field_access(wPtr item) { + __atomic_exchange_n((&item->ref), (((void*)0)), 5); +} + +// CHECK: ![[W:.*]] = !cir.struct, {{.*}} {alignment = 8 : i64} +// CHECK: %[[FIELD:.*]] = cir.load %[[WADDR]] +// CHECK: %[[MEMBER:.*]] = cir.get_member %[[FIELD]][1] {name = "ref"} +// CHECK: cir.atomic.xchg(%[[MEMBER]] : !cir.ptr>, {{.*}} : !u64i, seq_cst) + +// LLVM-LABEL: @field_access +// LLVM: = alloca ptr, i64 1, align 8 +// LLVM: %[[VAL_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[RES_ADDR:.*]] = alloca ptr, i64 1, align 8 + +// LLVM: %[[MEMBER:.*]] = getelementptr %struct.w, ptr {{.*}}, i32 0, i32 1 +// LLVM: store ptr null, ptr %[[VAL_ADDR]], align 8 +// LLVM: %[[VAL:.*]] = load i64, ptr %[[VAL_ADDR]], align 8 +// LLVM: %[[RES:.*]] = atomicrmw xchg ptr %[[MEMBER]], i64 %[[VAL]] seq_cst, align 8 +// LLVM: store i64 %[[RES]], ptr %4, align 8 +// LLVM: load ptr, ptr %[[RES_ADDR]], align 8 +// LLVM: ret void + +void structAtomicExchange(unsigned referenceCount, wPtr item) { + __atomic_compare_exchange_n((&item->_base.a), (&referenceCount), (referenceCount + 1), 1 , 5, 5); +} + +// CHECK-LABEL: @structAtomicExchange +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) weak : (!u32i, !cir.bool) + +// LLVM-LABEL: @structAtomicExchange +// LLVM: load i32 +// LLVM: add i32 +// LLVM: store i32 +// LLVM: %[[EXP:.*]] = load i32 +// LLVM: %[[DES:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst +// LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 +// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8, !dbg !16 +// LLVM: %[[X:.*]] = xor i8 %[[Z]], 1, !dbg !16 +// LLVM: %[[FAIL:.*]] = trunc i8 %[[X]] to i1, !dbg !16 + +// LLVM: br i1 %[[FAIL:.*]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]], +// LLVM: [[STORE_OLD]]: +// LLVM: store i32 %[[OLD]], ptr 
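+// On failure, the compare-exchange writes the value it actually read back
+// into the caller's "expected" slot; that is the store of %[[OLD]] checked
+// just above, before rejoining the continuation block.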
+// LLVM: br label %[[CONTINUE]]
+
+// LLVM: [[CONTINUE]]:
+// LLVM: store i8 %[[Z]], ptr {{.*}}, align 1
+// LLVM: ret void
+
+void f2(const void *cf);
+
+void structLoad(unsigned referenceCount, wPtr item) {
+  f2(__atomic_load_n(&item->ref, 5));
+}
+
+// CHECK-LABEL: @structLoad
+// CHECK: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["atomic-temp"]
+// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : cir.ptr , !u64i
+// CHECK: %[[RES:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr
+// CHECK: cir.store %[[ATOMIC_LOAD]], %[[RES]] : !u64i, cir.ptr
+
+// No LLVM tests needed for this one, already covered elsewhere.
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp
new file mode 100644
index 000000000000..6c13eb11f1f6
--- /dev/null
+++ b/clang/test/CIR/CodeGen/atomic.cpp
@@ -0,0 +1,310 @@
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s
+
+// Available in the resource dir.
+#include <stdatomic.h>
+
+typedef struct _a {
+  _Atomic(int) d;
+} at;
+
+void m() { at y; }
+
+// CHECK: ![[A:.*]] = !cir.struct}>
+
+int basic_binop_fetch(int *i) {
+  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
+}
+
+// CHECK: cir.func @_Z17basic_binop_fetchPi
+// CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64}
+// CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, cir.ptr , [".atomictmp"] {alignment = 4 : i64}
+// CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, cir.ptr >
+// CHECK: %[[I:.*]] = cir.load %[[ARGI]] : cir.ptr >, !cir.ptr
+// CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i
+// CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr
+// CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr , !s32i
+// CHECK: cir.atomic.fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i
+
+// LLVM: define i32 @_Z17basic_binop_fetchPi
+// LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4
+// LLVM: add i32 %[[RMW]], %[[VAL]]
+
+int other_binop_fetch(int *i) {
+  __atomic_sub_fetch(i, 1, memory_order_relaxed);
+  __atomic_and_fetch(i, 1, memory_order_consume);
+  __atomic_or_fetch(i, 1, memory_order_acquire);
+  return __atomic_xor_fetch(i, 1, memory_order_release);
+}
+
+// CHECK: cir.func @_Z17other_binop_fetchPi
+// CHECK: cir.atomic.fetch(sub, {{.*}}, relaxed
+// CHECK: cir.atomic.fetch(and, {{.*}}, acquire
+// CHECK: cir.atomic.fetch(or, {{.*}}, acquire
+// CHECK: cir.atomic.fetch(xor, {{.*}}, release
+
+// LLVM: define i32 @_Z17other_binop_fetchPi
+// LLVM: %[[RMW_SUB:.*]] = atomicrmw sub ptr {{.*}} monotonic
+// LLVM: sub i32 %[[RMW_SUB]], {{.*}}
+// LLVM: %[[RMW_AND:.*]] = atomicrmw and ptr {{.*}} acquire
+// LLVM: and i32 %[[RMW_AND]], {{.*}}
+// LLVM: %[[RMW_OR:.*]] = atomicrmw or ptr {{.*}} acquire
+// LLVM: or i32 %[[RMW_OR]], {{.*}}
+// LLVM: %[[RMW_XOR:.*]] = atomicrmw xor ptr {{.*}} release
+// LLVM: xor i32 %[[RMW_XOR]], {{.*}}
+
+int nand_binop_fetch(int *i) {
+  return __atomic_nand_fetch(i, 1, memory_order_acq_rel);
+}
+
+// CHECK: cir.func @_Z16nand_binop_fetchPi
+// CHECK: cir.atomic.fetch(nand, {{.*}}, acq_rel
+
+// LLVM: define i32 @_Z16nand_binop_fetchPi
+// LLVM: %[[RMW_NAND:.*]] = atomicrmw nand ptr {{.*}} acq_rel
+// LLVM: %[[AND:.*]] = and i32 %[[RMW_NAND]]
+// LLVM: = xor i32 %[[AND]], 
-1 + +int fp_binop_fetch(float *i) { + __atomic_add_fetch(i, 1, memory_order_seq_cst); + return __atomic_sub_fetch(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z14fp_binop_fetchPf +// CHECK: cir.atomic.fetch(add, +// CHECK: cir.atomic.fetch(sub, + +// LLVM: define i32 @_Z14fp_binop_fetchPf +// LLVM: %[[RMW_FADD:.*]] = atomicrmw fadd ptr +// LLVM: fadd float %[[RMW_FADD]] +// LLVM: %[[RMW_FSUB:.*]] = atomicrmw fsub ptr +// LLVM: fsub float %[[RMW_FSUB]] + +int fetch_binop(int *i) { + __atomic_fetch_add(i, 1, memory_order_seq_cst); + __atomic_fetch_sub(i, 1, memory_order_seq_cst); + __atomic_fetch_and(i, 1, memory_order_seq_cst); + __atomic_fetch_or(i, 1, memory_order_seq_cst); + __atomic_fetch_xor(i, 1, memory_order_seq_cst); + return __atomic_fetch_nand(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z11fetch_binopPi +// CHECK: cir.atomic.fetch(add, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(sub, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(and, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(or, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(xor, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(nand, {{.*}}) fetch_first + +// LLVM: define i32 @_Z11fetch_binopPi +// LLVM: atomicrmw add ptr +// LLVM-NOT: add {{.*}} +// LLVM: atomicrmw sub ptr +// LLVM-NOT: sub {{.*}} +// LLVM: atomicrmw and ptr +// LLVM-NOT: and {{.*}} +// LLVM: atomicrmw or ptr +// LLVM-NOT: or {{.*}} +// LLVM: atomicrmw xor ptr +// LLVM-NOT: xor {{.*}} +// LLVM: atomicrmw nand ptr +// LLVM-NOT: nand {{.*}} + +void min_max_fetch(int *i) { + __atomic_fetch_max(i, 1, memory_order_seq_cst); + __atomic_fetch_min(i, 1, memory_order_seq_cst); + __atomic_max_fetch(i, 1, memory_order_seq_cst); + __atomic_min_fetch(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z13min_max_fetchPi +// CHECK: = cir.atomic.fetch(max, {{.*}}) fetch_first +// CHECK: = cir.atomic.fetch(min, {{.*}}) fetch_first +// CHECK: = cir.atomic.fetch(max, {{.*}}) : !s32i +// CHECK: = cir.atomic.fetch(min, {{.*}}) : !s32i + +// LLVM: define void @_Z13min_max_fetchPi +// LLVM: atomicrmw max ptr +// LLVM-NOT: icmp {{.*}} +// LLVM: atomicrmw min ptr +// LLVM-NOT: icmp {{.*}} +// LLVM: %[[MAX:.*]] = atomicrmw max ptr +// LLVM: %[[ICMP_MAX:.*]] = icmp sgt i32 %[[MAX]] +// LLVM: select i1 %[[ICMP_MAX]], i32 %[[MAX]] +// LLVM: %[[MIN:.*]] = atomicrmw min ptr +// LLVM: %[[ICMP_MIN:.*]] = icmp slt i32 %[[MIN]] +// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]] + +int fi1(_Atomic(int) *i) { + return __c11_atomic_load(i, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z3fi1PU7_Atomici +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z3fi1PU7_Atomici +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1a(int *i) { + int v; + __atomic_load(i, &v, memory_order_seq_cst); + return v; +} + +// CHECK-LABEL: @_Z4fi1aPi +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1aPi +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1b(int *i) { + return __atomic_load_n(i, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi1bPi +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1bPi +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1c(atomic_int *i) { + return atomic_load(i); +} + +// CHECK-LABEL: @_Z4fi1cPU7_Atomici +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1cPU7_Atomici +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +void fi2(_Atomic(int) *i) { + __c11_atomic_store(i, 1, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z3fi2PU7_Atomici +// CHECK: cir.store 
atomic(seq_cst) + +// LLVM-LABEL: @_Z3fi2PU7_Atomici +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2a(int *i) { + int v = 1; + __atomic_store(i, &v, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi2aPi +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2aPi +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2b(int *i) { + __atomic_store_n(i, 1, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi2bPi +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2bPi +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2c(atomic_int *i) { + atomic_store(i, 1); +} + +struct S { + double x; +}; + +// CHECK-LABEL: @_Z4fi2cPU7_Atomici +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2cPU7_Atomici +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fd3(struct S *a, struct S *b, struct S *c) { + __atomic_exchange(a, b, c, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z3fd3P1SS0_S0_ +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i + +// FIXME: CIR is producing an over alignment of 8, only 4 needed. +// LLVM-LABEL: @_Z3fd3P1SS0_S0_ +// LLVM: [[A_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: [[B_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: [[C_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: store ptr {{.*}}, ptr [[A_ADDR]] +// LLVM-NEXT: store ptr {{.*}}, ptr [[B_ADDR]] +// LLVM-NEXT: store ptr {{.*}}, ptr [[C_ADDR]] +// LLVM-NEXT: [[LOAD_A_PTR:%.*]] = load ptr, ptr [[A_ADDR]] +// LLVM-NEXT: [[LOAD_B_PTR:%.*]] = load ptr, ptr [[B_ADDR]] +// LLVM-NEXT: [[LOAD_C_PTR:%.*]] = load ptr, ptr [[C_ADDR]] +// LLVM-NEXT: [[LOAD_B:%.*]] = load i64, ptr [[LOAD_B_PTR]] +// LLVM-NEXT: [[RESULT:%.*]] = atomicrmw xchg ptr [[LOAD_A_PTR]], i64 [[LOAD_B]] seq_cst +// LLVM-NEXT: store i64 [[RESULT]], ptr [[LOAD_C_PTR]] + +bool fd4(struct S *a, struct S *b, struct S *c) { + return __atomic_compare_exchange(a, b, c, 1, 5, 5); +} + +// CHECK-LABEL: @_Z3fd4P1SS0_S0_ +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) weak : (!u64i, !cir.bool) + +// LLVM-LABEL: @_Z3fd4P1SS0_S0_ +// LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 + +bool fi4a(int *i) { + int cmp = 0; + int desired = 1; + return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire); +} + +// CHECK-LABEL: @_Z4fi4aPi +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) + +// LLVM-LABEL: @_Z4fi4aPi +// LLVM: %[[RES:.*]] = cmpxchg ptr %7, i32 %8, i32 %9 acquire acquire, align 4 +// LLVM: extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: extractvalue { i32, i1 } %[[RES]], 1 + +bool fi4b(int *i) { + int cmp = 0; + return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire); +} + +// CHECK-LABEL: @_Z4fi4bPi +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) weak : (!s32i, !cir.bool) + +// LLVM-LABEL: @_Z4fi4bPi +// LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4 +// LLVM: extractvalue { i32, i1 } %[[R]], 0 +// LLVM: extractvalue { i32, i1 } %[[R]], 1 + +bool fi4c(atomic_int *i) { + int cmp = 0; + return atomic_compare_exchange_strong(i, &cmp, 1); +} + +// CHECK-LABEL: @_Z4fi4cPU7_Atomici +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = 
seq_cst, failure = seq_cst) : (!s32i, !cir.bool)
+// CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
+// CHECK: cir.if %[[CMP:.*]] {
+// CHECK: cir.store %old, {{.*}} : !s32i, cir.ptr
+// CHECK: }
+
+// LLVM-LABEL: @_Z4fi4cPU7_Atomici
+// LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4
+
+bool fsb(bool *c) {
+  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @_Z3fsbPb
+// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) : !u8i
+
+// LLVM-LABEL: @_Z3fsbPb
+// LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c
new file mode 100644
index 000000000000..6c8977c3c41e
--- /dev/null
+++ b/clang/test/CIR/CodeGen/attributes.c
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM
+
+extern int __attribute__((section(".shared"))) ext;
+int getExt() {
+  return ext;
+}
+// CIR: cir.global "private" external @ext : !s32i {section = ".shared"}
+// LLVM: @ext = external global i32, section ".shared"
+
+int __attribute__((section(".shared"))) glob = 42;
+// CIR: cir.global external @glob = #cir.int<42> : !s32i {section = ".shared"}
+// LLVM: @glob = global i32 42, section ".shared"
diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c
new file mode 100644
index 000000000000..c99ecce64090
--- /dev/null
+++ b/clang/test/CIR/CodeGen/basic.c
@@ -0,0 +1,54 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM
+
+int foo(int i);
+
+int foo(int i) {
+  i;
+  return i;
+}
+
+// CIR: module @"{{.*}}basic.c" attributes {{{.*}}cir.lang = #cir.lang
+// CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i
+// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64}
+// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64}
+// CIR-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr
+// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i
+// CIR-NEXT: %3 = cir.load %0 : cir.ptr , !s32i
+// CIR-NEXT: cir.store %3, %1 : !s32i, cir.ptr
+// CIR-NEXT: %4 = cir.load %1 : cir.ptr , !s32i
+// CIR-NEXT: cir.return %4 : !s32i
+
+int f2(void) { return 3; }
+
+// CIR: cir.func @f2() -> !s32i
+// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64}
+// CIR-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i
+// CIR-NEXT: cir.store %1, %0 : !s32i, cir.ptr
+// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i
+// CIR-NEXT: cir.return %2 : !s32i
+
+// LLVM: define i32 @f2()
+// LLVM-NEXT: %1 = alloca i32, i64 1, align 4
+// LLVM-NEXT: store i32 3, ptr %1, align 4
+// LLVM-NEXT: %2 = load i32, ptr %1, align 4
+// LLVM-NEXT: ret i32 %2
+
+
+
+int f3(void) {
+  int i = 3;
+  return i;
+}
+
+// CIR: cir.func @f3() -> !s32i
+// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64}
+// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64}
+// CIR-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i
+// CIR-NEXT: cir.store %2, %1 : !s32i, cir.ptr
+// CIR-NEXT: 
%3 = cir.load %1 : cir.ptr , !s32i +// CIR-NEXT: cir.store %3, %0 : !s32i, cir.ptr +// CIR-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.return %4 : !s32i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp new file mode 100644 index 000000000000..83c423ea917c --- /dev/null +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -0,0 +1,182 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int *p0() { + int *p = nullptr; + return p; +} + +// CHECK: cir.func @_Z2p0v() -> !cir.ptr +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] +// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > + +int *p1() { + int *p; + p = nullptr; + return p; +} + +// CHECK: cir.func @_Z2p1v() -> !cir.ptr +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] +// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > + +int *p2() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; + return p; +} + +// CHECK: cir.func @_Z2p2v() -> !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %7 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %8, %7 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %9 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %9, %10 : !s32i, cir.ptr +// CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) +// CHECK-NEXT: %3 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %3, %4 : !s32i, cir.ptr +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %6 : !cir.ptr + +void b0() { bool x = true, y = false; } + +// CHECK: cir.func @_Z2b0v() +// CHECK: %2 = cir.const(#true) : !cir.bool +// CHECK: %3 = cir.const(#false) : !cir.bool + +void b1(int a) { bool b = a; } + +// CHECK: cir.func @_Z2b1i(%arg0: !s32i loc({{.*}})) +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr + +void if0(int a) { + int x = 0; + if (a) { + x = 3; + } else { + x = 4; + } +} + +// CHECK: cir.func @_Z3if0i(%arg0: !s32i loc({{.*}})) +// CHECK: cir.scope { +// CHECK: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.if %4 { +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: } else { +// CHECK-NEXT: %5 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: } +// CHECK: } + +void if1(int a, bool b, bool c) { + int x = 0; + if (a) { + x = 3; + if (b) { + x = 8; + } + } else { + if (c) { + x = 14; + } + x = 4; + } +} + +// CHECK: cir.func @_Z3if1ibb(%arg0: !s32i loc({{.*}}), %arg1: 
!cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) +// CHECK: cir.scope { +// CHECK: %5 = cir.load %0 : cir.ptr , !s32i +// CHECK: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool +// CHECK: cir.if %6 { +// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %7, %3 : !s32i, cir.ptr +// CHECK: cir.scope { +// CHECK: %8 = cir.load %1 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.if %8 { +// CHECK-NEXT: %9 = cir.const(#cir.int<8> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr +// CHECK-NEXT: } +// CHECK: } +// CHECK: } else { +// CHECK: cir.scope { +// CHECK: %8 = cir.load %2 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.if %8 { +// CHECK-NEXT: %9 = cir.const(#cir.int<14> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr +// CHECK-NEXT: } +// CHECK: } +// CHECK: %7 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK: cir.store %7, %3 : !s32i, cir.ptr +// CHECK: } +// CHECK: } + +enum { + um = 0, + dois = 1, +}; // Do not crash! + +extern "C" { +struct regs { + unsigned long sp; + unsigned long pc; +}; + +// Check it's not mangled. +// CHECK: cir.func @use_regs() + +void use_regs() { regs r; } +} + +void x() { + const bool b0 = true; + const bool b1 = false; +} + +// CHECK: cir.func @_Z1xv() +// CHECK: %0 = cir.alloca !cir.bool, cir.ptr , ["b0", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["b1", init] {alignment = 1 : i64} +// CHECK: %2 = cir.const(#true) : !cir.bool +// CHECK: cir.store %2, %0 : !cir.bool, cir.ptr +// CHECK: %3 = cir.const(#false) : !cir.bool +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr + +typedef unsigned long size_type; +typedef unsigned long _Tp; + +size_type max_size() { + return size_type(~0) / sizeof(_Tp); +} + +// CHECK: cir.func @_Z8max_sizev() +// CHECK: %0 = cir.alloca !u64i, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i +// CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i +// CHECK: %4 = cir.const(#cir.int<8> : !u64i) : !u64i +// CHECK: %5 = cir.binop(div, %3, %4) : !u64i + +// CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) +// CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) +// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp new file mode 100644 index 000000000000..a0a029ef7e9a --- /dev/null +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -0,0 +1,75 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int foo(int a, int b) { + int x = a * b; + x *= b; + x /= b; + x %= b; + x += b; + x -= b; + x >>= b; + x <<= b; + x &= b; + x ^= b; + x |= b; + return x; +} + +// CHECK: [[Value:%[0-9]+]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: = cir.binop(mul, +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(mul, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: cir.binop(div, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(rem, {{.*}} loc([[SourceLocation:#loc[0-9]+]]) +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(add, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(sub, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load 
{{.*}}[[Value]] +// CHECK: = cir.shift( right +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.shift(left +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(and, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(xor, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(or, +// CHECK: cir.store {{.*}}[[Value]] + +typedef enum { + A = 3, +} enumy; + +enumy getty(); + +void exec() { + enumy r; + if ((r = getty()) < 0) {} +} + +// CHECK: cir.func @_Z4execv() +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["r"] {alignment = 4 : i64} +// CHECK: cir.scope { +// CHECK: %1 = cir.call @_Z5gettyv() : () -> !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool +// CHECK: cir.if %4 { + +// CHECK: [[SourceLocationB:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:8) +// CHECK: [[SourceLocationA:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:3) +// CHECK: [[SourceLocation]] = loc(fused[[[SourceLocationA]], [[SourceLocationB]]]) diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c new file mode 100644 index 000000000000..280fd29b067f --- /dev/null +++ b/clang/test/CIR/CodeGen/binop.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void conditionalResultIimplicitCast(int a, int b, float f) { + // Should implicit cast back to int. + int x = a && b; + // CHECK: %[[#INT:]] = cir.ternary + // CHECK: %{{.+}} = cir.cast(bool_to_int, %[[#INT]] : !cir.bool), !s32i + float y = f && f; + // CHECK: %[[#BOOL:]] = cir.ternary + // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i + // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), !cir.float +} diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp new file mode 100644 index 000000000000..0564e9c8e89f --- /dev/null +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -0,0 +1,113 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void b0(int a, int b) { + int x = a * b; + x = x / b; + x = x % b; + x = x + b; + x = x - b; + x = x >> b; + x = x << b; + x = x & b; + x = x ^ b; + x = x | b; +} + +// CHECK: = cir.binop(mul, %3, %4) : !s32i +// CHECK: = cir.binop(div, %6, %7) : !s32i +// CHECK: = cir.binop(rem, %9, %10) : !s32i +// CHECK: = cir.binop(add, %12, %13) : !s32i +// CHECK: = cir.binop(sub, %15, %16) nsw : !s32i +// CHECK: = cir.shift( right, %18 : !s32i, %19 : !s32i) -> !s32i +// CHECK: = cir.shift(left, %21 : !s32i, %22 : !s32i) -> !s32i +// CHECK: = cir.binop(and, %24, %25) : !s32i +// CHECK: = cir.binop(xor, %27, %28) : !s32i +// CHECK: = cir.binop(or, %30, %31) : !s32i + +void b1(bool a, bool b) { + bool x = a && b; + x = x || b; +} + +// CHECK: cir.ternary(%3, true +// CHECK-NEXT: %7 = cir.load %1 +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield +// CHECK: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield + +// CHECK: cir.ternary(%5, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: false { +// CHECK-NEXT: %7 = cir.load %1 +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield + +void b2(bool a) { + bool x = 0 && a; + x = 1 && a; + x = 0 || a; + x = 1 || a; +} + +// CHECK: %0 = cir.alloca {{.*}} ["a", init] +// CHECK: %1 = cir.alloca {{.*}} ["x", init] +// CHECK: %2 = cir.const(#false) +// CHECK-NEXT: cir.store %2, %1 +// CHECK-NEXT: %3 = cir.load %0 +// CHECK-NEXT: cir.store %3, %1 +// CHECK-NEXT: %4 = cir.load %0 +// CHECK-NEXT: cir.store %4, %1 +// CHECK-NEXT: %5 = cir.const(#true) +// CHECK-NEXT: cir.store %5, %1 + +void b3(int a, int b, int c, int d) { + bool x = (a == b) && (c == d); + x = (a == b) || (c == d); +} + +// CHECK: %0 = cir.alloca {{.*}} ["a", init] +// CHECK-NEXT: %1 = cir.alloca {{.*}} ["b", init] +// CHECK-NEXT: %2 = cir.alloca {{.*}} ["c", init] +// CHECK-NEXT: %3 = cir.alloca {{.*}} ["d", init] +// CHECK-NEXT: %4 = cir.alloca {{.*}} ["x", init] +// CHECK: %5 = cir.load %0 +// CHECK-NEXT: %6 = cir.load %1 +// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: %13 = cir.load %2 +// CHECK-NEXT: %14 = cir.load %3 +// CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) +// CHECK-NEXT: cir.ternary(%15, true +// CHECK: %9 = cir.load %0 +// CHECK-NEXT: %10 = cir.load %1 +// CHECK-NEXT: %11 = cir.cmp(eq, %9, %10) +// CHECK-NEXT: %12 = cir.ternary(%11, true { +// CHECK: }, false { +// CHECK-NEXT: %13 = cir.load %2 +// CHECK-NEXT: %14 = cir.load %3 +// CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) +// CHECK-NEXT: %16 = cir.ternary(%15, true + +void testFloatingPointBinOps(float a, float b) { + a * b; + // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.float + a / b; + // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : !cir.float + a + b; + // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : !cir.float + a - b; + // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.float +} diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c new file mode 100644 index 000000000000..c6231b94f7d1 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -0,0 +1,131 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + struct __long l; +} + +typedef struct { + int a : 4; + int b : 5; + int c; +} D; + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + unsigned f; // type other than int above, not a bitfield +} S; + +typedef struct { + int a : 3; // one bitfield with size < 8 + unsigned b; +} T; + +typedef struct { + char a; + char b; + char c; + + // startOffset 24 bits, new storage from here + int d: 2; + int e: 2; + int f: 4; + int g: 25; + int h: 3; + int i: 4; + int j: 3; + int k: 8; + + int l: 14; // need to be a part of the new storage + // because (tail - startOffset) is 65 after 'l' field +} U; + +// CHECK: !ty_22D22 = !cir.struct, !cir.int}> +// CHECK: !ty_22T22 = !cir.struct, !cir.int} #cir.record.decl.ast> +// CHECK: !ty_22anon2E122 = !cir.struct} #cir.record.decl.ast> +// CHECK: !ty_anon_struct = !cir.struct, !cir.int, !cir.int}> +// CHECK: #bfi_a = #cir.bitfield_info +// CHECK: #bfi_e = #cir.bitfield_info +// CHECK: !ty_22S22 = !cir.struct, !cir.array x 3>, !cir.int, !cir.int}> +// CHECK: 
!ty_22U22 = !cir.struct, !cir.int, !cir.int, !cir.array x 9>}> +// CHECK: !ty_22__long22 = !cir.struct} #cir.record.decl.ast>, !cir.int, !cir.ptr>}> +// CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr +// CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) +void store_field() { + S s; + s.e = 3; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i +int load_field(S* s) { + return s->d; +} + +// CHECK: cir.func {{.*@unOp}} +// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP1:%.*]] = cir.get_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>) -> !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(inc, [[TMP1]]) : !s32i, !s32i +// CHECK: cir.set_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>, [[TMP2]] : !s32i) +void unOp(S* s) { + s->d++; +} + +// CHECK: cir.func {{.*@binOp}} +// CHECK: [[TMP0:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>) -> !s32i +// CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i +// CHECK: cir.set_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>, [[TMP3]] : !s32i) +void binOp(S* s) { + s->d |= 42; +} + + +// CHECK: cir.func {{.*@load_non_bitfield}} +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +unsigned load_non_bitfield(S *s) { + return s->f; +} + +// just create a usage of T type +// CHECK: cir.func {{.*@load_one_bitfield}} +int load_one_bitfield(T* t) { + return t->a; +} + +// CHECK: cir.func {{.*@createU}} +void createU() { + U u; +} + +// for this struct type we create an anon structure with different storage types in initialization +// CHECK: cir.func {{.*@createD}} +// CHECK: %0 = cir.alloca !ty_22D22, cir.ptr , ["d"] {alignment = 4 : i64} +// CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct) : !ty_anon_struct +// CHECK: cir.store %2, %1 : !ty_anon_struct, cir.ptr +void createD() { + D d = {1,2,3}; +} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp new file mode 100644 index 000000000000..365b784ed710 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + __long l; +} + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + unsigned f; // type other than int above, not a bitfield +} S; + +typedef struct { + int a : 3; // one bitfield with size < 8 + unsigned b; +} T; +// CHECK: !ty_22T22 = !cir.struct, !cir.int} 
#cir.record.decl.ast> +// CHECK: !ty_22anon2E122 = !cir.struct} #cir.record.decl.ast> +// CHECK: !ty_22S22 = !cir.struct, !cir.array x 3>, !cir.int, !cir.int}> +// CHECK: !ty_22__long22 = !cir.struct} #cir.record.decl.ast>, !cir.int, !cir.ptr>}> + +// CHECK: cir.func @_Z11store_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func @_Z10load_field +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i +int load_field(S& s) { + return s.d; +} + +// CHECK: cir.func @_Z17load_non_bitfield +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +unsigned load_non_bitfield(S& s) { + return s.f; +} + +// just create a usage of T type +// CHECK: cir.func @_Z17load_one_bitfield +int load_one_bitfield(T& t) { + return t.a; +} diff --git a/clang/test/CIR/CodeGen/bitint.c b/clang/test/CIR/CodeGen/bitint.c new file mode 100644 index 000000000000..51111ee1dafc --- /dev/null +++ b/clang/test/CIR/CodeGen/bitint.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void VLATest(_BitInt(3) A, _BitInt(42) B, _BitInt(17) C) { + int AR1[A]; + int AR2[B]; + int AR3[C]; +} + +// CHECK: cir.func @VLATest +// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#A_PROMOTED:]] = cir.cast(integral, %[[#A]] : !cir.int), !u64i +// CHECK-NEXT: %[[#SP:]] = cir.stack_save : !cir.ptr +// CHECK-NEXT: cir.store %[[#SP]], %{{.+}} : !cir.ptr, cir.ptr > +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#A_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#B:]] = cir.load %1 : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#B_PROMOTED:]] = cir.cast(integral, %[[#B]] : !cir.int), !u64i +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#B_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#C:]] = cir.load %2 : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#C_PROMOTED:]] = cir.cast(integral, %[[#C]] : !cir.int), !u64i +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#C_PROMOTED]] : !u64i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/bitint.cpp b/clang/test/CIR/CodeGen/bitint.cpp new file mode 100644 index 000000000000..fad50e1ee858 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitint.cpp @@ -0,0 +1,86 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +using i10 = signed _BitInt(10); +using u10 = unsigned _BitInt(10); + +unsigned _BitInt(1) GlobSize1 = 0; +// CHECK: cir.global external @GlobSize1 = #cir.int<0> : !cir.int + +i10 test_signed(i10 arg) { + return arg; +} + +// CHECK: cir.func @_Z11test_signedDB10_(%arg0: !cir.int loc({{.*}}) -> !cir.int +// CHECK: } + +u10 test_unsigned(u10 arg) { + return arg; +} + +// CHECK: cir.func @_Z13test_unsignedDU10_(%arg0: !cir.int loc({{.*}}) -> !cir.int +// CHECK: } + +i10 test_init() { + return 42; +} + +// CHECK: cir.func @_Z9test_initv() -> !cir.int +// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %{{.+}} = cir.cast(integral, 
%[[#LITERAL]] : !s32i), !cir.int
+// CHECK: }
+
+void test_init_for_mem() {
+  i10 x = 42;
+}
+
+// CHECK: cir.func @_Z17test_init_for_memv()
+// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i
+// CHECK-NEXT: %[[#INIT:]] = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int
+// CHECK-NEXT: cir.store %[[#INIT]], %{{.+}} : !cir.int, cir.ptr >
+// CHECK: }
+
+i10 test_arith(i10 lhs, i10 rhs) {
+  return lhs + rhs;
+}
+
+// CHECK: cir.func @_Z10test_arithDB10_S_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int
+// CHECK: %[[#LHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int
+// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int
+// CHECK-NEXT: %{{.+}} = cir.binop(add, %[[#LHS]], %[[#RHS]]) : !cir.int
+// CHECK: }
+
+void Size1ExtIntParam(unsigned _BitInt(1) A) {
+  unsigned _BitInt(1) B[5];
+  B[2] = A;
+}
+
+// CHECK: cir.func @_Z16Size1ExtIntParamDU1_
+// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int
+// CHECK-NEXT: %[[#IDX:]] = cir.const(#cir.int<2> : !s32i) : !s32i
+// CHECK-NEXT: %[[#ARRAY:]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr x 5>>), !cir.ptr>
+// CHECK-NEXT: %[[#PTR:]] = cir.ptr_stride(%[[#ARRAY]] : !cir.ptr>, %[[#IDX]] : !s32i), !cir.ptr>
+// CHECK-NEXT: cir.store %[[#A]], %[[#PTR]] : !cir.int, cir.ptr >
+// CHECK: }
+
+struct S {
+  _BitInt(17) A;
+  _BitInt(10) B;
+  _BitInt(17) C;
+};
+
+void OffsetOfTest(void) {
+  int A = __builtin_offsetof(struct S,A);
+  int B = __builtin_offsetof(struct S,B);
+  int C = __builtin_offsetof(struct S,C);
+}
+
+// CHECK: cir.func @_Z12OffsetOfTestv()
+// CHECK: %{{.+}} = cir.const(#cir.int<0> : !u64i) : !u64i
+// CHECK: %{{.+}} = cir.const(#cir.int<4> : !u64i) : !u64i
+// CHECK: %{{.+}} = cir.const(#cir.int<8> : !u64i) : !u64i
+// CHECK: }
+
+_BitInt(2) ParamPassing(_BitInt(15) a, _BitInt(31) b) {}
+
+// CHECK: cir.func @_Z12ParamPassingDB15_DB31_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int
diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c
new file mode 100644
index 000000000000..7af7527c4a76
--- /dev/null
+++ b/clang/test/CIR/CodeGen/bool.c
@@ -0,0 +1,39 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+#include <stdbool.h>
+
+typedef struct {
+  bool x;
+} S;
+
+// CHECK: cir.func @init_bool
+// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, cir.ptr
+// CHECK: [[ZERO:%.*]] = cir.const(#cir.zero : !ty_22S22) : !ty_22S22
+// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, cir.ptr
+void init_bool(void) {
+  S s = {0};
+}
+
+// CHECK: cir.func @store_bool
+// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >
+// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr >
+// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i
+// CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool
+// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr
+// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr
+// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, cir.ptr
+void store_bool(S *s) {
+  s->x = false;
+}
+
+// CHECK: cir.func @load_bool
+// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64}
+// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64}
+// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr >
+// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr
+// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr 
-> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !cir.bool +void load_bool(S *s) { + bool x = s->x; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bswap.cpp b/clang/test/CIR/CodeGen/bswap.cpp new file mode 100644 index 000000000000..66a6ccf3ffec --- /dev/null +++ b/clang/test/CIR/CodeGen/bswap.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +using u16 = unsigned short; +using u32 = unsigned int; +using u64 = unsigned long long; + +u16 bswap_u16(u16 x) { + return __builtin_bswap16(x); +} + +// CHECK: cir.func @_Z9bswap_u16t +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u16i) : !u16i +// CHECK: } + +u32 bswap_u32(u32 x) { + return __builtin_bswap32(x); +} + +// CHECK: cir.func @_Z9bswap_u32j +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u32i) : !u32i +// CHECK: } + +u64 bswap_u64(u64 x) { + return __builtin_bswap64(x); +} + +// CHECK: cir.func @_Z9bswap_u64y +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u64i) : !u64i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/build-deferred.cpp b/clang/test/CIR/CodeGen/build-deferred.cpp new file mode 100644 index 000000000000..bf0f2ce30c9e --- /dev/null +++ b/clang/test/CIR/CodeGen/build-deferred.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-build-deferred-threshold=0 %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class String { + char *storage{nullptr}; + long size; + long capacity; + +public: + String() : size{0} {} + String(int size) : size{size} {} + String(const char *s) {} +}; + +void test() { + String s1{}; + String s2{1}; + String s3{"abcdefghijklmnop"}; +} + +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ev +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ei +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2EPKc +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc + +// CHECK: cir.func @_Z4testv() +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/builtin-alloca.c b/clang/test/CIR/CodeGen/builtin-alloca.c new file mode 100644 index 000000000000..a02f328cc12f --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-alloca.c @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +typedef __SIZE_TYPE__ size_t; +void *alloca(size_t size); +void *_alloca(size_t size); + +void my_alloca(size_t n) +{ + int *c1 = alloca(n); +} +// CIR: cir.func @my_alloca([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } + +void my___builtin_alloca(size_t n) +{ + int *c1 = (int *)__builtin_alloca(n); +} + +// CIR: cir.func 
@my___builtin_alloca([[ALLOCA_SIZE:%.*]]: !u64i
+// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr
+// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i
+// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64}
+// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr
+// CIR: }
+
+
+// LLVM: define void @my___builtin_alloca(i64 [[ALLOCA_SIZE:%.*]])
+// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]],
+// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]],
+// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16
+// LLVM: }
+
+void my__builtin_alloca_uninitialized(size_t n)
+{
+  int *c1 = (int *)__builtin_alloca_uninitialized(n);
+}
+
+// CIR: cir.func @my__builtin_alloca_uninitialized([[ALLOCA_SIZE:%.*]]: !u64i
+// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr
+// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i
+// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64}
+// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr
+// CIR: }
+
+
+// LLVM: define void @my__builtin_alloca_uninitialized(i64 [[ALLOCA_SIZE:%.*]])
+// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]],
+// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]],
+// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16
+// LLVM: }
diff --git a/clang/test/CIR/CodeGen/builtin-bits.cpp b/clang/test/CIR/CodeGen/builtin-bits.cpp
new file mode 100644
index 000000000000..6b82f75187b8
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtin-bits.cpp
@@ -0,0 +1,186 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+int test_builtin_clrsb(int x) {
+  return __builtin_clrsb(x);
+}
+
+// CHECK: cir.func @_Z18test_builtin_clrsbi
+// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s32i) : !s32i
+// CHECK: }
+
+int test_builtin_clrsbl(long x) {
+  return __builtin_clrsbl(x);
+}
+
+// CHECK: cir.func @_Z19test_builtin_clrsbll
+// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i
+// CHECK: }
+
+int test_builtin_clrsbll(long long x) {
+  return __builtin_clrsbll(x);
+}
+
+// CHECK: cir.func @_Z20test_builtin_clrsbllx
+// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i
+// CHECK: }
+
+int test_builtin_ctzs(unsigned short x) {
+  return __builtin_ctzs(x);
+}
+
+// CHECK: cir.func @_Z17test_builtin_ctzst
+// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u16i) : !s32i
+// CHECK: }
+
+int test_builtin_ctz(unsigned x) {
+  return __builtin_ctz(x);
+}
+
+// CHECK: cir.func @_Z16test_builtin_ctzj
+// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i
+// CHECK: }
+
+int test_builtin_ctzl(unsigned long x) {
+  return __builtin_ctzl(x);
+}
+
+// CHECK: cir.func @_Z17test_builtin_ctzlm
+// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i
+// CHECK: }
+
+int test_builtin_ctzll(unsigned long long x) {
+  return __builtin_ctzll(x);
+}
+
+// CHECK: cir.func @_Z18test_builtin_ctzlly
+// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i
+// CHECK: }
+
+int test_builtin_ctzg(unsigned x) {
+  return __builtin_ctzg(x);
+}
+
+// CHECK: cir.func @_Z17test_builtin_ctzgj
+// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i
+// CHECK: }
+
+int test_builtin_clzs(unsigned 
short x) { + return __builtin_clzs(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzst +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u16i) : !s32i +// CHECK: } + +int test_builtin_clz(unsigned x) { + return __builtin_clz(x); +} + +// CHECK: cir.func @_Z16test_builtin_clzj +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_clzl(unsigned long x) { + return __builtin_clzl(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzlm +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_clzll(unsigned long long x) { + return __builtin_clzll(x); +} + +// CHECK: cir.func @_Z18test_builtin_clzlly +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_clzg(unsigned x) { + return __builtin_clzg(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzgj +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_ffs(int x) { + return __builtin_ffs(x); +} + +// CHECK: cir.func @_Z16test_builtin_ffsi +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s32i) : !s32i +// CHECK: } + +int test_builtin_ffsl(long x) { + return __builtin_ffsl(x); +} + +// CHECK: cir.func @_Z17test_builtin_ffsll +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_ffsll(long long x) { + return __builtin_ffsll(x); +} + +// CHECK: cir.func @_Z18test_builtin_ffsllx +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_parity(unsigned x) { + return __builtin_parity(x); +} + +// CHECK: cir.func @_Z19test_builtin_parityj +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_parityl(unsigned long x) { + return __builtin_parityl(x); +} + +// CHECK: cir.func @_Z20test_builtin_paritylm +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_parityll(unsigned long long x) { + return __builtin_parityll(x); +} + +// CHECK: cir.func @_Z21test_builtin_paritylly +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_popcount(unsigned x) { + return __builtin_popcount(x); +} + +// CHECK: cir.func @_Z21test_builtin_popcountj +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_popcountl(unsigned long x) { + return __builtin_popcountl(x); +} + +// CHECK: cir.func @_Z22test_builtin_popcountlm +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_popcountll(unsigned long long x) { + return __builtin_popcountll(x); +} + +// CHECK: cir.func @_Z23test_builtin_popcountlly +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_popcountg(unsigned x) { + return __builtin_popcountg(x); +} + +// CHECK: cir.func @_Z22test_builtin_popcountgj +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp new file mode 100644 index 000000000000..9aa3175eeecd --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +auto func() { + return __builtin_strcmp("", ""); + // CHECK: cir.func @_Z4funcv() + // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) + // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i 
loc(#loc7) + // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr loc(#loc8) + // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i loc(#loc8) + // CHECK-NEXT: cir.return %2 : !s32i loc(#loc8) +} diff --git a/clang/test/CIR/CodeGen/builtin-constant-p.c b/clang/test/CIR/CodeGen/builtin-constant-p.c new file mode 100644 index 000000000000..1b3dbe7e9275 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-constant-p.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +int a = 0; +int foo() { + return __builtin_constant_p(a); +} + +// CIR: cir.func no_proto @foo() -> !s32i extra(#fn_attr) +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR: [[TMP1:%.*]] = cir.get_global @a : cir.ptr +// CIR: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CIR: [[TMP3:%.*]] = cir.is_constant([[TMP2]] : !s32i) : !cir.bool +// CIR: [[TMP4:%.*]] = cir.cast(bool_to_int, [[TMP3]] : !cir.bool), !s32i +// CIR: cir.store [[TMP4]], [[TMP0]] : !s32i, cir.ptr +// CIR: [[TMP5:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CIR: cir.return [[TMP5]] : !s32i + +// LLVM:define i32 @foo() +// LLVM: [[TMP1:%.*]] = alloca i32, i64 1 +// LLVM: [[TMP2:%.*]] = load i32, ptr @a +// LLVM: [[TMP3:%.*]] = call i1 @llvm.is.constant.i32(i32 [[TMP2]]) +// LLVM: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8 +// LLVM: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 +// LLVM: store i32 [[TMP5]], ptr [[TMP1]] +// LLVM: [[TMP6:%.*]] = load i32, ptr [[TMP1]] +// LLVM: ret i32 [[TMP6]] + diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c new file mode 100644 index 000000000000..82099f666f45 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -0,0 +1,618 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=AARCH64 + +// ceil + +float my_ceilf(float f) { + return __builtin_ceilf(f); + // CHECK: cir.func @my_ceilf + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float +} + +double my_ceil(double f) { + return __builtin_ceil(f); + // CHECK: cir.func @my_ceil + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double +} + +long double my_ceill(long double f) { + return __builtin_ceill(f); + // CHECK: cir.func @my_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double +} + +float ceilf(float); +double ceil(double); +long double ceill(long double); + +float call_ceilf(float f) { + return ceilf(f); + // CHECK: cir.func @call_ceilf + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float +} + +double call_ceil(double f) { + return ceil(f); + // CHECK: cir.func @call_ceil + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double +} + +long double call_ceill(long double f) { + return ceill(f); + // CHECK: cir.func @call_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double +} + +// cos + +float my_cosf(float f) { + return __builtin_cosf(f); + // CHECK: cir.func @my_cosf + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float +} + +double my_cos(double f) { + return __builtin_cos(f); + // CHECK: cir.func @my_cos + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double +} + +long double my_cosl(long 
double f) { + return __builtin_cosl(f); + // CHECK: cir.func @my_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double +} + +float cosf(float); +double cos(double); +long double cosl(long double); + +float call_cosf(float f) { + return cosf(f); + // CHECK: cir.func @call_cosf + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float +} + +double call_cos(double f) { + return cos(f); + // CHECK: cir.func @call_cos + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double +} + +long double call_cosl(long double f) { + return cosl(f); + // CHECK: cir.func @call_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double +} + +// exp + +float my_expf(float f) { + return __builtin_expf(f); + // CHECK: cir.func @my_expf + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float +} + +double my_exp(double f) { + return __builtin_exp(f); + // CHECK: cir.func @my_exp + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double +} + +long double my_expl(long double f) { + return __builtin_expl(f); + // CHECK: cir.func @my_expl + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double +} + +float expf(float); +double exp(double); +long double expl(long double); + +float call_expf(float f) { + return expf(f); + // CHECK: cir.func @call_expf + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float +} + +double call_exp(double f) { + return exp(f); + // CHECK: cir.func @call_exp + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double +} + +long double call_expl(long double f) { + return expl(f); + // CHECK: cir.func @call_expl + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double +} + +// exp2 + +float my_exp2f(float f) { + return __builtin_exp2f(f); + // CHECK: cir.func @my_exp2f + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float +} + +double my_exp2(double f) { + return __builtin_exp2(f); + // CHECK: cir.func @my_exp2 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double +} + +long double my_exp2l(long double f) { + return __builtin_exp2l(f); + // CHECK: cir.func @my_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double +} + +float exp2f(float); +double exp2(double); +long double exp2l(long double); + +float call_exp2f(float f) { + return exp2f(f); + // CHECK: cir.func @call_exp2f + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float +} + +double call_exp2(double f) { + return exp2(f); + // CHECK: cir.func @call_exp2 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double +} + +long double call_exp2l(long double f) { + return exp2l(f); + // CHECK: cir.func @call_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double +} + +// floor + +float my_floorf(float f) { + return __builtin_floorf(f); + // CHECK: cir.func @my_floorf + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float +} + +double my_floor(double f) { + return __builtin_floor(f); + // CHECK: cir.func @my_floor + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double +} + +long double my_floorl(long double f) { + return __builtin_floorl(f); + // CHECK: cir.func @my_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double +} + +float floorf(float); +double floor(double); +long double floorl(long double); + +float call_floorf(float f) { + return floorf(f); + // CHECK: cir.func @call_floorf + // 
CHECK: {{.+}} = cir.floor {{.+}} : !cir.float +} + +double call_floor(double f) { + return floor(f); + // CHECK: cir.func @call_floor + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double +} + +long double call_floorl(long double f) { + return floorl(f); + // CHECK: cir.func @call_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double +} + +// log + +float my_logf(float f) { + return __builtin_logf(f); + // CHECK: cir.func @my_logf + // CHECK: {{.+}} = cir.log {{.+}} : !cir.float +} + +double my_log(double f) { + return __builtin_log(f); + // CHECK: cir.func @my_log + // CHECK: {{.+}} = cir.log {{.+}} : !cir.double +} + +long double my_logl(long double f) { + return __builtin_logl(f); + // CHECK: cir.func @my_logl + // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double +} + +float logf(float); +double log(double); +long double logl(long double); + +float call_logf(float f) { + return logf(f); + // CHECK: cir.func @call_logf + // CHECK: {{.+}} = cir.log {{.+}} : !cir.float +} + +double call_log(double f) { + return log(f); + // CHECK: cir.func @call_log + // CHECK: {{.+}} = cir.log {{.+}} : !cir.double +} + +long double call_logl(long double f) { + return logl(f); + // CHECK: cir.func @call_logl + // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double +} + +// log10 + +float my_log10f(float f) { + return __builtin_log10f(f); + // CHECK: cir.func @my_log10f + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float +} + +double my_log10(double f) { + return __builtin_log10(f); + // CHECK: cir.func @my_log10 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double +} + +long double my_log10l(long double f) { + return __builtin_log10l(f); + // CHECK: cir.func @my_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double +} + +float log10f(float); +double log10(double); +long double log10l(long double); + +float call_log10f(float f) { + return log10f(f); + // CHECK: cir.func @call_log10f + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float +} + +double call_log10(double f) { + return log10(f); + // CHECK: cir.func @call_log10 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double +} + +long double call_log10l(long double f) { + return log10l(f); + // CHECK: cir.func @call_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double +} + +// log2 + +float my_log2f(float f) { + return __builtin_log2f(f); + // CHECK: cir.func @my_log2f + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float +} + +double my_log2(double f) { + return __builtin_log2(f); + // CHECK: cir.func @my_log2 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double +} + +long double my_log2l(long double f) { + return __builtin_log2l(f); + // CHECK: cir.func @my_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double +} + +float log2f(float); +double log2(double); +long double log2l(long double); + +float call_log2f(float f) { + return log2f(f); + // CHECK: cir.func @call_log2f + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float +} + +double call_log2(double f) { + return log2(f); + // CHECK: cir.func @call_log2 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double +} + +long double call_log2l(long double f) { + return log2l(f); + // CHECK: cir.func @call_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : 
!cir.long_double + // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double +} + +// nearbyint + +float my_nearbyintf(float f) { + return __builtin_nearbyintf(f); + // CHECK: cir.func @my_nearbyintf + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float +} + +double my_nearbyint(double f) { + return __builtin_nearbyint(f); + // CHECK: cir.func @my_nearbyint + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double +} + +long double my_nearbyintl(long double f) { + return __builtin_nearbyintl(f); + // CHECK: cir.func @my_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double +} + +float nearbyintf(float); +double nearbyint(double); +long double nearbyintl(long double); + +float call_nearbyintf(float f) { + return nearbyintf(f); + // CHECK: cir.func @call_nearbyintf + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float +} + +double call_nearbyint(double f) { + return nearbyint(f); + // CHECK: cir.func @call_nearbyint + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double +} + +long double call_nearbyintl(long double f) { + return nearbyintl(f); + // CHECK: cir.func @call_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double +} + +// rint + +float my_rintf(float f) { + return __builtin_rintf(f); + // CHECK: cir.func @my_rintf + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float +} + +double my_rint(double f) { + return __builtin_rint(f); + // CHECK: cir.func @my_rint + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double +} + +long double my_rintl(long double f) { + return __builtin_rintl(f); + // CHECK: cir.func @my_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double +} + +float rintf(float); +double rint(double); +long double rintl(long double); + +float call_rintf(float f) { + return rintf(f); + // CHECK: cir.func @call_rintf + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float +} + +double call_rint(double f) { + return rint(f); + // CHECK: cir.func @call_rint + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double +} + +long double call_rintl(long double f) { + return rintl(f); + // CHECK: cir.func @call_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double +} + +// round + +float my_roundf(float f) { + return __builtin_roundf(f); + // CHECK: cir.func @my_roundf + // CHECK: {{.+}} = cir.round {{.+}} : !cir.float +} + +double my_round(double f) { + return __builtin_round(f); + // CHECK: cir.func @my_round + // CHECK: {{.+}} = cir.round {{.+}} : !cir.double +} + +long double my_roundl(long double f) { + return __builtin_roundl(f); + // CHECK: cir.func @my_roundl + // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double +} + +float roundf(float); +double round(double); +long double roundl(long double); + +float call_roundf(float f) { + return roundf(f); + // CHECK: cir.func @call_roundf + // CHECK: {{.+}} = cir.round {{.+}} : !cir.float +} + +double call_round(double f) { + return round(f); + // CHECK: cir.func @call_round + // CHECK: {{.+}} = cir.round {{.+}} : !cir.double +} + +long double call_roundl(long double f) { + return roundl(f); + // CHECK: cir.func @call_roundl + // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double +} + +// sin + +float my_sinf(float f) { + return __builtin_sinf(f); + // 
CHECK: cir.func @my_sinf + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float +} + +double my_sin(double f) { + return __builtin_sin(f); + // CHECK: cir.func @my_sin + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double +} + +long double my_sinl(long double f) { + return __builtin_sinl(f); + // CHECK: cir.func @my_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double +} + +float sinf(float); +double sin(double); +long double sinl(long double); + +float call_sinf(float f) { + return sinf(f); + // CHECK: cir.func @call_sinf + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float +} + +double call_sin(double f) { + return sin(f); + // CHECK: cir.func @call_sin + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double +} + +long double call_sinl(long double f) { + return sinl(f); + // CHECK: cir.func @call_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double +} + +// sqrt + +float my_sqrtf(float f) { + return __builtin_sqrtf(f); + // CHECK: cir.func @my_sqrtf + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float +} + +double my_sqrt(double f) { + return __builtin_sqrt(f); + // CHECK: cir.func @my_sqrt + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double +} + +long double my_sqrtl(long double f) { + return __builtin_sqrtl(f); + // CHECK: cir.func @my_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double +} + +float sqrtf(float); +double sqrt(double); +long double sqrtl(long double); + +float call_sqrtf(float f) { + return sqrtf(f); + // CHECK: cir.func @call_sqrtf + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float +} + +double call_sqrt(double f) { + return sqrt(f); + // CHECK: cir.func @call_sqrt + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double +} + +long double call_sqrtl(long double f) { + return sqrtl(f); + // CHECK: cir.func @call_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double +} + +// trunc + +float my_truncf(float f) { + return __builtin_truncf(f); + // CHECK: cir.func @my_truncf + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float +} + +double my_trunc(double f) { + return __builtin_trunc(f); + // CHECK: cir.func @my_trunc + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double +} + +long double my_truncl(long double f) { + return __builtin_truncl(f); + // CHECK: cir.func @my_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double +} + +float truncf(float); +double trunc(double); +long double truncl(long double); + +float call_truncf(float f) { + return truncf(f); + // CHECK: cir.func @call_truncf + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float +} + +double call_trunc(double f) { + return trunc(f); + // CHECK: cir.func @call_trunc + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double +} + +long double call_truncl(long double f) { + return truncl(f); + // CHECK: cir.func @call_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double +} diff --git a/clang/test/CIR/CodeGen/builtin-ms-alloca.c b/clang/test/CIR/CodeGen/builtin-ms-alloca.c new file mode 100644 index 000000000000..d500304d7f6d --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-ms-alloca.c @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple 
x86_64-unknown-linux-gnu -fms-extensions -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +typedef __SIZE_TYPE__ size_t; + +void my_win_alloca(size_t n) +{ + int *c1 = (int *)_alloca(n); +} + +// CIR: cir.func @my_win_alloca([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my_win_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } diff --git a/clang/test/CIR/CodeGen/builtin-prefetch.c b/clang/test/CIR/CodeGen/builtin-prefetch.c new file mode 100644 index 000000000000..21b908d085bd --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-prefetch.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +void foo(void *a) { + __builtin_prefetch(a, 1, 1); +} + +// CIR: cir.func @foo(%arg0: !cir.ptr loc({{.*}})) +// CIR: [[PTR_ALLOC:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} +// CIR: cir.store %arg0, [[PTR_ALLOC]] : !cir.ptr, cir.ptr > +// CIR: [[PTR:%.*]] = cir.load [[PTR_ALLOC]] : cir.ptr >, !cir.ptr +// CIR: cir.prefetch([[PTR]] : !cir.ptr) locality(1) write +// CIR: cir.return + +// LLVM: define void @foo(ptr [[ARG0:%.*]]) +// LLVM: [[PTR_ALLOC:%.*]] = alloca ptr, i64 1 +// LLVM: store ptr [[ARG0]], ptr [[PTR_ALLOC]] +// LLVM: [[PTR:%.*]] = load ptr, ptr [[PTR_ALLOC]] +// LLVM: call void @llvm.prefetch.p0(ptr [[PTR]], i32 1, i32 1, i32 1) +// LLVM: ret void diff --git a/clang/test/CIR/CodeGen/c89-implicit-int.c b/clang/test/CIR/CodeGen/c89-implicit-int.c new file mode 100644 index 000000000000..8fe7b285c338 --- /dev/null +++ b/clang/test/CIR/CodeGen/c89-implicit-int.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c89 -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Implicit int return type. 
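+// In C89, a declaration that omits a type specifier defaults to int, so both
+// the file-scope `test` below and the function `func` are implicitly typed int.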
+test = 0; +// CHECK: cir.global external @test = #cir.int<0> : !s32i +func (void) { +// CHECK: cir.func @func() -> !s32i + return 0; +} diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c new file mode 100644 index 000000000000..956c72ca0bc5 --- /dev/null +++ b/clang/test/CIR/CodeGen/call.c @@ -0,0 +1,91 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CXX + +void a(void) {} +int b(int a, int b) { + return a + b; +} +double c(double a, double b) { + return a + b; +} + +void d(void) { + a(); + b(0, 1); +} + +// CHECK: module {{.*}} { +// CHECK: cir.func @a() +// CHECK: cir.return +// CHECK: } +// CHECK: cir.func @b(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: cir.store %arg1, %1 : !s32i, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK: cir.store %5, %2 : !s32i, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , !s32i +// CHECK: cir.return %6 +// CHECK: } +// CHECK: cir.func @c(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double +// CHECK: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] +// CHECK: cir.store %arg0, %0 : !cir.double, cir.ptr +// CHECK: cir.store %arg1, %1 : !cir.double, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , !cir.double +// CHECK: %4 = cir.load %1 : cir.ptr , !cir.double +// CHECK: %5 = cir.binop(add, %3, %4) : !cir.double +// CHECK: cir.store %5, %2 : !cir.double, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , !cir.double +// CHECK: cir.return %6 : !cir.double +// CHECK: } +// CHECK: cir.func @d() +// CHECK: call @a() : () -> () +// CHECK: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: call @b(%0, %1) : (!s32i, !s32i) -> !s32i +// CHECK: cir.return +// CHECK: } +// +// CXX: module {{.*}} { +// CXX-NEXT: cir.func @_Z1av() +// CXX-NEXT: cir.return +// CXX-NEXT: } +// CXX-NEXT: cir.func @_Z1bii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i +// CXX-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : cir.ptr , !s32i +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CXX-NEXT: cir.store %5, %2 : !s32i, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !s32i +// CXX-NEXT: cir.return %6 +// CXX-NEXT: } +// CXX-NEXT: cir.func @_Z1cdd(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double +// CXX-NEXT: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !cir.double, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !cir.double, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : 
cir.ptr , !cir.double +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !cir.double +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : !cir.double +// CXX-NEXT: cir.store %5, %2 : !cir.double, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !cir.double +// CXX-NEXT: cir.return %6 : !cir.double +// CXX-NEXT: } +// CXX-NEXT: cir.func @_Z1dv() +// CXX-NEXT: call @_Z1av() : () -> () +// CXX-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CXX-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CXX-NEXT: call @_Z1bii(%0, %1) : (!s32i, !s32i) -> !s32i +// CXX-NEXT: cir.return +// CXX-NEXT: } diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp new file mode 100644 index 000000000000..7f2a8497bad0 --- /dev/null +++ b/clang/test/CIR/CodeGen/call.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int& p(); +int f() { + return p() - 22; +} + +// CHECK: cir.func @_Z1fv() -> !s32i +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr +// CHECK: %2 = cir.load %1 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i +// CHECK: %4 = cir.binop(sub, %2, %3) nsw : !s32i diff --git a/clang/test/CIR/CodeGen/cast.c b/clang/test/CIR/CodeGen/cast.c new file mode 100644 index 000000000000..6e25fcc2abdc --- /dev/null +++ b/clang/test/CIR/CodeGen/cast.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +typedef struct { + int x; +} A; + +int cstyle_cast_lvalue(A a) { + return ((A)(a)).x; +} + +// CHECK: cir.func @cstyle_cast_lvalue(%arg0: !ty_22A22 loc({{.*}})) +// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: [[ALLOC_RET:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_22A22, cir.ptr +// CHECK: [[X_ADDR:%.*]] = cir.get_member [[ALLOC_A]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: [[X:%.*]] = cir.load [[X_ADDR]] : cir.ptr , !s32i +// CHECK: cir.store [[X]], [[ALLOC_RET]] : !s32i, cir.ptr +// CHECK: [[RET:%.*]] = cir.load [[ALLOC_RET]] : cir.ptr , !s32i +// CHECK: cir.return [[RET]] : !s32i + diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp new file mode 100644 index 000000000000..b760e90b131b --- /dev/null +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -0,0 +1,144 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned char cxxstaticcast_0(unsigned int x) { + return static_cast(x); +} + +// CHECK: cir.func @_Z15cxxstaticcast_0j +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !u8i, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %3 = cir.cast(integral, %2 : !u32i), !u8i +// CHECK: cir.store %3, %1 : !u8i, cir.ptr +// CHECK: %4 = cir.load %1 : cir.ptr , !u8i +// CHECK: cir.return %4 : !u8i +// CHECK: } + + +int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { +// CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} + + char a = (char)x1; // truncate + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s8i + + short b = (short)x2; // truncate with sign + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : 
!s32i), !s16i + + long long c = (long long)x1; // zero extend + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s64i + + long long d = (long long)x2; // sign extend + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s64i + + unsigned ui = (unsigned)x2; // sign drop + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !u32i + + int si = (int)x1; // sign add + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s32i + + unsigned uu = (unsigned)x1; // should not be generated + // CHECK-NOT: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !u32i + + int arr[3]; + int* e = (int*)arr; // explicit pointer decay + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr + + int f = (int)x3; + // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !s32i + + double g = (double)x3; // FP extension + // %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.float), !cir.double + + long l = (long)(void*)x4; // Must sign extend before casting to pointer + // CHECK: %[[TMP:[0-9]+]] = cir.cast(integral, %{{[0-9]+}} : !s16i), !u64i + // CHECK: %[[TMP2:[0-9]+]] = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr + // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i + + float sitofp = (float)x2; // Signed integer to floating point + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !s32i), !cir.float + + float uitofp = (float)x1; // Unsigned integer to floating point + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !u32i), !cir.float + + int fptosi = (int)x3; // Floating point to signed integer + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !s32i + + unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !u32i + + bool ib = (bool)x1; // No checking, because this isn't a regular cast. 
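+  // (Bool casts from pointers are exercised separately in cptr() below.)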
+ + int bi = (int)ib; // bool to int + // CHECK: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i + + float bf = (float)ib; // bool to float + // CHECK: %{{[0-9]+}} = cir.cast(bool_to_float, %{{[0-9]+}} : !cir.bool), !cir.float + + void* bpv = (void*)ib; // bool to pointer, which is done in two steps + // CHECK: %[[TMP:[0-9]+]] = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !u64i + // CHECK: %{{[0-9]+}} = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr + + float dptofp = (float)x5; + // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : !cir.double), !cir.float + + return 0; +} + +bool cptr(void *d) { + bool x = d; + return x; +} + +// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} + +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool + +void call_cptr(void *d) { + if (!cptr(d)) { + } +} + +// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} + +// CHECK: cir.scope { +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool +// CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool +// CHECK: cir.if %3 { + +void lvalue_cast(int x) { + *(int *)&x = 42; +} + +// CHECK: cir.func @_Z11lvalue_cast +// CHECK: %1 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: cir.store %1, %0 : !s32i, cir.ptr + +struct A { int x; }; + +void null_cast(long ptr) { + *(int *)0 = 0; + ((A *)0)->x = 0; +} + +// CHECK: cir.func @_Z9null_castl +// CHECK: %[[ADDR:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, cir.ptr +// CHECK: %[[BASE:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr + +void int_cast(long ptr) { + ((A *)ptr)->x = 0; +} + +// CHECK: cir.func @_Z8int_castl +// CHECK: %[[BASE:[0-9]+]] = cir.cast(int_to_ptr, %{{[0-9]+}} : !u64i), !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr + diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp new file mode 100644 index 000000000000..3bca55e78d13 --- /dev/null +++ b/clang/test/CIR/CodeGen/cmp.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void c0(int a, int b) { + bool x = a > b; + x = a < b; + x = a <= b; + x = a >= b; + x = a != b; + x = a == b; +} + +// CHECK: = cir.cmp(gt, %3, %4) : !s32i, !cir.bool +// CHECK: = cir.cmp(lt, %6, %7) : !s32i, !cir.bool +// CHECK: = cir.cmp(le, %9, %10) : !s32i, !cir.bool +// CHECK: = cir.cmp(ge, %12, %13) : !s32i, !cir.bool +// CHECK: = cir.cmp(ne, %15, %16) : !s32i, !cir.bool +// CHECK: = cir.cmp(eq, %18, %19) : !s32i, !cir.bool diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp new file mode 100644 index 000000000000..4d2ce88b9d26 --- /dev/null +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int c0() { + int a = 1; + int b = 2; + return b + 1, a; +} + +// CHECK: cir.func @_Z2c0v() -> !s32i +// 
CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , !s32i +// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : !s32i +// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , !s32i +// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, cir.ptr + +int &foo1(); +int &foo2(); + +void c1() { + int &x = (foo1(), foo2()); +} + +// CHECK: cir.func @_Z2c1v() +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %1 = cir.call @_Z4foo1v() : () -> !cir.ptr +// CHECK: %2 = cir.call @_Z4foo2v() : () -> !cir.ptr +// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c new file mode 100644 index 000000000000..a91c58ba9e9b --- /dev/null +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -0,0 +1,67 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + + +typedef struct { + int *arr; +} S; + +S a = { + .arr = (int[]){} +}; + +// CIR: cir.global "private" internal @".compoundLiteral.0" = #cir.zero : !cir.array {alignment = 4 : i64} +// CIR: cir.global external @a = #cir.const_struct<{#cir.global_view<@".compoundLiteral.0"> : !cir.ptr}> : !ty_22S22 + +// LLVM: @.compoundLiteral.0 = internal global [0 x i32] zeroinitializer +// LLVM: @a = global %struct.S { ptr @.compoundLiteral.0 } + +S b = { + .arr = (int[]){1} +}; + +// CIR: cir.global "private" internal @".compoundLiteral.1" = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} +// CIR: cir.global external @b = #cir.const_struct<{#cir.global_view<@".compoundLiteral.1"> : !cir.ptr}> : !ty_22S22 + +// LLVM: @.compoundLiteral.1 = internal global [1 x i32] [i32 1] +// LLVM: @b = global %struct.S { ptr @.compoundLiteral.1 } + +int foo() { + return (struct { + int i; + }){1} + .i; +} + +// CIR: cir.func no_proto @foo() -> !s32i +// CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, cir.ptr , [".compoundliteral"] {alignment = 4 : i64} +// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: cir.store [[ONE]], [[FIELD]] : !s32i, cir.ptr +// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : cir.ptr , !s32i +// CIR: cir.return [[RET]] : !s32i + +struct G { short x, y, z; }; +struct G g(int x, int y, int z) { + return (struct G) { x, y, z }; +} + +// CIR: cir.func @g +// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, cir.ptr , ["__retval"] {alignment = 2 : i64} loc(#loc18) +// CIR: %[[X:.*]] = cir.get_member %[[RETVAL]][0] {name = "x"} +// CIR: cir.store {{.*}}, %[[X]] : !s16i +// CIR: %[[Y:.*]] = cir.get_member %[[RETVAL]][1] {name = "y"} +// CIR: cir.store {{.*}}, %[[Y]] : !s16i +// CIR: %[[Z:.*]] = cir.get_member %[[RETVAL]][2] {name = "z"} +// CIR: cir.store {{.*}}, %[[Z]] : !s16i +// CIR: %[[RES:.*]] = cir.load %[[RETVAL]] +// CIR: cir.return %[[RES]] + +// Nothing meaningful to test for LLVM 
codegen here. +// FIXME: ABI note, LLVM lowering differs from traditional LLVM codegen here, +// because the former does a memcopy + i48 load. \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp new file mode 100644 index 000000000000..fdeab5942e7b --- /dev/null +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __less { + inline constexpr bool operator()(const unsigned long& __x, const unsigned long& __y) const {return __x < __y;} +}; + +const unsigned long& +min(const unsigned long& __a, const unsigned long& __b) { + return __less()(__b, __a) ? __b : __a; +} + +// CHECK: cir.func @_Z3minRKmS0_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__a", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__b", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !ty_22__less22, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () +// CHECK: %5 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %8 = cir.ternary(%7, true { +// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: cir.yield %9 : !cir.ptr +// CHECK: }, false { +// CHECK: %9 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.yield %9 : !cir.ptr +// CHECK: }) : (!cir.bool) -> !cir.ptr +// CHECK: cir.store %8, %2 : !cir.ptr, cir.ptr > \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c new file mode 100644 index 000000000000..eb0adceabdad --- /dev/null +++ b/clang/test/CIR/CodeGen/const-array.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +void bar() { + const int arr[1] = {1}; +} + +// CHECK: cir.global "private" constant internal @bar.arr = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} +// CHECK: cir.func no_proto @bar() +// CHECK: {{.*}} = cir.get_global @bar.arr : cir.ptr > + +void foo() { + int a[10] = {1}; +} + +// CHECK: cir.func {{.*@foo}} +// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK: %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array +// CHECK: cir.store %1, %0 : !cir.array, cir.ptr > diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c new file mode 100644 index 000000000000..7be114951bec --- /dev/null +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s + +struct T { + int X : 5; + int Y : 6; + int Z : 9; + int W; +}; + +struct Inner { + unsigned a : 1; + unsigned b : 1; + unsigned c : 1; + unsigned d : 30; +}; + +// CHECK: !ty_anon_struct = !cir.struct, !cir.int, !cir.int, !cir.int}> +// CHECK: !ty_22T22 = !cir.struct x 3>, !cir.int} #cir.record.decl.ast> +// CHECK: !ty_anon_struct1 = !cir.struct, !cir.array x 3>, 
!cir.int, !cir.int, !cir.int, !cir.int}> +// CHECK: #bfi_Z = #cir.bitfield_info, size = 9, offset = 11, is_signed = true> + +struct T GV = { 1, 5, 26, 42 }; +// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<208> : !u8i, #cir.int<0> : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct + +// check padding is used (const array of zeros) +struct Inner var = { 1, 0, 1, 21}; +// CHECK: cir.global external @var = #cir.const_struct<{#cir.int<5> : !u8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<21> : !u8i, #cir.int<0> : !u8i, #cir.int<0> : !u8i, #cir.int<0> : !u8i}> : !ty_anon_struct1 + + +// CHECK: cir.func {{.*@getZ()}} +// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr> +// CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr>) -> !s32i +int getZ() { + return GV.Z; +} + +// check the type used is the type of T struct for plain field +// CHECK: cir.func {{.*@getW()}} +// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.get_member %2[1] {name = "W"} : !cir.ptr -> !cir.ptr +int getW() { + return GV.W; +} + diff --git a/clang/test/CIR/CodeGen/constptr.c b/clang/test/CIR/CodeGen/constptr.c new file mode 100644 index 000000000000..b400cb8c444f --- /dev/null +++ b/clang/test/CIR/CodeGen/constptr.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +int *p = (int*)0x1234; + + +// CIR: cir.global external @p = #cir.ptr<4660> : !cir.ptr +// LLVM: @p = global ptr inttoptr (i64 4660 to ptr) diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp new file mode 100644 index 000000000000..c252a5cc43ab --- /dev/null +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -0,0 +1,382 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +namespace std { + +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template +typename remove_reference::type &&move(T &&t) noexcept; + +template +struct coroutine_traits { using promise_type = typename Ret::promise_type; }; + +template +struct coroutine_handle { + static coroutine_handle from_address(void *) noexcept; +}; +template <> +struct coroutine_handle { + template + coroutine_handle(coroutine_handle) noexcept; + static coroutine_handle from_address(void *); +}; + +struct suspend_always { + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +struct suspend_never { + bool await_ready() noexcept { return true; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +struct string { + int size() const; + string(); + string(char const *s); +}; + +template +struct optional { + optional(); + optional(const T&); + T &operator*() &; + T &&operator*() &&; + T &value() &; + T &&value() &&; +}; +} // namespace std + +namespace folly { +namespace coro { + +using std::suspend_always; +using std::suspend_never; +using std::coroutine_handle; + 
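+// What follows is a minimal mock of the folly::coro Task API: just enough
+// surface for CIRGen testing, not a faithful implementation.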
+using SemiFuture = int; + +template +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_value(T); + void unhandled_exception(); + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + T await_resume(); +}; + +template<> +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_void() noexcept; + void unhandled_exception() noexcept; + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} + SemiFuture semi(); +}; + +// FIXME: add CIRGen support here. +// struct blocking_wait_fn { +// template +// T operator()(Task&& awaitable) const { +// return T(); +// } +// }; + +// inline constexpr blocking_wait_fn blocking_wait{}; +// static constexpr blocking_wait_fn const& blockingWait = blocking_wait; + +template +T blockingWait(Task&& awaitable) { + return T(); +} + +template +Task collectAllRange(Task* awaitable); + +template +Task collectAll(SemiAwaitables&&... awaitables); + +struct co_invoke_fn { + template + Task operator()(F&& f, A&&... a) const { + return Task(); + } +}; + +co_invoke_fn co_invoke; + +}} // namespace folly::coro + +// CHECK-DAG: ![[IntTask:.*]] = !cir.struct" {!cir.int}> +// CHECK-DAG: ![[VoidTask:.*]] = !cir.struct" {!cir.int}> +// CHECK-DAG: ![[VoidPromisse:.*]] = !cir.struct::promise_type" {!cir.int}> +// CHECK-DAG: ![[CoroHandleVoid:.*]] = !cir.struct" {!cir.int}> +// CHECK-DAG: ![[CoroHandlePromise:ty_.*]] = !cir.struct::promise_type>" {!cir.int}> +// CHECK-DAG: ![[StdString:.*]] = !cir.struct}> +// CHECK-DAG: ![[SuspendAlways:.*]] = !cir.struct}> + +// CHECK: module {{.*}} { +// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22folly3A3Acoro3A3Aco_invoke_fn22 + +// CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i +// CHECK: cir.func builtin private @__builtin_coro_alloc(!u32i) -> !cir.bool +// CHECK: cir.func builtin private @__builtin_coro_size() -> !u64i +// CHECK: cir.func builtin private @__builtin_coro_begin(!u32i, !cir.ptr) -> !cir.ptr + +using VoidTask = folly::coro::Task; + +VoidTask silly_task() { + co_await std::suspend_always(); +} + +// CHECK: cir.func coroutine @_Z10silly_taskv() -> ![[VoidTask]] {{.*}} { + +// Allocate promise. + +// CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] +// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} +// CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] + +// Get coroutine id with __builtin_coro_id. + +// CHECK: %[[#NullPtr:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %[[#Align:]] = cir.const(#cir.int<16> : !u32i) : !u32i +// CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) + +// Perform allocation calling operator 'new' depending on __builtin_coro_alloc and +// call __builtin_coro_begin for the final coroutine frame address. 
+
+ // CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (!u32i) -> !cir.bool
+// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr >
+// CHECK: cir.if %[[#ShouldAlloc]] {
+// CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> !u64i
+// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr
+// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr >
+// CHECK: }
+// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr
+// CHECK: %[[#CoroFrameAddr:]] = cir.call @__builtin_coro_begin(%[[#CoroId]], %[[#Load0]])
+
+// Call promise.get_return_object() to retrieve the task object.
+
+// CHECK: %[[#RetObj:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]]
+// CHECK: cir.store %[[#RetObj]], %[[#VoidTaskAddr]] : ![[VoidTask]]
+
+// Start a new scope for the actual co_await codegen and create temporary allocas
+// for holding the coroutine handle and the suspend_always struct.
+
+// CHECK: cir.scope {
+// CHECK: %[[#SuspendAlwaysAddr:]] = cir.alloca ![[SuspendAlways]], {{.*}} ["ref.tmp0"] {alignment = 1 : i64}
+// CHECK: %[[#CoroHandleVoidAddr:]] = cir.alloca ![[CoroHandleVoid]], {{.*}} ["agg.tmp0"] {alignment = 1 : i64}
+// CHECK: %[[#CoroHandlePromiseAddr:]] = cir.alloca ![[CoroHandlePromise]], {{.*}} ["agg.tmp1"] {alignment = 1 : i64}
+
+// Effectively execute `co_await promise_type::initial_suspend()` by calling initial_suspend() and getting
+// the suspend_always struct to use for cir.await. Note that we return by-value since we defer ABI lowering
+// to later passes; the same is done elsewhere.
+
+// CHECK: %[[#Tmp0:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type15initial_suspendEv(%[[#VoidPromisseAddr]])
+// CHECK: cir.store %[[#Tmp0]], %[[#SuspendAlwaysAddr]]
+
+//
+// Here we start mapping co_await to cir.await.
+//
+
+// The first region, `ready`, has a special cir.yield form to veto suspension.
+
+// CHECK: cir.await(init, ready : {
+// CHECK: %[[#ReadyVeto:]] = cir.scope {
+// CHECK: %[[#TmpCallRes:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]])
+// CHECK: cir.yield %[[#TmpCallRes]] : !cir.bool
+// CHECK: }
+// CHECK: cir.condition(%[[#ReadyVeto]])
+
+// The second region, `suspend`, contains the actual suspend logic.
+//
+// - Start by getting the coroutine handle using from_address().
+// - Implicitly convert the coroutine handle from the task-specific promise
+// specialization to a void one.
+// - Call suspend_always::await_suspend() passing the handle.
+//
+// FIXME: add veto support for non-void await_suspends.
+
+// CHECK: }, suspend : {
+// CHECK: %[[#FromAddrRes:]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%[[#CoroFrameAddr]])
+// CHECK: cir.store %[[#FromAddrRes]], %[[#CoroHandlePromiseAddr]] : ![[CoroHandlePromise]]
+// CHECK: %[[#CoroHandlePromiseReload:]] = cir.load %[[#CoroHandlePromiseAddr]]
+// CHECK: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CoroHandleVoidAddr]], %[[#CoroHandlePromiseReload]])
+// CHECK: %[[#CoroHandleVoidReload:]] = cir.load %[[#CoroHandleVoidAddr]] : cir.ptr , ![[CoroHandleVoid]]
+// CHECK: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SuspendAlwaysAddr]], %[[#CoroHandleVoidReload]])
+// CHECK: cir.yield
+
+// The third region, `resume`, handles coroutine resuming logic.
+
+ // CHECK: }, resume : {
+// CHECK: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SuspendAlwaysAddr]])
+// CHECK: cir.yield
+// CHECK: },)
+// CHECK: }
+
+// Since the cir.await guts were already tested above, the remaining checks are for:
+// - The actual user-written co_await
+// - The promise call
+// - The final suspend co_await
+// - Return
+
+// The actual user-written co_await
+// CHECK: cir.scope {
+// CHECK: cir.await(user, ready : {
+// CHECK: }, suspend : {
+// CHECK: }, resume : {
+// CHECK: },)
+// CHECK: }
+
+// The promise call
+// CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv(%[[#VoidPromisseAddr]])
+
+// The final suspend co_await
+// CHECK: cir.scope {
+// CHECK: cir.await(final, ready : {
+// CHECK: }, suspend : {
+// CHECK: }, resume : {
+// CHECK: },)
+// CHECK: }
+
+// Call builtin coro end and return
+
+// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.ptr : !cir.ptr)
+// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool
+// CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]])
+
+// CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]]
+// CHECK-NEXT: cir.return %[[#Tmp1]]
+// CHECK-NEXT: }
+
+folly::coro::Task byRef(const std::string& s) {
+  co_return s.size();
+}
+
+// FIXME: this could be less redundant than two allocas + reloads
+// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr
+// CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init]
+// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init]
+
+folly::coro::Task silly_coro() {
+  std::optional> task;
+  {
+    std::string s = "yolo";
+    task = byRef(s);
+  }
+  folly::coro::blockingWait(std::move(task.value()));
+  co_return;
+}
+
+// Make sure we properly handle the OnFallthrough coro body sub-statement and
+// check that multiple co_returns are not emitted.
+
+// CHECK: cir.func coroutine @_Z10silly_corov()
+// CHECK: cir.await(init, ready : {
+// CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv
+// CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv
+// CHECK: cir.await(final, ready : {
+
+folly::coro::Task go(int const& val);
+folly::coro::Task go1() {
+  auto task = go(1);
+  co_return co_await task;
+}
+
+// CHECK: cir.func coroutine @_Z3go1v()
+// CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], cir.ptr , ["task", init]
+
+// CHECK: cir.await(init, ready : {
+// CHECK: }, suspend : {
+// CHECK: }, resume : {
+// CHECK: },)
+// CHECK: }
+
+// The call to go(1) has its own scope due to full-expression rules.
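+// The temporary materialized for the `int const&` argument (ref.tmp1 below)
+// lives inside that scope.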
+// CHECK: cir.scope { +// CHECK: %[[#OneAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#One]], %[[#OneAddr]] : !s32i, cir.ptr +// CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] +// CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], cir.ptr +// CHECK: } + +// CHECK: %[[#CoReturnValAddr:]] = cir.alloca !s32i, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} +// CHECK: cir.await(user, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: %[[#ResumeVal:]] = cir.call @_ZN5folly4coro4TaskIiE12await_resumeEv(%3) +// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : !s32i, cir.ptr +// CHECK: },) +// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , !s32i +// CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) + +folly::coro::Task go1_lambda() { + auto task = []() -> folly::coro::Task { + co_return 1; + }(); + co_return co_await task; +} + +// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv +// CHECK: cir.func coroutine @_Z10go1_lambdav() + +folly::coro::Task go4() { + auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; + auto task = fn(3); + co_return co_await std::move(task); +} + +// CHECK: cir.func coroutine @_Z3go4v() + +// CHECK: cir.await(init, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } + +// CHECK: %12 = cir.scope { +// CHECK: %17 = cir.alloca !ty_22anon2E522, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} + +// Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %19 : !cir.ptr)>> +// CHECK: } +// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: cir.scope { +// CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} +// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %19, %17 : !s32i, cir.ptr + +// Call invoker, which calls operator() indirectly. 
+// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> ![[IntTask]] +// CHECK: cir.store %20, %4 : ![[IntTask]], cir.ptr +// CHECK: } + +// CHECK: cir.await(user, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp new file mode 100644 index 000000000000..865b05b267b5 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o - | FileCheck %s + +struct DummyString { + DummyString(const char *s) {} +}; + +void t() { + DummyString s4 = "yolo"; +} + +// CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return + +// CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc + +// CHECK: cir.func @_Z1tv +// CHECK-NEXT: %0 = cir.alloca !ty_22DummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.return + +struct B { + B(); +}; +B::B() { +} + +// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.return +// CHECK: } +// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp new file mode 100644 index 000000000000..743e1db42584 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +// TODO: support -mno-constructor-aliases + +struct String { + long size; + String(const String &s) : size{s.size} {} +// CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 +// CHECK: cir.store %arg1, %1 +// CHECK: %2 = cir.load %0 +// CHECK: %3 = cir.get_member %2[0] {name = "size"} +// CHECK: %4 = cir.load %1 +// CHECK: %5 = cir.get_member %4[0] {name = "size"} +// CHECK: %6 = cir.load %5 : cir.ptr , !s64i +// CHECK: cir.store %6, %3 : !s64i, cir.ptr +// CHECK: cir.return +// CHECK: } + + String() {} +}; + +void foo() { + String s; + String s1{s}; + // FIXME: s1 shouldn't be uninitialized. 
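+  // For reference, foo() lowers to something like: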
+ + // cir.func @_Z3foov() { + // %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] {alignment = 8 : i64} + // %1 = cir.alloca !ty_22String22, cir.ptr , ["s1"] {alignment = 8 : i64} + // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () + // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () + // cir.return + // } +} diff --git a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp new file mode 100644 index 000000000000..f70d1f8428d4 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct e { e(int); }; +e *g = new e(0); + +//CHECK: {{%.*}} = cir.const(#cir.int<1> : !u64i) : !u64i loc(#loc11) +//CHECK: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr loc(#loc6) diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp new file mode 100644 index 000000000000..842ead5f1143 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Struk { + int a; + Struk() {} + void test() {} +}; + +void baz() { + Struk s; +} + +// CHECK: !ty_22Struk22 = !cir.struct}> + +// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return + +// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return + +// CHECK: cir.func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !ty_22Struk22, cir.ptr , ["s", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/cxx-default-arg.cpp b/clang/test/CIR/CodeGen/cxx-default-arg.cpp new file mode 100644 index 000000000000..c5665337608b --- /dev/null +++ b/clang/test/CIR/CodeGen/cxx-default-arg.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func private @_ZN12MyIntPointerC1EPi + +struct MyIntPointer { + MyIntPointer(int *p = nullptr); +}; + +void foo() { + MyIntPointer p; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp new file mode 100644 index 000000000000..0f0ddcbc2c84 --- /dev/null +++ b/clang/test/CIR/CodeGen/delete.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef __typeof(sizeof(int)) size_t; + +namespace test1 { + struct A { void operator delete(void*,size_t); int x; }; + void a(A *x) { + delete x; + } + // CHECK: cir.func @_ZN5test11aEPNS_1AE + + // CHECK: %[[CONST:.*]] = cir.const(#cir.int<4> : !u64i) : !u64i + // CHECK: cir.call @_ZN5test11AdlEPvm({{.*}}, %[[CONST]]) +} \ No newline at end of file diff --git 
a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp new file mode 100644 index 000000000000..99a8641c7f76 --- /dev/null +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -0,0 +1,174 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef enum { + RequestFailed = -2004, +} enumy; + +typedef struct { + const void* samples; + int cound; +} buffy; + +class C1 { + public: + virtual ~C1(); + C1(int i); + + struct IE { + bool supported = false; + unsigned version = 0; + }; + + struct IEs { + IE chain; + }; + + static IEs availableIEs; + class Layer { + public: + Layer(int d); + virtual ~Layer() {} + }; + + virtual enumy SetStuff(enumy e, buffy b); + virtual enumy Initialize() = 0; +}; + +class C2 : public C1 { + public: + C2( + void* p, + int i + ); + + ~C2() override; + + class Layer : public C1::Layer { + public: + Layer(int d, const C2* C1); + virtual ~Layer(); + + protected: + const C2* m_C1; + }; + + virtual enumy SetStuff(enumy e, buffy b) override; + virtual enumy Initialize() override; +}; + +class C3 : public C2 { + struct Layer : public C2::Layer { + public: + Layer(int d, const C2* C1); + void Initialize(); + }; + + virtual enumy Initialize() override; +}; + +void C3::Layer::Initialize() { + if (m_C1 == nullptr) { + return; + } + if (m_C1->availableIEs.chain.supported) { + } +} + +// CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct) -> cir.ptr +// CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> +// CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool + +enumy C3::Initialize() { + return C2::Initialize(); +} + +// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr +// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i + +void vcall(C1 &c1) { + buffy b; + enumy e; + c1.SetStuff(e, b); +} + +// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["c1", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22buffy22, cir.ptr , ["b"] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["e"] {alignment = 4 : i64} +// CHECK: %3 = cir.alloca !ty_22buffy22, cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %6 = cir.load %3 : cir.ptr , !ty_22buffy22 +// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22buffy22)>>>> +// CHECK: %8 = cir.load %7 : cir.ptr , !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : cir.ptr , !s32i, !ty_22buffy22)>>> +// CHECK: %10 = cir.load %9 : cir.ptr , !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> +// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22buffy22)>>, !cir.ptr, !s32i, !ty_22buffy22) -> !s32i +// CHECK: 
cir.return +// CHECK: } + +class A { +public: + int a; + virtual void foo() {a++;} +}; + +class B : public A { +public: + int b; + void foo () { static_cast(*this).foo();} +}; + +// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load deref %0 : cir.ptr >, !cir.ptr +// CHECK: cir.scope { +// CHECK: %2 = cir.alloca !ty_22A22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %3 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr + +// Call @A::A(A const&) +// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () + +// Call @A::foo() +// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () +// CHECK: } +// CHECK: cir.return +// CHECK: } + +void t() { + B b; + b.foo(); +} + +struct C : public A { + int& ref; + C(int& x) : ref(x) {} +}; + +// CHECK: cir.func @_Z8test_refv() +// CHECK: cir.get_member %2[1] {name = "ref"} +int test_ref() { + int x = 42; + C c(x); + return c.ref; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/dlti.c b/clang/test/CIR/CodeGen/dlti.c new file mode 100644 index 000000000000..4ea8f5ca6359 --- /dev/null +++ b/clang/test/CIR/CodeGen/dlti.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo() {} + +// CHECK: module @"{{.*}}dlti.c" attributes { +// CHECK-DAG: cir.sob = #cir.signed_overflow_behavior, +// CHECK-DAG: dlti.dl_spec = +// CHECK-DAG: #dlti.dl_spec< +// CHECK-DAG: #dlti.dl_entry<"dlti.endianness", "little"> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry, dense<32> : vector<4xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry, dense<64> : vector<4xi64>> +// CHECK-DAG: #dlti.dl_entry, dense<32> : vector<4xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<4xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry : vector<2xi64>> +// CHECK-DAG: #dlti.dl_entry<"dlti.stack_alignment", 128 : i64> +// CHECK-DAG: >, +// CHECK-DAG: llvm.data_layout = +// CHECK-DAG: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp new file mode 100644 index 000000000000..d0991dc304c1 --- /dev/null +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -std=c++20 -fclangir -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s --check-prefix=DTOR_BODY + +extern "C" int printf(char const*, ...); +struct C { + C() { printf("++A\n"); } + ~C() { printf("--A\n"); } +}; +void dtor1() { + { + C c; + } + printf("Done\n"); +} + +// CHECK: cir.func @_Z5dtor1v() +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !ty_22C22, cir.ptr , ["c", init] {alignment = 1 : i64} +// CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1CD2Ev(%4) : 
(!cir.ptr) -> () +// CHECK: } + +// DTOR_BODY: cir.func linkonce_odr @_ZN1CD2Ev{{.*}}{ +// DTOR_BODY: %2 = cir.get_global @printf +// DTOR_BODY: %3 = cir.get_global @".str2" +// DTOR_BODY: %4 = cir.cast(array_to_ptrdecay, %3 +// DTOR_BODY: %5 = cir.call @printf(%4) +// DTOR_BODY: cir.return + +// DTOR_BODY: cir.func linkonce_odr @_ZN1CD1Ev(%arg0: !cir.ptr + +// DTOR_BODY: cir.call @_ZN1CD2Ev +// DTOR_BODY: cir.return +// DTOR_BODY: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp new file mode 100644 index 000000000000..6c005f6314c5 --- /dev/null +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -0,0 +1,81 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + + +enum class EFMode { Always, Verbose }; + +class PSEvent { + public: + PSEvent( + EFMode m, + const char* n); + ~PSEvent(); + + private: + const char* n; + EFMode m; +}; + +void blue() { + PSEvent p(EFMode::Verbose, __FUNCTION__); +} + +class A +{ +public: + A() noexcept {} + A(const A&) noexcept = default; + + virtual ~A() noexcept; + virtual const char* quack() const noexcept; +}; + +class B : public A +{ +public: + virtual ~B() noexcept {} +}; + +// Class A +// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> + +// Class B +// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> + +// CHECK: cir.func @_Z4bluev() +// CHECK: %0 = cir.alloca !ty_22PSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + +// @B::~B() #1 definition call into base @A::~A() +// CHECK: cir.func linkonce_odr @_ZN1BD2Ev{{.*}}{ +// CHECK: cir.call @_ZN1AD2Ev( + +// void foo() +// CHECK: cir.func @_Z3foov() +// CHECK: cir.scope { +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BD2Ev(%0) : (!cir.ptr) -> () + +// operator delete(void*) declaration +// CHECK: cir.func private @_ZdlPv(!cir.ptr) + +// B dtor => @B::~B() #2 +// Calls dtor #1 +// Calls operator delete +// +// CHECK: cir.func linkonce_odr @_ZN1BD0Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + +void foo() { B(); } diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp new file mode 100644 index 000000000000..ea31b4460c12 --- /dev/null +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -0,0 +1,81 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER + +struct Base { + virtual ~Base(); +}; + +struct Derived : Base {}; + +// BEFORE: 
#dyn_cast_info__ZTI4Base__ZTI7Derived = #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i> +// BEFORE: !ty_22Base22 = !cir.struct +// BEFORE: !ty_22Derived22 = !cir.struct + +Derived *ptr_cast(Base *b) { + return dynamic_cast(b); +} + +// BEFORE: cir.func @_Z8ptr_castP4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z8ptr_castP4Base +// AFTER: %[[#SRC_IS_NULL:]] = cir.cast(ptr_to_bool, %{{.+}} : !cir.ptr), !cir.bool +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NULL]], true { +// AFTER-NEXT: %[[#NULL:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL]] : !cir.ptr +// AFTER-NEXT: }, false { +// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// AFTER-NEXT: %[[#RESULT:]] = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#RESULT]] : !cir.ptr +// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr +// AFTER: } + +Derived &ref_cast(Base &b) { + return dynamic_cast(b); +} + +// BEFORE: cir.func @_Z8ref_castR4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ref, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z8ref_castR4Base +// AFTER: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// AFTER-NEXT: %[[#CASTED_PTR_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#CASTED_PTR]] : !cir.ptr), !cir.bool +// AFTER-NEXT: %[[#CASTED_PTR_IS_NULL:]] = cir.unary(not, %[[#CASTED_PTR_IS_NOT_NULL]]) : !cir.bool, !cir.bool +// AFTER-NEXT: cir.if %[[#CASTED_PTR_IS_NULL]] { +// AFTER-NEXT: cir.call @__cxa_bad_cast() : () -> () +// AFTER-NEXT: cir.unreachable +// AFTER-NEXT: } +// AFTER-NEXT: %{{.+}} = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr +// AFTER: } + +void *ptr_cast_to_complete(Base *ptr) { + return dynamic_cast(ptr); +} + +// BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base +// BEFORE: %[[#V19:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// BEFORE-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool +// BEFORE-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool +// BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { +// BEFORE-NEXT: %[[#V22:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// BEFORE-NEXT: cir.yield %[[#V22]] : !cir.ptr +// BEFORE-NEXT: }, false { +// BEFORE-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> +// BEFORE-NEXT: %[[#V24:]] = cir.load %[[#V23]] : 
cir.ptr >, !cir.ptr +// BEFORE-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : cir.ptr +// BEFORE-NEXT: %[[#V26:]] = cir.load %[[#V25]] : cir.ptr , !s64i +// BEFORE-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr +// BEFORE-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr +// BEFORE-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr +// BEFORE-NEXT: cir.yield %[[#V29]] : !cir.ptr +// BEFORE-NEXT: }) : (!cir.bool) -> !cir.ptr diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c new file mode 100644 index 000000000000..81947ea181e9 --- /dev/null +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +static const int g = 1; +void foo() { + if ((g != 1) && (g != 1)) + return; + if ((g == 1) || (g == 1)) + return; +} +// CHECK: cir.func no_proto @foo() +// CHECK: cir.scope { +// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[FALSE:%.*]] = cir.cast(int_to_bool, [[ZERO:%.*]] : !s32i), !cir.bool +// CHECK: cir.if [[FALSE]] { +// CHECK: cir.return +// CHECK: } +// CHECK: } +// CHECK: cir.return + +typedef struct { int x; } S; +static const S s = {0}; +void bar() { + int a = s.x; +} +// CHECK: cir.func no_proto @bar() +// CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: {{%.*}} = cir.get_global @s : cir.ptr +// CHECK: [[CONST:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, cir.ptr +// CHECK: cir.return + diff --git a/clang/test/CIR/CodeGen/expressions.cpp b/clang/test/CIR/CodeGen/expressions.cpp new file mode 100644 index 000000000000..fa17f0921fcd --- /dev/null +++ b/clang/test/CIR/CodeGen/expressions.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void test(int a) { +// CHECK: cir.func @{{.+}}test + + // Should generate an lvalue for the parenthesized expression. + (a) = 1; + // CHECK: %[[#C:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.store %[[#C]], %{{.+}} : !s32i, cir.ptr +} diff --git a/clang/test/CIR/CodeGen/forward-decls.cpp b/clang/test/CIR/CodeGen/forward-decls.cpp new file mode 100644 index 000000000000..c9721cc6f857 --- /dev/null +++ b/clang/test/CIR/CodeGen/forward-decls.cpp @@ -0,0 +1,125 @@ +// RUN: split-file %s %t + + +//--- incomplete_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/incomplete_struct -o %t/incomplete_struct.cir +// RUN: FileCheck %s --input-file=%t/incomplete_struct.cir --check-prefix=CHECK1 + +// The forward-declared record is never defined, so it is created as an +// incomplete struct in CIR and will remain as such. + +// CHECK1: ![[INC_STRUCT:.+]] = !cir.struct +struct IncompleteStruct; +// CHECK1: testIncompleteStruct(%arg0: !cir.ptr +void testIncompleteStruct(struct IncompleteStruct *s) {}; + + + +//--- mutated_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/mutated_struct -o %t/mutated_struct.cir +// RUN: FileCheck %s --input-file=%t/mutated_struct.cir --check-prefix=CHECK2 + +// Forward declaration of the struct is followed by usage, then definition. +// This means it will initially be created as incomplete, then completed.
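+//
+// (Context, illustrative only and not part of the checked output: C allows
+// forming and passing a pointer to a forward-declared type before its
+// definition is seen; only member access or sizeof requires the complete
+// type. For example:
+//
+//   struct T;                // incomplete here
+//   void use(struct T *p);   // OK: pointer to incomplete type
+//   struct T { int x; };     // definition completes the type
+// )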
+ +// CHECK2: ![[COMPLETE:.+]] = !cir.struct} #cir.record.decl.ast> +// CHECK2: testForwardDeclaredStruct(%arg0: !cir.ptr +struct ForwardDeclaredStruct; +void testForwardDeclaredStruct(struct ForwardDeclaredStruct *fds) {}; +struct ForwardDeclaredStruct { + int testVal; +}; + + + +//--- recursive_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/recursive_struct -o %t/recursive_struct.cir +// RUN: FileCheck --check-prefix=CHECK3 --input-file=%t/recursive_struct.cir %s + +// The struct is initially forward declared since the self-reference is +// generated first. Then, once the type is fully generated, it is completed. + +// CHECK3: ![[STRUCT:.+]] = !cir.struct, !cir.ptr>} #cir.record.decl.ast> +struct RecursiveStruct { + int value; + struct RecursiveStruct *next; +}; +// CHECK3: testRecursiveStruct(%arg0: !cir.ptr +void testRecursiveStruct(struct RecursiveStruct *arg) { + // CHECK3: %[[#NEXT:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK3: %[[#DEREF:]] = cir.load %[[#NEXT]] : cir.ptr >, !cir.ptr + // CHECK3: cir.get_member %[[#DEREF]][0] {name = "value"} : !cir.ptr -> !cir.ptr + arg->next->value; +} + + + +//--- indirect_recursive_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/indirect_recursive_struct -o %t/indirect_recursive_struct.cir +// RUN: FileCheck --check-prefix=CHECK4 --input-file=%t/indirect_recursive_struct.cir %s + +// Node B refers to A, and vice versa, so a forward declaration is used to +// ensure the structs can be defined. Since type aliases are not yet supported +// within recursive types, each struct is expanded until there are no more
+// recursive types, or until all remaining recursive types are self-references. + +// CHECK4: ![[B:.+]] = !cir.struct, !cir.ptr, !cir.ptr>} +// CHECK4: ![[A:.+]] = !cir.struct, !cir.ptr, !cir.ptr>} +struct StructNodeB; +struct StructNodeA { + int value; + struct StructNodeB *next; +}; +struct StructNodeB { + int value; + struct StructNodeA *next; +}; + +void testIndirectSelfReference(struct StructNodeA arg) { + // CHECK4: %[[#V1:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK4: %[[#V2:]] = cir.load %[[#V1]] : cir.ptr >, !cir.ptr + // CHECK4: %[[#V3:]] = cir.get_member %[[#V2]][1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK4: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK4: cir.get_member %[[#V4]][0] {name = "value"} : !cir.ptr -> !cir.ptr + arg.next->next->value; +} + + + +//--- complex_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/complex_struct -o %t/complex_struct.cir +// RUN: FileCheck --check-prefix=CHECK5 --input-file=%t/complex_struct.cir %s + +// A sizeable complex struct just to double-check that stuff is working.
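+//
+// (Illustrative only: the nesting below mixes direct self-references, e.g. a
+// `struct B *` member inside B itself, with indirect cycles like A -> B -> C
+// -> A. As described for indirect_recursive_struct above, member struct types
+// are expanded until the only remaining recursion is a direct self-reference,
+// which is why the CHECK5 type lines are so large. Minimal indirect cycle:
+//
+//   struct A { struct B *b; };
+//   struct B { struct A *a; };
+// )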
+ +// CHECK5: !cir.struct, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>} #cir.record.decl.ast>, !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>} #cir.record.decl.ast>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast> +// CHECK5: !cir.struct>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast> +struct A { + struct { + struct A *a1; + }; + struct B { + struct B *b1; + struct C { + struct A *a2; + struct B *b2; + struct C *c1; + } c; + union { + struct A *a2; + struct { + struct B *b3; + }; + } u; + } b; +}; +void test(struct A *a){}; diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp new file mode 100644 index 000000000000..bb8f30d9af6d --- /dev/null +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int go(int const& val); + +int go1() { + auto x = go(1); + return x; +} + +// CHECK: cir.func @_Z3go1v() -> !s32i +// CHECK: %[[#XAddr:]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %[[#RVal:]] = cir.scope { +// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, cir.ptr +// CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i +// CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i +// CHECK-NEXT: } +// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c new file mode 100644 index 000000000000..8ef5797f482c --- /dev/null +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -0,0 +1,72 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -x c++ -std=c++20 
-triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +typedef struct { + int a; + int b; +} Data; + +typedef int (*fun_t)(Data* d); + +struct A; +typedef int (*fun_typ)(struct A*); + +typedef struct A { + fun_typ fun; +} A; + +// CIR: !ty_22A22 = !cir.struct (!cir.ptr>)>>} #cir.record.decl.ast> +A a = {(fun_typ)0}; + +int extract_a(Data* d) { + return d->a; +} + +// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr +// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["d", init] +// CIR: [[TMP1:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["f", init] +// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CIR: [[TMP3:%.*]] = cir.const(#cir.ptr : !cir.ptr)>>) : !cir.ptr)>> +// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> +// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : cir.ptr )>> +// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> +// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : cir.ptr )>>>, !cir.ptr)>> +// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, cir.ptr + +// LLVM: define i32 {{@.*foo.*}}(ptr %0) +// LLVM: [[TMP1:%.*]] = alloca ptr, i64 1 +// LLVM: [[TMP2:%.*]] = alloca i32, i64 1 +// LLVM: [[TMP3:%.*]] = alloca ptr, i64 1 +// LLVM: store ptr %0, ptr [[TMP1]] +// LLVM: store ptr null, ptr [[TMP3]] +// LLVM: store ptr {{@.*extract_a.*}}, ptr [[TMP3]] +// LLVM: [[TMP4:%.*]] = load ptr, ptr [[TMP3]] +// LLVM: [[TMP5:%.*]] = load ptr, ptr [[TMP1]] +// LLVM: [[TMP6:%.*]] = call i32 [[TMP4]](ptr [[TMP5]]) +// LLVM: store i32 [[TMP6]], ptr [[TMP2]] +int foo(Data* d) { + fun_t f = 0; + f = extract_a; + return f(d); +} + +// CIR: cir.func private {{@.*test.*}}() -> !cir.ptr> +// CIR: cir.func {{@.*bar.*}}() +// CIR: [[RET:%.*]] = cir.call {{@.*test.*}}() : () -> !cir.ptr> +// CIR: cir.call [[RET]]() : (!cir.ptr>) -> () +// CIR: cir.return + +// LLVM: declare {{.*}} ptr {{@.*test.*}}() +// LLVM: define void {{@.*bar.*}}() +// LLVM: [[RET:%.*]] = call ptr {{@.*test.*}}() +// LLVM: call void [[RET]]() +// LLVM: ret void +void (*test(void))(void); +void bar(void) { + test()(); +} diff --git a/clang/test/CIR/CodeGen/function-attrs.cpp b/clang/test/CIR/CodeGen/function-attrs.cpp new file mode 100644 index 000000000000..4975a3f31253 --- /dev/null +++ b/clang/test/CIR/CodeGen/function-attrs.cpp @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + + +inline int s0(int a, int b) { + int x = a + b; + return x; +} + +__attribute__((noinline)) +int s1(int a, int b) { + return s0(a,b); +} + +__attribute__((always_inline)) +int s2(int a, int b) { + return s0(a,b); +} + +int s3(int a, int b) { + int x = a + b; + return x; +} + +// CIR: #fn_attr = #cir, nothrow = #cir.nothrow})> +// CIR: #fn_attr1 = #cir, nothrow = #cir.nothrow})> +// CIR: #fn_attr2 = #cir, nothrow = #cir.nothrow})> + +// CIR: cir.func 
linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr) +// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr1) +// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr2) +// CIR: cir.func @_Z2s3ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} { + +// LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] +// LLVM: define i32 @_Z2s2ii(i32 %0, i32 %1) {{.*}} #[[#ATTR2:]] +// LLVM: attributes #[[#ATTR1]] = {{.*}} noinline +// LLVM: attributes #[[#ATTR2]] = {{.*}} alwaysinline diff --git a/clang/test/CIR/CodeGen/global-ctor-dtor.cpp b/clang/test/CIR/CodeGen/global-ctor-dtor.cpp new file mode 100644 index 000000000000..230b223b0040 --- /dev/null +++ b/clang/test/CIR/CodeGen/global-ctor-dtor.cpp @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t2.cir 2>&1 +// RUN: FileCheck --check-prefix=AFTER --input-file=%t2.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +extern int bar(); +void foo(void) __attribute__((constructor)); +void foo(void) { + bar(); +} + +// BEFORE: cir.func @_Z3foov() global_ctor(65535) + +void foo2(void) __attribute__((constructor(777))); +void foo2(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo2v() global_ctor(777) + +void foo3(void) __attribute__((destructor)); +void foo3(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo3v() global_dtor(65535) + +void foo4(void) __attribute__((destructor(789))); +void foo4(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo4v() global_dtor(789) + +// AFTER: module @{{.*}} attributes {cir.global_ctors = [#cir.global_ctor<"_Z3foov", 65535>, #cir.global_ctor<"_Z4foo2v", 777>], cir.global_dtors = [#cir.global_dtor<"_Z4foo3v", 65535>, #cir.global_dtor<"_Z4foo4v", 789>] +// LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z3foov, ptr null }, { i32, ptr, ptr } { i32 777, ptr @_Z4foo2v, ptr null }] +// LLVM-NEXT: @llvm.global_dtors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z4foo3v, ptr null }, { i32, ptr, ptr } { i32 789, ptr @_Z4foo4v, ptr null }] \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c new file mode 100644 index 000000000000..e576fb30fc65 --- /dev/null +++ b/clang/test/CIR/CodeGen/globals.c @@ -0,0 +1,105 @@ +// There seem to be some differences in how constant expressions are evaluated +// in C vs. C++. This causes the code generated for initialized globals in C +// to differ a bit from the C++ version. This test ensures that these +// differences are accounted for. + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +char string[] = "whatnow"; +// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array +int sint[] = {123, 456, 789}; +// CHECK: cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array +int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized.
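+// (C11 6.7.9p21: when an initializer list is shorter than the aggregate, the
+// remaining elements are initialized like objects with static storage
+// duration, i.e. zeroed. Illustrative: `int v[4] = {1, 2};` yields
+// {1, 2, 0, 0}, matching the zero-filled tail checked below.)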
+// CHECK: cir.global external @filler_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array +int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. +// CHECK: cir.global external @excess_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array +float flt[] = {1.0, 2.0}; +// CHECK: cir.global external @flt = #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array + +// Tentative definition is just a declaration. +int tentativeB; +int tentativeB = 1; +// CHECK: cir.global external @tentativeB = #cir.int<1> : !s32i + +// Tentative incomplete definition is just a declaration. +int tentativeE[]; +int tentativeE[2] = {1, 2}; +// CHECK: cir.global external @tentativeE = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array + +int twoDim[2][2] = {{1, 2}, {3, 4}}; +// CHECK: cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> + +struct { + int x; + int y[2][2]; +} nestedTwoDim = {1, {{2, 3}, {4, 5}}}; +// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> + +struct { + char x[3]; + char y[3]; + char z[3]; +} nestedString = {"1", "", "\0"}; +// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.zero : !cir.array, #cir.zero : !cir.array}> + +struct { + char *name; +} nestedStringPtr = {"1"}; +// CHECK: cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> + +int *globalPtr = &nestedString.y[1]; +// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [1 : i32, 1 : i32]> : !cir.ptr + +const int i = 12; +int i2 = i; +struct { int i; } i3 = {i}; +// CHECK: cir.global external @i2 = #cir.int<12> : !s32i +// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_22anon2E722 + +int a[10][10][10]; +int *a2 = &a[3][0][8]; +struct { int *p; } a3 = {&a[3][0][8]}; +// CHECK: cir.global external @a2 = #cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr +// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_22anon2E922 + +int p[10]; +int *p1 = &p[0]; +struct { int *x; } p2 = {&p[0]}; +// CHECK: cir.global external @p1 = #cir.global_view<@p> : !cir.ptr +// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_22anon2E1122 + +int q[10]; +int *q1 = q; +struct { int *x; } q2 = {q}; +// CHECK: cir.global external @q1 = #cir.global_view<@q> : !cir.ptr +// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_22anon2E1322 + +int foo() { + extern int optind; + return optind; +} +// CHECK: cir.global "private" external @optind : !s32i +// CHECK: cir.func {{.*@foo}} +// CHECK: {{.*}} = cir.get_global @optind : cir.ptr + +struct Glob { + double a[42]; + int pad1[3]; + double b[42]; +} glob; + +double *const glob_ptr = &glob.b[1]; +// CHECK: cir.global external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr + +// TODO: test tentatives with 
internal linkage. + +// Tentative definition is THE definition. Should be zero-initialized. +int tentativeA; +float tentativeC; +int tentativeD[]; +float zeroInitFlt[2]; +// CHECK: cir.global external @tentativeA = #cir.int<0> : !s32i +// CHECK: cir.global external @tentativeC = #cir.fp<0.000000e+00> : !cir.float +// CHECK: cir.global external @tentativeD = #cir.zero : !cir.array +// CHECK: cir.global external @zeroInitFlt = #cir.zero : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp new file mode 100644 index 000000000000..ba5bb7eedba6 --- /dev/null +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -0,0 +1,133 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int a = 3; +const int b = 4; // unless used, it won't be generated + +unsigned long int c = 2; +int d = a; +bool e; +float y = 3.4; +double w = 4.3; +char x = '3'; +unsigned char rgb[3] = {0, 233, 33}; +char alpha[4] = "abc"; +const char *s = "example"; +const char *s1 = "example1"; +const char *s2 = "example"; + +void use_global() { + int li = a; +} + +void use_global_string() { + unsigned char c = s2[0]; +} + +template <typename T> +T func() { + return T(); +} + +int use_func() { return func<int>(); } + +// CHECK: module {{.*}} { +// CHECK-NEXT: cir.global external @a = #cir.int<3> : !s32i +// CHECK-NEXT: cir.global external @c = #cir.int<2> : !u64i +// CHECK-NEXT: cir.global external @d = #cir.int<0> : !s32i + +// CHECK-NEXT: cir.func internal private @__cxx_global_var_init() +// CHECK-NEXT: [[TMP0:%.*]] = cir.get_global @d : cir.ptr +// CHECK-NEXT: [[TMP1:%.*]] = cir.get_global @a : cir.ptr +// CHECK-NEXT: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr + +// CHECK: cir.global external @e = #false +// CHECK-NEXT: cir.global external @y = #cir.fp<3.400000e+00> : !cir.float +// CHECK-NEXT: cir.global external @w = #cir.fp<4.300000e+00> : !cir.double +// CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i +// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array +// CHECK-NEXT: cir.global external @alpha = #cir.const_array<"abc\00" : !cir.array> : !cir.array + +// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr + +// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr + +// CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr + +// CHECK: cir.func @_Z10use_globalv() +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @a : cir.ptr +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr + +// CHECK: cir.func @_Z17use_global_stringv() +// CHECK-NEXT: %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr +// CHECK-NEXT: %5 = cir.load %4 : cir.ptr , !s8i +// 
CHECK-NEXT: %6 = cir.cast(integral, %5 : !s8i), !u8i +// CHECK-NEXT: cir.store %6, %0 : !u8i, cir.ptr +// CHECK-NEXT: cir.return + +// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: } +// CHECK-NEXT: cir.func @_Z8use_funcv() -> !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: } + + +char string[] = "whatnow"; +// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array +unsigned uint[] = {255}; +// CHECK: cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array +short sshort[] = {11111, 22222}; +// CHECK: cir.global external @sshort = #cir.const_array<[#cir.int<11111> : !s16i, #cir.int<22222> : !s16i]> : !cir.array +int sint[] = {123, 456, 789}; +// CHECK: cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array +long long ll[] = {999999999, 0, 0, 0}; +// CHECK: cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array + +void get_globals() { + // CHECK: cir.func @_Z11get_globalsv() + char *s = string; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + unsigned *u = uint; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + short *ss = sshort; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + int *si = sint; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + long long *l = ll; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr +} + +// Should generate extern global variables. +extern int externVar; +int testExternVar(void) { return externVar; } +// CHECK: cir.global "private" external @externVar : !s32i +// CHECK: cir.func @{{.+}}testExternVar +// CHECK: cir.get_global @externVar : cir.ptr + +// Should constant initialize global with constant address. 
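+//
+// (The address of a global is an address constant expression, so the
+// initializer below folds to a compile-time #cir.global_view instead of a
+// runtime @__cxx_global_var_init call like the `int d = a;` case near the
+// top of this file. Illustrative contrast:
+//
+//   int var = 1;
+//   int *p = &var;   // address constant: folded at compile time
+//   int q = var;     // not a constant expression: dynamic initialization
+// )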
+int var = 1; +int *constAddr = &var; +// CHECK-DAG: cir.global external @constAddr = #cir.global_view<@var> : !cir.ptr diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c new file mode 100644 index 000000000000..e9cee90b57e9 --- /dev/null +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int foo(void) { return __extension__ 0b101010; } + +//CHECK: cir.func @foo() +//CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +//CHECK-NEXT: [[VAL:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +//CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, cir.ptr +//CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : cir.ptr , !s32i +//CHECK-NEXT: cir.return [[LOAD_VAL]] : !s32i + +void bar(void) { + __extension__ bar; +} + +//CHECK: cir.func @bar() +//CHECK: {{.*}} = cir.get_global @bar : cir.ptr > +//CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/gnu89.c b/clang/test/CIR/CodeGen/gnu89.c new file mode 100644 index 000000000000..5254576779aa --- /dev/null +++ b/clang/test/CIR/CodeGen/gnu89.c @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -std=gnu89 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo() {} +//CHECK: cir.func {{.*@foo}} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp new file mode 100644 index 000000000000..153bd3d3445d --- /dev/null +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -0,0 +1,66 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void g0(int a) { + int b = a; + goto end; + b = b + 1; +end: + b = b + 2; +} + +// CHECK: cir.func @_Z2g0i +// CHECK-NEXT %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} +// CHECK-NEXT cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT cir.br ^bb2 +// CHECK-NEXT ^bb1: // no predecessors +// CHECK-NEXT %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %4 = cir.const(1 : !s32i) : !s32i +// CHECK-NEXT %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT cir.br ^bb2 +// CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 +// CHECK-NEXT %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %7 = cir.const(2 : !s32i) : !s32i +// CHECK-NEXT %8 = cir.binop(add, %6, %7) : !s32i +// CHECK-NEXT cir.store %8, %1 : !s32i, cir.ptr +// CHECK-NEXT cir.return + +void g1(int a) { + int x = 0; + goto end; +end: + int y = a + 2; +} + +// Make sure alloca for "y" shows up in the entry block +// CHECK: cir.func @_Z2g1i(%arg0: !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr + +int g2() { + int b = 1; + goto end; + b = b + 1; +end: + b = b + 2; + return 1; +} + +// Make sure (1) we don't get dangling unused cleanup blocks +// (2) generated returns consider the function type + +// CHECK: cir.func @_Z2g2v() -> !s32i + +// CHECK: cir.br ^bb2 +// CHECK-NEXT: ^bb1: // no predecessors +// 
CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 + +// CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: [[R]] : !s32i +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c new file mode 100644 index 000000000000..3b7155c36ff7 --- /dev/null +++ b/clang/test/CIR/CodeGen/hello.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +int printf(const char *restrict, ...); + +int main (void) { + printf ("Hello, world!\n"); + return 0; +} + +// CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i +// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.func @main() -> !s32i +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.get_global @printf : cir.ptr , ...)>> +// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr +// CHECK: %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i +// CHECK: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store %5, %0 : !s32i, cir.ptr +// CHECK: %6 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.return %6 : !s32i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/if-consteval.cpp b/clang/test/CIR/CodeGen/if-consteval.cpp new file mode 100644 index 000000000000..97468beb0ac5 --- /dev/null +++ b/clang/test/CIR/CodeGen/if-consteval.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -std=c++23 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void should_be_used_1(); +void should_be_used_2(); +void should_be_used_3(); +constexpr void should_not_be_used() {} + +constexpr void f() { + if consteval { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } else { + should_be_used_1(); // CHECK: call {{.*}}should_be_used_1 + } + + if !consteval { + should_be_used_2(); // CHECK: call {{.*}}should_be_used_2 + } else { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } + + if consteval { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } + + if !consteval { + should_be_used_3(); // CHECK: call {{.*}}should_be_used_3 + } +} + +void g() { + f(); +} diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp new file mode 100644 index 000000000000..8ef8315e1ad0 --- /dev/null +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -0,0 +1,95 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void if0() { + int x = 0; + if constexpr (0 == 0) { + // Declare a variable with same name to be sure we handle the + // scopes correctly + int x = 2; + } else { + int x = 3; + } + if constexpr (0 == 1) { + int x = 4; + } else { + int x = 5; + } + if constexpr (int x = 7; 8 == 8) { + int y = x; + } else { + int y = 2*x; + } + if constexpr (int x = 9; 8 == 10) { + int y = x; + } else { + int y = 3*x; + } + if constexpr (10 == 10) { + int x = 20; + } + if constexpr (10 == 11) { + int x = 30; + } + if constexpr (int x = 70; 80 == 80) { + int y = 10*x; + } + if constexpr (int x = 90; 80 == 100) { + int y = 11*x; + } +} + +// CHECK: cir.func @_Z3if0v() {{.*}} +// CHECK: cir.store %1, %0 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// 
CHECK-NEXT: %3 = cir.const(#cir.int<2> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<5> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<7> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %5, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<9> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<20> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// Note that Clang does not even emit a block in this case +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<70> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<90> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.return loc({{.*}}) diff --git a/clang/test/CIR/CodeGen/if.cir b/clang/test/CIR/CodeGen/if.cir new file mode 100644 index 000000000000..b3104fd42d66 --- /dev/null +++ b/clang/test/CIR/CodeGen/if.cir @@ -0,0 +1,48 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } else { + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } +// CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb2, ^bb1 +// CHECK-NEXT: ^bb1: // pred: ^bb0 +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : 
!s32i) : !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: ^bb2: // pred: ^bb0 +// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: ^bb3: // no predecessors +// CHECK-NEXT: cir.return %arg0 : !s32i +// CHECK-NEXT: } + + cir.func @onlyIf(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } +// CHECK: cir.func @onlyIf(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: // pred: ^bb0 +// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: ^bb2: // pred: ^bb0 +// CHECK-NEXT: cir.return %arg0 : !s32i +// CHECK-NEXT: } + +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/implicit-return.cpp b/clang/test/CIR/CodeGen/implicit-return.cpp new file mode 100644 index 000000000000..09b084b70ddb --- /dev/null +++ b/clang/test/CIR/CodeGen/implicit-return.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CHECK-O0 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CHECK-O2 + +void ret_void() {} + +// CHECK-O0: cir.func @_Z8ret_voidv() +// CHECK-O0-NEXT: cir.return +// CHECK-O0-NEXT: } + +// CHECK-O2: cir.func @_Z8ret_voidv() +// CHECK-O2-NEXT: cir.return +// CHECK-O2-NEXT: } + +int ret_non_void() {} + +// CHECK-O0: cir.func @_Z12ret_non_voidv() -> !s32i +// CHECK-O0-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O0-NEXT: cir.trap +// CHECK-O0-NEXT: } + +// CHECK-O2: cir.func @_Z12ret_non_voidv() -> !s32i +// CHECK-O2-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O2-NEXT: cir.unreachable +// CHECK-O2-NEXT: } diff --git a/clang/test/CIR/CodeGen/inc-bool.cpp b/clang/test/CIR/CodeGen/inc-bool.cpp new file mode 100644 index 000000000000..05c3bb54aca3 --- /dev/null +++ b/clang/test/CIR/CodeGen/inc-bool.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++14 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(bool x) { + x++; +} + +// CHECK: cir.func @_Z3foob(%arg0: !cir.bool loc({{.*}})) +// CHECK: [[ALLOC_X:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[ALLOC_X]] : !cir.bool, cir.ptr +// CHECK: {{.*}} = cir.load [[ALLOC_X]] : cir.ptr , !cir.bool +// CHECK: [[TRUE:%.*]] = cir.const(#true) : !cir.bool +// CHECK: cir.store [[TRUE]], [[ALLOC_X]] : !cir.bool, cir.ptr +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp new file mode 100644 index 000000000000..1005299027a1 --- /dev/null +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -0,0 +1,55 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned id0() { + unsigned a = 1; + return ++a; +} + +// CHECK: cir.func @_Z3id0v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) +// 
CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#AFTER_A]], %[[#RET]] + + +unsigned id1() { + unsigned a = 1; + return --a; +} + +// CHECK: cir.func @_Z3id1v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#AFTER_A]], %[[#RET]] + +unsigned id2() { + unsigned a = 1; + return a++; +} + +// CHECK: cir.func @_Z3id2v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#BEFORE_A]], %[[#RET]] + +unsigned id3() { + unsigned a = 1; + return a--; +} + +// CHECK: cir.func @_Z3id3v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#BEFORE_A]], %[[#RET]] diff --git a/clang/test/CIR/CodeGen/int-wrap.c b/clang/test/CIR/CodeGen/int-wrap.c new file mode 100644 index 000000000000..f23e216143fc --- /dev/null +++ b/clang/test/CIR/CodeGen/int-wrap.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fwrapv -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s --check-prefix=WRAP +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s --check-prefix=NOWRAP + +#define N 42 + +typedef struct { + const char* ptr; +} A; + +// WRAP: cir.binop(sub, {{.*}}, {{.*}}) : !s32i +// NOWRAP: cir.binop(sub, {{.*}}, {{.*}}) nsw : !s32i +void foo(int* ar, int len) { + int x = ar[len - N]; +} + +// check that the ptr_stride is generated in both cases (i.e. 
no NYI fails) + +// WRAP: cir.ptr_stride +// NOWRAP: cir.ptr_stride +void bar(A* a, unsigned n) { + a->ptr = a->ptr + n; +} + +// WRAP: cir.ptr_stride +// NOWRAP: cir.ptr_stride +void baz(A* a) { + a->ptr--; +} + + diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c new file mode 100644 index 000000000000..5bdca87db150 --- /dev/null +++ b/clang/test/CIR/CodeGen/lalg.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o - | FileCheck %s + +double dot() { + double x = 0.0; + double y = 0.0f; + double result = x * y; + return result; +} + +// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["x", init] +// CHECK-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["y", init] +// CHECK-NEXT: %3 = cir.alloca !cir.double, cir.ptr , ["result", init] +// CHECK-NEXT: %4 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double +// CHECK-NEXT: cir.store %4, %1 : !cir.double, cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.fp<0.000000e+00> : !cir.float) : !cir.float +// CHECK-NEXT: %6 = cir.cast(floating, %5 : !cir.float), !cir.double +// CHECK-NEXT: cir.store %6, %2 : !cir.double, cir.ptr +// CHECK-NEXT: %7 = cir.load %1 : cir.ptr , !cir.double +// CHECK-NEXT: %8 = cir.load %2 : cir.ptr , !cir.double +// CHECK-NEXT: %9 = cir.binop(mul, %7, %8) : !cir.double diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp new file mode 100644 index 000000000000..1440e14b76cb --- /dev/null +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -0,0 +1,136 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void fn() { + auto a = [](){}; + a(); +} + +// CHECK: !ty_22anon2E222 = !cir.struct}> +// CHECK-DAG: module + +// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv + +// CHECK: cir.func @_Z2fnv() +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, cir.ptr , ["a"] +// CHECK: cir.call @_ZZ2fnvENK3$_0clEv + +void l0() { + int i; + auto a = [&](){ i = i + 1; }; + a(); +} + +// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( + +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %3 : cir.ptr , !s32i +// CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: cir.store %6, %8 : !s32i, cir.ptr + +// CHECK: cir.func @_Z2l0v() + +auto g() { + int i = 12; + return [&] { + i += 100; + return i; + }; +} + +// CHECK: cir.func @_Z1gv() -> !ty_22anon2E622 +// CHECK: %0 = cir.alloca !ty_22anon2E622, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK: cir.store %2, %1 : !s32i, cir.ptr +// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon2E622 +// CHECK: cir.return %4 : !ty_22anon2E622 + +auto g2() { + int i = 12; + auto lam = [&] { + i += 100; + return i; + }; + return lam; +} + +// Should be same as 
above because of NRVO +// CHECK: cir.func @_Z2g2v() -> !ty_22anon2E822 +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon2E822 +// CHECK-NEXT: cir.return %4 : !ty_22anon2E822 + +int f() { + return g2()(); +} + +// CHECK: cir.func @_Z1fv() -> !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !ty_22anon2E822, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon2E822 +// CHECK-NEXT: cir.store %3, %2 : !ty_22anon2E822, cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: cir.store %4, %0 : !s32i, cir.ptr +// CHECK-NEXT: } +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: } + +int g3() { + auto* fn = +[](int const& i) -> int { return i; }; + auto task = fn(3); + return task; +} + +// lambda operator() +// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi + +// lambda __invoke() +// CHECK: cir.func internal private @_ZZ2g3vEN3$_08__invokeERKi + +// lambda operator int (*)(int const&)() +// CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv + +// CHECK: cir.func @_Z2g3v() -> !s32i +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["task", init] {alignment = 4 : i64} + +// 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. +// CHECK: %3 = cir.scope { +// CHECK: %7 = cir.alloca !ty_22anon2E1122, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %9 : !cir.ptr)>> +// CHECK: } + +// 2. Load ptr to `__invoke()`. +// CHECK: cir.store %3, %1 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: %4 = cir.scope { +// CHECK: %7 = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %8 = cir.load %1 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %9, %7 : !s32i, cir.ptr + +// 3. Call `__invoke()`, which effectively executes `operator()`. +// CHECK: %10 = cir.call %8(%7) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CHECK: cir.yield %10 : !s32i +// CHECK: } + +// CHECK: } diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c new file mode 100644 index 000000000000..f6cf6a8e50e6 --- /dev/null +++ b/clang/test/CIR/CodeGen/libc.c @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should generate CIR's builtin memcpy op. 
+void *memcpy(void *, const void *, unsigned long); +void testMemcpy(void *src, const void *dst, unsigned long size) { + memcpy(dst, src, size); + // CHECK: cir.libc.memcpy %{{.+}} bytes from %{{.+}} to %{{.+}} : !u64i, !cir.ptr -> !cir.ptr +} + +double fabs(double); +double testFabs(double x) { + return fabs(x); + // CHECK: cir.fabs %{{.+}} : !cir.double +} + +float fabsf(float); +float testFabsf(float x) { + return fabsf(x); + // CHECK: cir.fabs %{{.+}} : !cir.float +} diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp new file mode 100644 index 000000000000..3df45c43e124 --- /dev/null +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -0,0 +1,63 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef __builtin_va_list va_list; + +static __inline__ __attribute__((__always_inline__)) __attribute__((__format__(printf, 3, 0))) +int vsnprintf(char* const __attribute__((pass_object_size(1))) dest, int size, const char* format, va_list ap) + __attribute__((overloadable)) { + return __builtin___vsnprintf_chk(dest, size, 0, __builtin_object_size(((dest)), (1)), format, ap); +} + +typedef long unsigned int size_t; + +size_t __strlen_chk(const char* __s, size_t __n) __attribute__((annotate("introduced_in=" "17"))); +size_t strlen(const char* __s) __attribute__((__pure__)); +static __inline__ __attribute__((__always_inline__)) +size_t strlen(const char* const s __attribute__((pass_object_size(0)))) __attribute__((overloadable)) { + size_t bos = __builtin_object_size(((s)), (0)); + + if (bos == ((size_t) -1)) { + return __builtin_strlen(s); + } + + return __strlen_chk(s, bos); +} + +void log(int, const char *, int); + +void consume_message(const char *m) { + log(3, m, strlen(m)); +} + +void t(const char* fmt, ...) { + va_list args; + __builtin_va_start(args, fmt); + const int size = 512; + char message[size]; + vsnprintf(message, size, fmt, args); + consume_message(message); +} + +// CHECK: cir.func @_Z15consume_messagePKc(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} + +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.objsize(%3 : , max) -> !u64i +// CHECK: %5 = cir.call @_ZL6strlenPKcU17pass_object_size0(%3, %4) : (!cir.ptr, !u64i) -> !u64i + +// CHECK: cir.func private @__vsnprintf_chk +// CHECK: cir.func internal private @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag + +// Implicit size parameter in arg %1 +// +// FIXME: tag the param with an attribute to designate the size information. 
+// +// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["", init] {alignment = 8 : i64} + +// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr + +// CHECK: %10 = cir.load %1 : cir.ptr , !u64i +// CHECK: %11 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c new file mode 100644 index 000000000000..aff2c6ccafad --- /dev/null +++ b/clang/test/CIR/CodeGen/linkage.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + + +static int bar(int i) { + return i; +} + +int foo(void) { + return bar(5); +} + +// CIR: cir.func internal private @bar( +// CIR: cir.func @foo( + +// LLVM: define internal i32 @bar( +// LLVM: define i32 @foo( + +static int var = 0; +// CIR: cir.global "private" internal @var = #cir.int<0> : !s32i +int get_var(void) { + return var; +} + +// Should generate available_externally linkage. +inline int availableExternallyMethod(void) { return 0; } +void callAvailableExternallyMethod(void) { availableExternallyMethod(); } +// CIR: cir.func available_externally @availableExternallyMethod diff --git a/clang/test/CIR/CodeGen/literals.c b/clang/test/CIR/CodeGen/literals.c new file mode 100644 index 000000000000..47665212c287 --- /dev/null +++ b/clang/test/CIR/CodeGen/literals.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +int literals(void) { + char a = 'a'; // char literals are int in C + // CHECK: %[[RES:[0-9]+]] = cir.const(#cir.int<97> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.cast(integral, %[[RES]] : !s32i), !s8i + + return 0; +} diff --git a/clang/test/CIR/CodeGen/literals.cpp b/clang/test/CIR/CodeGen/literals.cpp new file mode 100644 index 000000000000..537ebc8557e1 --- /dev/null +++ b/clang/test/CIR/CodeGen/literals.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +int literals() { + char a = 'a'; // char literals have char type in C++ + // CHECK: %{{[0-9]+}} = cir.const(#cir.int<97> : !s8i) : !s8i + + return 0; +} diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp new file mode 100644 index 000000000000..c333654a38ad --- /dev/null +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir +// RUN: FileCheck --input-file=%t.cpp.cir %s --check-prefix=CPPSCOPE +// RUN: %clang_cc1 -x c -std=c11 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir +// RUN: FileCheck --input-file=%t.c.cir %s --check-prefix=CSCOPE + +void l0(void) { + for (int i = 0;;) { + int j = 0; + } +} + +// CPPSCOPE: cir.func @_Z2l0v() +// CPPSCOPE-NEXT: cir.scope { +// CPPSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, cir.ptr +// CPPSCOPE-NEXT: cir.for : cond { + +// CSCOPE: cir.func @l0() +// CSCOPE-NEXT: cir.scope { +// CSCOPE-NEXT: %0 = 
cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CSCOPE-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CSCOPE-NEXT: cir.for : cond { + +// CSCOPE: } body { +// CSCOPE-NEXT: cir.scope { +// CSCOPE-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cir b/clang/test/CIR/CodeGen/loop.cir new file mode 100644 index 000000000000..8204216b6f52 --- /dev/null +++ b/clang/test/CIR/CodeGen/loop.cir @@ -0,0 +1,122 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + + cir.func @testFor(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @testFor(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test while cir.loop operation lowering. + cir.func @testWhile(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.yield + } + cir.return + } +// CHECK: cir.func @testWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test do-while cir.loop operation lowering. + cir.func @testDoWhile(%arg0 : !cir.bool) { + cir.do { + cir.yield + } while { + cir.condition(%arg0) + } + cir.return + } +// CHECK: cir.func @testDoWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test corner case + // while (1) { + // break; + // } + cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.break + } + cir.return + } +// CHECK: cir.func @testWhileWithBreakTerminatedBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test C-only corner case - no failures during lowering + // for (;;) { + // break; + // } + cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.scope { // FIXME(cir): Redundant scope emitted during C codegen.
+ cir.break + } + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @forWithBreakTerminatedScopeInBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EX_SCOPE_IN:]] +// CHECK: ^bb[[#EX_SCOPE_IN]]: +// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#EX_SCOPE_EXIT:]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp new file mode 100644 index 000000000000..b6f63b2ce4fc --- /dev/null +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -0,0 +1,207 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void l0() { + for (;;) { + } +} + +// CHECK: cir.func @_Z2l0v +// CHECK: cir.for : cond { +// CHECK: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK: cir.condition(%[[#TRUE]]) + +void l1() { + int x = 0; + for (int i = 0; i < 10; i = i + 1) { + x = x + 1; + } +} + +// CHECK: cir.func @_Z2l1v +// CHECK: cir.for : cond { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool +// CHECK-NEXT: cir.condition(%6) +// CHECK-NEXT: } body { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } + +void l2(bool cond) { + int i = 0; + while (cond) { + i = i + 1; + } + while (true) { + i = i + 1; + } + while (1) { + i = i + 1; + } +} + +// CHECK: cir.func @_Z2l2b +// CHECK: cir.scope { +// CHECK-NEXT: cir.while { +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.condition(%3) +// CHECK-NEXT: } do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.while { +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) +// CHECK-NEXT: } do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.while { +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.condition(%4) +// CHECK-NEXT: } do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } + +void l3(bool cond) { + int i = 0; + do { + i = i + 1; + } while (cond); + do { + i = i + 1; + } while (true); + do { + i = i + 1; + } while (1); +} + +// CHECK: cir.func @_Z2l3b +// CHECK: cir.scope { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.condition(%4) +// CHECK-NEXT: } +// CHECK-NEXT: } + +void l4() { + int i = 0, y = 100; + while (true) { + i = i + 1; + if (i < 10) + continue; + y = y - 20; + } +} + +// CHECK: cir.func @_Z2l4v +// CHECK: cir.while { +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) +// CHECK-NEXT: } do { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %10 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %11 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool +// CHECK-NEXT: cir.if %12 { +// CHECK-NEXT: cir.continue +// CHECK-NEXT: } +// CHECK-NEXT: } + +void l5() { + do { + } while (0); +} + +// CHECK: cir.func @_Z2l5v() +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool +// CHECK-NEXT: cir.condition(%1) +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +void l6() { + while (true) { + return; + } +} + +// CHECK: cir.func @_Z2l6v() +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.while { +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) +// CHECK-NEXT: } do { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp new file mode 100644 index 000000000000..ad56d820c5e9 --- /dev/null +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir 
-emit-cir %s -o - | FileCheck %s + +struct String { + long size; +}; + +void split(String &S) {} + +// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] + +void foo() { + String s; + split(s); +} + +// CHECK: cir.func @_Z3foov() +// CHECK: %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] +// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp new file mode 100644 index 000000000000..2d889d134b43 --- /dev/null +++ b/clang/test/CIR/CodeGen/move.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +namespace std { + +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template +typename remove_reference::type &&move(T &&t) noexcept; + +struct string { + string(); +}; + +} // std namespace + +// CHECK: ![[StdString:ty_.*]] = !cir.struct}> + +std::string getstr(); +void emplace(std::string &&s); + +void t() { + emplace(std::move(getstr())); +} + +// FIXME: we should explicitly model std::move here since it will +// be useful at least for the lifetime checker. + +// CHECK: cir.func @_Z1tv() +// CHECK: %[[#Addr:]] = cir.alloca ![[StdString]], {{.*}} ["ref.tmp0"] +// CHECK: %[[#RValStr:]] = cir.call @_Z6getstrv() : () -> ![[StdString]] +// CHECK: cir.store %[[#RValStr]], %[[#Addr]] +// CHECK: cir.call @_Z7emplaceOSt6string(%[[#Addr]]) +// CHECK: cir.return +// CHECK: } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp new file mode 100644 index 000000000000..93e3f7f5c40e --- /dev/null +++ b/clang/test/CIR/CodeGen/new.cpp @@ -0,0 +1,58 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +struct S { + S(int, int); +}; + +void m(int a, int b) { + std::shared_ptr l = std::make_shared(a, b); +} + +// CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr3CS3E22, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %7 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %8 = cir.load %7 : cir.ptr , !s32i +// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %10 = cir.load %9 : cir.ptr , !s32i +// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () +// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: } + +class B { +public: + void construct(B* __p) { + ::new ((void*)__p) B; + } +}; + +// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__p", init] {alignment = 8 : 
i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %4 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr + +// cir.call @B::B()(%new_placement_ptr) +// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + +void t() { + B b; + b.construct(&b); +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/no-common.c b/clang/test/CIR/CodeGen/no-common.c new file mode 100644 index 000000000000..61ecea191636 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-common.c @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -emit-cir -o - | FileCheck %s -check-prefix=CHECK-DEFAULT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -fno-common -emit-cir -o - | FileCheck %s -check-prefix=CHECK-DEFAULT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -fcommon -emit-cir -o - | FileCheck %s -check-prefix=CHECK-COMMON + +// CHECK-COMMON: cir.global common @x +// CHECK-DEFAULT: cir.global external @x +int x; + +// CHECK-COMMON: cir.global external @ABC +// CHECK-DEFAULT: cir.global external @ABC +typedef void* (*fn_t)(long a, long b, char *f, int c); +fn_t ABC __attribute__ ((nocommon)); + +// CHECK-COMMON: cir.global common @y +// CHECK-DEFAULT: cir.global common @y +int y __attribute__((common)); diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c new file mode 100644 index 000000000000..e396a606a73d --- /dev/null +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void empty(); + +void check_noproto_ptr() { + void (*fun)(void) = empty; +} + +// CHECK: cir.func no_proto @check_noproto_ptr() +// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["fun", init] {alignment = 8 : i64} +// CHECK: [[GGO:%.*]] = cir.get_global @empty : cir.ptr > +// CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> +// CHECK: cir.store [[CAST]], [[ALLOC]] : !cir.ptr>, cir.ptr >> +// CHECK: cir.return + +void empty(void) {} + +void buz() { + void (*func)(); + (*func)(); +} + +// CHECK: cir.func no_proto @buz() +// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["func"] {alignment = 8 : i64} +// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : cir.ptr >>, !cir.ptr> +// CHECK: [[CAST:%.*]] = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> +// CHECK: cir.call [[CAST]]() : (!cir.ptr>) -> () +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/no-proto-is-void.cpp b/clang/test/CIR/CodeGen/no-proto-is-void.cpp new file mode 100644 index 000000000000..7ab958f8fd00 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-proto-is-void.cpp @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -std=c2x -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Both CXX and C2X don't support no-prototype functions. They default to void.
+int noProto(); +// CHECK: cir.func @{{.*}}noProto{{.*}}() -> !s32i +int test(int x) { + return noProto(); + // CHECK: {{.+}} = cir.call @{{.*}}noProto{{.*}}() : () -> !s32i +} +int noProto() { return 0; } diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c new file mode 100644 index 000000000000..4028d8e2ec32 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -0,0 +1,85 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +//===----------------------------------------------------------------------===// +// DEFINED BEHAVIOUR +//===----------------------------------------------------------------------===// + +// No-proto definition followed by a correct call. +int noProto0(x) int x; { return x; } +// CHECK: cir.func no_proto @noProto0(%arg0: !s32i {{.+}}) -> !s32i +int test0(int x) { + // CHECK: cir.func @test0 + return noProto0(x); // We know the definition. Should be a direct call. + // CHECK: %{{.+}} = cir.call @noProto0(%{{.+}}) +} + +// Declaration without prototype followed by its definition, then a correct call. +// +// Prototyped definition overrides no-proto declaration before any call is made, +// only allowing calls with proper arguments. This is the only case where the +// definition is not marked as no-proto. +int noProto1(); +int noProto1(int x) { return x; } +// CHECK: cir.func @noProto1(%arg0: !s32i {{.+}}) -> !s32i +int test1(int x) { + // CHECK: cir.func @test1 + return noProto1(x); + // CHECK: %{{.+}} = cir.call @noProto1(%{{[0-9]+}}) : (!s32i) -> !s32i +} + +// Declaration without prototype followed by a correct call, then its definition. +// +// Call to no-proto is made before definition, so a variadic call that takes anything +// is created. Later, when the definition is found, no-proto is replaced. +int noProto2(); +int test2(int x) { + return noProto2(x); + // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> + // CHECK: {{.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i +} +int noProto2(int x) { return x; } +// CHECK: cir.func no_proto @noProto2(%arg0: !s32i {{.+}}) -> !s32i + +// No-proto declaration without definition (any call here is "correct"). +// +// Call to no-proto is made before definition, so a variadic call that takes anything +// is created. Definition is not in the translation unit, so it is left as is. +int noProto3(); +// cir.func private no_proto @noProto3(...) -> !s32i +int test3(int x) { +// CHECK: cir.func @test3 + return noProto3(x); + // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i +} + + +//===----------------------------------------------------------------------===// +// UNDEFINED BEHAVIOUR +// +// No-proto definitions followed by incorrect calls. +//===----------------------------------------------------------------------===// + +// No-proto definition followed by an incorrect call due to extra args. +int noProto4() { return 0; } +// cir.func private no_proto @noProto4() -> !s32i +int test4(int x) { + return noProto4(x); // Even if we know the definition, this should compile.
+ // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr>, !s32i) -> !s32i +} + +// No-proto definition followed by an incorrect call due to lack of args. +int noProto5(); +int test5(int x) { + return noProto5(); + // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr>) -> !s32i +} +int noProto5(int x) { return x; } +// CHECK: cir.func no_proto @noProto5(%arg0: !s32i {{.+}}) -> !s32i diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp new file mode 100644 index 000000000000..9c5da47da458 --- /dev/null +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +std::vector test_nrvo() { + std::vector result; + result.push_back("Words bend our thinking to infinite paths of self-delusion"); + return result; +} + +// CHECK: ![[VEC:.*]] = !cir.struct" {!cir.ptr>>, !cir.ptr>>, !cir.ptr>>}> + +// CHECK: cir.func @_Z9test_nrvov() -> ![[VEC]] +// CHECK: %0 = cir.alloca ![[VEC]], cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} +// CHECK: %2 = cir.const(#false) : !cir.bool +// CHECK: cir.store %2, %1 : !cir.bool, cir.ptr +// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %5 = cir.alloca !cir.ptr, cir.ptr >, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: cir.store %7, %5 : !cir.ptr, cir.ptr > +// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () +// CHECK: } +// CHECK: %3 = cir.const(#true) : !cir.bool +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr +// CHECK: %4 = cir.load %0 : cir.ptr , ![[VEC]] +// CHECK: cir.return %4 : ![[VEC]] +// CHECK: } diff --git a/clang/test/CIR/CodeGen/offsetof.c b/clang/test/CIR/CodeGen/offsetof.c new file mode 100644 index 000000000000..5259e14d4915 --- /dev/null +++ b/clang/test/CIR/CodeGen/offsetof.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +#include + +typedef struct { + int a; + int b; +} A; + +void foo() { + offsetof(A, a); + offsetof(A, b); +} + +// CHECK: cir.func no_proto @foo() +// CHECK: {{.*}} = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: {{.*}} = cir.const(#cir.int<4> : !u64i) : !u64i +// CHECK: cir.return + diff --git a/clang/test/CIR/CodeGen/opaque.c b/clang/test/CIR/CodeGen/opaque.c new file mode 100644 index 000000000000..00c11d7c65d1 --- /dev/null +++ b/clang/test/CIR/CodeGen/opaque.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int foo(int x, short y) { + return x ?: y; +} + +// CHECK: cir.func @foo +// CHECK: %[[Load:.*]] = cir.load +// CHECK: %[[Bool:.*]] = cir.cast(int_to_bool, %[[Load]] : !s32i), !cir.bool loc(#loc8) +// CHECK: = cir.ternary(%[[Bool]], true { +// CHECK: cir.yield %[[Load]] \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/openmp.cpp 
b/clang/test/CIR/CodeGen/openmp.cpp new file mode 100644 index 000000000000..a3c37da349b4 --- /dev/null +++ b/clang/test/CIR/CodeGen/openmp.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fopenmp -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func +void omp_parallel_1() { +// CHECK: omp.parallel { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: } +// CHECK-NEXT: omp.terminator +// CHECK-NEXT: } +#pragma omp parallel +{ +} +} +// CHECK: cir.func +void omp_parallel_2() { +// CHECK: %[[YVarDecl:.+]] = {{.*}} ["y", init] +// CHECK: omp.parallel { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[XVarDecl:.+]] = {{.*}} ["x", init] +// CHECK-NEXT: %[[C1:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: cir.store %[[C1]], %[[XVarDecl]] +// CHECK-NEXT: %[[XVal:.+]] = cir.load %[[XVarDecl]] +// CHECK-NEXT: %[[COne:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: %[[BinOpVal:.+]] = cir.binop(add, %[[XVal]], %[[COne]]) +// CHECK-NEXT: cir.store %[[BinOpVal]], %[[YVarDecl]] +// CHECK-NEXT: } +// CHECK-NEXT: omp.terminator +// CHECK-NEXT: } + int y = 0; +#pragma omp parallel +{ + int x = 1; + y = x + 1; +} +} diff --git a/clang/test/CIR/CodeGen/operators.cpp b/clang/test/CIR/CodeGen/operators.cpp new file mode 100644 index 000000000000..1d900188f1ce --- /dev/null +++ b/clang/test/CIR/CodeGen/operators.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class __attribute__((__visibility__("default"))) exception_ptr +{ + void* __ptr_; +public: + explicit operator bool() const noexcept {return __ptr_ != nullptr;} +}; + +// TODO: for now only check that this doesn't crash, in the future check operator +// bool codegen. 
+ +// CHECK: module \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/optnone.cpp b/clang/test/CIR/CodeGen/optnone.cpp new file mode 100644 index 000000000000..7fa22865c274 --- /dev/null +++ b/clang/test/CIR/CodeGen/optnone.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR-O0 +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM-O0 + +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s -check-prefix=CIR-O2 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t2.ll +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM-O2 + +int s0(int a, int b) { + int x = a + b; + if (x > 0) + x = 0; + else + x = 1; + return x; +} + +// CIR-O0: #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> +// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr) + +// CIR-O2-NOT: #fn_attr ={{.*}} optnone + +// LLVM-O0: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] +// LLVM-O0: attributes #[[#ATTR]] = { noinline nounwind optnone } +// LLVM-O2-NOT: attributes #[[#]] = { noinline nounwind optnone } diff --git a/clang/test/CIR/CodeGen/packed-structs.c b/clang/test/CIR/CodeGen/packed-structs.c new file mode 100644 index 000000000000..aefe8a60e28b --- /dev/null +++ b/clang/test/CIR/CodeGen/packed-structs.c @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#pragma pack(1) + +typedef struct { + int a0; + char a1; +} A; + +typedef struct { + int b0; + char b1; + A a[6]; +} B; + +typedef struct { + int c0; + char c1; +} __attribute__((aligned(2))) C; + + +// CHECK: !ty_22A22 = !cir.struct, !cir.int}> +// CHECK: !ty_22C22 = !cir.struct, !cir.int}> +// CHECK: !ty_22B22 = !cir.struct, !cir.int, !cir.array, !cir.int}> x 6>}> + +// CHECK: cir.func {{.*@foo()}} +// CHECK: %0 = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22B22, cir.ptr , ["b"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_22C22, cir.ptr , ["c"] {alignment = 2 : i64} +void foo() { + A a; + B b; + C c; +} + + diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c new file mode 100644 index 000000000000..989949b22384 --- /dev/null +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +void b(void *__attribute__((pass_object_size(0)))); +void e(void *__attribute__((pass_object_size(2)))); +void c() { + int a; + int d[a]; + b(d); + e(d); +} + +// CIR: cir.func no_proto @c() +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} +// CIR: [[TMP1:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR-NEXT: [[TMP2:%.*]] = cir.objsize([[TMP1]] : , max) -> !u64i +// CIR-NEXT: cir.call @b([[TMP1]], [[TMP2]]) : (!cir.ptr, !u64i) -> () +// CIR: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR: [[TMP4:%.*]] = 
cir.objsize([[TMP3]] : , min) -> !u64i +// CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () + +// LLVM: define void @c() +// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, +// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), +// LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) +// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false), +// LLVM-NEXT: call void @e(ptr [[TMP0]], i64 [[TMP2]]) diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp new file mode 100644 index 000000000000..7fb3ab8a784c --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Point { + int x; + int y; + int z; +}; +// CHECK-DAG: !ty_22Point22 = !cir.struct, !cir.int, !cir.int} + +struct Incomplete; +// CHECK-DAG: !ty_22Incomplete22 = !cir.struct + +int Point::*pt_member = &Point::x; +// CHECK: cir.global external @pt_member = #cir.data_member<0> : !cir.data_member + +auto test1() -> int Point::* { + return &Point::y; +} +// CHECK: cir.func @_Z5test1v() -> !cir.data_member +// CHECK: %{{.+}} = cir.const(#cir.data_member<1> : !cir.data_member) : !cir.data_member +// CHECK: } + +int test2(const Point &pt, int Point::*member) { + return pt.*member; +} +// CHECK: cir.func @_Z5test2RK5PointMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +int test3(const Point *pt, int Point::*member) { + return pt->*member; +} +// CHECK: cir.func @_Z5test3PK5PointMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +auto test4(int Incomplete::*member) -> int Incomplete::* { + return member; +} +// CHECK: cir.func @_Z5test4M10Incompletei(%arg0: !cir.data_member loc({{.+}})) -> !cir.data_member + +int test5(Incomplete *ic, int Incomplete::*member) { + return ic->*member; +} +// CHECK: cir.func @_Z5test5P10IncompleteMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +auto test_null() -> int Point::* { + return nullptr; +} +// CHECK: cir.func @_Z9test_nullv +// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: } + +auto test_null_incomplete() -> int Incomplete::* { + return nullptr; +} +// CHECK: cir.func @_Z20test_null_incompletev +// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: } diff --git a/clang/test/CIR/CodeGen/pointer.cpp b/clang/test/CIR/CodeGen/pointer.cpp new file mode 100644 index 000000000000..bdf0e2103192 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer.cpp @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Global pointer should be zero initialized by default. 
+int *ptr; +// CHECK: cir.global external @ptr = #cir.ptr : !cir.ptr diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp new file mode 100644 index 000000000000..8df8a0f6b658 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -0,0 +1,49 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should generate basic pointer arithmetic. +void foo(int *iptr, char *cptr, unsigned ustride) { + iptr + 2; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr + cptr + 3; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr + iptr - 2; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr + cptr - 3; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr + iptr + ustride; + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !u32i), !cir.ptr + + // Must convert unsigned stride to a signed one. + iptr - ustride; + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: %[[#SIGNSTRIDE:]] = cir.cast(integral, %[[#STRIDE]] : !u32i), !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr +} + +void testPointerSubscriptAccess(int *ptr) { +// CHECK: testPointerSubscriptAccess + ptr[1]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#V1]] : !cir.ptr, %[[#V2]] : !s32i), !cir.ptr +} + +void testPointerMultiDimSubscriptAccess(int **ptr) { +// CHECK: testPointerMultiDimSubscriptAccess + ptr[1][2]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> + // CHECK: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK: %[[#V5:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V5]] : !s32i), !cir.ptr +} diff --git a/clang/test/CIR/CodeGen/pred-info-builtins.c b/clang/test/CIR/CodeGen/pred-info-builtins.c new file mode 100644 index 000000000000..263274890e34 --- /dev/null +++ b/clang/test/CIR/CodeGen/pred-info-builtins.c @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CIR-O0 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CIR-O2 + +extern void __attribute__((noinline)) bar(void); + +void expect(int x) { + if (__builtin_expect(x, 0)) + bar(); +} +// CIR-O0: cir.func @expect +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () + +// CIR-O2: cir.func @expect +// CIR-O2: [[EXPECT:%.*]] = cir.expect({{.*}}, {{.*}}) : !s64i +// CIR-O2: [[EXPECT_BOOL:%.*]] = cir.cast(int_to_bool, [[EXPECT]] : !s64i),
!cir.bool +// CIR-O2: cir.if [[EXPECT_BOOL]] +// CIR-O2: cir.call @bar() : () -> () + +void expect_with_probability(int x) { + if (__builtin_expect_with_probability(x, 1, 0.8)) + bar(); +} +// CIR-O0: cir.func @expect_with_probability +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () + +// CIR-O2: cir.func @expect_with_probability +// CIR-O2: [[EXPECT:%.*]] = cir.expect({{.*}}, {{.*}}, 8.000000e-01) : !s64i +// CIR-O2: [[EXPECT_BOOL:%.*]] = cir.cast(int_to_bool, [[EXPECT]] : !s64i), !cir.bool +// CIR-O2: cir.if [[EXPECT_BOOL]] +// CIR-O2: cir.call @bar() : () -> () + +void unpredictable(int x) { + if (__builtin_unpredictable(x > 1)) + bar(); +// CIR-O0: cir.func @unpredictable +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () +} diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp new file mode 100644 index 000000000000..dc849d915598 --- /dev/null +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +extern "C" { + void __assert2(const char* __file, int __line, const char* __function, const char* __msg) __attribute__((__noreturn__)); +} + +void m() { + __assert2("yo.cpp", 79, __PRETTY_FUNCTION__, "doom"); +} + +// CHECK: cir.func @_Z1mv() +// CHECK: %0 = cir.get_global @".str" : cir.ptr > +// CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: %2 = cir.const(#cir.int<79> : !s32i) : !s32i +// CHECK: %3 = cir.get_global @".str1" : cir.ptr > +// CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr +// CHECK: %5 = cir.get_global @".str2" : cir.ptr > +// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, !s32i, !cir.ptr, !cir.ptr) -> () +// CHECK: cir.return +// CHECK: } diff --git a/clang/test/CIR/CodeGen/ptr_diff.cpp b/clang/test/CIR/CodeGen/ptr_diff.cpp new file mode 100644 index 000000000000..ebaa5ec6bfac --- /dev/null +++ b/clang/test/CIR/CodeGen/ptr_diff.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef unsigned long size_type; +size_type size(unsigned long *_start, unsigned long *_finish) { + return static_cast(_finish - _start); +} + +// CHECK: cir.func @_Z4sizePmS_(%arg0: !cir.ptr +// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i +// CHECK: %6 = cir.cast(integral, %5 : !s64i), !u64i + +long add(char *a, char *b) { + return a - b + 1; +} + +// CHECK: cir.func @_Z3addPcS_(%arg0: !cir.ptr +// %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i +// %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// %7 = cir.cast(integral, %6 : !s32i), !s64i +// %8 = cir.binop(add, %5, %7) : !s64i + diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp new file mode 100644 index 000000000000..13dec345cc08 --- /dev/null +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +typedef enum enumy { + Unknown = 0, + Some = 1000024002, +} enumy; + +typedef struct triple { + enumy type; + void* 
__attribute__((__may_alias__)) next; + unsigned image; +} triple; + +void init(unsigned numImages) { + std::vector images(numImages); + for (auto& image : images) { + image = {Some}; + } +} + +// CHECK-DAG: !ty_22triple22 = !cir.struct, !cir.ptr, !cir.int}> +// CHECK-DAG: ![[VEC:.*]] = !cir.struct" {!cir.ptr, !cir.ptr, !cir.int}>>, !cir.ptr, !cir.ptr, !cir.int}>>, !cir.ptr, !cir.ptr, !cir.int}>>}> +// CHECK-DAG: ![[VEC_IT:.*]] = !cir.struct" {!cir.ptr, !cir.ptr, !cir.int}> + +// CHECK: cir.func @_Z4initj(%arg0: !u32i +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca ![[VEC]], cir.ptr , ["images", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i +// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca ![[VEC_IT]], cir.ptr , ["__begin1", init] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca ![[VEC_IT]], cir.ptr , ["__end1", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} +// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > +// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> ![[VEC_IT]] +// CHECK: cir.store %9, %5 : ![[VEC_IT]], cir.ptr +// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> ![[VEC_IT]] +// CHECK: cir.store %11, %6 : ![[VEC_IT]], cir.ptr +// CHECK: cir.for : cond { +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: cir.condition(%12) +// CHECK: } body { +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %13 = cir.alloca !ty_22triple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %14 = cir.const(#cir.zero : !ty_22triple22) : !ty_22triple22 +// CHECK: cir.store %14, %13 : !ty_22triple22, cir.ptr +// CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr +// CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i +// CHECK: cir.store %16, %15 : !u32i, cir.ptr +// CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr +// CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: } +// CHECK: cir.yield +// CHECK: } step { +// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.yield +// CHECK: } +// CHECK: } +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp new file mode 100644 index 000000000000..ee7eef915c38 --- /dev/null +++ b/clang/test/CIR/CodeGen/return.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +int &ret0(int &x) { + return x; +} + +// CHECK: cir.func @_Z4ret0Ri +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, 
["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: cir.return %3 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/CodeGen/scope.cir new file mode 100644 index 000000000000..813862e7c2fb --- /dev/null +++ b/clang/test/CIR/CodeGen/scope.cir @@ -0,0 +1,60 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!u32i = !cir.int + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + } + cir.return + } +// CHECK: cir.func @foo() { +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: %1 = cir.const(#cir.int<4> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.return +// CHECK: } + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } +// CHECK: cir.func @empty_scope() { +// CHECK: cir.return +// CHECK: } + + cir.func @scope_with_return() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.scope { + %2 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %2, %0 : !u32i, cir.ptr + %3 = cir.load %0 : cir.ptr , !u32i + cir.return %3 : !u32i + } + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + +// CHECK: cir.func @scope_with_return() -> !u32i { +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %2 : !u32i +// CHECK: ^bb2: // no predecessors +// CHECK: %3 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %3 : !u32i +// CHECK: } + +} diff --git a/clang/test/CIR/CodeGen/shift.cpp b/clang/test/CIR/CodeGen/shift.cpp new file mode 100644 index 000000000000..6f6a10d34ab0 --- /dev/null +++ b/clang/test/CIR/CodeGen/shift.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned long s(int i, unsigned long x) { + return x << i; +} + +// CHECK: cir.shift(left, %3 : !u64i, %4 : !s32i) -> !u64i \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp new file mode 100644 index 000000000000..96730e748a4c --- /dev/null +++ b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-skip-system-headers -I%S/../Inputs %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "skip-this-header.h" + +void test() { + String s1{}; + String s2{1}; + String s3{"abcdefghijklmnop"}; +} + +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ev +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ei +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2EPKc +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc + +// CHECK: cir.func @_Z4testv() +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end 
of file diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp new file mode 100644 index 000000000000..9c1fe9760c8c --- /dev/null +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -0,0 +1,90 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +int s0(int a, int b) { + int x = a + b; + if (x > 0) + x = 0; + else + x = 1; + return x; +} + +// CIR: #loc3 = loc("{{.*}}sourcelocation.cpp":6:8) +// CIR: #loc4 = loc("{{.*}}sourcelocation.cpp":6:12) +// CIR: #loc5 = loc("{{.*}}sourcelocation.cpp":6:15) +// CIR: #loc6 = loc("{{.*}}sourcelocation.cpp":6:19) +// CIR: #loc21 = loc(fused[#loc3, #loc4]) +// CIR: #loc22 = loc(fused[#loc5, #loc6]) +// CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior +// CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i +// CIR: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) +// CIR: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) +// CIR: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) +// CIR: %3 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) +// CIR: cir.store %arg0, %0 : !s32i, cir.ptr loc(#loc9) +// CIR: cir.store %arg1, %1 : !s32i, cir.ptr loc(#loc9) +// CIR: %4 = cir.load %0 : cir.ptr , !s32i loc(#loc10) +// CIR: %5 = cir.load %1 : cir.ptr , !s32i loc(#loc8) +// CIR: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) +// CIR: cir.store %6, %3 : !s32i, cir.ptr loc(#loc23) +// CIR: cir.scope { +// CIR: %9 = cir.load %3 : cir.ptr , !s32i loc(#loc13) +// CIR: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) +// CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) +// CIR: cir.if %11 { +// CIR: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) +// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc28) +// CIR: } else { +// CIR: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) +// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc29) +// CIR: } loc(#loc27) +// CIR: } loc(#loc25) +// CIR: %7 = cir.load %3 : cir.ptr , !s32i loc(#loc18) +// CIR: cir.store %7, %2 : !s32i, cir.ptr loc(#loc30) +// CIR: %8 = cir.load %2 : cir.ptr , !s32i loc(#loc30) +// CIR: cir.return %8 : !s32i loc(#loc30) +// CIR: } loc(#loc20) +// CIR: } loc(#loc) +// CIR: #loc = loc("{{.*}}sourcelocation.cpp":0:0) +// CIR: #loc1 = loc("{{.*}}sourcelocation.cpp":6:1) +// CIR: #loc2 = loc("{{.*}}sourcelocation.cpp":13:1) +// CIR: #loc7 = loc("{{.*}}sourcelocation.cpp":7:3) +// CIR: #loc8 = loc("{{.*}}sourcelocation.cpp":7:15) +// CIR: #loc9 = loc("{{.*}}sourcelocation.cpp":6:22) +// CIR: #loc10 = loc("{{.*}}sourcelocation.cpp":7:11) +// CIR: #loc11 = loc("{{.*}}sourcelocation.cpp":8:3) +// CIR: #loc12 = loc("{{.*}}sourcelocation.cpp":11:9) +// CIR: #loc13 = loc("{{.*}}sourcelocation.cpp":8:7) +// CIR: #loc14 = loc("{{.*}}sourcelocation.cpp":8:11) +// CIR: #loc15 = loc("{{.*}}sourcelocation.cpp":9:5) +// CIR: #loc16 = loc("{{.*}}sourcelocation.cpp":9:9) +// CIR: #loc17 = loc("{{.*}}sourcelocation.cpp":11:5) +// CIR: #loc18 = loc("{{.*}}sourcelocation.cpp":12:10) +// CIR: #loc19 = loc("{{.*}}sourcelocation.cpp":12:3) +// CIR: #loc20 = loc(fused[#loc1, 
#loc2]) +// CIR: #loc23 = loc(fused[#loc7, #loc8]) +// CIR: #loc24 = loc(fused[#loc10, #loc8]) +// CIR: #loc25 = loc(fused[#loc11, #loc12]) +// CIR: #loc26 = loc(fused[#loc13, #loc14]) +// CIR: #loc27 = loc(fused[#loc15, #loc16, #loc17, #loc12]) +// CIR: #loc28 = loc(fused[#loc15, #loc16]) +// CIR: #loc29 = loc(fused[#loc17, #loc12]) +// CIR: #loc30 = loc(fused[#loc19, #loc18]) + + +// LLVM: ModuleID = '{{.*}}sourcelocation.cpp' +// LLVM: source_filename = "{{.*}}sourcelocation.cpp" +// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#]] !dbg ![[#SP:]] +// LLVM: %3 = alloca i32, i64 1, align 4, !dbg ![[#LOC1:]] + + +// LLVM: !llvm.module.flags = !{!0} +// LLVM: !llvm.dbg.cu = !{!1} +// LLVM: !0 = !{i32 2, !"Debug Info Version", i32 3} +// LLVM: !1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly) +// LLVM: !2 = !DIFile(filename: "sourcelocation.cpp", directory: "{{.*}}CodeGen") +// LLVM: ![[#SP]] = distinct !DISubprogram(name: "_Z2s0ii", linkageName: "_Z2s0ii", scope: !2, file: !2, line: 6, type: !4, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1) +// LLVM: ![[#LOC1]] = !DILocation(line: 6, scope: ![[#SP]]) diff --git a/clang/test/CIR/CodeGen/spelling-locations.cpp b/clang/test/CIR/CodeGen/spelling-locations.cpp new file mode 100644 index 000000000000..66c09c88a029 --- /dev/null +++ b/clang/test/CIR/CodeGen/spelling-locations.cpp @@ -0,0 +1,100 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +#define multiline_if_macro(c, t) \ +if (c) { \ + return t; \ +} + +int testMacroLocations(void) { + + // Expanded macros will use the location of the expansion site. + multiline_if_macro(1, 3); + // CHECK: cir.scope { + // CHECK: cir.if %{{.+}} { + // CHECK: cir.return %{{.+}} : !s32i loc(#loc[[#LOC:]]) + // CHECK: } loc(#loc[[#LOC]]) + // CHECK: } loc(#loc[[#LOC]]) + + // Regular if statements should use different locations. 
+ if (1) { + return 3; + } + // CHECK: cir.scope { + // CHECK: cir.if %{{.+}} { + // CHECK: cir.return %{{.+}} : !s32i loc(#loc[[#LOC:]]) + // CHECK-NOT: } loc(#loc[[#LOC]]) + // CHECK-NOT: } loc(#loc[[#LOC]]) + + return 0; +} + +void testIfStmtLocations(int f) { + if (f) + ; + else + ; + + if (f) + ++f; + else + ; + + if (f) + ; + else + --f; + + if (f) + ++f; + else + --f; +} + +// CHECK: cir.if %{{.+}} { +// CHECK: } else { +// CHECK: } loc(#loc[[#LOC1:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(inc +// CHECK: cir.store +// CHECK: } else { +// CHECK: } loc(#loc[[#LOC2:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: } else { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(dec +// CHECK: cir.store +// CHECK: } loc(#loc[[#LOC3:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(inc +// CHECK: cir.store +// CHECK: } else { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(dec +// CHECK: cir.store +// CHECK: } loc(#loc[[#LOC4:]]) + +// CHECK: #loc[[#LOC12:]] = loc({{.+}}:35:5) +// CHECK: #loc[[#LOC11:]] = loc({{.+}}:33:5) + +// CHECK: #loc[[#LOC23:]] = loc({{.+}}:40:5) +// CHECK: #loc[[#LOC21:]] = loc({{.+}}:38:5) +// CHECK: #loc[[#LOC22:]] = loc({{.+}}:38:7) + +// CHECK: #loc[[#LOC33:]] = loc({{.+}}:45:7) +// CHECK: #loc[[#LOC31:]] = loc({{.+}}:43:5) +// CHECK: #loc[[#LOC32:]] = loc({{.+}}:45:5) + +// CHECK: #loc[[#LOC44:]] = loc({{.+}}:50:7) +// CHECK: #loc[[#LOC41:]] = loc({{.+}}:48:5) +// CHECK: #loc[[#LOC42:]] = loc({{.+}}:48:7) +// CHECK: #loc[[#LOC43:]] = loc({{.+}}:50:5) + +// CHECK: #loc[[#LOC1]] = loc(fused[#loc[[#LOC11]], #loc[[#LOC12]]]) +// CHECK: #loc[[#LOC2]] = loc(fused[#loc[[#LOC21]], #loc[[#LOC22]], #loc[[#LOC23]]]) +// CHECK: #loc[[#LOC3]] = loc(fused[#loc[[#LOC31]], #loc[[#LOC32]], #loc[[#LOC33]]]) +// CHECK: #loc[[#LOC4]] = loc(fused[#loc[[#LOC41]], #loc[[#LOC42]], #loc[[#LOC43]], #loc[[#LOC44]]]) diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c new file mode 100644 index 000000000000..4981052bc9ac --- /dev/null +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -0,0 +1,50 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void func1(void) { + // Should lower default-initialized static vars. + static int i; + // CHECK-DAG: cir.global "private" internal @func1.i = #cir.int<0> : !s32i + + // Should lower constant-initialized static vars. + static int j = 1; + // CHECK-DAG: cir.global "private" internal @func1.j = #cir.int<1> : !s32i + + // Should properly shadow static vars in nested scopes. + { + static int j = 2; + // CHECK-DAG: cir.global "private" internal @func1.j.1 = #cir.int<2> : !s32i + } + { + static int j = 3; + // CHECK-DAG: cir.global "private" internal @func1.j.2 = #cir.int<3> : !s32i + } + + // Should lower basic arithmetic on static vars. + j++; + // CHECK-DAG: %[[#V2:]] = cir.get_global @func1.j : cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr +} + +// Should shadow static vars on different functions.
+void func2(void) { + static char i; + // CHECK-DAG: cir.global "private" internal @func2.i = #cir.int<0> : !s8i + static float j; + // CHECK-DAG: cir.global "private" internal @func2.j = #cir.fp<0.000000e+00> : !cir.float +} + +// Should const-initialize static vars with constant addresses. +void func3(void) { + static int var; + static int *constAddr = &var; + // CHECK-DAG: cir.global "private" internal @func3.constAddr = #cir.global_view<@func3.var> : !cir.ptr +} + +// Should match type size in bytes between var and initializer. +void func4(void) { + static char string[] = "Hello"; + // CHECK-DAG: cir.global "private" internal @func4.string = #cir.const_array<"Hello\00" : !cir.array> : !cir.array +} diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp new file mode 100644 index 000000000000..1a075b7d968a --- /dev/null +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void func1(void) { + // Should lower default-initialized static vars. + static int i; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1i = #cir.int<0> : !s32i + + // Should lower constant-initialized static vars. + static int j = 1; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j = #cir.int<1> : !s32i + + // Should properly shadow static vars in nested scopes. + { + static int j = 2; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_0 = #cir.int<2> : !s32i + } + { + static int j = 3; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_1 = #cir.int<3> : !s32i + } + + // Should lower basic arithmetic on static vars. + j++; + // CHECK-DAG: %[[#V2:]] = cir.get_global @_ZZ5func1vE1j : cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr +} + +// Should shadow static vars on different functions.
+void func2(void) { + static char i; + // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1i = #cir.int<0> : !s8i + static float j; + // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float +} diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp new file mode 100644 index 000000000000..998bd5c6457d --- /dev/null +++ b/clang/test/CIR/CodeGen/static.cpp @@ -0,0 +1,90 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: cir-opt %t.cir -o - | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +class Init { + +public: + Init(bool a) ; + ~Init(); +private: + static bool _S_synced_with_stdio; +}; + + +static Init __ioinit(true); +static Init __ioinit2(false); + +// BEFORE: module {{.*}} { +// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) +// BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } dtor { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () +// BEFORE-NEXT: } {ast = #cir.var.decl.ast} +// BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } dtor { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () +// BEFORE-NEXT: } {ast = #cir.var.decl.ast} +// BEFORE-NEXT: } + + +// AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init", 65536>, #cir.global_ctor<"__cxx_global_var_init.1", 65536>] +// AFTER-NEXT: cir.global "private" external @__dso_handle : i8 +// AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) +// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) +// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER-NEXT: cir.func internal private @__cxx_global_var_init() +// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %1 = cir.const(#true) : !cir.bool +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () +// AFTER-NEXT: cir.return +// AFTER: cir.global "private" internal 
@_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() +// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %1 = cir.const(#false) : !cir.bool +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () +// AFTER-NEXT: cir.return +// AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() +// AFTER-NEXT: cir.call @__cxx_global_var_init() : () -> () +// AFTER-NEXT: cir.call @__cxx_global_var_init.1() : () -> () +// AFTER-NEXT: cir.return + +// LLVM: @__dso_handle = external global i8 +// LLVM: @_ZL8__ioinit = internal global %class.Init zeroinitializer +// LLVM: @_ZL9__ioinit2 = internal global %class.Init zeroinitializer +// LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] +// LLVM: define internal void @__cxx_global_var_init() +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i8 1) +// LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL8__ioinit, ptr @__dso_handle) +// LLVM-NEXT: ret void +// LLVM: define internal void @__cxx_global_var_init.1() +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL9__ioinit2, i8 0) +// LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL9__ioinit2, ptr @__dso_handle) +// LLVM-NEXT: ret void +// LLVM: define void @_GLOBAL__sub_I_static.cpp() +// LLVM-NEXT: call void @__cxx_global_var_init() +// LLVM-NEXT: call void @__cxx_global_var_init.1() +// LLVM-NEXT: ret void diff --git a/clang/test/CIR/CodeGen/std-array.cpp b/clang/test/CIR/CodeGen/std-array.cpp new file mode 100644 index 000000000000..ac4b119bdeb0 --- /dev/null +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +void t() { + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + (void)v.end(); +} + +// CHECK: ![[array:.*]] = !cir.struct" + +// CHECK: {{.*}} = cir.get_member +// CHECK: {{.*}} = cir.cast(array_to_ptrdecay +// CHECK: {{.*}} = cir.const(#cir.int<9> : !u32i) : !u32i + +// CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp new file mode 100644 index 000000000000..3b043a6e3766 --- /dev/null +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +// CHECK: ![[array:.*]] = !cir.struct" + +int test_find(unsigned char n = 3) +{ + // CHECK: cir.func @_Z9test_findh(%arg0: !u8i + unsigned num_found = 0; + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + // CHECK: %[[array_addr:.*]] = cir.alloca ![[array]], cir.ptr , ["v"] + + auto f = std::find(v.begin(), v.end(), 
n); + // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv(%[[array_addr]]) + // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]]) + // CHECK: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + + if (f != v.end()) + num_found++; + // CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]] + // CHECK: %[[neq_cmp:.*]] = cir.cmp + // CHECK: cir.if %[[neq_cmp]] + + return num_found; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/stmt-expr.c b/clang/test/CIR/CodeGen/stmt-expr.c new file mode 100644 index 000000000000..8029358887e0 --- /dev/null +++ b/clang/test/CIR/CodeGen/stmt-expr.c @@ -0,0 +1,42 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Yields void. +void test1() { ({ }); } +// CHECK: @test1 +// CHECK: cir.scope { +// CHECK-NOT: cir.yield +// CHECK: } + +// Yields an out-of-scope scalar. +void test2() { ({int x = 3; x; }); } +// CHECK: @test2 +// CHECK: %[[#RETVAL:]] = cir.alloca !s32i, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !s32i, cir.ptr , ["x", init] +// [...] +// CHECK: %[[#TMP:]] = cir.load %[[#VAR]] : cir.ptr , !s32i +// CHECK: cir.store %[[#TMP]], %[[#RETVAL]] : !s32i, cir.ptr +// CHECK: } +// CHECK: %{{.+}} = cir.load %[[#RETVAL]] : cir.ptr , !s32i + +// Yields an aggregate. +struct S { int x; }; +int test3() { return ({ struct S s = {1}; s; }).x; } +// CHECK: @test3 +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !ty_22S22, cir.ptr +// [...] +// CHECK: cir.copy %[[#VAR]] to %[[#RETVAL]] : !cir.ptr +// CHECK: } +// CHECK: %[[#RETADDR:]] = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %{{.+}} = cir.load %[[#RETADDR]] : cir.ptr , !s32i + +// Expression is wrapped in an expression attribute (just ensure it does not crash). +void test4(int x) { ({[[gsl::suppress("foo")]] x;}); } +// CHECK: @test4 + +// TODO(cir): Missing label support. +// // Expression is wrapped in a label. +// // void test5(int x) { x = ({ label: x; }); } diff --git a/clang/test/CIR/CodeGen/stmt-expr.cpp b/clang/test/CIR/CodeGen/stmt-expr.cpp new file mode 100644 index 000000000000..d9d619f70a92 --- /dev/null +++ b/clang/test/CIR/CodeGen/stmt-expr.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class A { +public: + A(): x(0) {} + A(A &a) : x(a.x) {} + // TODO(cir): Ensure dtors are properly called. The dtor below crashes. + // ~A() {} + int x; + void Foo() {} +}; + +void test1() { + ({ + A a; + a; + }).Foo(); +} +// CHECK: @_Z5test1v +// CHECK: cir.scope { +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22A22, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.call @_ZN1AC1Ev(%[[#VAR]]) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1AC1ERS_(%[[#RETVAL]], %[[#VAR]]) : (!cir.ptr, !cir.ptr) -> () +// TODO(cir): the local VAR should be destroyed here. +// CHECK: } +// CHECK: cir.call @_ZN1A3FooEv(%[[#RETVAL]]) : (!cir.ptr) -> () +// TODO(cir): the temporary RETVAL should be destroyed here. 
+// CHECK: } diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c new file mode 100644 index 000000000000..14e8d8a37fdb --- /dev/null +++ b/clang/test/CIR/CodeGen/store.c @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(void) { + int a = 0; + a = 1; +} + +// CHECK: cir.func @foo() +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c new file mode 100644 index 000000000000..aacb36474315 --- /dev/null +++ b/clang/test/CIR/CodeGen/struct.c @@ -0,0 +1,101 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Bar { + int a; + char b; +} bar; + +struct Foo { + int a; + char b; + struct Bar z; +}; + +// Recursive type +typedef struct Node { + struct Node* next; +} NodeStru; + +void baz(void) { + struct Bar b; + struct Foo f; +} + +// CHECK-DAG: !ty_22Node22 = !cir.struct>} #cir.record.decl.ast> +// CHECK-DAG: !ty_22Bar22 = !cir.struct, !cir.int}> +// CHECK-DAG: !ty_22Foo22 = !cir.struct, !cir.int, !cir.struct, !cir.int}>}> +// CHECK-DAG: module {{.*}} { + // CHECK: cir.func @baz() +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +void shouldConstInitStructs(void) { +// CHECK: cir.func @shouldConstInitStructs + struct Foo f = {1, 2, {3, 4}}; + // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} + // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22) : !ty_22Foo22 + // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, cir.ptr +} + +// Should zero-initialize uninitialized global structs. +struct S { + int a,b; +} s; +// CHECK-DAG: cir.global external @s = #cir.zero : !ty_22S22 + +// Should initialize basic global structs. +struct S1 { + int a; + float f; + int *p; +} s1 = {1, .1, 0}; +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 + +// Should initialize global nested structs. +struct S2 { + struct S2A { + int a; + } s2a; +} s2 = {{1}}; +// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + +// Should initialize global arrays of structs. 
+struct S3 { + int a; +} s3[3] = {{1}, {2}, {3}}; +// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + +void shouldCopyStructAsCallArg(struct S1 s) { +// CHECK-DAG: cir.func @shouldCopyStructAsCallArg + shouldCopyStructAsCallArg(s); + // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22S122 + // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22S122) -> () +} + +struct Bar shouldGenerateAndAccessStructArrays(void) { + struct Bar s[1] = {{3, 4}}; + return s[0]; +} +// CHECK-DAG: cir.func @shouldGenerateAndAccessStructArrays +// CHECK-DAG: %[[#STRIDE:]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr +// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr +// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr + +// CHECK-DAG: cir.func @local_decl +// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, cir.ptr , ["a"] +void local_decl(void) { + struct Local { + int i; + }; + struct Local a; +} + +// CHECK-DAG: cir.func @useRecursiveType +// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> +void useRecursiveType(NodeStru* a) { + a->next = 0; +} diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp new file mode 100644 index 000000000000..c98622b00081 --- /dev/null +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -0,0 +1,167 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Bar { + int a; + char b; + void method() {} + void method2(int a) {} + int method3(int a) { return a; } +}; + +struct Foo { + int a; + char b; + Bar z; +}; + +void baz() { + Bar b; + b.method(); + b.method2(4); + int result = b.method3(4); + Foo f; +} + +struct incomplete; +void yoyo(incomplete *i) {} + +// CHECK-DAG: !ty_22incomplete22 = !cir.struct, !cir.int}> + +// CHECK-DAG: !ty_22Foo22 = !cir.struct, !cir.int, !cir.struct, !cir.int}>}> +// CHECK-DAG: !ty_22Mandalore22 = !cir.struct, !cir.ptr, !cir.int} #cir.record.decl.ast> +// CHECK-DAG: !ty_22Adv22 = !cir.struct, !cir.ptr, !cir.int} #cir.record.decl.ast>}> +// CHECK-DAG: !ty_22Entry22 = !cir.struct (!cir.int, !cir.ptr>, !cir.ptr)>>}> + +// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = 
cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %5 +// CHECK-NEXT: } + +// CHECK: cir.func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["result", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %3 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: %4 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +typedef enum Ways { + ThisIsTheWay = 1000024001, +} Ways; + +typedef struct Mandalore { + Ways w; + const void* n; + int d; +} Mandalore; + +class Adv { + Mandalore x{ThisIsTheWay}; +public: + Adv() {} +}; + +void m() { Adv C; } + +// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> +// CHECK: %6 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store %8, %7 : !s32i, cir.ptr +// CHECK: cir.return +// CHECK: } + +struct A { + int a; +}; + +// Should globally const-initialize struct members. +struct A simpleConstInit = {1}; +// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22 + +// Should globally const-initialize arrays with struct members. +struct A arrConstInit[1] = {{1}}; +// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22]> : !cir.array + +// Should locally copy struct members. 
+void shouldLocallyCopyStructAssignments(void) { + struct A a = { 3 }; + // CHECK: %[[#SA:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} + struct A b = a; + // CHECK: %[[#SB:]] = cir.alloca !ty_22A22, cir.ptr , ["b", init] {alignment = 4 : i64} + // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr +} + +A get_default() { return A{2}; } + +struct S { + S(A a = get_default()); +}; + +void h() { S s; } + +// CHECK: cir.func @_Z1hv() +// CHECK: %0 = cir.alloca !ty_22S22, cir.ptr , ["s", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22A22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22A22 +// CHECK: cir.store %2, %1 : !ty_22A22, cir.ptr +// CHECK: %3 = cir.load %1 : cir.ptr , !ty_22A22 +// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22A22) -> () +// CHECK: cir.return +// CHECK: } + +typedef enum enumy { + A = 1 +} enumy; + +typedef enumy (*fnPtr)(int instance, const char* name, void* function); + +struct Entry { + fnPtr procAddr = nullptr; +}; + +void ppp() { Entry x; } + +// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr + +// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/switch.cir b/clang/test/CIR/CodeGen/switch.cir new file mode 100644 index 000000000000..da99dffa1fa8 --- /dev/null +++ b/clang/test/CIR/CodeGen/switch.cir @@ -0,0 +1,207 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +module { + cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { + cir.switch (%arg0 : !s8i) [ + case (equal, 1) { + cir.break + }, + case (default) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { +// CHECK: cir.switch.flat %arg0 : !s8i, ^bb[[#DEFAULT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb3 +// CHECK: ^bb[[#DEFAULT]]: +// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { + cir.switch (%arg0 : !s32i) [ + case (equal, 1) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { +// CHECK: cir.switch.flat %arg0 : !s32i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + + cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (anyof, [1, 2] : !s64i) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1N2:]], +// CHECK: 2: ^bb[[#CASE1N2]] +// CHECK: ] +// CHECK: ^bb[[#CASE1N2]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + + + cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (equal, 1 : !s64i) { // case 1 has its own region + cir.yield // fallthrough to case 2 + }, + case (equal, 2 : !s64i) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]], +// CHECK: 2: ^bb[[#CASE2:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#CASE2]] +// CHECK: 
^bb[[#CASE2]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (equal, 1 : !s64i) { + cir.yield // fallthrough to exit + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldDropEmptySwitch(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + ] + // CHECK-NOT: llvm.switch + cir.return + } +// CHECK: cir.func @shouldDropEmptySwitch(%arg0: !s64i) +// CHECK-NOT: cir.switch.flat + + + cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + %1 = cir.load %0 : cir.ptr , !s32i + cir.switch (%1 : !s32i) [ + case (equal, 3) { + cir.return + ^bb1: // no predecessors + cir.break + } + ] + } + cir.return + } + +// CHECK: cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ +// CHECK: 3: ^bb2 +// CHECK: ] +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.return +// CHECK: ^bb3: // no predecessors +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 +// CHECK: cir.br ^bb5 +// CHECK: ^bb5: // pred: ^bb4 +// CHECK: cir.return +// CHECK: } + + + cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + cir.scope { + %5 = cir.load %0 : cir.ptr , !s32i + cir.switch (%5 : !s32i) [ + case (equal, 0) { + cir.scope { + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.if %9 { + cir.break + } + } + cir.break + } + ] + } + %3 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + cir.return %4 : !s32i + } +// CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { +// CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ +// CHECK: 0: ^bb2 +// CHECK: ] +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // pred: ^bb2 +// CHECK: cir.brcond {{%.*}} ^bb4, ^bb5 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.br ^bb7 +// CHECK: ^bb5: // pred: ^bb3 +// CHECK: cir.br ^bb6 +// CHECK: ^bb6: // pred: ^bb5 +// CHECK: cir.br ^bb7 +// CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 +// CHECK: cir.br ^bb8 +// CHECK: ^bb8: // pred: ^bb7 +// CHECK: cir.return %9 : !s32i +// CHECK: } + +} diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp new file mode 100644 index 000000000000..b378c7364475 --- /dev/null +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -0,0 +1,329 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck 
--input-file=%t.cir %s + +void sw1(int a) { + switch (int b = 1; a) { + case 0: + b = b + 1; + break; + case 1: + break; + case 2: { + b = b + 1; + int yolo = 100; + break; + } + } +} +// CHECK: cir.func @_Z3sw1i +// CHECK: cir.switch (%3 : !s32i) [ +// CHECK-NEXT: case (equal, 0) { +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %4 = cir.alloca !s32i, cir.ptr , ["yolo", init] +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i +// CHECK-NEXT: cir.store %7, %1 : !s32i, cir.ptr +// CHECK-NEXT: %8 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: cir.store %8, %4 : !s32i, cir.ptr +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } + +void sw2(int a) { + switch (int yolo = 2; a) { + case 3: + // "fomo" has the same lifetime as "yolo" + int fomo = 0; + yolo = yolo + fomo; + break; + } +} + +// CHECK: cir.func @_Z3sw2i +// CHECK: cir.scope { +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["yolo", init] +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["fomo", init] +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %2 : !s32i, cir.ptr + +void sw3(int a) { + switch (a) { + default: + break; + } +} + +// CHECK: cir.func @_Z3sw3i +// CHECK: cir.scope { +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: ] + +int sw4(int a) { + switch (a) { + case 42: { + return 3; + } + default: + return 2; + } + return 0; +} + +// CHECK: cir.func @_Z3sw4i +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, 42) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %6 : !s32i +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (default) { +// CHECK-NEXT: %5 = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %6 : !s32i +// CHECK-NEXT: } +// CHECK-NEXT: ] + +void sw5(int a) { + switch (a) { + case 1:; + } +} + +// CHECK: cir.func @_Z3sw5i +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.yield + +void sw6(int a) { + switch (a) { + case 0: + case 1: + case 2: + break; + case 3: + case 4: + case 5: + break; + } +} + +// CHECK: cir.func @_Z3sw6i +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } + +void sw7(int a) { + switch (a) { + case 0: + case 1: + case 2: + int x; + case 3: + case 4: + case 5: + break; + } +} + +// CHECK: cir.func @_Z3sw7i +// CHECK: case (anyof, [0, 1, 2] : !s32i) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, 
+// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } + +void sw8(int a) { + switch (a) + { + case 3: + break; + case 4: + default: + break; + } +} + +//CHECK: cir.func @_Z3sw8i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.break +//CHECK-NEXT: }, +//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.break +//CHECK-NEXT: } + +void sw9(int a) { + switch (a) + { + case 3: + break; + default: + case 4: + break; + } +} + +//CHECK: cir.func @_Z3sw9i +//CHECK: case (equal, 3) { +//CHECK-NEXT: cir.break +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK: case (equal, 4) +//CHECK-NEXT: cir.break +//CHECK-NEXT: } + +void sw10(int a) { + switch (a) + { + case 3: + break; + case 4: + default: + case 5: + break; + } +} + +//CHECK: cir.func @_Z4sw10i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.break +//CHECK-NEXT: }, +//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK-NEXT: case (equal, 5) { +//CHECK-NEXT: cir.break +//CHECK-NEXT: } + +void sw11(int a) { + switch (a) + { + case 3: + break; + case 4: + case 5: + default: + case 6: + case 7: + break; + } +} + +//CHECK: cir.func @_Z4sw11i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.break +//CHECK-NEXT: }, +//CHECK-NEXT: case (anyof, [4, 5] : !s32i) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield +//CHECK-NEXT: } +//CHECK-NEXT: case (anyof, [6, 7] : !s32i) { +//CHECK-NEXT: cir.break +//CHECK-NEXT: } + +void sw12(int a) { + switch (a) + { + case 3: + return; + break; + } +} + +// CHECK: cir.func @_Z4sw12i +// CHECK: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: cir.break +// CHECK-NEXT: } + +void sw13(int a, int b) { + switch (a) { + case 1: + switch (b) { + case 2: + break; + } + } +} + +// CHECK: cir.func @_Z4sw13ii +// CHECK: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK: } +// CHECK: cir.return + +void fallthrough(int x) { + switch (x) { + case 1: + __attribute__((fallthrough)); + case 2: + break; + default: + break; + } +} + +// CHECK: cir.func @_Z11fallthroughi +// CHECK: cir.scope { +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/ternary.cir b/clang/test/CIR/CodeGen/ternary.cir new file mode 100644 index 000000000000..1589fee6f6be --- /dev/null +++ b/clang/test/CIR/CodeGen/ternary.cir @@ -0,0 +1,47 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, 
%2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } + +// CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool +// CHECK: cir.brcond %4 ^bb1, ^bb2 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.br ^bb3(%5 : !s32i) +// CHECK: ^bb2: // pred: ^bb0 +// CHECK: %6 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: cir.br ^bb3(%6 : !s32i) +// CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.store %7, %1 : !s32i, cir.ptr +// CHECK: %8 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.return %8 : !s32i +// CHECK: } + +} diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp new file mode 100644 index 000000000000..5ce164624409 --- /dev/null +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -0,0 +1,76 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int x(int y) { + return y > 0 ? 3 : 5; +} + +// CHECK: cir.func @_Z1xi +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool +// CHECK: %5 = cir.ternary(%4, true { +// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.yield %7 : !s32i +// CHECK: }, false { +// CHECK: %7 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: cir.yield %7 : !s32i +// CHECK: }) : (!cir.bool) -> !s32i +// CHECK: cir.store %5, %1 : !s32i, cir.ptr +// CHECK: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.return %6 : !s32i +// CHECK: } + +typedef enum { + API_A, + API_EnumSize = 0x7fffffff +} APIType; + +void oba(const char *); + +void m(APIType api) { + ((api == API_A) ? (static_cast(0)) : oba("yo.cpp")); +} + +// CHECK: cir.func @_Z1m7APIType +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["api", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %1 = cir.load %0 : cir.ptr , !u32i +// CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %4 = cir.cast(integral, %3 : !u32i), !s32i +// CHECK: %5 = cir.cmp(eq, %2, %4) : !s32i, !cir.bool +// CHECK: cir.ternary(%5, true { +// CHECK: %6 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.yield +// CHECK: }, false { +// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_Z3obaPKc(%7) : (!cir.ptr) -> () +// CHECK: cir.yield +// CHECK: }) : (!cir.bool) -> () +// CHECK: cir.return +// CHECK: } + +int foo(int a, int b) { + if (a < b ? 
0 : a) + return -1; + return 0; +} + +// CHECK: cir.func @_Z3fooii +// CHECK: [[A0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[B0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[CMP:%.*]] = cir.cmp(lt, [[A0]], [[B0]]) : !s32i, !cir.bool +// CHECK: [[RES:%.*]] = cir.ternary([[CMP]], true { +// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.yield [[ZERO]] : !s32i +// CHECK: }, false { +// CHECK: [[A1:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: cir.yield [[A1]] : !s32i +// CHECK: }) : (!cir.bool) -> !s32i +// CHECK: [[RES_CAST:%.*]] = cir.cast(int_to_bool, [[RES]] : !s32i), !cir.bool +// CHECK: cir.if [[RES_CAST]] diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp new file mode 100644 index 000000000000..f501ad1be9d8 --- /dev/null +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -0,0 +1,92 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -DNON_CANONICAL_CMP_RESULTS -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=NONCANONICAL-BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -DNON_CANONICAL_CMP_RESULTS -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=NONCANONICAL-AFTER + +#include "Inputs/std-compare.h" + +// BEFORE: #cmp3way_info_partial_ltn1eq0gt1unn127_ = #cir.cmp3way_info +// BEFORE: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info +// BEFORE: !ty_22std3A3A__13A3Apartial_ordering22 = !cir.struct} +// BEFORE: !ty_22std3A3A__13A3Astrong_ordering22 = !cir.struct} + +auto three_way_strong(int x, int y) { + return x <=> y; +} + +// BEFORE: cir.func @_Z16three_way_strongii +// BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// BEFORE: } + +// AFTER: cir.func @_Z16three_way_strongii +// AFTER: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// AFTER: } + +// NONCANONICAL-BEFORE: #cmp3way_info_strong_lt1eq2gt3_ = #cir.cmp3way_info +// NONCANONICAL-BEFORE: cir.func @_Z16three_way_strongii +// NONCANONICAL-BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_lt1eq2gt3_) : !s8i +// NONCANONICAL-BEFORE: } + +// NONCANONICAL-AFTER: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info +// NONCANONICAL-AFTER: cir.func @_Z16three_way_strongii +// NONCANONICAL-AFTER: %[[#CMP3WAY_RESULT:]] = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#NEGONE:]] = cir.const(#cir.int<-1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_NEGONE:]] = cir.cmp(eq, %[[#CMP3WAY_RESULT]], %[[#NEGONE]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %[[#A:]] = cir.ternary(%[[#CMP_TO_NEGONE]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#ONE]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#CMP3WAY_RESULT]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : 
(!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ZERO:]] = cir.const(#cir.int<0> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#TWO:]] = cir.const(#cir.int<2> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ZERO:]] = cir.cmp(eq, %[[#A]], %[[#ZERO]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#CMP_TO_ZERO]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#TWO]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#A]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE2:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#THREE:]] = cir.const(#cir.int<3> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ONE:]] = cir.cmp(eq, %[[#B]], %[[#ONE2]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_TO_ONE]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#THREE]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#B]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER: } + +auto three_way_weak(float x, float y) { + return x <=> y; +} + +// BEFORE: cir.func @_Z14three_way_weakff +// BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !cir.float, %{{.+}}, #cmp3way_info_partial_ltn1eq0gt1unn127_) : !s8i +// BEFORE: } + +// AFTER: cir.func @_Z14three_way_weakff +// AFTER: %[[#LHS:]] = cir.load %0 : cir.ptr , !cir.float +// AFTER-NEXT: %[[#RHS:]] = cir.load %1 : cir.ptr , !cir.float +// AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i +// AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i +// AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// AFTER-NEXT: %[[#UNORDERED:]] = cir.const(#cir.int<-127> : !s8i) : !s8i +// AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_GT:]] = cir.cmp(gt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.ternary(%[[#CMP_EQ]], true { +// AFTER-NEXT: cir.yield %[[#EQ]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#UNORDERED]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %[[#CMP_GT_RES:]] = cir.ternary(%[[#CMP_GT]], true { +// AFTER-NEXT: cir.yield %[[#GT]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#CMP_EQ_RES]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_LT]], true { +// AFTER-NEXT: cir.yield %[[#LT]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#CMP_GT_RES]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER: } diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp new file mode 100644 index 000000000000..9c390ebb8136 --- /dev/null +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +double d(int a, int b) { + if (b == 0) + throw "Division by zero condition!"; + return (a/b); +} + +// CHECK: cir.if %10 { +// CHECK-NEXT: %11 = cir.alloc_exception(!cir.ptr) -> > +// CHECK-NEXT: %12 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.store %13, %11 : !cir.ptr, cir.ptr > +// CHECK-NEXT: 
cir.throw(%11 : !cir.ptr>, @_ZTIPKc) +// CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c new file mode 100644 index 000000000000..2a3ebda00744 --- /dev/null +++ b/clang/test/CIR/CodeGen/tls.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +extern __thread int b; +int c(void) { return *&b; } +// CIR: cir.global "private" external tls_dyn @b : !s32i +// CIR: cir.func @c() -> !s32i +// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : cir.ptr + +__thread int a; +// CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i + +// LLVM: @b = external thread_local global i32 +// LLVM: @a = thread_local global i32 0 + +// LLVM-LABEL: @c +// LLVM: = call ptr @llvm.threadlocal.address.p0(ptr @b) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/trap.cpp b/clang/test/CIR/CodeGen/trap.cpp new file mode 100644 index 000000000000..2d1089421876 --- /dev/null +++ b/clang/test/CIR/CodeGen/trap.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(); + +void basic() { + foo(); + __builtin_trap(); +} + +// CHECK: cir.func @_Z5basicv() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.trap +// CHECK-NEXT: } + +void code_after_unreachable() { + foo(); + __builtin_trap(); + foo(); +} + +// CHECK: cir.func @_Z22code_after_unreachablev() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.trap +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp new file mode 100644 index 000000000000..47a29ebc90df --- /dev/null +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -0,0 +1,87 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +double division(int a, int b); + +// CHECK: cir.func @_Z2tcv() +unsigned long long tc() { + int x = 50, y = 3; + unsigned long long z; + + try { + // CHECK: cir.scope { + // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["msg"] + // CHECK: %[[idx:.*]] = cir.alloca !s32i, cir.ptr , ["idx"] + + // CHECK: %[[try_eh:.*]] = cir.try { + // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__exception_ptr"] + // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] + int a = 4; + z = division(x, y); + // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> !cir.double + a++; + + // CHECK: cir.catch(%[[try_eh]] : !cir.ptr, [ + } catch (int idx) { + // CHECK: type (#cir.global_view<@_ZTIi> : !cir.ptr) + // CHECK: { + // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : cir.ptr , !s32i + // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, cir.ptr + z = 98; + idx++; + } catch (const char* msg) { + // CHECK: type (#cir.global_view<@_ZTIPKc> : !cir.ptr) + // CHECK: { + // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: cir.store 
%[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > + z = 99; + (void)msg[0]; + } // CHECK: #cir.unwind + // CHECK: cir.resume + // CHECK-NEXT: }]) + + return z; +} + +// CHECK: cir.func @_Z3tc2v +unsigned long long tc2() { + int x = 50, y = 3; + unsigned long long z; + + try { + int a = 4; + z = division(x, y); + a++; + } catch (int idx) { + z = 98; + idx++; + } catch (const char* msg) { + z = 99; + (void)msg[0]; + } catch (...) { + // CHECK: type (#cir.all) + // CHECK: cir.catch_param + // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + z = 100; + } + + return z; +} + +// CHECK: cir.func @_Z3tc3v +unsigned long long tc3() { + int x = 50, y = 3; + unsigned long long z; + + try { + z = division(x, y); + } catch (...) { + // CHECK: type (#cir.all) + // CHECK: cir.catch_param + // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + z = 100; + } + + return z; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/typedef.c b/clang/test/CIR/CodeGen/typedef.c new file mode 100644 index 000000000000..aa55270ce13a --- /dev/null +++ b/clang/test/CIR/CodeGen/typedef.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void local_typedef() { + typedef struct {int a;} Struct; + Struct s; +} + +//CHECK: cir.func no_proto @local_typedef() +//CHECK: {{.*}} = cir.alloca !ty_22Struct22, cir.ptr , ["s"] {alignment = 4 : i64} +//CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/types-nullptr.cpp b/clang/test/CIR/CodeGen/types-nullptr.cpp new file mode 100644 index 000000000000..e84c386417a7 --- /dev/null +++ b/clang/test/CIR/CodeGen/types-nullptr.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef decltype(nullptr) nullptr_t; +void f() { nullptr_t t = nullptr; } + +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %1 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c new file mode 100644 index 000000000000..18db058b67e5 --- /dev/null +++ b/clang/test/CIR/CodeGen/types.c @@ -0,0 +1,46 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir +// RUN: FileCheck --input-file=%t.cpp.cir --check-prefix=CHECK-CPP %s + +int t0(int i) { return i; } +unsigned int t1(unsigned int i) { return i; } + +char t2(char i) { return i; } +unsigned char t3(unsigned char i) { return i; } + +short t4(short i) { return i; } +unsigned short t5(unsigned short i) { return i; } + +float t6(float i) { return i; } +double t7(double i) { return i; } +long double t10(long double i) { return i; } + +void t8(void) {} + +#ifdef __cplusplus +bool t9(bool b) { return b; } +#endif + +// CHECK: cir.func @t0(%arg0: !s32i loc({{.*}})) -> !s32i +// CHECK: cir.func @t1(%arg0: !u32i loc({{.*}})) -> !u32i +// CHECK: cir.func @t2(%arg0: !s8i loc({{.*}})) -> !s8i +// CHECK: cir.func @t3(%arg0: !u8i loc({{.*}})) -> !u8i +// CHECK: cir.func @t4(%arg0: !s16i loc({{.*}})) -> !s16i +// CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i +// CHECK: cir.func @t6(%arg0: !cir.float loc({{.*}})) -> !cir.float +// CHECK: cir.func @t7(%arg0: !cir.double loc({{.*}})) -> !cir.double +// CHECK: cir.func @t10(%arg0: !cir.long_double loc({{.*}})) -> !cir.long_double +// CHECK: cir.func 
@t8() + +// CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i +// CHECK-CPP: cir.func @_Z2t1j(%arg0: !u32i loc({{.*}})) -> !u32i +// CHECK-CPP: cir.func @_Z2t2c(%arg0: !s8i loc({{.*}})) -> !s8i +// CHECK-CPP: cir.func @_Z2t3h(%arg0: !u8i loc({{.*}})) -> !u8i +// CHECK-CPP: cir.func @_Z2t4s(%arg0: !s16i loc({{.*}})) -> !s16i +// CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i +// CHECK-CPP: cir.func @_Z2t6f(%arg0: !cir.float loc({{.*}})) -> !cir.float +// CHECK-CPP: cir.func @_Z2t7d(%arg0: !cir.double loc({{.*}})) -> !cir.double +// CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: !cir.long_double loc({{.*}})) -> !cir.long_double +// CHECK-CPP: cir.func @_Z2t8v() +// CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp new file mode 100644 index 000000000000..92eb404b1204 --- /dev/null +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +struct MyIntPointer { + int *ptr = nullptr; + int read() const { return *ptr; } +}; + +void foo() { + MyIntPointer p; + (void)p.read(); +} + +// CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv +// CHECK: %2 = cir.load %0 +// CHECK: %3 = cir.get_member %2[0] {name = "ptr"} +// CHECK: %4 = cir.load deref %3 : cir.ptr > +// CHECK: %5 = cir.load %4 \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/unary.c b/clang/test/CIR/CodeGen/unary.c new file mode 100644 index 000000000000..e364808f9579 --- /dev/null +++ b/clang/test/CIR/CodeGen/unary.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void valueNegation(int i, short s, long l, float f, double d) { +// CHECK: cir.func @valueNegation( + !i; + // CHECK: %[[#INT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s32i + // CHECK: %[[#INT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#INT]] : !s32i), !cir.bool + // CHECK: = cir.unary(not, %[[#INT_TO_BOOL]]) : !cir.bool, !cir.bool + !s; + // CHECK: %[[#SHORT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s16i + // CHECK: %[[#SHORT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#SHORT]] : !s16i), !cir.bool + // CHECK: = cir.unary(not, %[[#SHORT_TO_BOOL]]) : !cir.bool, !cir.bool + !l; + // CHECK: %[[#LONG:]] = cir.load %{{[0-9]+}} : cir.ptr , !s64i + // CHECK: %[[#LONG_TO_BOOL:]] = cir.cast(int_to_bool, %[[#LONG]] : !s64i), !cir.bool + // CHECK: = cir.unary(not, %[[#LONG_TO_BOOL]]) : !cir.bool, !cir.bool + !f; + // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.float + // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : !cir.float), !cir.bool + // CHECK: %[[#FLOAT_NOT:]] = cir.unary(not, %[[#FLOAT_TO_BOOL]]) : !cir.bool, !cir.bool + // CHECK: = cir.cast(bool_to_int, %[[#FLOAT_NOT]] : !cir.bool), !s32i + !d; + // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.double + // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : !cir.double), !cir.bool + // CHECK: %[[#DOUBLE_NOT:]] = cir.unary(not, %[[#DOUBLE_TO_BOOL]]) : !cir.bool, !cir.bool + // CHECK: = cir.cast(bool_to_int, %[[#DOUBLE_NOT]] : !cir.bool), !s32i +} diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp new file mode 100644 index 000000000000..74a6c09b2f3c --- /dev/null +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -0,0 +1,232 @@ +// RUN: %clang_cc1 -triple 
x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned up0() { + unsigned a = 1; + return +a; +} + +// CHECK: cir.func @_Z3up0v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(plus, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +unsigned um0() { + unsigned a = 1; + return -a; +} + +// CHECK: cir.func @_Z3um0v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(minus, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +unsigned un0() { + unsigned a = 1; + return ~a; // a ^ -1 , not +} + +// CHECK: cir.func @_Z3un0v() -> !u32i +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(not, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +int inc0() { + int a = 1; + ++a; + return a; +} + +// CHECK: cir.func @_Z4inc0v() -> !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : !s32i + +int dec0() { + int a = 1; + --a; + return a; +} + +// CHECK: cir.func @_Z4dec0v() -> !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : !s32i + + +int inc1() { + int a = 1; + a++; + return a; +} + +// CHECK: cir.func @_Z4inc1v() -> !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : !s32i + +int dec1() { + int a = 1; + a--; + return a; +} + +// CHECK: cir.func @_Z4dec1v() -> !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: 
cir.store %[[#ATMP]], %[[#A]] : !s32i +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : !s32i + +// Ensure the increment is performed after the assignment to b. +int inc2() { + int a = 1; + int b = a++; + return b; +} + +// CHECK: cir.func @_Z4inc2v() -> !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i +// CHECK: %[[#ATOB:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#ATOB]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: cir.store %[[#ATOB]], %[[#B]] +// CHECK: %[[#B_TO_OUTPUT:]] = cir.load %[[#B]] +// CHECK: cir.store %[[#B_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : !s32i + +int *inc_p(int *i) { + --i; + ++i; + return i; +} + +// CHECK: cir.func @_Z5inc_pPi(%arg0: !cir.ptr + +// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} +// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#dec_const:]] = cir.const(#cir.int<-1> : !s32i) : !s32i +// CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : !s32i), !cir.ptr + +// CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %[[#inc_const:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : !s32i), !cir.ptr + +void floats(float f) { +// CHECK: cir.func @{{.+}}floats{{.+}} + +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.float, !cir.float + -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.float, !cir.float + ++f; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float + --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float + f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float + f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float + + !f; + // CHECK: %[[#F_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.float), !cir.bool + // CHECK: = cir.unary(not, %[[#F_BOOL]]) : !cir.bool, !cir.bool +} + +void doubles(double d) { +// CHECK: cir.func @{{.+}}doubles{{.+}} + +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.double, !cir.double + -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.double, !cir.double + ++d; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double + --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double + d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double + d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double + + !d; + // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.double), !cir.bool + // CHECK: = cir.unary(not, %[[#D_BOOL]]) : !cir.bool, !cir.bool +} + +void pointers(int *p) { +// CHECK: cir.func @{{[^ ]+}}pointers + // CHECK: %[[#P:]] = cir.alloca !cir.ptr, cir.ptr > + + +p; + // CHECK: cir.unary(plus, %{{.+}}) : !cir.ptr, !cir.ptr + + ++p; + // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = 
cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + --p; + // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + p++; + // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + p--; + // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + + !p; + // %[[BOOLPTR:]] = cir.cast(ptr_to_bool, %15 : !cir.ptr), !cir.bool + // cir.unary(not, %[[BOOLPTR]]) : !cir.bool, !cir.bool +} + +void chars(char c) { +// CHECK: cir.func @{{.+}}chars{{.+}} + + +c; + // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: cir.unary(plus, %[[#PROMO]]) : !s32i, !s32i + -c; + // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: cir.unary(minus, %[[#PROMO]]) : !s32i, !s32i + + // Chars can go through some integer promotion codegen paths even when not promoted. + ++c; // CHECK: cir.unary(inc, %7) : !s8i, !s8i + --c; // CHECK: cir.unary(dec, %9) : !s8i, !s8i + c++; // CHECK: cir.unary(inc, %11) : !s8i, !s8i + c--; // CHECK: cir.unary(dec, %13) : !s8i, !s8i + + !c; + // CHECK: %[[#C_BOOL:]] = cir.cast(int_to_bool, %{{[0-9]+}} : !s8i), !cir.bool + // CHECK: cir.unary(not, %[[#C_BOOL]]) : !cir.bool, !cir.bool +} diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c new file mode 100644 index 000000000000..54d4e0516c25 --- /dev/null +++ b/clang/test/CIR/CodeGen/union-init.c @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// XFAIL: * + +typedef union { + int value; + struct { + int x : 16; + int y : 16; + }; +} A; + +void foo(int x) { + A a = {.x = x}; +} + +// CHECK: cir.func @foo(%arg0: !s32i loc({{.*}})) +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<4294901760> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.binop(and, [[TMP6]], [[TMP9]]) : !u32i +// CHECK: [[TMP11:%.*]] = cir.binop(or, [[TMP10]], [[TMP8]]) : !u32i +// CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP13:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[TMP14:%.*]] = cir.cast(integral, [[TMP13]] : !s32i), !u32i +// CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : cir.ptr , !u32i +// CHECK: [[TMP16:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// 
CHECK: [[TMP17:%.*]] = cir.binop(and, [[TMP14]], [[TMP16]]) : !u32i
+// CHECK: [[TMP18:%.*]] = cir.const(#cir.int<16> : !u32i) : !u32i
+// CHECK: [[TMP19:%.*]] = cir.shift(left, [[TMP17]] : !u32i, [[TMP18]] : !u32i) -> !u32i
+// CHECK: [[TMP20:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i
+// CHECK: [[TMP21:%.*]] = cir.binop(and, [[TMP15]], [[TMP20]]) : !u32i
+// CHECK: [[TMP22:%.*]] = cir.binop(or, [[TMP21]], [[TMP19]]) : !u32i
+// CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, cir.ptr
+// CHECK: cir.return
diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp
new file mode 100644
index 000000000000..cffa1721256a
--- /dev/null
+++ b/clang/test/CIR/CodeGen/union.cpp
@@ -0,0 +1,91 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+typedef struct { int x; } yolo;
+typedef union { yolo y; struct { int lifecnt; }; } yolm;
+typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2;
+typedef union { yolo y; struct { bool life; int genpad; }; } yolm3;
+
+// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct, !cir.float} #cir.record.decl.ast>
+// CHECK-DAG: !ty_22anon2E522 = !cir.struct} #cir.record.decl.ast>
+// CHECK-DAG: !ty_22anon2E122 = !cir.struct} #cir.record.decl.ast>
+// CHECK-DAG: !ty_22yolo22 = !cir.struct} #cir.record.decl.ast>
+// CHECK-DAG: !ty_22anon2E322 = !cir.struct>, !cir.int} #cir.record.decl.ast>
+
+// CHECK-DAG: !ty_22yolm22 = !cir.struct} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>}>
+// CHECK-DAG: !ty_22yolm322 = !cir.struct} #cir.record.decl.ast>, !cir.struct} #cir.record.decl.ast>}>
+// CHECK-DAG: !ty_22yolm222 = !cir.struct} #cir.record.decl.ast>, !cir.struct>, !cir.int} #cir.record.decl.ast>}>
+
+// Should generate a union type with all members preserved.
+union U {
+  bool b;
+  short s;
+  int i;
+  float f;
+  double d;
+};
+// CHECK-DAG: !ty_22U22 = !cir.struct, !cir.int, !cir.float, !cir.double}>
+
+// Should generate unions with complex members.
+union U2 {
+  bool b;
+  struct Dummy {
+    short s;
+    float f;
+  } s;
+} u2;
+// CHECK-DAG: !cir.struct, !cir.float} #cir.record.decl.ast>} #cir.record.decl.ast>
+
+// Should generate unions without padding.
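+// Illustrative aside, not verified by the checks in this file: a union
+// adopts the size and alignment of its widest member, so U3 below is as
+// large as U itself and needs no tail padding. Under that assumption, a
+// guard such as
+//   static_assert(sizeof(U3) == sizeof(U), "no tail padding expected");
+// would hold.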
+union U3 { + short b; + U u; +} u3; +// CHECK-DAG: !ty_22U322 = !cir.struct, !cir.struct, !cir.int, !cir.float, !cir.double}>} #cir.record.decl.ast> + +void m() { + yolm q; + yolm2 q2; + yolm3 q3; +} + +// CHECK: cir.func @_Z1mv() +// CHECK: cir.alloca !ty_22yolm22, cir.ptr , ["q"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22yolm222, cir.ptr , ["q2"] {alignment = 8 : i64} +// CHECK: cir.alloca !ty_22yolm322, cir.ptr , ["q3"] {alignment = 4 : i64} + +void shouldGenerateUnionAccess(union U u) { + u.b = true; + // CHECK: %[[#BASE:]] = cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.bool, cir.ptr + u.b; + // CHECK: cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + u.i = 1; + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !s32i, cir.ptr + u.i; + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + u.f = 0.1F; + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.float, cir.ptr + u.f; + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + u.d = 0.1; + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.double, cir.ptr + u.d; + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr +} + +typedef union { + short a; + int b; +} A; + +void noCrushOnDifferentSizes() { + A a = {0}; + // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr + // CHECK: %[[#TMP2:]] = cir.const(#cir.zero : !ty_anon_struct) : !ty_anon_struct + // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, cir.ptr +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/unreachable.cpp b/clang/test/CIR/CodeGen/unreachable.cpp new file mode 100644 index 000000000000..c617fe8c6212 --- /dev/null +++ b/clang/test/CIR/CodeGen/unreachable.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(); + +void basic() { + foo(); + __builtin_unreachable(); +} + +// CHECK: cir.func @_Z5basicv() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.unreachable +// CHECK-NEXT: } + +void code_after_unreachable() { + foo(); + __builtin_unreachable(); + foo(); +} + +// CHECK: cir.func @_Z22code_after_unreachablev() +// CHECK: cir.call @_Z3foov() : () -> () +// CHECK: cir.unreachable +// CHECK: ^{{.+}}: +// CHECK: cir.call @_Z3foov() : () -> () +// CHECK: cir.return +// CHECK: } diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c new file mode 100644 index 000000000000..90ab27cc8ae5 --- /dev/null +++ b/clang/test/CIR/CodeGen/variadics.c @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -std=c++20 -triple aarch64-none-linux-android24 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef __builtin_va_list va_list; + +#define va_start(ap, param) __builtin_va_start(ap, param) +#define va_end(ap) 
__builtin_va_end(ap) +#define va_arg(ap, type) __builtin_va_arg(ap, type) +#define va_copy(dst, src) __builtin_va_copy(dst, src) + +// CHECK: [[VALISTTYPE:!.+va_list.*]] = !cir.struct !s32i + va_list args, args_copy; + va_start(args, count); + // CHECK: cir.va.start %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]> + + va_copy(args_copy, args); + // CHECK: cir.va.copy %{{[0-9]+}} to %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]>, !cir.ptr<[[VALISTTYPE]]> + + int sum = 0; + for(int i = 0; i < count; i++) { + sum += va_arg(args, int); + // CHECK: %{{[0-9]+}} = cir.va.arg %{{[0-9]+}} : (!cir.ptr<[[VALISTTYPE]]>) -> !s32i + } + + va_end(args); + // CHECK: cir.va.end %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]> + + return count > 0 ? sum / count : 0; +} + +int test(void) { + return average(5, 1, 2, 3, 4, 5); + // CHECK: cir.call @{{.*}}average{{.*}}(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : (!s32i, !s32i, !s32i, !s32i, !s32i, !s32i) -> !s32i +} diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp new file mode 100644 index 000000000000..a966f82069c6 --- /dev/null +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +struct A { + int a; +}; + +struct B: virtual A { + int b; +}; + +void ppp() { B b; } + + +// Vtable definition for B +// CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> + +// VTT for B. +// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> + +// CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE + +// Type info name for B +// CIR: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array + +// CIR: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> + +// Type info name for A +// CIR: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array + +// Type info A. +// CIR: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> + +// Type info B. 
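+// A sketch of how the trailing operands decode, assuming the Itanium C++
+// ABI layout of __vmi_class_type_info: __flags = 0, __base_count = 1, and
+// the __offset_flags word for the virtual base A is
+//   (-24 << 8) | (__virtual_mask | __public_mask) == (-24 << 8) | 0x3 == -6141
+// where -24 is the position of A's virtual-base-offset slot relative to the
+// vtable address point.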
+// CIR: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> + + +// LLVM: @_ZTV1B = linkonce_odr global { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] } +// LLVM: @_ZTT1B = linkonce_odr global [1 x ptr] [ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)] +// LLVM: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr +// LLVM: @_ZTS1B = linkonce_odr global [2 x i8] c"1B" +// LLVM: @_ZTVN10__cxxabiv117__class_type_infoE = external global ptr +// LLVM: @_ZTS1A = linkonce_odr global [2 x i8] c"1A" +// LLVM: @_ZTI1A = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS1A } +// LLVM: @_ZTI1B = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i32 2), ptr @_ZTS1B, i32 0, i32 1, ptr @_ZTI1A, i64 -6141 } diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp new file mode 100644 index 000000000000..8464ce3173e2 --- /dev/null +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +namespace std { + template + void vector::resize(size_type __sz) { + size_type __cs = size(); + if (__cs) {} + } +} // namespace std + +// CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["__sz", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !u64i, cir.ptr , ["__cs", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i +// CHECK: cir.store %4, %2 : !u64i, cir.ptr +// CHECK: cir.scope { +// CHECK: %5 = cir.load %2 : cir.ptr , !u64i +// CHECK: %6 = cir.cast(int_to_bool, %5 : !u64i), !cir.bool +// CHECK: cir.if %6 { +// CHECK: } +// CHECK: } +// CHECK: cir.return + +void m() { + std::vector a; + int i = 43; + a.resize(i); +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp new file mode 100644 index 000000000000..8cfbd869e3e1 --- /dev/null +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -0,0 +1,160 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); +typedef long long vll2 __attribute__((vector_size(16))); +typedef unsigned short vus2 __attribute__((vector_size(4))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vi4 a = { 1, 2, 3, 4 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + + // Non-const vector initialization. 
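+  // (Since x is only known at runtime, each element is evaluated separately
+  // and the vector is assembled with cir.vec.create; unlike the constant
+  // case above, this form is not expected to fold into a cir.const.)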
+ vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + + // Incomplete vector initialization. + vi4 bb = { x, x + 1 }; + // CHECK: %[[#zero:]] = cir.const(#cir.int<0> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : !cir.vector + + // Scalar to vector conversion, a.k.a. vector splat. Only valid as an + // operand of a binary operator, not as a regular conversion. + bb = a + 7; + // CHECK: %[[#seven:]] = cir.const(#cir.int<7> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#seven]] : !s32i, !cir.vector + + // Vector to vector conversion + vd2 bbb = { }; + bb = (vi4)bbb; + // CHECK: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.vector), !cir.vector + + // Extract element + int c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + + // Insert element + a[x] = x; + // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : !cir.vector + // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vi4 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 h = a % b; + // CHECK: %{{[0-9]+}} = cir.binop(rem, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 i = a & b; + // CHECK: %{{[0-9]+}} = cir.binop(and, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 j = a | b; + // CHECK: %{{[0-9]+}} = cir.binop(or, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 k = a ^ b; + // CHECK: %{{[0-9]+}} = cir.binop(xor, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vi4 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 n = ~a; + // CHECK: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // Ternary conditional operator + vi4 tc = a ? 
b : d;
+  // CHECK: %{{[0-9]+}} = cir.vec.ternary(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+
+  // Comparisons
+  vi4 o = a == b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+  vi4 p = a != b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+  vi4 q = a < b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+  vi4 r = a > b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+  vi4 s = a <= b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+  vi4 t = a >= b;
+  // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector
+
+  // __builtin_shufflevector
+  vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1);
+  // CHECK: %{{[0-9]+}} = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<7> : !s64i, #cir.int<5> : !s64i, #cir.int<3> : !s64i, #cir.int<1> : !s64i] : !cir.vector
+  vi4 v = __builtin_shufflevector(a, b);
+  // CHECK: %{{[0-9]+}} = cir.vec.shuffle.dynamic %{{[0-9]+}} : !cir.vector, %{{[0-9]+}} : !cir.vector
+}
+
+void vector_double_test(int x, double y) {
+  // Vector constant. Not yet implemented. Expected results will change from
+  // cir.vec.create to cir.const.
+  vd2 a = { 1.5, 2.5 };
+  // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector
+
+  // Non-const vector initialization.
+  vd2 b = { y, y + 1.0 };
+  // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector
+
+  // Incomplete vector initialization.
+  vd2 bb = { y };
+  // CHECK: %[[#dzero:]] = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double
+  // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : !cir.vector
+
+  // Scalar to vector conversion, a.k.a. vector splat. Only valid as an
+  // operand of a binary operator, not as a regular conversion.
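+  // (Illustrative, not part of the checked output: in `bb = a + 2.5` the
+  // scalar 2.5 is splatted to {2.5, 2.5} before the add, whereas a
+  // standalone conversion such as `vd2 v = (vd2)2.5;` is not accepted.)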
+ bb = a + 2.5; + // CHECK: %[[#twohalf:]] = cir.const(#cir.fp<2.500000e+00> : !cir.double) : !cir.double + // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#twohalf]] : !cir.double, !cir.vector + + // Extract element + double c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + + // Insert element + a[x] = y; + // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : !cir.vector + // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vd2 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vd2 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vd2 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // Comparisons + vll2 o = a == b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + vll2 p = a != b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + vll2 q = a < b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + vll2 r = a > b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + vll2 s = a <= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + vll2 t = a >= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // __builtin_convertvector + vus2 w = __builtin_convertvector(a, vus2); + // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.vector), !cir.vector +} diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c new file mode 100644 index 000000000000..687d264987db --- /dev/null +++ b/clang/test/CIR/CodeGen/vla.c @@ -0,0 +1,95 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +// CHECK: cir.func @f0(%arg0: !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u64i +// CHECK: [[TMP4:%.*]] = cir.stack_save : !cir.ptr +// CHECK: cir.store [[TMP4]], [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP5:%.*]] = cir.alloca !s32i, cir.ptr , [[TMP3]] : !u64i, ["vla"] {alignment = 16 : i64} +// CHECK: [[TMP6:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: cir.stack_restore [[TMP6]] : !cir.ptr +void f0(int len) { + int a[len]; +} + +// CHECK: cir.func @f1 +// CHECK-NOT: cir.stack_save +// CHECK-NOT: cir.stack_restore +// CHECK: cir.return +int f1(int n) { + return sizeof(int[n]); +} + +// CHECK: cir.func @f2 +// CHECK: cir.stack_save +// DONT_CHECK: cir.stack_restore +// CHECK: cir.return +int f2(int x) { + int vla[x]; + return vla[x-1]; +} + +// CHECK: cir.func 
@f3
+// CHECK: cir.stack_save
+// CHECK: cir.stack_restore
+// CHECK: cir.return
+void f3(int count) {
+  int a[count];
+
+  do { } while (0);
+  if (a[0] != 3) {}
+}
+
+
+// CHECK: cir.func @f4
+// CHECK-NOT: cir.stack_save
+// CHECK-NOT: cir.stack_restore
+// CHECK: cir.return
+void f4(int count) {
+  // Make sure we emit sizes correctly in some obscure cases
+  int (*a[5])[count];
+  int (*b)[][count];
+}
+
+// FIXME(cir): the test below is commented out because the stack_restore
+// operation is not emitted for the if branch
+// void f5(unsigned x) {
+//   while (1) {
+//     char s[x];
+//     if (x > 5) //: stack restore is missing here
+//       break;
+//   }
+// }
+
+// Check that no errors happen
+void function1(short width, int data[][width]) {}
+void function2(short width, int data[][width][width]) {}
+void f6(void) {
+  int bork[4][13][15];
+
+  function1(1, bork[2]);
+  function2(1, bork);
+}
+
+static int GLOB;
+int f7(int n)
+{
+  GLOB = 0;
+  char b[1][n+3];
+
+  __typeof__(b[GLOB++]) c;
+  return GLOB;
+}
+
+double f8(int n, double (*p)[n][5]) {
+  return p[1][2][3];
+}
+
+int f9(unsigned n, char (*p)[n][n+1][6]) {
+  __typeof(p) p2 = (p + n/2) - n/4;
+
+  return p2 - p;
+}
diff --git a/clang/test/CIR/CodeGen/volatile.cpp b/clang/test/CIR/CodeGen/volatile.cpp
new file mode 100644
index 000000000000..5b8c13334ecf
--- /dev/null
+++ b/clang/test/CIR/CodeGen/volatile.cpp
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+int test_load(volatile int *ptr) {
+  return *ptr;
+}
+
+// CHECK: cir.func @_Z9test_loadPVi
+// CHECK: %{{.+}} = cir.load volatile
+
+void test_store(volatile int *ptr) {
+  *ptr = 42;
+}
+
+// CHECK: cir.func @_Z10test_storePVi
+// CHECK: cir.store volatile
+
+struct Foo {
+  int x;
+  volatile int y;
+  volatile int z: 4;
+};
+
+int test_load_field1(volatile Foo *ptr) {
+  return ptr->x;
+}
+
+// CHECK: cir.func @_Z16test_load_field1PV3Foo
+// CHECK: %[[MemberAddr:.*]] = cir.get_member
+// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]]
+
+int test_load_field2(Foo *ptr) {
+  return ptr->y;
+}
+
+// CHECK: cir.func @_Z16test_load_field2P3Foo
+// CHECK: %[[MemberAddr:.+]] = cir.get_member
+// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]]
+
+int test_load_field3(Foo *ptr) {
+  return ptr->z;
+}
+
+// CHECK: cir.func @_Z16test_load_field3P3Foo
+// CHECK: %[[MemberAddr:.+]] = cir.get_member
+// CHECK: %{{.+}} = cir.get_bitfield(#bfi_z, %[[MemberAddr:.+]] {is_volatile}
+
+void test_store_field1(volatile Foo *ptr) {
+  ptr->x = 42;
+}
+
+// CHECK: cir.func @_Z17test_store_field1PV3Foo
+// CHECK: %[[MemberAddr:.+]] = cir.get_member
+// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]]
+
+void test_store_field2(Foo *ptr) {
+  ptr->y = 42;
+}
+
+// CHECK: cir.func @_Z17test_store_field2P3Foo
+// CHECK: %[[MemberAddr:.+]] = cir.get_member
+// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]]
+
+void test_store_field3(Foo *ptr) {
+  ptr->z = 4;
+}
+
+// CHECK: cir.func @_Z17test_store_field3P3Foo
+// CHECK: %[[MemberAddr:.+]] = cir.get_member
+// CHECK: cir.set_bitfield(#bfi_z, %[[MemberAddr:.+]] : !cir.ptr, %1 : !s32i) {is_volatile}
diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp
new file mode 100644
index 000000000000..caaadaa2ef0a
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp
@@ -0,0 +1,103 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir
+// RUN:
FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fno-rtti -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir --check-prefix=RTTI_DISABLED %s + +class A +{ +public: + A() noexcept {} + A(const A&) noexcept = default; + + virtual ~A() noexcept; + virtual const char* quack() const noexcept; +}; + +class B : public A +{ +public: + virtual ~B() noexcept {} +}; + +// Type info B. +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct>, !cir.ptr>, !cir.ptr>}> + +// vtable for A type +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> +// RTTI_DISABLED: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> + +// Class A +// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> + +// Class B +// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> +// RTTI_DISABLED: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> + +// B ctor => @B::B() +// Calls @A::A() and initialize __vptr with address of B's vtable. +// +// CHECK: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr +// RTTI_DISABLED: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr + +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %3, %4 : !cir.ptr>>, cir.ptr >>> +// CHECK: cir.return +// CHECK: } + +// foo - zero initialize object B and call ctor (@B::B()) +// +// CHECK: cir.func @_Z3foov() +// CHECK: cir.scope { +// CHECK: %0 = cir.alloca !ty_22B22, cir.ptr , ["agg.tmp.ensured"] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] +// CHECK: cir.store %1, %0 : ![[ClassB]], cir.ptr +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () +// CHECK: } +// CHECK: cir.return +// CHECK: } + +// Vtable definition for A +// CHECK: cir.global "private" external @_ZTV1A : ![[VTableTypeA]] {alignment = 8 : i64} + +// A ctor => @A::A() +// Calls @A::A() and initialize __vptr with address of A's vtable +// +// CHECK: cir.func linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %2, %3 : !cir.ptr>>, cir.ptr >>> +// CHECK: cir.return +// CHECK: } + +// vtable for B +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] +// RTTI_DISABLED: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] + +// vtable for 
__cxxabiv1::__si_class_type_info +// CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> +// RTTI_DISABLED-NOT: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> + +// typeinfo name for B +// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} +// RTTI_DISABLED-NOT: cir.global linkonce_odr @_ZTS1B + +// typeinfo for A +// CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr +// RTTI_DISABLED-NOT: cir.global "private" constant external @_ZTI1A : !cir.ptr + +// typeinfo for B +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr}> : ![[TypeInfoB]] +// RTTI_DISABLED-NOT: cir.global constant external @_ZTI1B + +// Checks for dtors in dtors.cpp + +void foo() { B(); } diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c new file mode 100644 index 000000000000..02adfeb53de2 --- /dev/null +++ b/clang/test/CIR/CodeGen/weak.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +extern void B (void); +static __typeof(B) A __attribute__ ((__weakref__("B"))); + +void active (void) +{ + A(); +} + +// CIR: cir.func extern_weak private @B() +// CIR: cir.func @active() +// CIR-NEXT: cir.call @B() : () -> () + +// LLVM: declare !dbg !{{.}} extern_weak void @B() +// LLVM: define void @active() +// LLVM-NEXT: call void @B() diff --git a/clang/test/CIR/CodeGen/wide-string.cpp b/clang/test/CIR/CodeGen/wide-string.cpp new file mode 100644 index 000000000000..1b3cacc4dd49 --- /dev/null +++ b/clang/test/CIR/CodeGen/wide-string.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +const char16_t *test_utf16() { + return u"你好世界"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array + +const char32_t *test_utf32() { + return U"你好世界"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array + +const char16_t *test_zero16() { + return u"\0\0\0\0"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array + +const char32_t *test_zero32() { + return U"\0\0\0\0"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array diff --git a/clang/test/CIR/IR/aliases.cir b/clang/test/CIR/IR/aliases.cir new file mode 100644 index 000000000000..8d6cbd04c7a2 --- /dev/null +++ b/clang/test/CIR/IR/aliases.cir @@ -0,0 +1,15 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +module { + // CHECK: @testAnonRecordsAlias + cir.func @testAnonRecordsAlias() { + // CHECK: cir.alloca !ty_anon_struct, cir.ptr + %0 = cir.alloca !cir.struct}>, cir.ptr }>>, ["A"] + // CHECK: cir.alloca !ty_anon_struct1, cir.ptr + %1 = 
cir.alloca !cir.struct}>, cir.ptr }>>, ["B"] + // CHECK: cir.alloca !ty_anon_union, cir.ptr + %2 = cir.alloca !cir.struct}>, cir.ptr }>>, ["C"] + cir.return + } +} diff --git a/clang/test/CIR/IR/alloca.cir b/clang/test/CIR/IR/alloca.cir new file mode 100644 index 000000000000..71293f6a0948 --- /dev/null +++ b/clang/test/CIR/IR/alloca.cir @@ -0,0 +1,21 @@ +// Test the CIR operations can parse and print correctly (roundtrip) + +// RUN: cir-opt %s | cir-opt | FileCheck %s +!s32i = !cir.int +!u64i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + cir.return + } +} + +//CHECK: module { + +//CHECK-NEXT: cir.func @foo(%arg0: !s32i) { +//CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} +//CHECK-NEXT: cir.return +//CHECK-NEXT: } + +//CHECK: } diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir new file mode 100644 index 000000000000..6653cdbfbe2e --- /dev/null +++ b/clang/test/CIR/IR/array.cir @@ -0,0 +1,13 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!u32i = !cir.int + +module { + cir.func @arrays() { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + cir.return + } +} + +// CHECK: cir.func @arrays() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] diff --git a/clang/test/CIR/IR/attribute.cir b/clang/test/CIR/IR/attribute.cir new file mode 100644 index 000000000000..4c9d4083ad4a --- /dev/null +++ b/clang/test/CIR/IR/attribute.cir @@ -0,0 +1,25 @@ +// RUN: cir-opt %s -split-input-file -allow-unregistered-dialect -verify-diagnostics | FileCheck %s + +cir.func @float_attrs_pass() { + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.float + float_attr = #cir.fp<2.> : !cir.float + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<-2.000000e+00> : !cir.float + float_attr = #cir.fp<-2.> : !cir.float + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.double + float_attr = #cir.fp<2.> : !cir.double + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.long_double + float_attr = #cir.fp<2.> : !cir.long_double + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.long_double + float_attr = #cir.fp<2.> : !cir.long_double + } : () -> () + cir.return +} \ No newline at end of file diff --git a/clang/test/CIR/IR/await.cir b/clang/test/CIR/IR/await.cir new file mode 100644 index 000000000000..c62e6b7b88b6 --- /dev/null +++ b/clang/test/CIR/IR/await.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func coroutine @checkPrintParse(%arg0 : !cir.bool) { + cir.await(user, ready : { + cir.condition(%arg0) + }, suspend : { + cir.yield + }, resume : { + cir.yield + },) + cir.return +} + +// CHECK: cir.func coroutine @checkPrintParse +// CHECK: cir.await(user, ready : { +// CHECK: cir.condition(%arg0) +// CHECK: }, suspend : { +// CHECK: cir.yield +// CHECK: }, resume : { +// CHECK: cir.yield +// CHECK: },) diff --git a/clang/test/CIR/IR/bit.cir b/clang/test/CIR/IR/bit.cir new file mode 100644 index 000000000000..974f22606cdc --- /dev/null +++ b/clang/test/CIR/IR/bit.cir @@ -0,0 +1,75 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!s8i = !cir.int +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +module { + cir.func @test() { + %s8 = 
cir.const(#cir.int<1> : !s8i) : !s8i
+    %s16 = cir.const(#cir.int<1> : !s16i) : !s16i
+    %s32 = cir.const(#cir.int<1> : !s32i) : !s32i
+    %s64 = cir.const(#cir.int<1> : !s64i) : !s64i
+    %u8 = cir.const(#cir.int<1> : !u8i) : !u8i
+    %u16 = cir.const(#cir.int<1> : !u16i) : !u16i
+    %u32 = cir.const(#cir.int<1> : !u32i) : !u32i
+    %u64 = cir.const(#cir.int<1> : !u64i) : !u64i
+
+    %2 = cir.bit.clrsb(%s32 : !s32i) : !s32i
+    %3 = cir.bit.clrsb(%s64 : !s64i) : !s32i
+
+    %4 = cir.bit.clz(%u16 : !u16i) : !s32i
+    %5 = cir.bit.clz(%u32 : !u32i) : !s32i
+    %6 = cir.bit.clz(%u64 : !u64i) : !s32i
+
+    %7 = cir.bit.ctz(%u16 : !u16i) : !s32i
+    %8 = cir.bit.ctz(%u32 : !u32i) : !s32i
+    %9 = cir.bit.ctz(%u64 : !u64i) : !s32i
+
+    %10 = cir.bit.ffs(%s32 : !s32i) : !s32i
+    %11 = cir.bit.ffs(%s64 : !s64i) : !s32i
+
+    %12 = cir.bit.parity(%u32 : !u32i) : !s32i
+    %13 = cir.bit.parity(%u64 : !u64i) : !s32i
+
+    %14 = cir.bit.popcount(%u16 : !u16i) : !s32i
+    %15 = cir.bit.popcount(%u32 : !u32i) : !s32i
+    %16 = cir.bit.popcount(%u64 : !u64i) : !s32i
+
+    cir.return
+  }
+}
+
+// CHECK: module {
+// CHECK-NEXT: cir.func @test() {
+// CHECK-NEXT: %0 = cir.const(#cir.int<1> : !s8i) : !s8i
+// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s16i) : !s16i
+// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i
+// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s64i) : !s64i
+// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !u8i) : !u8i
+// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u16i) : !u16i
+// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !u32i) : !u32i
+// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !u64i) : !u64i
+// CHECK-NEXT: %8 = cir.bit.clrsb(%2 : !s32i) : !s32i
+// CHECK-NEXT: %9 = cir.bit.clrsb(%3 : !s64i) : !s32i
+// CHECK-NEXT: %10 = cir.bit.clz(%5 : !u16i) : !s32i
+// CHECK-NEXT: %11 = cir.bit.clz(%6 : !u32i) : !s32i
+// CHECK-NEXT: %12 = cir.bit.clz(%7 : !u64i) : !s32i
+// CHECK-NEXT: %13 = cir.bit.ctz(%5 : !u16i) : !s32i
+// CHECK-NEXT: %14 = cir.bit.ctz(%6 : !u32i) : !s32i
+// CHECK-NEXT: %15 = cir.bit.ctz(%7 : !u64i) : !s32i
+// CHECK-NEXT: %16 = cir.bit.ffs(%2 : !s32i) : !s32i
+// CHECK-NEXT: %17 = cir.bit.ffs(%3 : !s64i) : !s32i
+// CHECK-NEXT: %18 = cir.bit.parity(%6 : !u32i) : !s32i
+// CHECK-NEXT: %19 = cir.bit.parity(%7 : !u64i) : !s32i
+// CHECK-NEXT: %20 = cir.bit.popcount(%5 : !u16i) : !s32i
+// CHECK-NEXT: %21 = cir.bit.popcount(%6 : !u32i) : !s32i
+// CHECK-NEXT: %22 = cir.bit.popcount(%7 : !u64i) : !s32i
+// CHECK-NEXT: cir.return
+// CHECK-NEXT: }
+// CHECK-NEXT: }
diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir
new file mode 100644
index 000000000000..7f418908a94c
--- /dev/null
+++ b/clang/test/CIR/IR/branch.cir
@@ -0,0 +1,21 @@
+// RUN: cir-opt %s | FileCheck %s
+
+cir.func @test_branch_parsing(%arg0: !cir.bool) {
+  // CHECK: cir.br ^bb1
+  cir.br ^bb1
+^bb1:
+  // CHECK: cir.br ^bb2(%arg0 : !cir.bool)
+  cir.br ^bb2(%arg0 : !cir.bool)
+// CHECK: ^bb2(%0: !cir.bool):
+^bb2(%x: !cir.bool):
+  cir.return
+}
+
+cir.func @test_conditional_branch_parsing(%arg0 : !cir.bool) {
+  // CHECK: cir.brcond %arg0 ^bb1, ^bb2
+  cir.brcond %arg0 ^bb1, ^bb2
+^bb1:
+  cir.return
+^bb2:
+  cir.return
+}
diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir
new file mode 100644
index 000000000000..2ed1fa062868
--- /dev/null
+++ b/clang/test/CIR/IR/call.cir
@@ -0,0 +1,13 @@
+// RUN: cir-opt %s | FileCheck %s
+
+!s32i = !cir.int
+!fnptr = !cir.ptr)>>
+
+module {
+  cir.func @ind(%fnptr: !fnptr, %a : !s32i) {
+    %r = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i
+    cir.return
+  }
+}
+
+// CHECK: %0 = cir.call
%arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir new file mode 100644 index 000000000000..e8b5989fd8ad --- /dev/null +++ b/clang/test/CIR/IR/cast.cir @@ -0,0 +1,24 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s +!s32i = !cir.int + +module { + cir.func @yolo(%arg0 : !s32i) { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool + + %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %4 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return + } + + cir.func @bitcast(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr + cir.return + } +} + +// CHECK: cir.func @yolo(%arg0: !s32i) +// CHECK: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: cir.func @bitcast +// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir new file mode 100644 index 000000000000..97d58223b1db --- /dev/null +++ b/clang/test/CIR/IR/cir-ops.cir @@ -0,0 +1,101 @@ +// Test the CIR operations can parse and print correctly (roundtrip) + +// RUN: cir-opt %s | cir-opt | FileCheck %s +!s32i = !cir.int +!s8i = !cir.int +!u64i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + cir.store %arg0, %0 : !s32i, cir.ptr + %1 = cir.load %0 : cir.ptr , !s32i + cir.return %1 : !s32i + } + + cir.func @f3() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + %1 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + cir.return %2 : !s32i + } + + cir.func @if0(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %1 : !s32i, cir.ptr + %2 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + %3 = cir.load %1 : cir.ptr , !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.if %4 { + %6 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %6, %0 : !s32i, cir.ptr + } else { + %6 = cir.const(#cir.int<4> : !s32i) : !s32i + cir.store %6, %0 : !s32i, cir.ptr + } + %5 = cir.load %0 : cir.ptr , !s32i + cir.return %5 : !s32i + } + + cir.func @s0() { + %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} + cir.scope { + %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} + } + cir.return + } + + cir.func @os() { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} + %3 = cir.load %0 : cir.ptr >, !cir.ptr + %4 = cir.objsize(%3 : , max) -> !u64i + %5 = cir.objsize(%3 : , min) -> !u64i + cir.return + } +} + +// CHECK: module { + +// CHECK-NEXT: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: } + +// CHECK-NEXT: cir.func @f3() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: } + +// CHECK: @if0(%arg0: !s32i) -> !s32i { +// CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: 
cir.if %4 { +// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: } else { +// CHECK-NEXT: %6 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: } + +// CHECK: cir.func @s0() { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} +// CHECK-NEXT: } + +// CHECK: cir.func @os() { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.objsize(%1 : , max) -> !u64i +// CHECK-NEXT: %3 = cir.objsize(%1 : , min) -> !u64i +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: } diff --git a/clang/test/CIR/IR/constptrattr.cir b/clang/test/CIR/IR/constptrattr.cir new file mode 100644 index 000000000000..30b79a882ac1 --- /dev/null +++ b/clang/test/CIR/IR/constptrattr.cir @@ -0,0 +1,8 @@ +// RUN: cir-opt %s | FileCheck %s + +!s32i = !cir.int + +cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr +// CHECK: cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr +cir.global external @null_ptr = #cir.ptr : !cir.ptr +// CHECK: cir.global external @null_ptr = #cir.ptr : !cir.ptr diff --git a/clang/test/CIR/IR/copy.cir b/clang/test/CIR/IR/copy.cir new file mode 100644 index 000000000000..9a689036985e --- /dev/null +++ b/clang/test/CIR/IR/copy.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!s32i = !cir.int +module { + cir.func @shouldParseCopyOp(%arg0 : !cir.ptr, %arg1 : !cir.ptr) { + cir.copy %arg0 to %arg1 : !cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/IR/data-member-ptr.cir b/clang/test/CIR/IR/data-member-ptr.cir new file mode 100644 index 000000000000..6370877291a4 --- /dev/null +++ b/clang/test/CIR/IR/data-member-ptr.cir @@ -0,0 +1,46 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!s32i = !cir.int +!ty_22Foo22 = !cir.struct + +#global_ptr = #cir.data_member<0> : !cir.data_member + +module { + cir.func @null_member() { + %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member + cir.return + } + + cir.func @get_runtime_member(%arg0: !cir.ptr) { + %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member + %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } + + cir.func @get_global_member(%arg0: !cir.ptr) { + %0 = cir.const(#global_ptr) : !cir.data_member + %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// CHECK: module { + +// CHECK-NEXT: cir.func @null_member() { +// CHECK-NEXT: %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK-NEXT: cir.func @get_runtime_member(%arg0: !cir.ptr) { +// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK-NEXT: cir.func @get_global_member(%arg0: !cir.ptr) { +// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: } diff --git a/clang/test/CIR/IR/do-while.cir b/clang/test/CIR/IR/do-while.cir new file mode 100644 
index 000000000000..6664b4cfe4bf --- /dev/null +++ b/clang/test/CIR/IR/do-while.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingAndParsing (%arg0 : !cir.bool) -> !cir.void { + cir.do { + cir.yield + } while { + cir.condition(%arg0) + } + cir.return +} + +// CHECK: testPrintingAndParsing +// CHECK: cir.do { +// CHECK: cir.yield +// CHECK: } while { +// CHECK: cir.condition(%arg0) +// CHECK: } diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir new file mode 100644 index 000000000000..aa93eea43559 --- /dev/null +++ b/clang/test/CIR/IR/exceptions.cir @@ -0,0 +1,24 @@ +// RUN: cir-opt %s | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %3 : !s32i + } + + cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { + %11 = cir.scope { + %10 = cir.try { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + // CHECK: cir.try_call exception(%2) @div(%arg0, %arg1) : (!cir.ptr>, !s32i, !s32i) -> !s32i + %1 = cir.load %0 : cir.ptr >, !cir.ptr + cir.yield %1 : !cir.ptr + } : () -> !cir.ptr + cir.yield %10 : !cir.ptr + } : !cir.ptr + cir.return %11 : !cir.ptr + } +} \ No newline at end of file diff --git a/clang/test/CIR/IR/float.cir b/clang/test/CIR/IR/float.cir new file mode 100644 index 000000000000..13aeb2d97261 --- /dev/null +++ b/clang/test/CIR/IR/float.cir @@ -0,0 +1,90 @@ +// RUN: cir-opt %s | FileCheck %s + +// Adapted from mlir/test/IR/parser.mlir + +// CHECK-LABEL: @f32_special_values +cir.func @f32_special_values() { + // F32 signaling NaNs. + // CHECK: cir.const(#cir.fp<0x7F800001> : !cir.float) : !cir.float + %0 = cir.const(#cir.fp<0x7F800001> : !cir.float) : !cir.float + // CHECK: cir.const(#cir.fp<0x7FBFFFFF> : !cir.float) : !cir.float + %1 = cir.const(#cir.fp<0x7FBFFFFF> : !cir.float) : !cir.float + + // F32 quiet NaNs. + // CHECK: cir.const(#cir.fp<0x7FC00000> : !cir.float) : !cir.float + %2 = cir.const(#cir.fp<0x7FC00000> : !cir.float) : !cir.float + // CHECK: cir.const(#cir.fp<0xFFFFFFFF> : !cir.float) : !cir.float + %3 = cir.const(#cir.fp<0xFFFFFFFF> : !cir.float) : !cir.float + + // F32 positive infinity. + // CHECK: cir.const(#cir.fp<0x7F800000> : !cir.float) : !cir.float + %4 = cir.const(#cir.fp<0x7F800000> : !cir.float) : !cir.float + // F32 negative infinity. + // CHECK: cir.const(#cir.fp<0xFF800000> : !cir.float) : !cir.float + %5 = cir.const(#cir.fp<0xFF800000> : !cir.float) : !cir.float + + cir.return +} + +// CHECK-LABEL: @f64_special_values +cir.func @f64_special_values() { + // F64 signaling NaNs. + // CHECK: cir.const(#cir.fp<0x7FF0000000000001> : !cir.double) : !cir.double + %0 = cir.const(#cir.fp<0x7FF0000000000001> : !cir.double) : !cir.double + // CHECK: cir.const(#cir.fp<0x7FF8000000000000> : !cir.double) : !cir.double + %1 = cir.const(#cir.fp<0x7FF8000000000000> : !cir.double) : !cir.double + + // F64 quiet NaNs. + // CHECK: cir.const(#cir.fp<0x7FF0000001000000> : !cir.double) : !cir.double + %2 = cir.const(#cir.fp<0x7FF0000001000000> : !cir.double) : !cir.double + // CHECK: cir.const(#cir.fp<0xFFF0000001000000> : !cir.double) : !cir.double + %3 = cir.const(#cir.fp<0xFFF0000001000000> : !cir.double) : !cir.double + + // F64 positive infinity. 
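+ // (Aside added in review, an explanatory note rather than original test
+ // content: IEEE-754 binary64 encodes [1 sign bit | 11 exponent bits |
+ // 52 mantissa bits]. An all-ones exponent with a zero mantissa is an
+ // infinity, which is why +inf below is 0x7FF0000000000000 and -inf merely
+ // flips the sign bit, giving 0xFFF0000000000000; the same exponent with a
+ // nonzero mantissa yields the NaN encodings exercised above.)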
+ // CHECK: cir.const(#cir.fp<0x7FF0000000000000> : !cir.double) : !cir.double + %4 = cir.const(#cir.fp<0x7FF0000000000000> : !cir.double) : !cir.double + // F64 negative infinity. + // CHECK: cir.const(#cir.fp<0xFFF0000000000000> : !cir.double) : !cir.double + %5 = cir.const(#cir.fp<0xFFF0000000000000> : !cir.double) : !cir.double + + // Check that values that can't be represented with the default format use + // hex instead. + // CHECK: cir.const(#cir.fp<0xC1CDC00000000000> : !cir.double) : !cir.double + %6 = cir.const(#cir.fp<0xC1CDC00000000000> : !cir.double) : !cir.double + + cir.return +} + +// CHECK-LABEL: @f80_special_values +cir.func @f80_special_values() { + // F80 signaling NaNs. + // CHECK: cir.const(#cir.fp<0x7FFFE000000000000001> : !cir.long_double) : !cir.long_double + %0 = cir.const(#cir.fp<0x7FFFE000000000000001> : !cir.long_double) : !cir.long_double + // CHECK: cir.const(#cir.fp<0x7FFFB000000000000011> : !cir.long_double) : !cir.long_double + %1 = cir.const(#cir.fp<0x7FFFB000000000000011> : !cir.long_double) : !cir.long_double + + // F80 quiet NaNs. + // CHECK: cir.const(#cir.fp<0x7FFFC000000000100000> : !cir.long_double) : !cir.long_double + %2 = cir.const(#cir.fp<0x7FFFC000000000100000> : !cir.long_double) : !cir.long_double + // CHECK: cir.const(#cir.fp<0x7FFFE000000001000000> : !cir.long_double) : !cir.long_double + %3 = cir.const(#cir.fp<0x7FFFE000000001000000> : !cir.long_double) : !cir.long_double + + // F80 positive infinity. + // CHECK: cir.const(#cir.fp<0x7FFF8000000000000000> : !cir.long_double) : !cir.long_double + %4 = cir.const(#cir.fp<0x7FFF8000000000000000> : !cir.long_double) : !cir.long_double + // F80 negative infinity. + // CHECK: cir.const(#cir.fp<0xFFFF8000000000000000> : !cir.long_double) : !cir.long_double + %5 = cir.const(#cir.fp<0xFFFF8000000000000000> : !cir.long_double) : !cir.long_double + + cir.return +} + +// We want to print floats in exponential notation with 6 significant digits, +// but it may lead to precision loss when parsing back, in which case we print +// the decimal form instead. +// CHECK-LABEL: @f32_potential_precision_loss() +cir.func @f32_potential_precision_loss() { + // CHECK: cir.const(#cir.fp<1.23697901> : !cir.float) : !cir.float + %0 = cir.const(#cir.fp<1.23697901> : !cir.float) : !cir.float + cir.return +} diff --git a/clang/test/CIR/IR/for.cir b/clang/test/CIR/IR/for.cir new file mode 100644 index 000000000000..62b82976cc68 --- /dev/null +++ b/clang/test/CIR/IR/for.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingParsing(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.yield + } step { + cir.yield + } + cir.return +} + +// CHECK: @testPrintingParsing +// CHECK: cir.for : cond { +// CHECK: cir.condition(%arg0) +// CHECK: } body { +// CHECK: cir.yield +// CHECK: } step { +// CHECK: cir.yield +// CHECK: } diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir new file mode 100644 index 000000000000..01f6b54877c8 --- /dev/null +++ b/clang/test/CIR/IR/func.cir @@ -0,0 +1,45 @@ +// RUN: cir-opt %s | FileCheck %s +!s32i = !cir.int +!u8i = !cir.int +module { + cir.func @l0() { + cir.return + } + + cir.func @l1() alias(@l0) + + cir.func private @variadic(!s32i, ...) -> !s32i + + // Should accept call with only the required parameters. + cir.func @variadic_call_1(%0: !s32i) -> !s32i { + %9 = cir.call @variadic(%0) : (!s32i) -> !s32i + cir.return %9 : !s32i + } + + // Should accept calls with variadic parameters.
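+ // (Review note, an inference from the call below rather than documented
+ // semantics: only the fixed parameters are checked against @variadic's
+ // prototype (!s32i, ...); the extra operands, here a second !s32i and a
+ // !u8i, are simply spelled out in the call's trailing function type.)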
+ cir.func @variadic_call_2(%0: !s32i, %1: !s32i, %2: !u8i) -> !s32i { + %9 = cir.call @variadic(%0, %1, %2) : (!s32i, !s32i, !u8i) -> !s32i + cir.return %9 : !s32i + } + + // Should parse custom assembly format. + cir.func @parse_func_type() -> () { + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["fn", init] {alignment = 8 : i64} + cir.return + } + + // Should parse void return types. + cir.func @parse_explicit_void_func() -> !cir.void { + cir.return + } + + // Should parse omitted void return type. + cir.func @parse_func_type_with_omitted_void() { + cir.return + } + + // Should parse variadic no-proto functions. + cir.func no_proto private @no_proto(...) -> !s32i +} + +// CHECK: cir.func @l0() diff --git a/clang/test/CIR/IR/getmember.cir b/clang/test/CIR/IR/getmember.cir new file mode 100644 index 000000000000..5bfd8f24d161 --- /dev/null +++ b/clang/test/CIR/IR/getmember.cir @@ -0,0 +1,24 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!u16i = !cir.int +!u32i = !cir.int + +!ty_22Class22 = !cir.struct +!ty_22Incomplete22 = !cir.struct +!ty_22Struct22 = !cir.struct + +module { + cir.func @shouldGetStructMember(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } + + // FIXME: remove bypass once codegen for CIR class records is patched. + cir.func @shouldBypassMemberTypeCheckForClassRecords(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + cir.return + } +} diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir new file mode 100644 index 000000000000..a9a5e1e5809c --- /dev/null +++ b/clang/test/CIR/IR/global.cir @@ -0,0 +1,106 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!ty_22Init22 = !cir.struct +module { + cir.global external @a = #cir.int<3> : !s32i + cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> + cir.global external @b = #cir.const_array<"example\00" : !cir.array> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.ptr : !cir.ptr}> : !cir.struct}> + cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} + cir.global "private" internal @c : !s32i + cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr + cir.func @use_global() { + %0 = cir.get_global @a : cir.ptr + cir.return + } + cir.global external @table = #cir.global_view<@s> : !cir.ptr + cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr + cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> + + // Note MLIR requires "private" for global declarations, should get + // rid of this somehow in favor of clarity? 
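+ // (Hedged clarification appended in review: in MLIR's symbol framework a
+ // global without an initializer is a declaration, and declarations must
+ // carry "private" symbol visibility; CIR models the C/C++ linkage with its
+ // own external/internal keywords, which is why both spellings appear on
+ // the declarations below.)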
+ cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + cir.global "private" constant external @type_info_A : !cir.ptr + cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> + + cir.global external @type_info_B = #cir.typeinfo<{ + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, + #cir.global_view<@type_info_name_B> : !cir.ptr, + #cir.global_view<@type_info_A> : !cir.ptr}> + : !cir.struct, !cir.ptr, !cir.ptr}> + cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) + cir.func private @_ZN4InitD1Ev(!cir.ptr) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + %1 = cir.const(#cir.int<3> : !s8i) : !s8i + cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () + } dtor { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () + } + + cir.func @f31() global_ctor { + cir.return + } + + cir.func @f32() global_ctor(777) { + cir.return + } + + cir.func @f33() global_dtor { + cir.return + } + + cir.func @f34() global_dtor(777) { + cir.return + } + + cir.global external tls_dyn @model0 = #cir.int<0> : !s32i + cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i + cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i + cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + + cir.global "private" external tls_dyn @batata : !s32i + cir.func @f35() { + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } +} + +// CHECK: cir.global external @a = #cir.int<3> : !s32i +// CHECK: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i]> : !cir.array +// CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> +// CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" internal @c : !s32i +// CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr + + +// CHECK: cir.func @use_global() +// CHECK-NEXT: %0 = cir.get_global @a : cir.ptr + +// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { +// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i +// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () +// CHECK-NEXT: } + +// CHECK: cir.func @f31() global_ctor +// CHECK: cir.func @f32() global_ctor(777) +// CHECK: cir.func @f33() global_dtor +// CHECK: cir.func @f34() global_dtor(777) + +// CHECK: cir.global external tls_dyn @model0 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + +// CHECK: cir.global "private" external tls_dyn @batata : !s32i +// CHECK: cir.func @f35() { +// CHECK: %0 = cir.get_global thread_local @batata : cir.ptr +// CHECK: cir.return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/inlineAttr.cir b/clang/test/CIR/IR/inlineAttr.cir new file mode 100644 index 000000000000..76de9acbb736 --- /dev/null +++ b/clang/test/CIR/IR/inlineAttr.cir @@ -0,0 +1,15 @@ +// RUN: cir-opt %s | FileCheck %s -check-prefix=CIR +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck 
%s -check-prefix=MLIR + +#fn_attr = #cir})> + +module { + cir.func @l0() extra(#fn_attr) { + cir.return + } +} + +// CIR: #fn_attr = #cir})> +// CIR: cir.func @l0() extra(#fn_attr) { + +// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #fn_attr} diff --git a/clang/test/CIR/IR/int.cir b/clang/test/CIR/IR/int.cir new file mode 100644 index 000000000000..3acaacd011f7 --- /dev/null +++ b/clang/test/CIR/IR/int.cir @@ -0,0 +1,39 @@ +// module { +// cir.global external @a = #cir.int<255> : !cir.int +// } + +// RUN: cir-opt %s | FileCheck %s +!s8i = !cir.int +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +cir.func @validIntTypesAndAttributes() -> () { + + %1 = cir.const(#cir.int<-128> : !cir.int) : !s8i + %2 = cir.const(#cir.int<127> : !cir.int) : !s8i + %3 = cir.const(#cir.int<255> : !cir.int) : !u8i + + %4 = cir.const(#cir.int<-32768> : !cir.int) : !s16i + %5 = cir.const(#cir.int<32767> : !cir.int) : !s16i + %6 = cir.const(#cir.int<65535> : !cir.int) : !u16i + + %7 = cir.const(#cir.int<-2147483648> : !cir.int) : !s32i + %8 = cir.const(#cir.int<2147483647> : !cir.int) : !s32i + %9 = cir.const(#cir.int<4294967295> : !cir.int) : !u32i + + // FIXME: MLIR is emitting a "too large" error for this one. Not sure why. + // %10 = cir.const(#cir.int<-9223372036854775808> : !cir.int) : !s64i + %11 = cir.const(#cir.int<9223372036854775807> : !cir.int) : !s64i + %12 = cir.const(#cir.int<18446744073709551615> : !cir.int) : !u64i + + cir.return +} + +// No need to check stuff. If it parses, it's fine. +// CHECK: cir.func @validIntTypesAndAttributes() diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir new file mode 100644 index 000000000000..d2e38c82fa38 --- /dev/null +++ b/clang/test/CIR/IR/invalid.cir @@ -0,0 +1,1180 @@ +// Test attempts to build bogus CIR +// RUN: cir-opt %s -verify-diagnostics -split-input-file + +!u32i = !cir.int + +// expected-error@+2 {{'cir.const' op nullptr expects pointer type}} +cir.func @p0() { + %1 = cir.const(#cir.ptr : !cir.ptr) : !u32i + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +!u32i = !cir.int +// expected-error@+2 {{op result type ('!cir.int') must be '!cir.bool' for '#cir.bool : !cir.bool'}} +cir.func @b0() { + %1 = cir.const(#true) : !u32i + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +!u32i = !cir.int +cir.func @if0() { + %0 = cir.const(#true) : !cir.bool + // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} + cir.if %0 { + %6 = cir.const(#cir.int<3> : !u32i) : !u32i + cir.yield %6 : !u32i + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @yield0() { + %0 = cir.const(#true) : !cir.bool + cir.if %0 { // expected-error {{custom op 'cir.if' multi-block region must not omit terminator}} + cir.br ^a + ^a: + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @yieldbreak() { + %0 = cir.const(#true) : !cir.bool + cir.if %0 { + cir.break // expected-error {{op must be within a loop or switch}} + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @yieldcontinue() { + %0 = cir.const(#true) : !cir.bool + cir.if %0 { + cir.continue // expected-error {{op must be within a loop}} + } + cir.return +} + +// ----- + +!s32i = 
!cir.int +cir.func @s0() { + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ + case (equal, 5) { // expected-error {{custom op 'cir.switch' case regions must be explicitly terminated}} + %2 = cir.const(#cir.int<3> : !s32i) : !s32i + } + ] + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @s1() { + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ + case (equal, 5) { + } + ] // expected-error {{case region shall not be empty}} + cir.return +} + +// ----- + +cir.func @badstride(%x: !cir.ptr>) { + %idx = cir.const(#cir.int<2> : !cir.int) : !cir.int + %4 = cir.ptr_stride(%x : !cir.ptr>, %idx : !cir.int), !cir.ptr // expected-error {{requires the same type for first operand and result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast0(%arg0: !u32i) { + %1 = cir.cast(int_to_bool, %arg0 : !u32i), !u32i // expected-error {{requires !cir.bool type for result}} + cir.return +} + +// ----- + +cir.func @cast1(%arg1: !cir.float) { + %1 = cir.cast(int_to_bool, %arg1 : !cir.float), !cir.bool // expected-error {{requires !cir.int type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast2(%p: !cir.ptr) { + %2 = cir.cast(array_to_ptrdecay, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.array pointee}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast3(%p: !cir.ptr) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast4(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), !u32i // expected-error {{requires !cir.ptr or !cir.vector type for source and result}} + cir.return +} + +// ----- + +cir.func @cast5(%p: !cir.float) { + %2 = cir.cast(bool_to_float, %p : !cir.float), !cir.float // expected-error {{requires !cir.bool type for source}} + cir.return +} + +// ----- + +cir.func @cast6(%p: !cir.bool) { + %2 = cir.cast(bool_to_float, %p : !cir.bool), !cir.int // expected-error {{requires !cir.float type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast7(%p: !cir.ptr) { + %2 = cir.cast(ptr_to_bool, %p : !cir.ptr), !u32i // expected-error {{requires !cir.bool type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast8(%p: !u32i) { + %2 = cir.cast(ptr_to_bool, %p : !u32i), !cir.bool // expected-error {{requires !cir.ptr type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast9(%p : !u32i) { + %2 = cir.cast(integral, %p : !u32i), !cir.float // expected-error {{requires !cir.int type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast10(%p : !cir.float) { + %2 = cir.cast(integral, %p : !cir.float), !u32i // expected-error {{requires !cir.int type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast11(%p : !cir.float) { + %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requires !cir.float type for source and result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast12(%p : !u32i) { + %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requires !cir.float type for source and result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast13(%p : !u32i) { + %2 = cir.cast(float_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.float type for source}} + cir.return +} + +// ----- + +cir.func @cast14(%p : 
!cir.float) { + %2 = cir.cast(float_to_int, %p : !cir.float), !cir.float // expected-error {{requires !cir.int type for result}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast15(%p : !cir.ptr) { + %2 = cir.cast(int_to_ptr, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.int type for source}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast16(%p : !u64i) { + %2 = cir.cast(int_to_ptr, %p : !u64i), !u64i // expected-error {{requires !cir.ptr type for result}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast17(%p : !u64i) { + %2 = cir.cast(ptr_to_int, %p : !u64i), !u64i // expected-error {{requires !cir.ptr type for source}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast18(%p : !cir.ptr) { + %2 = cir.cast(ptr_to_int, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.int type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast19(%p : !u32i) { + %2 = cir.cast(float_to_bool, %p : !u32i), !cir.bool // expected-error {{requires !cir.float type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast20(%p : !cir.float) { + %2 = cir.cast(float_to_bool, %p : !cir.float), !u32i // expected-error {{requires !cir.bool type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast21(%p : !u32i) { + %2 = cir.cast(bool_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.bool type for source}} + cir.return +} + +// ----- + +cir.func @cast22(%p : !cir.bool) { + %2 = cir.cast(bool_to_int, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int type for result}} + cir.return +} + +// ----- + +cir.func @cast23(%p : !cir.bool) { + %2 = cir.cast(int_to_float, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast24(%p : !u32i) { + %2 = cir.cast(int_to_float, %p : !u32i), !cir.bool // expected-error {{requires !cir.float type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +!u8i = !cir.int +module { + // expected-error@+1 {{constant array element should match array element type}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> +} + +// ----- + +!u8i = !cir.int +module { + // expected-error@+1 {{constant array size should match type size}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> +} + +// ----- + +!u32i = !cir.int +module { + // expected-error@+1 {{constant array element for string literals expects !cir.int element type}} + cir.global external @b = #cir.const_array<"example\00" : !cir.array> +} + +// ----- + +module { + // expected-error@+1 {{expected type declaration for string literal}} + cir.global "private" constant external @".str2" = #cir.const_array<"example\00"> {alignment = 1 : i64} +} + +// ----- + +!u32i = !cir.int +module { + // expected-error@+1 {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} + cir.global @a = #cir.const_array<[0 : !u8i, -23 : !u8i, 33 : !u8i] : !cir.array> +} + +// ----- + +!u32i = !cir.int +module { + cir.global "private" external @v = #cir.int<3> : !u32i // expected-error {{private visibility not allowed with 'external' linkage}} +} + +// ----- + +!u32i = !cir.int +module { + cir.global "public" 
internal @v = #cir.int<3> : !u32i // expected-error {{public visibility not allowed with 'internal' linkage}} +} + +// ----- + +!u32i = !cir.int +module { + cir.global external @v = #cir.zero : !u32i // expected-error {{zero expects struct or array type}} +} + +// ----- + +!s32i = !cir.int +cir.func @vec_op_size() { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.vec.create(%0 : !s32i) : !cir.vector // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_op_type() { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const(#cir.int<2> : !u32i) : !u32i + %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : !cir.vector // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_extract_non_int_idx() { + %0 = cir.const(1.5e+00 : f64) : f64 + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector + %3 = cir.vec.extract %2[%0 : f64] : !cir.vector // expected-error {{expected '<'}} + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_extract_bad_type() { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector + %3 = cir.vec.extract %2[%1 : !s32i] : !cir.vector // expected-note {{prior use here}} + cir.store %3, %0 : !u32i, cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_extract_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' 'vec' must be CIR vector type, but got '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_insert_bad_type() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.const(#cir.int<0> : !u32i) : !u32i // expected-note {{prior use here}} + %3 = cir.vec.insert %2, %1[%0 : !s32i] : !cir.vector // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_insert_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' 'vec' must be CIR vector type, but got '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_ternary_non_vector1() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.ternary(%0, %1, %1) : !s32i, !cir.vector // expected-error {{'cir.vec.ternary' op operand #0 must be !cir.vector of !cir.int, but got '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_ternary_non_vector2() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.ternary(%1, %0, %0) : !cir.vector, !s32i // expected-error {{'cir.vec.ternary' op operand #1 must be CIR vector type, but got '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_ternary_different_size() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + 
%1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.create(%0, %0, %0, %0 : !s32i, !s32i, !s32i, !s32i) : !cir.vector + %3 = cir.vec.ternary(%1, %2, %2) : !cir.vector, !cir.vector // expected-error {{'cir.vec.ternary' op : the number of elements in '!cir.vector x 2>' and '!cir.vector x 4>' don't match}} + cir.return +} + +// ----- + +cir.func @vec_ternary_not_int(%p : !cir.float) { + %0 = cir.vec.create(%p, %p : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.ternary(%0, %0, %0) : !cir.vector, !cir.vector // expected-error {{'cir.vec.ternary' op operand #0 must be !cir.vector of !cir.int, but got '!cir.vector'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_shuffle_mismatch_args(%f : !cir.float, %n : !s32i) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.create(%n, %n : !s32i, !s32i) : !cir.vector // expected-note {{prior use here}} + %2 = cir.vec.shuffle(%0, %1 : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{use of value '%1' expects different type than prior uses: '!cir.vector' vs '!cir.vector x 2>}} + cir.return +} + +// ----- + +cir.func @vec_shuffle_non_ints(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.fp<1.000000e+00> : !cir.float, #cir.fp<1.000000e+00> : !cir.float] : !cir.vector // expected-error {{'cir.vec.shuffle' op all index values must be integers}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_shuffle_result_size(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{'cir.vec.shuffle' op : the number of elements in [#cir.int<1> : !cir.int, #cir.int<1> : !cir.int] and '!cir.vector' don't match}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_shuffle_result_element(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{'cir.vec.shuffle' op : element types of '!cir.vector' and '!cir.vector x 2>' don't match}} + cir.return +} + +// ----- + +cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} + cir.return +} + +// ----- + +cir.func coroutine @missing_condition() { + cir.scope { + cir.await(user, ready : { // expected-error {{ready region must end with cir.condition}} + cir.yield + }, suspend : { + cir.yield + }, resume : { + cir.yield + },) + } + cir.return +} + +// ----- + +!u8i = !cir.int +!u32i = !cir.int +module { + // Note MLIR requires "private" for global declarations, should get + // rid of this somehow in favor of clarity? 
+ cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + + // expected-error@+1 {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} + cir.global external @type_info_B = #cir.typeinfo<{ + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> + : !cir.struct}> +} + +// ----- + +module { + cir.func @l0() { + cir.return + } + + cir.func @l1() alias(@l0) { // expected-error {{function alias shall not have a body}} + cir.return + } +} + +// ----- + +module { + // expected-error@below {{expected 's' or 'u'}} + cir.func @l0(%arg0: !cir.int) -> () { + cir.return + } +} + +// // ----- + +module { + // expected-error@below {{expected integer width to be from 1 up to 64}} + cir.func @l0(%arg0: !cir.int) -> () { + cir.return + } +} + +// ----- + +module { + // expected-error@below {{integer value too large for the given type}} + cir.global external @a = #cir.int<256> : !cir.int + // expected-error@below {{integer value too large for the given type}} + cir.global external @b = #cir.int<-129> : !cir.int +} + +// ----- + +module { + // expected-error@+1 {{prototyped function must have at least one non-variadic input}} + cir.func private @variadic(...) -> !cir.int +} + +// ----- + +module { + // expected-error@+1 {{custom op 'cir.func' variadic arguments must be in the end of the argument list}} + cir.func @variadic(..., !cir.int) -> !cir.int +} + +// ----- + +module { + // expected-error@+1 {{functions only supports zero or one results}} + cir.func @variadic() -> (!cir.int, !cir.int) +} + +// ----- + +module { + cir.func private @variadic(!cir.int, !cir.int, ...) -> !cir.int + cir.func @call_variadic(%0: !cir.int) -> !cir.int { + // expected-error@+1 {{'cir.call' op too few operands for callee}} + %1 = cir.call @variadic(%0) : (!cir.int) -> !cir.int + cir.return %1 : !cir.int + } +} + +// ----- + +!s32i = !cir.int +cir.func @test_br() -> !s32i { + %0 = cir.const(#cir.int<0>: !s32i) : !s32i + // expected-error@below {{branch has 1 operands for successor #0, but target block has 0}} + cir.br ^bb1(%0 : !s32i) + ^bb1: + cir.return %0 : !s32i +} + +// ----- + +module { + cir.func private @test() -> !cir.void + cir.func @invalid_call() { + // expected-error@+1 {{'cir.call' op callee returns void but call has results}} + %1 = cir.call @test() : () -> (!cir.int) + cir.return + } +} + +// ----- + +module { + cir.func private @test() -> !cir.int + cir.func @invalid_call() { + // expected-error@+1 {{'cir.call' op result type mismatch: expected '!cir.int', but provided '!cir.int'}} + %1 = cir.call @test() : () -> (!cir.int) + cir.return + } +} + +// ----- + +module { + cir.func @invalid_return_type(%0 : !cir.int) -> !cir.int { + // expected-error@+1 {{'cir.return' op returns '!cir.int' but enclosing function returns '!cir.int'}} + cir.return %0 : !cir.int + } +} + +// ----- + +// expected-error@+1 {{invalid language keyword 'dummy'}} +module attributes {cir.lang = #cir.lang} { } + +// ----- + +module { + // Should not copy types with no data layout (unkonwn byte size). + cir.func @invalid_copy(%arg0 : !cir.ptr, %arg1 : !cir.ptr) { + // expected-error@+1 {{missing data layout for pointee type}} + cir.copy %arg0 to %arg1 : !cir.ptr + cir.return + } +} + +// ----- + +module { + // Should not copy to same address. 
+ cir.func @invalid_copy(%arg0 : !cir.ptr>) { + // expected-error@+1 {{source and destination are the same}} + cir.copy %arg0 to %arg0 : !cir.ptr> + cir.return + } +} + +// ----- + +!s8i = !cir.int +module { + // Should not memcpy with invalid length type. + cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !s8i) { + // expected-error@+1 {{memcpy length must be an unsigned integer}} + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !s8i, !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!s8i = !cir.int +!u32i = !cir.int +module { + // Should not memcpy non-void pointers. + cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !u32i) { + // expected-error@+1 {{memcpy src and dst must be void pointers}} + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !u32i, !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- +!s8i = !cir.int +!ty_22Init22 = !cir.struct +module { + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + } + // expected-error@+1 {{custom op 'cir.global' ctor region must have exactly one block}} +} + +// ----- +!s8i = !cir.int +#true = #cir.bool : !cir.bool +!ty_22Init22 = !cir.struct +module { + cir.func private @_ZN4InitC1Eb(!cir.ptr) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + cir.call @_ZN4InitC1Eb(%0) : (!cir.ptr) -> () + } dtor {} + // expected-error@+1 {{custom op 'cir.global' dtor region must have exactly one block}} +} + +// ----- +!s32i = !cir.int +!u8i = !cir.int +module { + cir.global "private" constant internal @".str" = #cir.const_array<"Division by zero condition!\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global "private" constant external @_ZTIPKc : !cir.ptr + cir.func @_Z8divisionii() { + %11 = cir.alloc_exception(!cir.ptr) -> > + %12 = cir.get_global @".str" : cir.ptr > + %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr + cir.store %13, %11 : !cir.ptr, cir.ptr > + cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct = !cir.struct +module { + cir.func @member_index_out_of_bounds(%arg0 : !cir.ptr) { + // expected-error@+1 {{member index out of bounds}} + %0 = cir.get_member %arg0[2] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct = !cir.struct +module { + cir.func @member_type_mismatch(%arg0 : !cir.ptr) { + // expected-error@+1 {{member type mismatch}} + %0 = cir.get_member %arg0[0] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +// expected-error@+1 {{anonymous structs must be complete}} +!struct = !cir.struct + +// ----- + +!u16i = !cir.int +// expected-error@+1 {{identified structs cannot have an empty name}} +!struct = !cir.struct + +// ----- + +// expected-error@+1 {{invalid self-reference within record}} +!struct = !cir.struct}> + +// ----- + +// expected-error@+1 {{record already defined}} +!struct = !cir.struct}> + +// ----- +!s32i = !cir.int +module { + cir.func @tmp(%arg0: !cir.float) { + // expected-error@+1 {{operand #0 must be primitive int}} + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !cir.float, ["tmp"] + cir.return + } +} + +// ----- + +!u8i = !cir.int +module { + cir.func @stack_save_type_mismatch() { + // expected-error@+1 {{must be CIR pointer type}} + %1 = cir.stack_save : !u8i + cir.return + } +} +// ----- + +!u8i = !cir.int +module { + cir.func @stack_restore_type_mismatch(%arg0 : !u8i) { + //
expected-error@+1 {{must be CIR pointer type}} + cir.stack_restore %arg0 : !u8i + cir.return + } +} + +// ----- + +!s8i = !cir.int +!u8i = !cir.int +cir.func @const_type_mismatch() -> () { + // expected-error@+1 {{'cir.const' op result type ('!cir.int') does not match value type ('!cir.int')}} + %2 = cir.const(#cir.int<0> : !s8i) : !u8i + cir.return +} + +// ----- + +!u16i = !cir.int + +// expected-error@+1 {{invalid kind of type specified}} +#invalid_type = #cir.data_member<0> : !u16i + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +// expected-error@+1 {{member type of a #cir.data_member attribute must match the attribute type}} +#invalid_member_ty = #cir.data_member<0> : !cir.data_member + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !u32i, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{'cir.get_runtime_member' op operand #0 must be !cir.struct*}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct +!struct2 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !struct2, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{record type does not match the member pointer type}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !struct1, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{result type does not match the member pointer type}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!incomplete_struct = !cir.struct + +// expected-error@+1 {{incomplete 'cir.struct' cannot be used to build a non-null data member pointer}} +#incomplete_cls_member = #cir.data_member<0> : !cir.data_member + + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @clrsb_invalid_input_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.clrsb' op operand #0 must be 32-bit signed integer or 64-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.clrsb(%arg0 : !u32i) : !s32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @clrsb_invalid_result_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.clrsb' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.clrsb(%arg0 : !s32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @clz_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.clz' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} + %0 = cir.bit.clz(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @clz_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.clz' op result #0 must be 32-bit signed integer, but got '!cir.int}} + %0 = cir.bit.clz(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @ctz_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.ctz' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got 
'!cir.int'}} + %0 = cir.bit.ctz(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @ctz_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.ctz' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.ctz(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @ffs_invalid_input_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.ffs' op operand #0 must be 32-bit signed integer or 64-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.ffs(%arg0 : !u32i) : !s32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @ffs_invalid_result_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.ffs' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.ffs(%arg0 : !s32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @parity_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.parity' op operand #0 must be 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} + %0 = cir.bit.parity(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @parity_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.parity' op result #0 must be 32-bit signed integer, but got '!cir.int}} + %0 = cir.bit.parity(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @popcount_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.popcount' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} + %0 = cir.bit.popcount(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @popcount_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.popcount' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.popcount(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { + // expected-error@+1 {{only operates on integer values}} + %12 = cir.atomic.fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float + cir.return +} + +// ----- + +cir.func @bad_operands_for_nowrap(%x: !cir.float, %y: !cir.float) { + // expected-error@+1 {{only operations on integer values may have nsw/nuw flags}} + %0 = cir.binop(add, %x, %y) nsw : !cir.float +} + +// ----- + +!u32i = !cir.int + +cir.func @bad_binop_for_nowrap(%x: !u32i, %y: !u32i) { + // expected-error@+1 {{The nsw/nuw flags are applicable to opcodes: 'add', 'sub' and 'mul'}} + %0 = cir.binop(div, %x, %y) nsw : !u32i +} + +// ----- + +!s32i = !cir.int + +module { + cir.global "private" external @batata : !s32i + cir.func @f35() { + // expected-error@+1 {{access to global not marked thread local}} + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } +} + +// ----- + +// expected-error@+1 {{invalid underlying type for long double}} +cir.func @bad_long_double(%arg0 : !cir.long_double) -> () { + cir.return +} + +// ----- + +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int +!void = !cir.void + +!Base = !cir.struct ()>>>}> +!Derived = !cir.struct ()>>>}>}> + +module { + cir.global "private" constant external @_ZTI4Base : !cir.ptr + cir.global "private" constant external @_ZTI7Derived : !cir.ptr + cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr + cir.func private @__cxa_bad_cast() + 
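+ // (Hedged reading of #cir.dyn_cast_info as used below, inferred from its
+ // operands rather than from documentation: it bundles the source RTTI
+ // global, the destination RTTI global, the runtime entry point
+ // @__dynamic_cast, the failure handler @__cxa_bad_cast, and a constant
+ // offset hint; the expected-errors check that both RTTI fields are
+ // verified to be RTTI pointers.)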
cir.func @test(%arg0 : !cir.ptr) { + // expected-error@+1 {{srcRtti must be an RTTI pointer}} + %0 = cir.dyn_cast(ptr, %arg0 : !cir.ptr, #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i>) -> !cir.ptr + } +} + +// ----- + +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int +!void = !cir.void + +!Base = !cir.struct ()>>>}> +!Derived = !cir.struct ()>>>}>}> + +module { + cir.global "private" constant external @_ZTI4Base : !cir.ptr + cir.global "private" constant external @_ZTI7Derived : !cir.ptr + cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr + cir.func private @__cxa_bad_cast() + cir.func @test(%arg0 : !cir.ptr) { + // expected-error@+1 {{destRtti must be an RTTI pointer}} + %0 = cir.dyn_cast(ptr, %arg0 : !cir.ptr, #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i>) -> !cir.ptr + } +} +// ----- + +// Type of the attribute must be a CIR floating point type + +// expected-error @below {{invalid kind of type specified}} +cir.global external @f = #cir.fp<0.5> : !cir.int + +// ----- + +// Value must be a floating point literal or integer literal + +// expected-error @below {{expected floating point literal}} +cir.global external @f = #cir.fp<"blabla"> : !cir.float + +// ----- + +// Integer value must fit in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000000> : !cir.float + +// ----- + +// Integer value must fit in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000007FC0000000> : !cir.double + +// ----- + +// Integer value must fit in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC0000007FC0000007FC000000> : !cir.long_double + +// ----- + +// Long double with `double` semantics should have a value that fits in a double.
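+// (Aside added in review: !cir.long_double is parameterized by an underlying
+// format because C's long double differs per target, e.g. the 80-bit x87
+// format on x86 versus plain IEEE double elsewhere; the 20-hex-digit literal
+// below is 80 bits wide, so it cannot fit the double-based variant and the
+// verifier rejects it.)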
+ +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double + +// ----- + +// Verify that no type is needed inside the attribute + +// expected-error @below {{expected '>'}} +cir.global external @f = #cir.fp<0x7FC00000 : !cir.float> : !cir.float + +// ----- + +// Verify literal must be hex or float + +// expected-error @below {{unexpected decimal integer literal for a floating point value}} +// expected-note @below {{add a trailing dot to make the literal a float}} +cir.global external @f = #cir.fp<42> : !cir.float diff --git a/clang/test/CIR/IR/invalid_xfail.cir b/clang/test/CIR/IR/invalid_xfail.cir new file mode 100644 index 000000000000..c29dbf075b6b --- /dev/null +++ b/clang/test/CIR/IR/invalid_xfail.cir @@ -0,0 +1,42 @@ +// Test attempts to build bogus CIR +// RUN: cir-opt %s -verify-diagnostics -split-input-file +// XFAIL: * + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.scope { + cir.while { // expected-error {{expected condition region to terminate with 'cir.condition'}} + cir.yield + } do { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return +} + +// ----- + +cir.func @invalid_cond_region_terminator(%arg0 : !cir.bool) -> !cir.void { + cir.do { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } while { + cir.yield + } + cir.return +} + +// ----- + +cir.func @invalidConditionTerminator (%arg0 : !cir.bool) -> !cir.void { + cir.for : cond { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } body { + cir.yield + } step { + cir.yield + } + cir.return +} diff --git a/clang/test/CIR/IR/libc-fabs.cir b/clang/test/CIR/IR/libc-fabs.cir new file mode 100644 index 000000000000..691849e0c3a5 --- /dev/null +++ b/clang/test/CIR/IR/libc-fabs.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!u32i = !cir.int +module { + cir.func @foo(%arg0: !cir.double) -> !cir.double { + %0 = cir.fabs %arg0 : !cir.double + cir.return %0 : !cir.double + } +} diff --git a/clang/test/CIR/IR/libc-memchr.cir b/clang/test/CIR/IR/libc-memchr.cir new file mode 100644 index 000000000000..014414322819 --- /dev/null +++ b/clang/test/CIR/IR/libc-memchr.cir @@ -0,0 +1,11 @@ +// RUN: cir-opt %s + +!voidptr = !cir.ptr +!s32i = !cir.int +!u64i = !cir.int +module { + cir.func @f(%src : !voidptr, %pattern : !s32i, %len : !u64i) -> !voidptr { + %ptr = cir.libc.memchr(%src, %pattern, %len) + cir.return %ptr : !voidptr + } +} diff --git a/clang/test/CIR/IR/libc-memcpy.cir b/clang/test/CIR/IR/libc-memcpy.cir new file mode 100644 index 000000000000..737f56d533e3 --- /dev/null +++ b/clang/test/CIR/IR/libc-memcpy.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!u32i = !cir.int +module { + cir.func @shouldParseLibcMemcpyOp(%arg0 : !cir.ptr, %arg1 : !u32i) { + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !u32i, !cir.ptr -> !cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir new file mode 100644 index 000000000000..7ce2c0ba21cb --- /dev/null +++ b/clang/test/CIR/IR/module.cir @@ -0,0 +1,12 @@ +// RUN: cir-opt %s -split-input-file -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should parse and print C source language attribute.
+module attributes {cir.lang = #cir.lang} { } +// CHECK: module attributes {cir.lang = #cir.lang} + +// ----- + +// Should parse and print C++ source language attribute. +module attributes {cir.lang = #cir.lang} { } +// CHECK: module attributes {cir.lang = #cir.lang} diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir new file mode 100644 index 000000000000..826ed571c3cb --- /dev/null +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s +!s32i = !cir.int + +module { + cir.func @arraysubscript(%arg0: !s32i) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr + cir.return + } +} + +// CHECK: cir.func @arraysubscript(%arg0: !s32i) { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/scope.cir b/clang/test/CIR/IR/scope.cir new file mode 100644 index 000000000000..0cc45c8e389b --- /dev/null +++ b/clang/test/CIR/IR/scope.cir @@ -0,0 +1,27 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +!u32i = !cir.int + +module { + // Should properly print/parse scope with implicit empty yield. + cir.func @implicit_yield() { + cir.scope { + } + // CHECK: cir.scope { + // CHECK: } + cir.return + } + + // Should properly print/parse scope with explicit yield. + cir.func @explicit_yield() { + %0 = cir.scope { + %1 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.yield %1 : !cir.ptr + } : !cir.ptr + // CHECK: %0 = cir.scope { + // [...] + // CHECK: cir.yield %1 : !cir.ptr + // CHECK: } : !cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/IR/stack-save-restore.cir b/clang/test/CIR/IR/stack-save-restore.cir new file mode 100644 index 000000000000..f6027258786d --- /dev/null +++ b/clang/test/CIR/IR/stack-save-restore.cir @@ -0,0 +1,23 @@ +// Test the CIR operations can parse and print correctly (roundtrip) + +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!u8i = !cir.int + +module { + cir.func @stack_save_restore() { + %0 = cir.stack_save : !cir.ptr + cir.stack_restore %0 : !cir.ptr + cir.return + } +} + +//CHECK: module { + +//CHECK-NEXT: cir.func @stack_save_restore() { +//CHECK-NEXT: %0 = cir.stack_save : !cir.ptr +//CHECK-NEXT: cir.stack_restore %0 : !cir.ptr +//CHECK-NEXT: cir.return +//CHECK-NEXT: } + +//CHECK-NEXT: } diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir new file mode 100644 index 000000000000..65a319538d1a --- /dev/null +++ b/clang/test/CIR/IR/struct.cir @@ -0,0 +1,41 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!u8i = !cir.int +!u16i = !cir.int +!s32i = !cir.int +!u32i = !cir.int + +!ty_2222 = !cir.struct x 5>}> +!ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> +!ty_22A22 = !cir.struct +!ty_22i22 = !cir.struct +!ty_22S22 = !cir.struct +!ty_22S122 = !cir.struct + +// Test recursive struct parsing/printing. 
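+// (Review note, an inference from the alias below: an identified struct may
+// reference itself through a pointer member, so the parser must bind the
+// recursive occurrence back to the enclosing definition instead of expanding
+// it; the CHECK-DAG line confirms the printer round-trips that
+// self-referential spelling.)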
+!ty_22Node22 = !cir.struct>} #cir.record.decl.ast> +// CHECK-DAG: !cir.struct>} #cir.record.decl.ast> + +module { + // Dummy function to use types and force them to be printed. + cir.func @useTypes(%arg0: !ty_22Node22) { + cir.return + } + + cir.func @structs() { + %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] + cir.return + } + +// CHECK: cir.func @structs() { +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] + + cir.func @shouldSuccessfullyParseConstStructAttrs() { + %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 + // CHECK: cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 + cir.return + } +} diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir new file mode 100644 index 000000000000..db63a2928862 --- /dev/null +++ b/clang/test/CIR/IR/switch.cir @@ -0,0 +1,36 @@ +// RUN: cir-opt %s | FileCheck %s +!s32i = !cir.int + +cir.func @s0() { + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ + case (default) { + cir.return + }, + case (equal, 3) { + cir.yield + }, + case (anyof, [6, 7, 8] : !s32i) { + cir.break + }, + case (equal, 5 : !s32i) { + cir.yield + } + ] + cir.return +} + +// CHECK: cir.switch (%0 : !s32i) [ +// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (anyof, [6, 7, 8] : !s32i) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 5) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: ] diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir new file mode 100644 index 000000000000..127d8ed8f2dc --- /dev/null +++ b/clang/test/CIR/IR/ternary.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s +!u32i = !cir.int + +module { + cir.func @blue(%arg0: !cir.bool) -> !u32i { + %0 = cir.ternary(%arg0, true { + %a = cir.const(#cir.int<0> : !u32i) : !u32i + cir.yield %a : !u32i + }, false { + %b = cir.const(#cir.int<1> : !u32i) : !u32i + cir.yield %b : !u32i + }) : (!cir.bool) -> !u32i + cir.return %0 : !u32i + } +} + +// CHECK: module { + +// CHECK: cir.func @blue(%arg0: !cir.bool) -> !u32i { +// CHECK: %0 = cir.ternary(%arg0, true { +// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.yield %1 : !u32i +// CHECK: }, false { +// CHECK: %1 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK: cir.yield %1 : !u32i +// CHECK: }) : (!cir.bool) -> !u32i +// CHECK: cir.return %0 : !u32i +// CHECK: } + +// CHECK: } diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir new file mode 100644 index 000000000000..30a516e422e0 --- /dev/null +++ b/clang/test/CIR/IR/try.cir @@ -0,0 +1,24 @@ +// Test that try/try_call constructs parse successfully +// RUN: cir-opt %s + +!s32i = !cir.int + +module { + cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %3 : !s32i + } + + cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { + %11 = cir.scope { + %10 = cir.scope { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + %1 = cir.load %0 : cir.ptr >, !cir.ptr + cir.yield %1 : !cir.ptr + } : !cir.ptr + cir.yield %10 : !cir.ptr + } : !cir.ptr + cir.return %11 : !cir.ptr + } +} \ No newline at
end of file diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir new file mode 100644 index 000000000000..6653cdbfbe2e --- /dev/null +++ b/clang/test/CIR/IR/types.cir @@ -0,0 +1,13 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!u32i = !cir.int + +module { + cir.func @arrays() { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + cir.return + } +} + +// CHECK: cir.func @arrays() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] diff --git a/clang/test/CIR/IR/unreachable.cir b/clang/test/CIR/IR/unreachable.cir new file mode 100644 index 000000000000..d057f47ee2b3 --- /dev/null +++ b/clang/test/CIR/IR/unreachable.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @test() { + cir.unreachable +} + +// CHECK: cir.func @test +// CHECK-NEXT: cir.unreachable diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir new file mode 100644 index 000000000000..f3792517eea4 --- /dev/null +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -0,0 +1,8 @@ +// RUN: cir-opt %s | FileCheck %s + +!u8i = !cir.int +module { + // Should parse VTable attribute. + cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !cir.struct x 1>}> + // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_anon_struct +} diff --git a/clang/test/CIR/IR/while.cir b/clang/test/CIR/IR/while.cir new file mode 100644 index 000000000000..85897af76800 --- /dev/null +++ b/clang/test/CIR/IR/while.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingParsing(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.yield + } + cir.return +} + +// CHECK: @testPrintingParsing +// CHECK: cir.while { +// CHECK: cir.condition(%arg0) +// CHECK: } do { +// CHECK: cir.yield +// CHECK: } diff --git a/clang/test/CIR/Inputs/skip-this-header.h b/clang/test/CIR/Inputs/skip-this-header.h new file mode 100644 index 000000000000..bf94a9cfeb94 --- /dev/null +++ b/clang/test/CIR/Inputs/skip-this-header.h @@ -0,0 +1,12 @@ +#pragma clang system_header + +class String { + char *storage{nullptr}; + long size; + long capacity; + +public: + String() : size{0} {} + String(int size) : size{size} {} + String(const char *s) {} +}; \ No newline at end of file diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h new file mode 100644 index 000000000000..1697e311bcb3 --- /dev/null +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -0,0 +1,1321 @@ +// This header provides reduced versions of common standard library containers +// and whatnots. It's a copy from +// clang/test/Analysis/Inputs/system-header-simulator-cxx.h with some additions +// for ClangIR use cases found along the way. + +// Like the compiler, the static analyzer treats some functions differently if +// they come from a system header -- for example, it is assumed that system +// functions do not arbitrarily free() their parameters, and that some bugs +// found in system headers cannot be fixed by the user and should be +// suppressed. 
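+//
+// As a quick illustration of the intended use (the includer below is
+// hypothetical, not part of this header): a test can include this file in
+// place of the real <vector> and still exercise the container API it
+// declares, e.g.
+//
+//   #include "std-cxx.h"
+//   std::vector<int> v;
+//   v.push_back(42); // resolves to the reduced std::vector declared below
+//
+// The pragma that follows is what marks everything in this file as
+// system-header code for both the compiler and the static analyzer.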
+#pragma clang system_header + +typedef unsigned char uint8_t; + +typedef __typeof__(sizeof(int)) size_t; +typedef __typeof__((char*)0-(char*)0) ptrdiff_t; +void *memmove(void *s1, const void *s2, size_t n); + +namespace std { + typedef size_t size_type; +#if __cplusplus >= 201103L + using nullptr_t = decltype(nullptr); +#endif +} + +namespace std { + struct input_iterator_tag { }; + struct output_iterator_tag { }; + struct forward_iterator_tag : public input_iterator_tag { }; + struct bidirectional_iterator_tag : public forward_iterator_tag { }; + struct random_access_iterator_tag : public bidirectional_iterator_tag { }; + + template struct iterator_traits { + typedef typename Iterator::difference_type difference_type; + typedef typename Iterator::value_type value_type; + typedef typename Iterator::pointer pointer; + typedef typename Iterator::reference reference; + typedef typename Iterator::iterator_category iterator_category; + }; +} + +template struct __vector_iterator { + typedef __vector_iterator iterator; + typedef __vector_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::random_access_iterator_tag iterator_category; + + __vector_iterator(const Ptr p = 0) : ptr(p) {} + __vector_iterator(const iterator &rhs): ptr(rhs.base()) {} + __vector_iterator& operator++() { ++ ptr; return *this; } + __vector_iterator operator++(int) { + auto tmp = *this; + ++ ptr; + return tmp; + } + __vector_iterator operator--() { -- ptr; return *this; } + __vector_iterator operator--(int) { + auto tmp = *this; -- ptr; + return tmp; + } + __vector_iterator operator+(difference_type n) { + return ptr + n; + } + friend __vector_iterator operator+( + difference_type n, + const __vector_iterator &iter) { + return n + iter.ptr; + } + __vector_iterator operator-(difference_type n) { + return ptr - n; + } + __vector_iterator operator+=(difference_type n) { + return ptr += n; + } + __vector_iterator operator-=(difference_type n) { + return ptr -= n; + } + + template + difference_type operator-(const __vector_iterator &rhs); + + Ref operator*() const { return *ptr; } + Ptr operator->() const { return ptr; } + + Ref operator[](difference_type n) { + return *(ptr+n); + } + + bool operator==(const iterator &rhs) const { return ptr == rhs.ptr; } + bool operator==(const const_iterator &rhs) const { return ptr == rhs.ptr; } + + bool operator!=(const iterator &rhs) const { return ptr != rhs.ptr; } + bool operator!=(const const_iterator &rhs) const { return ptr != rhs.ptr; } + + const Ptr& base() const { return ptr; } + +private: + Ptr ptr; +}; + +template struct __deque_iterator { + typedef __deque_iterator iterator; + typedef __deque_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::random_access_iterator_tag iterator_category; + + __deque_iterator(const Ptr p = 0) : ptr(p) {} + __deque_iterator(const iterator &rhs): ptr(rhs.base()) {} + __deque_iterator& operator++() { ++ ptr; return *this; } + __deque_iterator operator++(int) { + auto tmp = *this; + ++ ptr; + return tmp; + } + __deque_iterator operator--() { -- ptr; return *this; } + __deque_iterator operator--(int) { + auto tmp = *this; -- ptr; + return tmp; + } + __deque_iterator operator+(difference_type n) { + return ptr + n; + } + friend __deque_iterator operator+( + difference_type n, + const __deque_iterator &iter) { + return n + iter.ptr; + } + __deque_iterator 
operator-(difference_type n) { + return ptr - n; + } + __deque_iterator operator+=(difference_type n) { + return ptr += n; + } + __deque_iterator operator-=(difference_type n) { + return ptr -= n; + } + + Ref operator*() const { return *ptr; } + Ptr operator->() const { return ptr; } + + Ref operator[](difference_type n) { + return *(ptr+n); + } + + bool operator==(const iterator &rhs) const { return ptr == rhs.ptr; } + bool operator==(const const_iterator &rhs) const { return ptr == rhs.ptr; } + + bool operator!=(const iterator &rhs) const { return ptr != rhs.ptr; } + bool operator!=(const const_iterator &rhs) const { return ptr != rhs.ptr; } + + const Ptr& base() const { return ptr; } + +private: + Ptr ptr; +}; + +template struct __list_iterator { + typedef __list_iterator iterator; + typedef __list_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::bidirectional_iterator_tag iterator_category; + + __list_iterator(T* it = 0) : item(it) {} + __list_iterator(const iterator &rhs): item(rhs.item) {} + __list_iterator& operator++() { item = item->next; return *this; } + __list_iterator operator++(int) { + auto tmp = *this; + item = item->next; + return tmp; + } + __list_iterator operator--() { item = item->prev; return *this; } + __list_iterator operator--(int) { + auto tmp = *this; + item = item->prev; + return tmp; + } + + Ref operator*() const { return item->data; } + Ptr operator->() const { return &item->data; } + + bool operator==(const iterator &rhs) const { return item == rhs->item; } + bool operator==(const const_iterator &rhs) const { return item == rhs->item; } + + bool operator!=(const iterator &rhs) const { return item != rhs->item; } + bool operator!=(const const_iterator &rhs) const { return item != rhs->item; } + + const T* &base() const { return item; } + + template + friend struct __list_iterator; + +private: + T* item; +}; + +template struct __fwdl_iterator { + typedef __fwdl_iterator iterator; + typedef __fwdl_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::forward_iterator_tag iterator_category; + + __fwdl_iterator(T* it = 0) : item(it) {} + __fwdl_iterator(const iterator &rhs): item(rhs.item) {} + __fwdl_iterator& operator++() { item = item->next; return *this; } + __fwdl_iterator operator++(int) { + auto tmp = *this; + item = item->next; + return tmp; + } + Ref operator*() const { return item->data; } + Ptr operator->() const { return &item->data; } + + bool operator==(const iterator &rhs) const { return item == rhs->item; } + bool operator==(const const_iterator &rhs) const { return item == rhs->item; } + + bool operator!=(const iterator &rhs) const { return item != rhs->item; } + bool operator!=(const const_iterator &rhs) const { return item != rhs->item; } + + const T* &base() const { return item; } + + template + friend struct __fwdl_iterator; + +private: + T* item; +}; + +namespace std { + template + struct pair { + T1 first; + T2 second; + + pair() : first(), second() {} + pair(const T1 &a, const T2 &b) : first(a), second(b) {} + + template + pair(const pair &other) : first(other.first), + second(other.second) {} + }; + + typedef __typeof__(sizeof(int)) size_t; + + template class initializer_list; + + template< class T > struct remove_reference {typedef T type;}; + template< class T > struct remove_reference {typedef T type;}; + template< class T > struct 
remove_reference {typedef T type;}; + + template + typename remove_reference::type&& move(T&& a) { + typedef typename remove_reference::type&& RvalRef; + return static_cast(a); + } + + template + void swap(T &a, T &b) { + T c(std::move(a)); + a = std::move(b); + b = std::move(c); + } + + template + class vector { + T *_start; + T *_finish; + T *_end_of_storage; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __vector_iterator iterator; + typedef __vector_iterator const_iterator; + + vector() : _start(0), _finish(0), _end_of_storage(0) {} + template + vector(InputIterator first, InputIterator last); + vector(const vector &other); + vector(vector &&other); + explicit vector(size_type count); + ~vector(); + + size_t size() const { + return size_t(_finish - _start); + } + void resize(size_type __sz); + + vector& operator=(const vector &other); + vector& operator=(vector &&other); + vector& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + T &operator[](size_t n) { + return _start[n]; + } + + const T &operator[](size_t n) const { + return _start[n]; + } + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *(end() - 1); } + const T& back() const { return *(end() - 1); } + }; + + template + class list { + struct __item { + T data; + __item *prev, *next; + } *_start, *_finish; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __list_iterator<__item, T *, T &> iterator; + typedef __list_iterator<__item, const T *, const T &> const_iterator; + + list() : _start(0), _finish(0) {} + template + list(InputIterator first, InputIterator last); + list(const list &other); + list(list &&other); + ~list(); + + list& operator=(const list &other); + list& operator=(list &&other); + list& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... 
args); + void pop_front(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *--end(); } + const T& back() const { return *--end(); } + }; + + template + class deque { + T *_start; + T *_finish; + T *_end_of_storage; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __deque_iterator iterator; + typedef __deque_iterator const_iterator; + + deque() : _start(0), _finish(0), _end_of_storage(0) {} + template + deque(InputIterator first, InputIterator last); + deque(const deque &other); + deque(deque &&other); + ~deque(); + + size_t size() const { + return size_t(_finish - _start); + } + + deque& operator=(const deque &other); + deque& operator=(deque &&other); + deque& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... args); + void pop_front(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... 
args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + T &operator[](size_t n) { + return _start[n]; + } + + const T &operator[](size_t n) const { + return _start[n]; + } + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *(end() - 1); } + const T& back() const { return *(end() - 1); } + }; + + template + class forward_list { + struct __item { + T data; + __item *next; + } *_start; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __fwdl_iterator<__item, T *, T &> iterator; + typedef __fwdl_iterator<__item, const T *, const T &> const_iterator; + + forward_list() : _start(0) {} + template + forward_list(InputIterator first, InputIterator last); + forward_list(const forward_list &other); + forward_list(forward_list &&other); + ~forward_list(); + + forward_list& operator=(const forward_list &other); + forward_list& operator=(forward_list &&other); + forward_list& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... args); + void pop_front(); + + iterator insert_after(const_iterator position, const value_type &val); + iterator insert_after(const_iterator position, value_type &&val); + iterator insert_after(const_iterator position, size_type n, + const value_type &val); + template + iterator insert_after(const_iterator position, InputIterator first, + InputIterator last); + iterator insert_after(const_iterator position, + initializer_list il); + + template + iterator emplace_after(const_iterator position, Args&&... 
args); + + iterator erase_after(const_iterator position); + iterator erase_after(const_iterator first, const_iterator last); + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(); } + const_iterator end() const { return const_iterator(); } + const_iterator cend() const { return const_iterator(); } + + T& front() { return *begin(); } + const T& front() const { return *begin(); } + }; + + template + class basic_string { + class Allocator {}; + + public: + basic_string() : basic_string(Allocator()) {} + explicit basic_string(const Allocator &alloc); + basic_string(size_type count, CharT ch, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other, + size_type pos, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other, + size_type pos, size_type count, + const Allocator &alloc = Allocator()); + basic_string(const CharT *s, size_type count, + const Allocator &alloc = Allocator()); + basic_string(const CharT *s, + const Allocator &alloc = Allocator()); + template + basic_string(InputIt first, InputIt last, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other); + basic_string(const basic_string &other, + const Allocator &alloc); + basic_string(basic_string &&other); + basic_string(basic_string &&other, + const Allocator &alloc); + basic_string(std::initializer_list ilist, + const Allocator &alloc = Allocator()); + template + basic_string(const T &t, size_type pos, size_type n, + const Allocator &alloc = Allocator()); + // basic_string(std::nullptr_t) = delete; + + ~basic_string(); + void clear(); + + basic_string &operator=(const basic_string &str); + basic_string &operator+=(const basic_string &str); + + const CharT *c_str() const; + const CharT *data() const; + CharT *data(); + + const char *begin() const; + const char *end() const; + + basic_string &append(size_type count, CharT ch); + basic_string &assign(size_type count, CharT ch); + basic_string &erase(size_type index, size_type count); + basic_string &insert(size_type index, size_type count, CharT ch); + basic_string &replace(size_type pos, size_type count, const basic_string &str); + void pop_back(); + void push_back(CharT ch); + void reserve(size_type new_cap); + void resize(size_type count); + void shrink_to_fit(); + void swap(basic_string &other); + }; + + typedef basic_string string; + typedef basic_string wstring; +#if __cplusplus >= 201103L + typedef basic_string u16string; + typedef basic_string u32string; +#endif + + class exception { + public: + exception() throw(); + virtual ~exception() throw(); + virtual const char *what() const throw() { + return 0; + } + }; + + class bad_alloc : public exception { + public: + bad_alloc() throw(); + bad_alloc(const bad_alloc&) throw(); + bad_alloc& operator=(const bad_alloc&) throw(); + virtual const char* what() const throw() { + return 0; + } + }; + + struct nothrow_t {}; + extern const nothrow_t nothrow; + + enum class align_val_t : size_t {}; + + // libc++'s implementation + template + class initializer_list + { + const _E* __begin_; + size_t __size_; + + initializer_list(const _E* __b, size_t __s) + : __begin_(__b), + __size_(__s) + {} + + public: + typedef _E value_type; + typedef const _E& reference; + typedef const _E& const_reference; + typedef size_t size_type; + + typedef const _E* iterator; + typedef const _E* const_iterator; + + 
initializer_list() : __begin_(0), __size_(0) {} + + size_t size() const {return __size_;} + const _E* begin() const {return __begin_;} + const _E* end() const {return __begin_ + __size_;} + }; + + template struct enable_if {}; + template struct enable_if {typedef _Tp type;}; + + template + struct integral_constant + { + static const _Tp value = __v; + typedef _Tp value_type; + typedef integral_constant type; + + operator value_type() const {return value;} + + value_type operator ()() const {return value;} + }; + + template + const _Tp integral_constant<_Tp, __v>::value; + + template + struct is_trivially_assignable + : integral_constant + { + }; + + typedef integral_constant true_type; + typedef integral_constant false_type; + + template struct is_const : public false_type {}; + template struct is_const<_Tp const> : public true_type {}; + + template struct is_reference : public false_type {}; + template struct is_reference<_Tp&> : public true_type {}; + + template struct is_same : public false_type {}; + template struct is_same<_Tp, _Tp> : public true_type {}; + + template ::value || is_reference<_Tp>::value > + struct __add_const {typedef _Tp type;}; + + template + struct __add_const<_Tp, false> {typedef const _Tp type;}; + + template struct add_const {typedef typename __add_const<_Tp>::type type;}; + + template struct remove_const {typedef _Tp type;}; + template struct remove_const {typedef _Tp type;}; + + template struct add_lvalue_reference {typedef _Tp& type;}; + + template struct is_trivially_copy_assignable + : public is_trivially_assignable::type, + typename add_lvalue_reference::type>::type> {}; + + template + OutputIter __copy(InputIter II, InputIter IE, OutputIter OI) { + while (II != IE) + *OI++ = *II++; + + return OI; + } + + template + inline + typename enable_if + < + is_same::type, _Up>::value && + is_trivially_copy_assignable<_Up>::value, + _Up* + >::type __copy(_Tp* __first, _Tp* __last, _Up* __result) { + size_t __n = __last - __first; + + if (__n > 0) + memmove(__result, __first, __n * sizeof(_Up)); + + return __result + __n; + } + + template + OutputIter copy(InputIter II, InputIter IE, OutputIter OI) { + return __copy(II, IE, OI); + } + + template + inline + _OutputIterator + __copy_backward(_BidirectionalIterator __first, _BidirectionalIterator __last, + _OutputIterator __result) + { + while (__first != __last) + *--__result = *--__last; + return __result; + } + + template + inline + typename enable_if + < + is_same::type, _Up>::value && + is_trivially_copy_assignable<_Up>::value, + _Up* + >::type __copy_backward(_Tp* __first, _Tp* __last, _Up* __result) { + size_t __n = __last - __first; + + if (__n > 0) + { + __result -= __n; + memmove(__result, __first, __n * sizeof(_Up)); + } + return __result; + } + + template + OutputIter copy_backward(InputIter II, InputIter IE, OutputIter OI) { + return __copy_backward(II, IE, OI); + } +} + +template +void __advance(BidirectionalIterator& it, Distance n, + std::bidirectional_iterator_tag) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 2 +{ + if (n >= 0) while(n-- > 0) ++it; else while (n++<0) --it; +} +#else + ; +#endif + +template +void __advance(RandomAccessIterator& it, Distance n, + std::random_access_iterator_tag) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 2 +{ + it += n; +} +#else + ; +#endif + +namespace std { + +template +void advance(InputIterator& it, Distance n) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 1 +{ + __advance(it, n, typename 
InputIterator::iterator_category()); +} +#else + ; +#endif + +template +BidirectionalIterator +prev(BidirectionalIterator it, + typename iterator_traits::difference_type n = + 1) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 0 +{ + advance(it, -n); + return it; +} +#else + ; +#endif + +template +ForwardIterator +next(ForwardIterator it, + typename iterator_traits::difference_type n = + 1) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 0 +{ + advance(it, n); + return it; +} +#else + ; +#endif + + template + InputIt find(InputIt first, InputIt last, const T& value); + + template + ForwardIt find(ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + const T& value); + + template + InputIt find_if (InputIt first, InputIt last, UnaryPredicate p); + + template + ForwardIt find_if (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + UnaryPredicate p); + + template + InputIt find_if_not (InputIt first, InputIt last, UnaryPredicate q); + + template + ForwardIt find_if_not (ExecutionPolicy&& policy, ForwardIt first, + ForwardIt last, UnaryPredicate q); + + template + InputIt find_first_of(InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last); + + template + ForwardIt1 find_first_of (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + InputIt find_first_of (InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last, + BinaryPredicate p ); + + template + ForwardIt1 find_first_of (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, + BinaryPredicate p ); + + template + InputIt find_end(InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last); + + template + ForwardIt1 find_end (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + InputIt find_end (InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last, + BinaryPredicate p ); + + template + ForwardIt1 find_end (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, + BinaryPredicate p ); + + template + ForwardIt lower_bound (ForwardIt first, ForwardIt last, const T& value); + + template + ForwardIt lower_bound (ForwardIt first, ForwardIt last, const T& value, + Compare comp); + + template + ForwardIt upper_bound (ForwardIt first, ForwardIt last, const T& value); + + template + ForwardIt upper_bound (ForwardIt first, ForwardIt last, const T& value, + Compare comp); + + template + ForwardIt1 search (ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + ForwardIt1 search (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + ForwardIt1 search (ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, BinaryPredicate p); + + template + ForwardIt1 search (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, BinaryPredicate p); + + template + ForwardIt search (ForwardIt first, ForwardIt last, const Searcher& searcher); + + template + ForwardIt search_n (ForwardIt first, ForwardIt last, Size count, + const T& value); + + template + ForwardIt search_n (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + Size count, const T& value); + + template + ForwardIt search_n (ForwardIt first, ForwardIt last, Size count, + const T& value, BinaryPredicate 
p); + + template + ForwardIt search_n (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + Size count, const T& value, BinaryPredicate p); + + template + OutputIterator copy(InputIterator first, InputIterator last, + OutputIterator result); + +} + +#if __cplusplus >= 201103L +namespace std { +template // TODO: Implement the stub for deleter. +class unique_ptr { +public: + unique_ptr() noexcept {} + unique_ptr(T *) noexcept {} + unique_ptr(const unique_ptr &) noexcept = delete; + unique_ptr(unique_ptr &&) noexcept; + + T *get() const noexcept; + T *release() noexcept; + void reset(T *p = nullptr) noexcept; + void swap(unique_ptr &p) noexcept; + + typename std::add_lvalue_reference::type operator*() const; + T *operator->() const noexcept; + operator bool() const noexcept; + unique_ptr &operator=(unique_ptr &&p) noexcept; + unique_ptr &operator=(nullptr_t) noexcept; +}; + +// TODO :: Once the deleter parameter is added update with additional template parameter. +template +void swap(unique_ptr &x, unique_ptr &y) noexcept { + x.swap(y); +} + +template +bool operator==(const unique_ptr &x, const unique_ptr &y); + +template +bool operator!=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator<(const unique_ptr &x, const unique_ptr &y); + +template +bool operator>(const unique_ptr &x, const unique_ptr &y); + +template +bool operator<=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator>=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator==(const unique_ptr &x, nullptr_t y); + +template +bool operator!=(const unique_ptr &x, nullptr_t y); + +template +bool operator<(const unique_ptr &x, nullptr_t y); + +template +bool operator>(const unique_ptr &x, nullptr_t y); + +template +bool operator<=(const unique_ptr &x, nullptr_t y); + +template +bool operator>=(const unique_ptr &x, nullptr_t y); + +template +bool operator==(nullptr_t x, const unique_ptr &y); + +template +bool operator!=(nullptr_t x, const unique_ptr &y); + +template +bool operator>(nullptr_t x, const unique_ptr &y); + +template +bool operator<(nullptr_t x, const unique_ptr &y); + +template +bool operator>=(nullptr_t x, const unique_ptr &y); + +template +bool operator<=(nullptr_t x, const unique_ptr &y); + +template +unique_ptr make_unique(Args &&...args); + +#if __cplusplus >= 202002L + +template +unique_ptr make_unique_for_overwrite(); + +#endif + +} // namespace std +#endif + +namespace std { +template +class basic_ostream; + +using ostream = basic_ostream; + +extern std::ostream cout; + +ostream &operator<<(ostream &, const string &); + +#if __cplusplus >= 202002L +template +ostream &operator<<(ostream &, const std::unique_ptr &); +#endif +} // namespace std + +#ifdef TEST_INLINABLE_ALLOCATORS +namespace std { + void *malloc(size_t); + void free(void *); +} +void* operator new(std::size_t size, const std::nothrow_t&) throw() { return std::malloc(size); } +void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return std::malloc(size); } +void operator delete(void* ptr, const std::nothrow_t&) throw() { std::free(ptr); } +void operator delete[](void* ptr, const std::nothrow_t&) throw() { std::free(ptr); } +#else +// C++20 standard draft 17.6.1, from "Header synopsis", but with throw() +// instead of noexcept: + +void *operator new(std::size_t size); +void *operator new(std::size_t size, std::align_val_t alignment); +void *operator new(std::size_t size, const std::nothrow_t &) throw(); +void *operator new(std::size_t size, std::align_val_t alignment, + const 
std::nothrow_t &) throw(); +void operator delete(void *ptr) throw(); +void operator delete(void *ptr, std::size_t size) throw(); +void operator delete(void *ptr, std::align_val_t alignment) throw(); +void operator delete(void *ptr, std::size_t size, std::align_val_t alignment) throw(); +void operator delete(void *ptr, const std::nothrow_t &)throw(); +void operator delete(void *ptr, std::align_val_t alignment, + const std::nothrow_t &)throw(); +void *operator new[](std::size_t size); +void *operator new[](std::size_t size, std::align_val_t alignment); +void *operator new[](std::size_t size, const std::nothrow_t &) throw(); +void *operator new[](std::size_t size, std::align_val_t alignment, + const std::nothrow_t &) throw(); +void operator delete[](void *ptr) throw(); +void operator delete[](void *ptr, std::size_t size) throw(); +void operator delete[](void *ptr, std::align_val_t alignment) throw(); +void operator delete[](void *ptr, std::size_t size, std::align_val_t alignment) throw(); +void operator delete[](void *ptr, const std::nothrow_t &) throw(); +void operator delete[](void *ptr, std::align_val_t alignment, + const std::nothrow_t &) throw(); +#endif + +void* operator new (std::size_t size, void* ptr) throw() { return ptr; }; +void* operator new[] (std::size_t size, void* ptr) throw() { return ptr; }; +void operator delete (void* ptr, void*) throw() {}; +void operator delete[] (void* ptr, void*) throw() {}; + +namespace __cxxabiv1 { +extern "C" { +extern char *__cxa_demangle(const char *mangled_name, + char *output_buffer, + size_t *length, + int *status); +}} +namespace abi = __cxxabiv1; + +namespace std { + template + bool is_sorted(ForwardIt first, ForwardIt last); + + template + void nth_element(RandomIt first, RandomIt nth, RandomIt last); + + template + void partial_sort(RandomIt first, RandomIt middle, RandomIt last); + + template + void sort (RandomIt first, RandomIt last); + + template + void stable_sort(RandomIt first, RandomIt last); + + template + BidirIt partition(BidirIt first, BidirIt last, UnaryPredicate p); + + template + BidirIt stable_partition(BidirIt first, BidirIt last, UnaryPredicate p); +} + +namespace std { + +template< class T = void > +struct less; + +template< class T > +struct allocator; + +template< class Key > +struct hash; + +template< + class Key, + class Compare = std::less, + class Alloc = std::allocator +> class set { + public: + set(initializer_list __list) {} + + class iterator { + public: + iterator(Key *key): ptr(key) {} + iterator& operator++() { ++ptr; return *this; } + bool operator!=(const iterator &other) const { return ptr != other.ptr; } + const Key &operator*() const { return *ptr; } + private: + Key *ptr; + }; + + public: + Key *val; + iterator begin() const { return iterator(val); } + iterator end() const { return iterator(val + 1); } +}; + +template< + class Key, + class Hash = std::hash, + class Compare = std::less, + class Alloc = std::allocator +> class unordered_set { + public: + unordered_set(initializer_list __list) {} + + class iterator { + public: + iterator(Key *key): ptr(key) {} + iterator& operator++() { ++ptr; return *this; } + bool operator!=(const iterator &other) const { return ptr != other.ptr; } + const Key &operator*() const { return *ptr; } + private: + Key *ptr; + }; + + public: + Key *val; + iterator begin() const { return iterator(val); } + iterator end() const { return iterator(val + 1); } +}; + +namespace execution { +class sequenced_policy {}; +} + +template struct equal_to {}; + +template > +class 
default_searcher { +public: + default_searcher (ForwardIt pat_first, + ForwardIt pat_last, + BinaryPredicate pred = BinaryPredicate()); + template + std::pair + operator()( ForwardIt2 first, ForwardIt2 last ) const; +}; + +template class packaged_task; +template class packaged_task { + // TODO: Add some actual implementation. +}; + +#if __has_feature(cxx_decltype) +typedef decltype(nullptr) nullptr_t; + +template +class shared_ptr +{ +public: + constexpr shared_ptr(nullptr_t); + explicit shared_ptr(_Tp* __p); + + shared_ptr(shared_ptr&& __r) { } + + ~shared_ptr(); + + // shared_ptr& operator=(shared_ptr&& __r); + shared_ptr<_Tp>& operator=(const shared_ptr& __r) noexcept + { + return *this; + } + + template + shared_ptr<_Tp>& operator=(const shared_ptr<_Yp>& __r) noexcept + { + return *this; + } + + shared_ptr<_Tp>& operator=(shared_ptr&& __r) noexcept + { + return *this; + } + + template + shared_ptr<_Tp>& operator=(shared_ptr<_Yp>&& __r) + { + return *this; + } +}; + +template +inline +constexpr +shared_ptr<_Tp>::shared_ptr(nullptr_t) { +} + +#endif // __has_feature(cxx_decltype) + +template + shared_ptr make_shared(Args &&...args) { + return shared_ptr(new T(static_cast(args)...)); + } + +template struct array { + T arr[N]; + typedef T value_type; + typedef value_type* iterator; + constexpr iterator begin() { return iterator(arr); } + constexpr iterator end() { return iterator(arr + N); } +}; + +} // namespace std diff --git a/clang/test/CIR/Lowering/ThroughMLIR/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir new file mode 100644 index 000000000000..1a7e15531fd8 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/array.cir @@ -0,0 +1,17 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + cir.return + } +} + +// CHECK: module { +// CHECK: func @foo() { +// CHECK: = memref.alloca() {alignment = 16 : i64} : memref> +// CHECK: return +// CHECK: } +// CHECK: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir new file mode 100644 index 000000000000..790d50d5510d --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir @@ -0,0 +1,68 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , !cir.float + %7 = cir.load %1 : cir.ptr , !cir.float + %8 = cir.binop(mul, %6, %7) : !cir.float + cir.store %8, %2 : !cir.float, cir.ptr + %9 = cir.load %2 : cir.ptr , !cir.float + %10 = cir.load %1 : cir.ptr , !cir.float + %11 = cir.binop(div, %9, %10) : !cir.float + cir.store %11, %2 : !cir.float, cir.ptr + %12 = cir.load %2 : cir.ptr , !cir.float + %13 = cir.load %1 : cir.ptr , !cir.float + %14 = cir.binop(add, %12, %13) : !cir.float + cir.store %14, %2 : !cir.float, cir.ptr + %15 = cir.load %2 : cir.ptr , !cir.float + %16 = cir.load %1 : cir.ptr 
, !cir.float + %17 = cir.binop(sub, %15, %16) : !cir.float + cir.store %17, %2 : !cir.float, cir.ptr + %18 = cir.load %3 : cir.ptr , !cir.double + %19 = cir.load %4 : cir.ptr , !cir.double + %20 = cir.binop(add, %18, %19) : !cir.double + cir.store %20, %5 : !cir.double, cir.ptr + %21 = cir.load %3 : cir.ptr , !cir.double + %22 = cir.load %4 : cir.ptr , !cir.double + %23 = cir.binop(sub, %21, %22) : !cir.double + cir.store %23, %5 : !cir.double, cir.ptr + %24 = cir.load %3 : cir.ptr , !cir.double + %25 = cir.load %4 : cir.ptr , !cir.double + %26 = cir.binop(mul, %24, %25) : !cir.double + cir.store %26, %5 : !cir.double, cir.ptr + %27 = cir.load %3 : cir.ptr , !cir.double + %28 = cir.load %4 : cir.ptr , !cir.double + %29 = cir.binop(div, %27, %28) : !cir.double + cir.store %29, %5 : !cir.double, cir.ptr + cir.return + } +} + +// MLIR: = memref.alloca() {alignment = 4 : i64} : memref +// MLIR: = memref.alloca() {alignment = 8 : i64} : memref +// MLIR: = arith.mulf {{.*}} : f32 +// MLIR: = arith.divf +// MLIR: = arith.addf +// MLIR: = arith.subf +// MLIR: = arith.addf {{.*}} : f64 +// MLIR: = arith.subf +// MLIR: = arith.mulf +// MLIR: = arith.divf + +// LLVM: = alloca float, i64 +// LLVM: = alloca double, i64 +// LLVM: = fmul float +// LLVM: = fdiv float +// LLVM: = fadd float +// LLVM: = fsub float +// LLVM: = fadd double +// LLVM: = fsub double +// LLVM: = fmul double +// LLVM: = fdiv double diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir new file mode 100644 index 000000000000..51c89f564efa --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -0,0 +1,78 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int + +module { + cir.func @foo() { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.load %1 : cir.ptr , !u32i + %7 = cir.binop(mul, %5, %6) : !u32i + cir.store %7, %2 : !u32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !u32i + %9 = cir.load %1 : cir.ptr , !u32i + %10 = cir.binop(div, %8, %9) : !u32i + cir.store %10, %2 : !u32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !u32i + %12 = cir.load %1 : cir.ptr , !u32i + %13 = cir.binop(rem, %11, %12) : !u32i + cir.store %13, %2 : !u32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !u32i + %15 = cir.load %1 : cir.ptr , !u32i + %16 = cir.binop(add, %14, %15) : !u32i + cir.store %16, %2 : !u32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !u32i + %18 = cir.load %1 : cir.ptr , !u32i + %19 = cir.binop(sub, %17, %18) : !u32i + cir.store %19, %2 : !u32i, cir.ptr + // should move to cir.shift, which only accepts + // CIR types. 
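+    // A sketch of what that dedicated op could look like, with hypothetical
+    // operand names; the exact cir.shift syntax is assumed here for
+    // illustration and is not exercised by this test:
+    //   %sr = cir.shift(right, %a : !u32i, %b : !u32i) -> !u32i
+    //   %sl = cir.shift(left, %a : !u32i, %b : !u32i) -> !u32i
+    // Until then, the original binop form is kept commented out below.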
+    // %20 = cir.load %2 : cir.ptr <!u32i>, !u32i
+    // %21 = cir.load %1 : cir.ptr <!u32i>, !u32i
+    // %22 = cir.binop(shr, %20, %21) : !u32i
+    // cir.store %22, %2 : !u32i, cir.ptr <!u32i>
+    // %23 = cir.load %2 : cir.ptr <!u32i>, !u32i
+    // %24 = cir.load %1 : cir.ptr <!u32i>, !u32i
+    // %25 = cir.binop(shl, %23, %24) : !u32i
+    // cir.store %25, %2 : !u32i, cir.ptr <!u32i>
+    %26 = cir.load %2 : cir.ptr <!u32i>, !u32i
+    %27 = cir.load %1 : cir.ptr <!u32i>, !u32i
+    %28 = cir.binop(and, %26, %27) : !u32i
+    cir.store %28, %2 : !u32i, cir.ptr <!u32i>
+    %29 = cir.load %2 : cir.ptr <!u32i>, !u32i
+    %30 = cir.load %1 : cir.ptr <!u32i>, !u32i
+    %31 = cir.binop(xor, %29, %30) : !u32i
+    cir.store %31, %2 : !u32i, cir.ptr <!u32i>
+    %32 = cir.load %2 : cir.ptr <!u32i>, !u32i
+    %33 = cir.load %1 : cir.ptr <!u32i>, !u32i
+    %34 = cir.binop(or, %32, %33) : !u32i
+    cir.store %34, %2 : !u32i, cir.ptr <!u32i>
+    cir.return
+  }
+}
+
+// MLIR: = arith.muli
+// MLIR: = arith.divui
+// MLIR: = arith.remui
+// MLIR: = arith.addi
+// MLIR: = arith.subi
+// arith.shrui
+// arith.shli
+// MLIR: = arith.andi
+// MLIR: = arith.xori
+// MLIR: = arith.ori
+
+// LLVM: = mul i32
+// LLVM: = udiv i32
+// LLVM: = urem i32
+// LLVM: = add i32
+// LLVM: = sub i32
+// = lshr i32
+// = shl i32
+// LLVM: = and i32
+// LLVM: = xor i32
+// LLVM: = or i32
diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir
new file mode 100644
index 000000000000..2163f063d9e9
--- /dev/null
+++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir
@@ -0,0 +1,23 @@
+// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM
+
+#false = #cir.bool<false> : !cir.bool
+#true = #cir.bool<true> : !cir.bool
+module {
+  cir.func @foo() {
+    %0 = cir.alloca !cir.bool, cir.ptr <!cir.bool>, ["a", init] {alignment = 1 : i64}
+    %1 = cir.const(#true) : !cir.bool
+    cir.store %1, %0 : !cir.bool, cir.ptr <!cir.bool>
+    cir.return
+  }
+}
+
+// MLIR: func @foo() {
+// MLIR: [[Value:%[a-z0-9]+]] = memref.alloca() {alignment = 1 : i64} : memref<i8>
+// MLIR: = arith.constant 1 : i8
+// MLIR: memref.store {{.*}}, [[Value]][] : memref<i8>
+// return
+
+// LLVM: = alloca i8, i64
+// LLVM: store i8 1, ptr %5
+// LLVM: ret
diff --git a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir
new file mode 100644
index 000000000000..83c980838890
--- /dev/null
+++ b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir
@@ -0,0 +1,37 @@
+// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR
+// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM
+
+!s32i = !cir.int<s, 32>
+cir.func @foo(%arg0: !cir.bool) -> !s32i {
+  cir.brcond %arg0 ^bb1, ^bb2
+  ^bb1:
+    %0 = cir.const(#cir.int<1>: !s32i) : !s32i
+    cir.return %0 : !s32i
+  ^bb2:
+    %1 = cir.const(#cir.int<0>: !s32i) : !s32i
+    cir.return %1 : !s32i
+}
+
+// MLIR: module {
+// MLIR-NEXT: func.func @foo(%arg0: i8) -> i32
+// MLIR-NEXT: %0 = arith.trunci %arg0 : i8 to i1
+// MLIR-NEXT: cf.cond_br %0, ^bb1, ^bb2
+// MLIR-NEXT: ^bb1: // pred: ^bb0
+// MLIR-NEXT: %c1_i32 = arith.constant 1 : i32
+// MLIR-NEXT: return %c1_i32 : i32
+// MLIR-NEXT: ^bb2: // pred: ^bb0
+// MLIR-NEXT: %c0_i32 = arith.constant 0 : i32
+// MLIR-NEXT: return %c0_i32 : i32
+// MLIR-NEXT: }
+// MLIR-NEXT: }
+
+// LLVM: define i32 @foo(i8 %0)
+// LLVM-NEXT: %2 = trunc i8 %0 to i1
+// LLVM-NEXT: br i1 %2, label %3, label %4
+// LLVM-EMPTY:
+// LLVM-NEXT: 3: ; preds = %1
+// LLVM-NEXT: ret i32 1
+// LLVM-EMPTY:
+// LLVM-NEXT: 4: ; preds = %1
+//
LLVM-NEXT: ret i32 0 +// LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir new file mode 100644 index 000000000000..99eea2260c26 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir @@ -0,0 +1,76 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool + %8 = cir.load %0 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool + %11 = cir.load %0 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool + %14 = cir.load %0 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool + %17 = cir.load %0 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool + %20 = cir.load %0 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool + %23 = cir.load %2 : cir.ptr , !cir.float + %24 = cir.load %3 : cir.ptr , !cir.float + %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool + %26 = cir.load %2 : cir.ptr , !cir.float + %27 = cir.load %3 : cir.ptr , !cir.float + %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool + %29 = cir.load %2 : cir.ptr , !cir.float + %30 = cir.load %3 : cir.ptr , !cir.float + %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool + %32 = cir.load %2 : cir.ptr , !cir.float + %33 = cir.load %3 : cir.ptr , !cir.float + %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool + %35 = cir.load %2 : cir.ptr , !cir.float + %36 = cir.load %3 : cir.ptr , !cir.float + %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool + %38 = cir.load %2 : cir.ptr , !cir.float + %39 = cir.load %3 : cir.ptr , !cir.float + %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool + cir.return + } +} + +// MLIR: = arith.cmpi ugt +// MLIR: = arith.cmpi eq, +// MLIR: = arith.cmpi ult, +// MLIR: = arith.cmpi uge, +// MLIR: = arith.cmpi ne, +// MLIR: = arith.cmpi ule, +// MLIR: = arith.cmpf ugt +// MLIR: = arith.cmpf ueq, +// MLIR: = arith.cmpf ult, +// MLIR: = arith.cmpf uge, +// MLIR: = arith.cmpf une, +// MLIR: = arith.cmpf ule, + +// LLVM: icmp ugt i32 +// LLVM: icmp eq i32 +// LLVM: icmp ult i32 +// LLVM: icmp uge i32 +// LLVM: icmp ne i32 +// LLVM: icmp ule i32 +// LLVM: fcmp ugt float +// LLVM: fcmp ueq float +// LLVM: fcmp ult float +// LLVM: fcmp uge float +// LLVM: fcmp une float +// LLVM: fcmp ule float diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir new file mode 100644 index 000000000000..0530d3cb19e8 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double + %3 = 
cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %4 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %5 = cir.cos %1 : !cir.float + %6 = cir.cos %2 : !cir.double + %7 = cir.cos %3 : !cir.long_double + %8 = cir.cos %4 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 1.000000e+00 : f80 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.cos %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.cos %[[C1]] : f64 +// CHECK-NEXT: %{{.+}} = math.cos %[[C2]] : f80 +// CHECK-NEXT: %{{.+}} = math.cos %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir new file mode 100644 index 000000000000..cd82f88d9e46 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +!s32i = !cir.int +module { + cir.func @dot(%arg0: !cir.ptr) -> !s32i { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.ptr, cir.ptr >, ["y", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + %3 = cir.load %0 : cir.ptr >, !cir.ptr + cir.store %3, %2 : !cir.ptr, cir.ptr > + %4 = cir.const(#cir.int<0> : !s32i) : !s32i + %5 = cir.load %1 : cir.ptr , !s32i + cir.return %5 : !s32i + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @dot(%arg0: memref) -> i32 { +// CHECK-NEXT: %alloca = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: %alloca_0 = memref.alloca() {alignment = 4 : i64} : memref +// CHECK-NEXT: %alloca_1 = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: memref.store %arg0, %alloca[] : memref> +// CHECK-NEXT: %0 = memref.load %alloca[] : memref> +// CHECK-NEXT: memref.store %0, %alloca_1[] : memref> +// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32 +// CHECK-NEXT: %1 = memref.load %alloca_0[] : memref +// CHECK-NEXT: return %1 : i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/global.cir b/clang/test/CIR/Lowering/ThroughMLIR/global.cir new file mode 100644 index 000000000000..3b1ed83239c6 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/global.cir @@ -0,0 +1,55 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int +module { + cir.global external @i = #cir.int<2> : !u32i + cir.global external @f = #cir.fp<3.000000e+00> : !cir.float + cir.global external @b = #cir.bool : !cir.bool + cir.global "private" external @a : !cir.array + cir.global external @aa = #cir.zero : !cir.array x 256> + + cir.func @get_global_int_value() -> !u32i { + %0 = cir.get_global @i : cir.ptr + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + cir.func @get_global_float_value() -> !cir.float { + %0 = cir.get_global @f : cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.float + cir.return %1 : !cir.float + } + cir.func @get_global_bool_value() -> !cir.bool { + %0 = cir.get_global @b : cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.bool + cir.return %1 : !cir.bool + } + cir.func 
@get_global_array_pointer() -> !cir.ptr> { + %0 = cir.get_global @a : cir.ptr > + cir.return %0 : !cir.ptr> + } + cir.func @get_global_multi_array_pointer() -> !cir.ptr x 256>> { + %0 = cir.get_global @aa : cir.ptr x 256>> + cir.return %0 : !cir.ptr x 256>> + } +} + +// MLIR: memref.global "public" @i : memref = dense<2> +// MLIR: memref.global "public" @f : memref = dense<3.000000e+00> +// MLIR: memref.global "public" @b : memref = dense<1> +// MLIR: memref.global "private" @a : memref<100xi32> +// MLIR: memref.global "public" @aa : memref<256x256xi32> = dense<0> +// MLIR: memref.get_global @i : memref +// MLIR: memref.get_global @f : memref +// MLIR: memref.get_global @b : memref +// MLIR: memref.get_global @a : memref<100xi32> +// MLIR: memref.get_global @aa : memref<256x256xi32> + +// LLVM: @i = global i32 2 +// LLVM: @f = global float 3.000000e+00 +// LLVM: @b = global i8 1 +// LLVM: @a = private global [100 x i32] undef +// LLVM: @aa = global [256 x [256 x i32]] zeroinitializer +// LLVM: load i32, ptr @i +// LLVM: load float, ptr @f +// LLVM: load i8, ptr @b diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir new file mode 100644 index 000000000000..9cc9cc45b65f --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -0,0 +1,35 @@ +// RUN: cir-opt %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + cir.br ^bb2 + ^bb1: // no predecessors + %2 = cir.load %0 : cir.ptr , !u32i + %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %4 = cir.binop(add, %2, %3) : !u32i + cir.store %4, %0 : !u32i, cir.ptr + cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %7 = cir.binop(add, %5, %6) : !u32i + cir.store %7, %0 : !u32i, cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo +// MLIR: cf.br ^bb1 +// MLIR: ^bb1: +// MLIR: return + +// LLVM: br label %[[Value:[0-9]+]] +// LLVM-EMPTY: +// LLVM-NEXT: [[Value]]: ; preds = +// LLVM: ret void diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir new file mode 100644 index 000000000000..ad338992806b --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -0,0 +1,34 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int +module { + cir.func @foo() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + cir.return %2 : !u32i + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo() -> i32 { +// MLIR-NEXT: [[alloca:%[a-z0-9]+]] = memref.alloca() {alignment = 4 : i64} : memref +// MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 +// MLIR-NEXT: memref.store %c1_i32, [[alloca]][] : memref +// MLIR-NEXT: [[load:%[a-z0-9]+]] = memref.load [[alloca]][] : memref +// MLIR-NEXT: return [[load]] : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define i32 @foo() +// LLVM-NEXT: %1 = alloca i32, i64 
+// LLVM-NEXT: %2 = insertvalue { ptr, ptr, i64 } undef, ptr %1, 0 +// LLVM-NEXT: %3 = insertvalue { ptr, ptr, i64 } %2, ptr %1, 1 +// LLVM-NEXT: %4 = insertvalue { ptr, ptr, i64 } %3, i64 0, 2 +// LLVM-NEXT: %5 = extractvalue { ptr, ptr, i64 } %4, 1 +// LLVM-NEXT: store i32 1, ptr %5, align 4 +// LLVM-NEXT: %6 = extractvalue { ptr, ptr, i64 } %4, 1 +// LLVM-NEXT: %7 = load i32, ptr %6, align 4 +// LLVM-NEXT: ret i32 %7 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir new file mode 100644 index 000000000000..4ebd7749a72f --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -0,0 +1,48 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s -input-file=%t.mlir -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir +// RUN: FileCheck %s -input-file=%t.mlir -check-prefix=LLVM + +!u32i = !cir.int +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + } + cir.return + } + +// MLIR: func.func @foo() +// MLIR-NEXT: memref.alloca_scope +// MLIR-NEXT: %alloca = memref.alloca() {alignment = 4 : i64} : memref +// MLIR-NEXT: %c4_i32 = arith.constant 4 : i32 +// MLIR-NEXT: memref.store %c4_i32, %alloca[] : memref +// MLIR-NEXT: } +// MLIR-NEXT: return + +// LLVM: llvm.func @foo() { +// LLVM: %0 = llvm.intr.stacksave : !llvm.ptr +// LLVM: llvm.br ^bb1 +// LLVM: ^bb1: +// [...] +// LLVM: llvm.intr.stackrestore %0 : !llvm.ptr +// LLVM: llvm.br ^bb2 +// LLVM: ^bb2: +// LLVM: llvm.return +// LLVM: } + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } + // MLIR: func.func @empty_scope() + // MLIR-NEXT: return + // MLIR-NEXT: } + + // LLVM: llvm.func @empty_scope() + // LLVM: llvm.return +} diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir new file mode 100644 index 000000000000..df6e6a09a5ff --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -0,0 +1,44 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir --canonicalize | FileCheck %s --check-prefix=MLIR-CANONICALIZE +// RUN: cir-opt %s -cir-to-mlir --canonicalize -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int + +module { +cir.func @_Z1xi(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// MLIR: %1 = arith.cmpi ugt, %0, %c0_i32 : i32 +// MLIR-NEXT: %2 = arith.extui %1 : i1 to i8 +// MLIR-NEXT: %3 = arith.trunci %2 : i8 to i1 +// MLIR-NEXT: %4 = scf.if %3 -> (i32) { +// MLIR-NEXT: %c3_i32 = arith.constant 3 : i32 +// MLIR-NEXT: scf.yield %c3_i32 : i32 +// MLIR-NEXT: } else { +// MLIR-NEXT: %c5_i32 = arith.constant 5 : i32 +// MLIR-NEXT: scf.yield %c5_i32 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: 
memref.store %4, %alloca_0[] : memref + +// MLIR-CANONICALIZE: %[[CMP:.*]] = arith.cmpi ugt +// MLIR-CANONICALIZE: arith.select %[[CMP]] + +// LLVM: %[[CMP:.*]] = icmp ugt +// LLVM: select i1 %[[CMP]] diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir new file mode 100644 index 000000000000..45368fb48f40 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr + + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(dec, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr + cir.return + } +} + +// MLIR: = arith.constant 1 +// MLIR: = arith.addi +// MLIR: = arith.constant 1 +// MLIR: = arith.subi + +// LLVM: = add i32 %[[#]], 1 +// LLVM: = sub i32 %[[#]], 1 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir new file mode 100644 index 000000000000..013bc65e95e3 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr + + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(plus, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(minus, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr + cir.return + } +} + +// MLIR: %[[#INPUT_PLUS:]] = memref.load +// MLIR: memref.store %[[#INPUT_PLUS]] +// MLIR: %[[#INPUT_MINUS:]] = memref.load +// MLIR: %[[ZERO:[a-z0-9_]+]] = arith.constant 0 +// MLIR: arith.subi %[[ZERO]], %[[#INPUT_MINUS]] + +// LLVM: = sub i32 0, %[[#]] diff --git a/clang/test/CIR/Lowering/alloca.cir b/clang/test/CIR/Lowering/alloca.cir new file mode 100644 index 000000000000..4c512a762068 --- /dev/null +++ b/clang/test/CIR/Lowering/alloca.cir @@ -0,0 +1,17 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #fn_attr} { +// MLIR-NEXT: %0 = llvm.alloca %arg0 x i32 {alignment = 16 : i64} : (i32) -> !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c new file mode 100644 index 000000000000..8e452bf06878 --- /dev/null +++ 
b/clang/test/CIR/Lowering/array-init.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// LLVM: define void @zeroInit +// LLVM: [[RES:%.*]] = alloca [3 x i32], i64 1 +// LLVM: store [3 x i32] zeroinitializer, ptr [[RES]] +void zeroInit() { + int a[3] = {0, 0, 0}; +} + diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir new file mode 100644 index 000000000000..56f4fd3a6331 --- /dev/null +++ b/clang/test/CIR/Lowering/array.cir @@ -0,0 +1,35 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +!ty_22S22 = !cir.struct + +module { + cir.func @foo() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + cir.return + } + +// MLIR: module { +// MLIR-NEXT: func @foo() +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: %1 = alloca [10 x i32], i64 1, align 16 +// LLVM-NEXT: ret void + + cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array + // CHECK: llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<2 x struct<"struct.S", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %5 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i32)> + // CHECK: %6 = llvm.insertvalue %5, %4[1] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: llvm.return %6 : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: } +} diff --git a/clang/test/CIR/Lowering/asm.cir b/clang/test/CIR/Lowering/asm.cir new file mode 100644 index 000000000000..3aa753fbb91f --- /dev/null +++ b/clang/test/CIR/Lowering/asm.cir @@ -0,0 +1,55 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s + +!s32i = !cir.int + +module { + + cir.func @simple(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"" "~{dirflag},~{fpsr},~{flags}"}) -> !s32i + // CHECK: llvm.inline_asm asm_dialect = att operand_attrs = [] "", "~{dirflag},~{fpsr},~{flags}" : () -> i32 + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "xyz", "~{dirflag},~{fpsr},~{flags}" : () -> i32 + + cir.asm(x86_att, + out = [%0 : !cir.ptr (maybe_memory)], + in = [], + in_out = [%0 : !cir.ptr (maybe_memory)], + {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}, {elementtype = i32}] "", "=*m,*m,~{dirflag},~{fpsr},~{flags}" %1, %1 : (!llvm.ptr, !llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [], + in = [%0 : !cir.ptr (maybe_memory)], + in_out = [], + {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> 
!s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [%0 : !cir.ptr (maybe_memory)], + in = [], + in_out = [], + {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "=*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}" : () -> i32 + cir.return + } + +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir new file mode 100644 index 000000000000..cb08205231e5 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -0,0 +1,68 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , !cir.float + %7 = cir.load %1 : cir.ptr , !cir.float + %8 = cir.binop(mul, %6, %7) : !cir.float + cir.store %8, %2 : !cir.float, cir.ptr + %9 = cir.load %2 : cir.ptr , !cir.float + %10 = cir.load %1 : cir.ptr , !cir.float + %11 = cir.binop(div, %9, %10) : !cir.float + cir.store %11, %2 : !cir.float, cir.ptr + %12 = cir.load %2 : cir.ptr , !cir.float + %13 = cir.load %1 : cir.ptr , !cir.float + %14 = cir.binop(add, %12, %13) : !cir.float + cir.store %14, %2 : !cir.float, cir.ptr + %15 = cir.load %2 : cir.ptr , !cir.float + %16 = cir.load %1 : cir.ptr , !cir.float + %17 = cir.binop(sub, %15, %16) : !cir.float + cir.store %17, %2 : !cir.float, cir.ptr + %18 = cir.load %3 : cir.ptr , !cir.double + %19 = cir.load %4 : cir.ptr , !cir.double + %20 = cir.binop(add, %18, %19) : !cir.double + cir.store %20, %5 : !cir.double, cir.ptr + %21 = cir.load %3 : cir.ptr , !cir.double + %22 = cir.load %4 : cir.ptr , !cir.double + %23 = cir.binop(sub, %21, %22) : !cir.double + cir.store %23, %5 : !cir.double, cir.ptr + %24 = cir.load %3 : cir.ptr , !cir.double + %25 = cir.load %4 : cir.ptr , !cir.double + %26 = cir.binop(mul, %24, %25) : !cir.double + cir.store %26, %5 : !cir.double, cir.ptr + %27 = cir.load %3 : cir.ptr , !cir.double + %28 = cir.load %4 : cir.ptr , !cir.double + %29 = cir.binop(div, %27, %28) : !cir.double + cir.store %29, %5 : !cir.double, cir.ptr + cir.return + } +} + +// MLIR: = llvm.alloca {{.*}} f32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.alloca {{.*}} f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.fmul {{.*}} : f32 +// MLIR: = llvm.fdiv +// MLIR: = llvm.fadd +// MLIR: = llvm.fsub +// MLIR: = llvm.fadd {{.*}} : f64 +// MLIR: = llvm.fsub +// MLIR: = llvm.fmul +// MLIR: = llvm.fdiv + +// LLVM: = alloca float, i64 +// LLVM: = alloca double, i64 +// LLVM: = fmul float +// LLVM: = fdiv float +// LLVM: = fadd float +// LLVM: = fsub float 
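+// The same four operations at f64 (double precision):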
+// LLVM: = fadd double +// LLVM: = fsub double +// LLVM: = fmul double +// LLVM: = fdiv double diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir new file mode 100644 index 000000000000..855cd8cfbe92 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -0,0 +1,64 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !s32i) : !s32i cir.store %3, %0 : !s32i, cir.ptr + %4 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %4, %1 : !s32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.binop(mul, %5, %6) : !s32i + // CHECK: = llvm.mul + cir.store %7, %2 : !s32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.binop(div, %8, %9) : !s32i + // CHECK: = llvm.sdiv + cir.store %10, %2 : !s32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.binop(rem, %11, %12) : !s32i + // CHECK: = llvm.srem + cir.store %13, %2 : !s32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.binop(add, %14, %15) : !s32i + // CHECK: = llvm.add + cir.store %16, %2 : !s32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.binop(sub, %17, %18) : !s32i + // CHECK: = llvm.sub + cir.store %19, %2 : !s32i, cir.ptr + %20 = cir.load %2 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.shift(right, %20 : !s32i, %21 : !s32i) -> !s32i + // CHECK: = llvm.ashr + cir.store %22, %2 : !s32i, cir.ptr + %23 = cir.load %2 : cir.ptr , !s32i + %24 = cir.load %1 : cir.ptr , !s32i + %25 = cir.shift(left, %23 : !s32i, %24 : !s32i) -> !s32i + // CHECK: = llvm.shl + cir.store %25, %2 : !s32i, cir.ptr + %26 = cir.load %2 : cir.ptr , !s32i + %27 = cir.load %1 : cir.ptr , !s32i + %28 = cir.binop(and, %26, %27) : !s32i + // CHECK: = llvm.and + cir.store %28, %2 : !s32i, cir.ptr + %29 = cir.load %2 : cir.ptr , !s32i + %30 = cir.load %1 : cir.ptr , !s32i + %31 = cir.binop(xor, %29, %30) : !s32i + // CHECK: = llvm.xor + cir.store %31, %2 : !s32i, cir.ptr + %32 = cir.load %2 : cir.ptr , !s32i + %33 = cir.load %1 : cir.ptr , !s32i + %34 = cir.binop(or, %32, %33) : !s32i + // CHECK: = llvm.or + cir.store %34, %2 : !s32i, cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir new file mode 100644 index 000000000000..29076c52f51f --- /dev/null +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -0,0 +1,76 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int + +module { + cir.func @foo() { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.load %1 : cir.ptr , !u32i + %7 = 
cir.binop(mul, %5, %6) : !u32i + cir.store %7, %2 : !u32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !u32i + %9 = cir.load %1 : cir.ptr , !u32i + %10 = cir.binop(div, %8, %9) : !u32i + cir.store %10, %2 : !u32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !u32i + %12 = cir.load %1 : cir.ptr , !u32i + %13 = cir.binop(rem, %11, %12) : !u32i + cir.store %13, %2 : !u32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !u32i + %15 = cir.load %1 : cir.ptr , !u32i + %16 = cir.binop(add, %14, %15) : !u32i + cir.store %16, %2 : !u32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !u32i + %18 = cir.load %1 : cir.ptr , !u32i + %19 = cir.binop(sub, %17, %18) : !u32i + cir.store %19, %2 : !u32i, cir.ptr + %20 = cir.load %2 : cir.ptr , !u32i + %21 = cir.load %1 : cir.ptr , !u32i + %22 = cir.shift(right, %20 : !u32i, %21 : !u32i) -> !u32i + cir.store %22, %2 : !u32i, cir.ptr + %23 = cir.load %2 : cir.ptr , !u32i + %24 = cir.load %1 : cir.ptr , !u32i + %25 = cir.shift(left, %23 : !u32i, %24 : !u32i) -> !u32i + cir.store %25, %2 : !u32i, cir.ptr + %26 = cir.load %2 : cir.ptr , !u32i + %27 = cir.load %1 : cir.ptr , !u32i + %28 = cir.binop(and, %26, %27) : !u32i + cir.store %28, %2 : !u32i, cir.ptr + %29 = cir.load %2 : cir.ptr , !u32i + %30 = cir.load %1 : cir.ptr , !u32i + %31 = cir.binop(xor, %29, %30) : !u32i + cir.store %31, %2 : !u32i, cir.ptr + %32 = cir.load %2 : cir.ptr , !u32i + %33 = cir.load %1 : cir.ptr , !u32i + %34 = cir.binop(or, %32, %33) : !u32i + cir.store %34, %2 : !u32i, cir.ptr + cir.return + } +} + +// MLIR: = llvm.mul +// MLIR: = llvm.udiv +// MLIR: = llvm.urem +// MLIR: = llvm.add +// MLIR: = llvm.sub +// MLIR: = llvm.lshr +// MLIR: = llvm.shl +// MLIR: = llvm.and +// MLIR: = llvm.xor +// MLIR: = llvm.or + +// LLVM: = mul i32 +// LLVM: = udiv i32 +// LLVM: = urem i32 +// LLVM: = add i32 +// LLVM: = sub i32 +// LLVM: = lshr i32 +// LLVM: = shl i32 +// LLVM: = and i32 +// LLVM: = xor i32 +// LLVM: = or i32 diff --git a/clang/test/CIR/Lowering/bit.cir b/clang/test/CIR/Lowering/bit.cir new file mode 100644 index 000000000000..425248c66821 --- /dev/null +++ b/clang/test/CIR/Lowering/bit.cir @@ -0,0 +1,206 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +cir.func @clrsb_s32(%arg : !s32i) { + %0 = cir.bit.clrsb(%arg : !s32i) : !s32i + cir.return +} + +// CHECK: llvm.func @clrsb_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %1 = llvm.icmp "slt" %arg0, %0 : i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(-1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.xor %arg0, %2 : i32 +// CHECK-NEXT: %4 = llvm.select %1, %3, %arg0 : i1, i32 +// CHECK-NEXT: %5 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %6 = llvm.call_intrinsic "llvm.ctlz.i32"(%4, %5) : (i32, i1) -> i32 +// CHECK-NEXT: %7 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %8 = llvm.sub %6, %7 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clrsb_s64(%arg : !s64i) { + %0 = cir.bit.clrsb(%arg : !s64i) : !s32i + cir.return +} + +// CHECK: llvm.func @clrsb_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(0 : i64) : i64 +// CHECK-NEXT: %1 = llvm.icmp "slt" %arg0, %0 : i64 +// CHECK-NEXT: %2 = llvm.mlir.constant(-1 : i64) : i64 +// CHECK-NEXT: %3 = llvm.xor %arg0, %2 : i64 +// CHECK-NEXT: %4 = llvm.select %1, %3, %arg0 : i1, i64 +// CHECK-NEXT: %5 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %6 = llvm.call_intrinsic "llvm.ctlz.i64"(%4, %5) : (i64, i1) -> i64 +// CHECK-NEXT: %7 = 
llvm.trunc %6 : i64 to i32 +// CHECK-NEXT: %8 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %9 = llvm.sub %7, %8 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u16(%arg : !u16i) { + %0 = cir.bit.clz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i16"(%arg0, %0) : (i16, i1) -> i16 +// CHECK-NEXT: %2 = llvm.zext %1 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u32(%arg : !u32i) { + %0 = cir.bit.clz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u64(%arg : !u64i) { + %0 = cir.bit.clz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u16(%arg : !u16i) { + %0 = cir.bit.ctz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i16"(%arg0, %0) : (i16, i1) -> i16 +// CHECK-NEXT: %2 = llvm.zext %1 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u32(%arg : !u32i) { + %0 = cir.bit.ctz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u64(%arg : !u64i) { + %0 = cir.bit.ctz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ffs_s32(%arg : !s32i) { + %0 = cir.bit.ffs(%arg : !s32i) : !s32i + cir.return +} + +// CHECK: llvm.func @ffs_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.add %1, %2 : i32 +// CHECK-NEXT: %4 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %5 = llvm.icmp "eq" %arg0, %4 : i32 +// CHECK-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %7 = llvm.select %5, %6, %3 : i1, i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ffs_s64(%arg : !s64i) { + %0 = cir.bit.ffs(%arg : !s64i) : !s32i + cir.return +} + +// CHECK: llvm.func @ffs_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %4 = llvm.add %2, %3 : i32 +// CHECK-NEXT: %5 = llvm.mlir.constant(0 : i64) : i64 +// CHECK-NEXT: %6 = llvm.icmp "eq" %arg0, %5 : i64 +// CHECK-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %8 = llvm.select %6, 
%7, %4 : i1, i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @parity_s32(%arg : !u32i) { + %0 = cir.bit.parity(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @parity_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i32"(%arg0) : (i32) -> i32 +// CHECK-NEXT: %1 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %2 = llvm.and %0, %1 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @parity_s64(%arg : !u64i) { + %0 = cir.bit.parity(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @parity_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i64"(%arg0) : (i64) -> i64 +// CHECK-NEXT: %1 = llvm.trunc %0 : i64 to i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.and %1, %2 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u16(%arg : !u16i) { + %0 = cir.bit.popcount(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i16"(%arg0) : (i16) -> i16 +// CHECK-NEXT: %1 = llvm.zext %0 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u32(%arg : !u32i) { + %0 = cir.bit.popcount(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i32"(%arg0) : (i32) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u64(%arg : !u64i) { + %0 = cir.bit.popcount(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i64"(%arg0) : (i64) -> i64 +// CHECK-NEXT: %1 = llvm.trunc %0 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/bitfieils.c b/clang/test/CIR/Lowering/bitfieils.c new file mode 100644 index 000000000000..ec289bf1048b --- /dev/null +++ b/clang/test/CIR/Lowering/bitfieils.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +typedef struct { + int a : 4; +} B; + +// LLVM: define void @set_signed +// LLVM: [[TMP0:%.*]] = load ptr +// LLVM: [[TMP1:%.*]] = load i8, ptr [[TMP0]] +// LLVM: [[TMP2:%.*]] = and i8 [[TMP1]], -16 +// LLVM: [[TMP3:%.*]] = or i8 [[TMP2]], 14 +// LLVM: store i8 [[TMP3]], ptr [[TMP0]] +void set_signed(B* b) { + b->a = -2; +} + +// LLVM: define i32 @get_signed +// LLVM: [[TMP0:%.*]] = alloca i32 +// LLVM: [[TMP1:%.*]] = load ptr +// LLVM: [[TMP2:%.*]] = load i8, ptr [[TMP1]] +// LLVM: [[TMP3:%.*]] = shl i8 [[TMP2]], 4 +// LLVM: [[TMP4:%.*]] = ashr i8 [[TMP3]], 4 +// LLVM: [[TMP5:%.*]] = sext i8 [[TMP4]] to i32 +// LLVM: store i32 [[TMP5]], ptr [[TMP0]] +// LLVM: [[TMP6:%.*]] = load i32, ptr [[TMP0]] +// LLVM: ret i32 [[TMP6]] +int get_signed(B* b) { + return b->a; +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/bitint.cir b/clang/test/CIR/Lowering/bitint.cir new file mode 100644 index 000000000000..f89278b5faf7 --- /dev/null +++ b/clang/test/CIR/Lowering/bitint.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int + +module { + cir.func @ParamPassing(%arg0: !cir.int, %arg1: !cir.int) -> !cir.int { + %0 = cir.cast(integral, %arg0 : !cir.int), !s32i + %1 = cir.cast(integral, %arg1 : !cir.int), !s32i + %2 = cir.binop(add, %0, %1) : !s32i 
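+    // The i15/i31 arguments were widened to i32 above; the sum is truncated
+    // back to the 2-bit result type below (cf. the sext/trunc in the checks).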
+    %3 = cir.cast(integral, %2 : !s32i), !cir.int<s, 2>
+    cir.return %3 : !cir.int<s, 2>
+  }
+}
+
+// MLIR: llvm.func @ParamPassing(%arg0: i15, %arg1: i31) -> i2
+// MLIR-NEXT: %0 = llvm.sext %arg0 : i15 to i32
+// MLIR-NEXT: %1 = llvm.sext %arg1 : i31 to i32
+// MLIR-NEXT: %2 = llvm.add %0, %1 : i32
+// MLIR-NEXT: %3 = llvm.trunc %2 : i32 to i2
+// MLIR-NEXT: llvm.return %3 : i2
+// MLIR-NEXT: }
+
+// LLVM: define i2 @ParamPassing(i15 %0, i31 %1) !dbg !3 {
+// LLVM-NEXT: %3 = sext i15 %0 to i32, !dbg !6
+// LLVM-NEXT: %4 = sext i31 %1 to i32, !dbg !7
+// LLVM-NEXT: %5 = add i32 %3, %4, !dbg !8
+// LLVM-NEXT: %6 = trunc i32 %5 to i2, !dbg !9
+// LLVM-NEXT: ret i2 %6, !dbg !10
+// LLVM-NEXT: }
diff --git a/clang/test/CIR/Lowering/bool-to-int.cir b/clang/test/CIR/Lowering/bool-to-int.cir
new file mode 100644
index 000000000000..d7e2e45686cc
--- /dev/null
+++ b/clang/test/CIR/Lowering/bool-to-int.cir
@@ -0,0 +1,21 @@
+// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+#false = #cir.bool<false> : !cir.bool
+#true = #cir.bool<true> : !cir.bool
+
+module {
+  cir.func @foo(%arg0: !s32i, %arg1: !s32i) -> !s32i {
+    %1 = cir.const(#true) : !cir.bool
+    %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i
+    cir.return %2 : !s32i
+  }
+  cir.func @bar(%arg0: !s32i, %arg1: !s32i) -> !s32i {
+    %1 = cir.const(#false) : !cir.bool
+    %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i
+    cir.return %2 : !s32i
+  }
+}
+
+// CHECK: ret i32 1
+// CHECK: ret i32 0
diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir
new file mode 100644
index 000000000000..34175667ec39
--- /dev/null
+++ b/clang/test/CIR/Lowering/bool.cir
@@ -0,0 +1,29 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM
+
+#false = #cir.bool<false> : !cir.bool
+#true = #cir.bool<true> : !cir.bool
+
+module {
+  cir.global external @g_bl = #false
+// MLIR: llvm.mlir.global external @g_bl(false) {addr_space = 0 : i32} : i8
+// LLVM: @g_bl = global i8 0
+
+  cir.func @foo() {
+    %1 = cir.const(#true) : !cir.bool
+    %0 = cir.alloca !cir.bool, cir.ptr <!cir.bool>, ["a", init] {alignment = 1 : i64}
+    cir.store %1, %0 : !cir.bool, cir.ptr <!cir.bool>
+    cir.return
+  }
+// MLIR: llvm.func @foo()
+// MLIR-DAG: = llvm.mlir.constant(1 : i8) : i8
+// MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64
+// MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr
+// MLIR-DAG: llvm.store %0, %2 : i8, !llvm.ptr
+// MLIR-NEXT: llvm.return
+
+// LLVM: define void @foo()
+// LLVM-NEXT: %1 = alloca i8, i64 1, align 1
+// LLVM-NEXT: store i8 1, ptr %1, align 1
+// LLVM-NEXT: ret void
+}
diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir
new file mode 100644
index 000000000000..90e143913d50
--- /dev/null
+++ b/clang/test/CIR/Lowering/branch.cir
@@ -0,0 +1,37 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM
+
+!s32i = !cir.int<s, 32>
+cir.func @foo(%arg0: !cir.bool) -> !s32i {
+  cir.brcond %arg0 ^bb1, ^bb2
+  ^bb1:
+    %0 = cir.const(#cir.int<1>: !s32i) : !s32i
+    cir.return %0 : !s32i
+  ^bb2:
+    %1 = cir.const(#cir.int<0>: !s32i) : !s32i
+    cir.return %1 : !s32i
+}
+
+// MLIR: module {
+// MLIR-NEXT: llvm.func @foo(%arg0: i8) -> i32
+// MLIR-NEXT: %0 = llvm.trunc %arg0 : i8 to i1
+// MLIR-NEXT: llvm.cond_br %0, ^bb1, ^bb2
+// MLIR-NEXT: ^bb1: // pred: ^bb0
+// MLIR-NEXT: %1 = llvm.mlir.constant(1 : i32) : i32
+// MLIR-NEXT:
llvm.return %1 : i32 +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %2 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define i32 @foo(i8 %0) +// LLVM-NEXT: %2 = trunc i8 %0 to i1 +// LLVM-NEXT: br i1 %2, label %3, label %4 +// LLVM-EMPTY: +// LLVM-NEXT: 3: ; preds = %1 +// LLVM-NEXT: ret i32 1 +// LLVM-EMPTY: +// LLVM-NEXT: 4: ; preds = %1 +// LLVM-NEXT: ret i32 0 +// LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/bswap.cir b/clang/test/CIR/Lowering/bswap.cir new file mode 100644 index 000000000000..7733b4de1dae --- /dev/null +++ b/clang/test/CIR/Lowering/bswap.cir @@ -0,0 +1,19 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int + +cir.func @test(%arg0: !u32i) -> !u32i { + %0 = cir.bswap(%arg0 : !u32i) : !u32i + cir.return %0 : !u32i +} + +// MLIR: llvm.func @test(%arg0: i32) -> i32 +// MLIR-NEXT: %0 = llvm.intr.bswap(%arg0) : (i32) -> i32 +// MLIR-NEXT: llvm.return %0 : i32 +// MLIR-NEXT: } + +// LLVM: define i32 @test(i32 %0) +// LLVM-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %0) +// LLVM-NEXT: ret i32 %2 +// LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir new file mode 100644 index 000000000000..2c40bb88e523 --- /dev/null +++ b/clang/test/CIR/Lowering/call.cir @@ -0,0 +1,39 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @a() { + cir.return + } + cir.func @d() { + cir.call @a() : () -> () + cir.return + } + +// MLIR: llvm.func @a() +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: llvm.func @d() +// MLIR-NEXT: llvm.call @a() : () -> () +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } + +// LLVM: define void @a() +// LLVM-NEXT: ret void +// LLVM-NEXT: } +// LLVM: define void @d() +// LLVM-NEXT: call void @a() +// LLVM-NEXT: ret void +// LLVM-NEXT: } + + // check operands and results type lowering + cir.func @callee(!cir.ptr) -> !cir.ptr attributes {sym_visibility = "private"} + // MLIR: llvm.func @callee(!llvm.ptr) -> !llvm.ptr + cir.func @caller(%arg0: !cir.ptr) -> !cir.ptr { + // MLIR: llvm.func @caller(%arg0: !llvm.ptr) -> !llvm.ptr + %0 = cir.call @callee(%arg0) : (!cir.ptr) -> !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.call @callee(%arg0) : (!llvm.ptr) -> !llvm.ptr + cir.return %0 : !cir.ptr + } + +} // end module diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir new file mode 100644 index 000000000000..60ad48e4a644 --- /dev/null +++ b/clang/test/CIR/Lowering/cast.cir @@ -0,0 +1,99 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir + +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int +!u64i = !cir.int + +module { + cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: !cir.float, %arg3: !cir.double) -> !s32i { + // CHECK: llvm.func @cStyleCasts + %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} + %20 = cir.alloca !s16i, cir.ptr , ["x4", init] {alignment = 2 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %3 = cir.alloca !s8i, cir.ptr , ["a", init] {alignment = 1 : i64} + %4 = cir.alloca !s16i, cir.ptr , ["b", init] {alignment = 2 : i64} + %5 = cir.alloca !s64i, cir.ptr , ["c", init] 
{alignment = 8 : i64} + %6 = cir.alloca !s64i, cir.ptr , ["d", init] {alignment = 8 : i64} + %7 = cir.alloca !cir.array, cir.ptr >, ["arr"] {alignment = 4 : i64} + %8 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !u32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + + // Integer casts. + %9 = cir.load %0 : cir.ptr , !u32i + %10 = cir.cast(integral, %9 : !u32i), !s8i + // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 + cir.store %10, %3 : !s8i, cir.ptr + %11 = cir.load %1 : cir.ptr , !s32i + %12 = cir.cast(integral, %11 : !s32i), !s16i + // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 + cir.store %12, %4 : !s16i, cir.ptr + %13 = cir.load %0 : cir.ptr , !u32i + %14 = cir.cast(integral, %13 : !u32i), !s64i + // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 + cir.store %14, %5 : !s64i, cir.ptr + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cast(integral, %15 : !s32i), !s64i + // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 + %30 = cir.cast(integral, %arg1 : !s32i), !u32i + // Should not produce a cast. + %32 = cir.cast(integral, %arg0 : !u32i), !s32i + // Should not produce a cast. + %21 = cir.load %20 : cir.ptr , !s16i + %22 = cir.cast(integral, %21 : !s16i), !u64i + // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 + %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool + // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 + // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 + + // Pointer casts. + cir.store %16, %6 : !s64i, cir.ptr + %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + cir.store %17, %8 : !cir.ptr, cir.ptr > + // CHECK: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, i32 + %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr + // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr + %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i + // CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 + %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool + + // Floating point casts. 
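+    // Roughly the C casts below (hypothetical source, for orientation):
+    //   (float)i -> sitofp, (float)u -> uitofp,
+    //   (int)f -> fptosi, (unsigned)f -> fptoui, (float)d -> fptrunc.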
+ %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float + // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 + %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float + // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 + %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i + // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 + %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i + // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 + %18 = cir.const(#cir.int<0> : !s32i) : !s32i + // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 + %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float + + cir.store %18, %2 : !s32i, cir.ptr + %19 = cir.load %2 : cir.ptr , !s32i + cir.return %19 : !s32i + } + + cir.func @testBoolToIntCast(%arg0: !cir.bool) { + // CHECK: llvm.func @testBoolToIntCast + %0 = cir.alloca !cir.bool, cir.ptr , ["bl", init] {alignment = 1 : i64} + %1 = cir.alloca !u8i, cir.ptr , ["y", init] {alignment = 1 : i64} + cir.store %arg0, %0 : !cir.bool, cir.ptr + + %2 = cir.load %0 : cir.ptr , !cir.bool + %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i + // CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8 + // CHECK: %{{.*}} = llvm.bitcast %[[LOAD_BOOL]] : i8 to i8 + + cir.store %3, %1 : !u8i, cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir new file mode 100644 index 000000000000..9136238e48da --- /dev/null +++ b/clang/test/CIR/Lowering/class.cir @@ -0,0 +1,96 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +!u8i = !cir.int +!u32i = !cir.int +!ty_22S22 = !cir.struct +!ty_22S2A22 = !cir.struct +!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S222 = !cir.struct +!ty_22S322 = !cir.struct + +module { + cir.func @test() { + %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} + // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#CLASS:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"class.S", (i8, i32)> + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#CLASS]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#CLASS]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> + cir.return + } + + cir.func @shouldConstInitLocalClassesWithConstStructAttr() { + %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + cir.store %1, %0 : !ty_22S2A22, cir.ptr + cir.return + } + // CHECK: llvm.func @shouldConstInitLocalClassesWithConstStructAttr() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.struct<"class.S2A", (i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> + // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"class.S2A", (i32)> + // CHECK: llvm.store %4, %1 : !llvm.struct<"class.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.return + // CHECK: } + + // Should lower basic #cir.const_struct initializer. 
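+  // E.g., a C++ global along the lines of S1 s1 = {1, 0.1f, nullptr};
+  // (hypothetical source) becomes the insertvalue chain checked below.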
+ cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 + // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"class.S1", (i32, f32, ptr)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %5 = llvm.mlir.zero : !llvm.ptr + // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: llvm.return %6 : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: } + + // Should lower nested #cir.const_struct initializer. + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"class.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: llvm.return %4 : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: } + + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"class.S3", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %5 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %6 = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %9 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %10 = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: llvm.return %12 : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: } + + cir.func @shouldLowerClassCopies() { + // CHECK: llvm.func @shouldLowerClassCopies() + %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> 
{alignment = 4 : i64} : (i64) -> !llvm.ptr + cir.copy %1 to %2 : !cir.ptr + // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 + // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () + cir.return + } +} diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir new file mode 100644 index 000000000000..06dd60ff5453 --- /dev/null +++ b/clang/test/CIR/Lowering/cmp.cir @@ -0,0 +1,68 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool + // CHECK: llvm.icmp "sgt" + %8 = cir.load %0 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool + // CHECK: llvm.icmp "eq" + %11 = cir.load %0 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool + // CHECK: llvm.icmp "slt" + %14 = cir.load %0 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool + // CHECK: llvm.icmp "sge" + %17 = cir.load %0 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool + // CHECK: llvm.icmp "ne" + %20 = cir.load %0 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool + // CHECK: llvm.icmp "sle" + %23 = cir.load %2 : cir.ptr , !cir.float + %24 = cir.load %3 : cir.ptr , !cir.float + %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "ogt" + %26 = cir.load %2 : cir.ptr , !cir.float + %27 = cir.load %3 : cir.ptr , !cir.float + %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "oeq" + %29 = cir.load %2 : cir.ptr , !cir.float + %30 = cir.load %3 : cir.ptr , !cir.float + %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "olt" + %32 = cir.load %2 : cir.ptr , !cir.float + %33 = cir.load %3 : cir.ptr , !cir.float + %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "oge" + %35 = cir.load %2 : cir.ptr , !cir.float + %36 = cir.load %3 : cir.ptr , !cir.float + %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "une" + %38 = cir.load %2 : cir.ptr , !cir.float + %39 = cir.load %3 : cir.ptr , !cir.float + %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool + // CHECK: llvm.fcmp "ole" + + // Pointer comparisons. 
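+    // In C terms (illustrative): &a != &b and &a < &b. Pointer comparisons
+    // lower to the unsigned icmp predicates checked below.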
+ %41 = cir.cmp(ne, %0, %1) : !cir.ptr, !cir.bool + // CHECK: llvm.icmp "ne" + %42 = cir.cmp(lt, %0, %1) : !cir.ptr, !cir.bool + // CHECK: llvm.icmp "ult" + cir.return + } +} diff --git a/clang/test/CIR/Lowering/cmp3way.cir b/clang/test/CIR/Lowering/cmp3way.cir new file mode 100644 index 000000000000..6e00a9440f59 --- /dev/null +++ b/clang/test/CIR/Lowering/cmp3way.cir @@ -0,0 +1,40 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s8i = !cir.int +!s32i = !cir.int +!u32i = !cir.int + +#cmp3way_info = #cir.cmp3way_info + +module { + cir.func @test_scmp(%arg0 : !s32i, %arg1 : !s32i) -> !s8i { + %0 = cir.cmp3way(%arg0 : !s32i, %arg1, #cmp3way_info) : !s8i + cir.return %0 : !s8i + } + + // MLIR: llvm.func @test_scmp(%arg0: i32, %arg1: i32) -> i8 + // MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.scmp.i8.i32"(%arg0, %arg1) : (i32, i32) -> i8 + // MLIR-NEXT: llvm.return %0 : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_scmp(i32 %0, i32 %1) + // LLVM-NEXT: %[[#RET:]] = call i8 @llvm.scmp.i8.i32(i32 %0, i32 %1) + // LLVM-NEXT: ret i8 %[[#RET]] + // LLVM-NEXT: } + + cir.func @test_ucmp(%arg0 : !u32i, %arg1 : !u32i) -> !s8i { + %0 = cir.cmp3way(%arg0 : !u32i, %arg1, #cmp3way_info) : !s8i + cir.return %0 : !s8i + } + + // MLIR: llvm.func @test_ucmp(%arg0: i32, %arg1: i32) -> i8 + // MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.ucmp.i8.i32"(%arg0, %arg1) : (i32, i32) -> i8 + // MLIR-NEXT: llvm.return %0 : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_ucmp(i32 %0, i32 %1) + // LLVM-NEXT: %[[#RET:]] = call i8 @llvm.ucmp.i8.i32(i32 %0, i32 %1) + // LLVM-NEXT: ret i8 %[[#RET]] + // LLVM-NEXT: } +} diff --git a/clang/test/CIR/Lowering/const-array.cir b/clang/test/CIR/Lowering/const-array.cir new file mode 100644 index 000000000000..7aff779a04fa --- /dev/null +++ b/clang/test/CIR/Lowering/const-array.cir @@ -0,0 +1,15 @@ +// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM + +!u8i = !cir.int + +module { + cir.global "private" internal @normal_url_char = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<1> : !u8i], trailing_zeros> : !cir.array + // LLVM: @normal_url_char = internal global [4 x i8] c"\00\01\00\00" + + cir.func @c0() -> !cir.ptr> { + %0 = cir.get_global @normal_url_char : cir.ptr > + cir.return %0 : !cir.ptr> + } + // LLVM: define ptr @c0() + // LLVM: ret ptr @normal_url_char +} diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir new file mode 100644 index 000000000000..76ec616bed21 --- /dev/null +++ b/clang/test/CIR/Lowering/const.cir @@ -0,0 +1,81 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!ty_22anon2E122 = !cir.struct, !cir.int} #cir.record.decl.ast> +module { + cir.func @testConstArrInit() { + %0 = cir.const(#cir.const_array<"string\00" : !cir.array> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[115, 116, 114, 105, 110, 103, 0]> : tensor<7xi8>) : !llvm.array<7 x i8> + %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[1, 2]> : tensor<2xi32>) : !llvm.array<2 x i32> + %3 = cir.const(#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> + %4 = 
cir.const(#cir.zero : !cir.array) : !cir.array + // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> + cir.return + } + + cir.func @testConvertConstArrayToDenseConst() { + %0 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %1 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s64i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %2 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %3 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.double]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %4 = cir.const(#cir.const_array<[#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array]> : !cir.array x 1>, #cir.zero : !cir.array x 1>]> : !cir.array x 1> x 2>) : !cir.array x 1> x 2> + + cir.return + } + // CHECK: llvm.func @testConvertConstArrayToDenseConst() + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1], [0{{\]\]}}> : tensor<2x1xi32>) : !llvm.array<2 x array<1 x i32>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1], [0{{\]\]}}> : tensor<2x1xi64>) : !llvm.array<2 x array<1 x i64>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1.000000e+00], [0.000000e+00{{\]\]}}> : tensor<2x1xf32>) : !llvm.array<2 x array<1 x f32>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1.000000e+00], [0.000000e+00{{\]\]}}> : tensor<2x1xf64>) : !llvm.array<2 x array<1 x f64>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[\[}}1, 1, 1{{\]\]}}, {{\[\[}}0, 0, 0{{\]\]\]}}> : tensor<2x1x3xi32>) : !llvm.array<2 x array<1 x array<3 x i32>>> + // CHECK: llvm.return + + cir.func @testConstArrayOfStructs() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array) : !cir.array + cir.store %1, %0 : !cir.array, cir.ptr > + cir.return + } + // CHECK: llvm.func @testConstArrayOfStructs() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> + // CHECK: %3 = llvm.mlir.undef : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %4 = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %5 = llvm.insertvalue %4, %3[0] : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %6 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[1] : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %8 = llvm.insertvalue %7, %2[0] : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> + // CHECK: llvm.store %8, %1 : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>>, !llvm.ptr + // CHECK: llvm.return + + cir.func @testArrWithTrailingZeros() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array + cir.store %1, %0 : !cir.array, cir.ptr > + cir.return + } + // CHECK: llvm.func @testArrWithTrailingZeros() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr + // 
CHECK: %2 = cir.llvmir.zeroinit : !llvm.array<10 x i32>
+  // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32
+  // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.array<10 x i32>
+
+  cir.func @testInitArrWithBool() {
+    %1 = cir.const(#cir.const_array<[#cir.bool<true> : !cir.bool]> : !cir.array<!cir.bool x 1>) : !cir.array<!cir.bool x 1>
+    cir.return
+  }
+
+  // CHECK: llvm.func @testInitArrWithBool()
+  // CHECK: [[ARR:%.*]] = llvm.mlir.undef : !llvm.array<1 x i8>
+  // CHECK: [[TRUE:%.*]] = llvm.mlir.constant(1 : i8) : i8
+  // CHECK: {{.*}} = llvm.insertvalue [[TRUE]], [[ARR]][0] : !llvm.array<1 x i8>
+  // CHECK: llvm.return
+
+}
diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir
new file mode 100644
index 000000000000..5b7742fc1400
--- /dev/null
+++ b/clang/test/CIR/Lowering/dot.cir
@@ -0,0 +1,115 @@
+// RUN: cir-opt %s -cir-to-llvm --reconcile-unrealized-casts -o %t.mlir
+// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR
+
+!s32i = !cir.int<s, 32>
+module {
+  cir.func @dot(%arg0: !cir.ptr<!cir.double>, %arg1: !cir.ptr<!cir.double>, %arg2: !s32i) -> !cir.double {
+    %0 = cir.alloca !cir.ptr<!cir.double>, cir.ptr <!cir.ptr<!cir.double>>, ["a", init] {alignment = 8 : i64}
+    %1 = cir.alloca !cir.ptr<!cir.double>, cir.ptr <!cir.ptr<!cir.double>>, ["b", init] {alignment = 8 : i64}
+    %2 = cir.alloca !s32i, cir.ptr <!s32i>, ["size", init] {alignment = 4 : i64}
+    %3 = cir.alloca !cir.double, cir.ptr <!cir.double>, ["__retval"] {alignment = 8 : i64}
+    %4 = cir.alloca !cir.double, cir.ptr <!cir.double>, ["q", init] {alignment = 8 : i64}
+    cir.store %arg0, %0 : !cir.ptr<!cir.double>, cir.ptr <!cir.ptr<!cir.double>>
+    cir.store %arg1, %1 : !cir.ptr<!cir.double>, cir.ptr <!cir.ptr<!cir.double>>
+    cir.store %arg2, %2 : !s32i, cir.ptr <!s32i>
+    %5 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double
+    cir.store %5, %4 : !cir.double, cir.ptr <!cir.double>
+    cir.scope {
+      %8 = cir.alloca !s32i, cir.ptr <!s32i>, ["i", init] {alignment = 4 : i64}
+      %9 = cir.const(#cir.int<0> : !s32i) : !s32i
+      cir.store %9, %8 : !s32i, cir.ptr <!s32i>
+      cir.for : cond {
+        %10 = cir.load %8 : cir.ptr <!s32i>, !s32i
+        %11 = cir.load %2 : cir.ptr <!s32i>, !s32i
+        %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i
+        %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool
+        cir.condition(%13)
+      } body {
+        %10 = cir.load %0 : cir.ptr <!cir.ptr<!cir.double>>, !cir.ptr<!cir.double>
+        %11 = cir.load %8 : cir.ptr <!s32i>, !s32i
+        %12 = cir.ptr_stride(%10 : !cir.ptr<!cir.double>, %11 : !s32i), !cir.ptr<!cir.double>
+        %13 = cir.load %12 : cir.ptr <!cir.double>, !cir.double
+        %14 = cir.load %1 : cir.ptr <!cir.ptr<!cir.double>>, !cir.ptr<!cir.double>
+        %15 = cir.load %8 : cir.ptr <!s32i>, !s32i
+        %16 = cir.ptr_stride(%14 : !cir.ptr<!cir.double>, %15 : !s32i), !cir.ptr<!cir.double>
+        %17 = cir.load %16 : cir.ptr <!cir.double>, !cir.double
+        %18 = cir.binop(mul, %13, %17) : !cir.double
+        %19 = cir.load %4 : cir.ptr <!cir.double>, !cir.double
+        %20 = cir.binop(add, %19, %18) : !cir.double
+        cir.store %20, %4 : !cir.double, cir.ptr <!cir.double>
+        cir.yield
+      } step {
+        %10 = cir.load %8 : cir.ptr <!s32i>, !s32i
+        %11 = cir.unary(inc, %10) : !s32i, !s32i
+        cir.store %11, %8 : !s32i, cir.ptr <!s32i>
+        cir.yield
+      }
+    }
+    %6 = cir.load %4 : cir.ptr <!cir.double>, !cir.double
+    cir.store %6, %3 : !cir.double, cir.ptr <!cir.double>
+    %7 = cir.load %3 : cir.ptr <!cir.double>, !cir.double
+    cir.return %7 : !cir.double
+  }
+}
+
+// MLIR: module {
+// MLIR-NEXT: llvm.func @dot(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: i32) -> f64
+// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64
+// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr
+// MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64
+// MLIR-NEXT: %3 = llvm.alloca %2 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr
+// MLIR-NEXT: %4 = llvm.mlir.constant(1 : index) : i64
+// MLIR-NEXT: %5 = llvm.alloca %4 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr
+// MLIR-NEXT: %6 = llvm.mlir.constant(1 : index) : i64
+// MLIR-NEXT: %7 = llvm.alloca %6 x
f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %8 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %9 = llvm.alloca %8 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr +// MLIR-NEXT: llvm.store %arg1, %3 : !llvm.ptr, !llvm.ptr +// MLIR-NEXT: llvm.store %arg2, %5 : i32, !llvm.ptr +// MLIR-NEXT: %10 = llvm.mlir.constant(0.000000e+00 : f64) : f64 +// MLIR-NEXT: llvm.store %10, %9 : f64, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %11 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %12 = llvm.alloca %11 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb4 +// MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr -> i32 +// MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 +// MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 +// MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 +// MLIR-NEXT: llvm.cond_br %19, ^bb3, ^bb5 +// MLIR-NEXT: ^bb3: // pred: ^bb2 +// MLIR-NEXT: %20 = llvm.load %1 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %21 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %22 = llvm.getelementptr %20[%21] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %23 = llvm.load %22 : !llvm.ptr -> f64 +// MLIR-NEXT: %24 = llvm.load %3 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %25 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %26 = llvm.getelementptr %24[%25] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %27 = llvm.load %26 : !llvm.ptr -> f64 +// MLIR-NEXT: %28 = llvm.fmul %23, %27 : f64 +// MLIR-NEXT: %29 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: %30 = llvm.fadd %29, %28 : f64 +// MLIR-NEXT: llvm.store %30, %9 : f64, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 +// MLIR-NEXT: %31 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %32 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %33 = llvm.add %31, %32 : i32 +// MLIR-NEXT: llvm.store %33, %12 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb5: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.store %34, %7 : f64, !llvm.ptr +// MLIR-NEXT: %35 = llvm.load %7 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.return %35 : f64 +// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/expect.cir b/clang/test/CIR/Lowering/expect.cir new file mode 100644 index 000000000000..a221cca5f3dd --- /dev/null +++ b/clang/test/CIR/Lowering/expect.cir @@ -0,0 +1,54 @@ +// RUN: cir-opt %s -cir-to-llvm | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s64i = !cir.int +module { + cir.func @foo(%arg0: !s64i) { + %0 = cir.const(#cir.int<1> : !s64i) : !s64i + %1 = cir.expect(%arg0, %0) : !s64i + %2 = cir.cast(int_to_bool, %1 : !s64i), !cir.bool + cir.if %2 { + cir.yield + } + %3 = cir.expect(%arg0, %0, 1.000000e-01) : !s64i + %4 = cir.cast(int_to_bool, %3 : !s64i), !cir.bool + cir.if %4 { + cir.yield + } + cir.return + } +} + +// MLIR: llvm.func @foo(%arg0: i64) +// MLIR: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64 +// MLIR: [[EXPECT:%.*]] = llvm.intr.expect %arg0, [[ONE]] : i64 +// MLIR: [[ZERO:%.*]] = llvm.mlir.constant(0 : i64) : i64 +// MLIR: [[CMP_NE:%.*]] 
= llvm.icmp "ne" [[EXPECT]], [[ZERO]] : i64 +// MLIR: llvm.cond_br [[CMP_NE]], ^bb1, ^bb2 +// MLIR: ^bb1: // pred: ^bb0 +// MLIR: llvm.br ^bb2 +// MLIR: ^bb2: // 2 preds: ^bb0, ^bb1 +// MLIR: [[EXPECT_WITH_PROB:%.*]] = llvm.intr.expect.with.probability %arg0, [[ONE]], 1.000000e-01 : i64 +// MLIR: [[ZERO:%.*]] = llvm.mlir.constant(0 : i64) : i64 +// MLIR: [[CMP_NE:%.*]] = llvm.icmp "ne" [[EXPECT_WITH_PROB]], [[ZERO]] : i64 +// MLIR: llvm.cond_br [[CMP_NE]], ^bb3, ^bb4 +// MLIR: ^bb3: // pred: ^bb2 +// MLIR: llvm.br ^bb4 +// MLIR: ^bb4: // 2 preds: ^bb2, ^bb3 +// MLIR: llvm.return + +// LLVM: define void @foo(i64 %0) +// LLVM: [[EXPECT:%.*]] = call i64 @llvm.expect.i64(i64 %0, i64 1) +// LLVM: [[CMP_NE:%.*]] = icmp ne i64 [[EXPECT]], 0 +// LLVM: br i1 [[CMP_NE]], label %4, label %5 +// LLVM: 4: +// LLVM: br label %5 +// LLVM: 5: +// LLVM: [[EXPECT_WITH_PROB:%.*]] = call i64 @llvm.expect.with.probability.i64(i64 %0, i64 1, double 1.000000e-01) +// LLVM: [[CMP_NE:%.*]] = icmp ne i64 [[EXPECT_WITH_PROB]], 0 +// LLVM: br i1 [[CMP_NE]], label %8, label %9 +// LLVM: 8: +// LLVM: br label %9 +// LLVM: 9: +// LLVM: ret void + diff --git a/clang/test/CIR/Lowering/float.cir b/clang/test/CIR/Lowering/float.cir new file mode 100644 index 000000000000..463768a35935 --- /dev/null +++ b/clang/test/CIR/Lowering/float.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir + +module { + cir.func @test() { + // %0 = cir.const(1.0 : f16) : f16 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f16) : f16 + %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f32) : f32 + %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f64) : f64 + %3 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f80) : f80 + // %5 = cir.const(1.0 : bf16) : bf16 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : bf16) : bf16 + cir.return + } +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir new file mode 100644 index 000000000000..6dcb7bdb42d0 --- /dev/null +++ b/clang/test/CIR/Lowering/func.cir @@ -0,0 +1,17 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck %s -check-prefix=MLIR --input-file=%t.mlir + +!s32i = !cir.int +module { + cir.func no_proto private @noProto3(...) -> !s32i + // MLIR: llvm.func @noProto3(...) 
-> i32 + cir.func @test3(%arg0: !s32i) { + %3 = cir.get_global @noProto3 : cir.ptr > + // MLIR: %[[#FN_PTR:]] = llvm.mlir.addressof @noProto3 : !llvm.ptr + %4 = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> + // MLIR: %[[#FUNC:]] = llvm.bitcast %[[#FN_PTR]] : !llvm.ptr to !llvm.ptr + %5 = cir.call %4(%arg0) : (!cir.ptr>, !s32i) -> !s32i + // MLIR: %{{.+}} = llvm.call %[[#FUNC]](%{{.+}}) : !llvm.ptr, (i32) -> i32 + cir.return + } +} diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir new file mode 100644 index 000000000000..dde8087fada6 --- /dev/null +++ b/clang/test/CIR/Lowering/globals.cir @@ -0,0 +1,191 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +!void = !cir.void +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u64i = !cir.int +!u8i = !cir.int +!ty_22A22 = !cir.struct x 2>} #cir.record.decl.ast> +!ty_22Bar22 = !cir.struct +!ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> +!ty_22StringStructPtr22 = !cir.struct} #cir.record.decl.ast> +!ty_22anon2E122 = !cir.struct)>>} #cir.record.decl.ast> + +module { + cir.global external @a = #cir.int<3> : !s32i + cir.global external @c = #cir.int<2> : !u64i + cir.global external @y = #cir.fp<3.400000e+00> : !cir.float + cir.global external @w = #cir.fp<4.300000e+00> : !cir.double + cir.global external @x = #cir.int<51> : !s8i + cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array + cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array + cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = #cir.global_view<@".str"> : !cir.ptr + // MLIR: llvm.mlir.global internal constant @".str"("example\00") {addr_space = 0 : i32} + // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { + // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr + // MLIR: %1 = llvm.bitcast %0 : !llvm.ptr to !llvm.ptr + // MLIR: llvm.return %1 : !llvm.ptr + // MLIR: } + // LLVM: @.str = internal constant [8 x i8] c"example\00" + // LLVM: @s = global ptr @.str + cir.global external @aPtr = #cir.global_view<@a> : !cir.ptr + // MLIR: llvm.mlir.global external @aPtr() {addr_space = 0 : i32} : !llvm.ptr { + // MLIR: %0 = llvm.mlir.addressof @a : !llvm.ptr + // MLIR: llvm.return %0 : !llvm.ptr + // MLIR: } + cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr + cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr + cir.func @_Z10use_globalv() { + %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} + %1 = cir.get_global @a : cir.ptr + %2 = cir.load %1 : cir.ptr , !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.return + } + cir.func @_Z17use_global_stringv() { + %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} + %1 = cir.get_global @s2 : cir.ptr > + %2 = cir.load %1 : cir.ptr >, !cir.ptr + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr + %5 = cir.load %4 : cir.ptr , !s8i + %6 = cir.cast(integral, %5 : 
!s8i), !u8i
+ cir.store %6, %0 : !u8i, cir.ptr
+ cir.return
+ }
+ cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i {
+ %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64}
+ %1 = cir.const(#cir.int<0> : !s32i) : !s32i
+ cir.store %1, %0 : !s32i, cir.ptr
+ %2 = cir.load %0 : cir.ptr , !s32i
+ cir.return %2 : !s32i
+ }
+ cir.func @_Z8use_funcv() -> !s32i {
+ %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64}
+ %1 = cir.call @_Z4funcIiET_v() : () -> !s32i
+ cir.store %1, %0 : !s32i, cir.ptr
+ %2 = cir.load %0 : cir.ptr , !s32i
+ cir.return %2 : !s32i
+ }
+ cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array
+ // MLIR: llvm.mlir.global external @string(dense<[119, 104, 97, 116, 110, 111, 119, 0]> : tensor<8xi8>) {addr_space = 0 : i32} : !llvm.array<8 x i8>
+ cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array
+ // MLIR: llvm.mlir.global external @uint(dense<255> : tensor<1xi32>) {addr_space = 0 : i32} : !llvm.array<1 x i32>
+ cir.global external @sshort = #cir.const_array<[#cir.int<11111> : !s16i, #cir.int<22222> : !s16i]> : !cir.array
+ // MLIR: llvm.mlir.global external @sshort(dense<[11111, 22222]> : tensor<2xi16>) {addr_space = 0 : i32} : !llvm.array<2 x i16>
+ cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array
+ // MLIR: llvm.mlir.global external @sint(dense<[123, 456, 789]> : tensor<3xi32>) {addr_space = 0 : i32} : !llvm.array<3 x i32>
+ cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array
+ // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64>
+ cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2>
+ // MLIR: llvm.mlir.global external @twoDim(dense<{{\[\[}}1, 2], [3, 4{{\]\]}}> : tensor<2x2xi32>) {addr_space = 0 : i32} : !llvm.array<2 x array<2 x i32>>
+
+ // The following tests check directly the resulting LLVM IR because the MLIR
+ // version is too long. Always prefer the MLIR prefix when possible.
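+ //
+ // As a rough C analogue of the first initializer below (hypothetical field
+ // names, not taken from the test):
+ //   struct A { int i; int arr[2][2]; } nestedTwoDim = {1, {{2, 3}, {4, 5}}};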
+ cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22A22 + // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } + cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_22StringStruct22 + // LLVM: @nestedString = global %struct.StringStruct { [3 x i8] c"1\00\00", [3 x i8] zeroinitializer, [3 x i8] zeroinitializer } + cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> : !ty_22StringStructPtr22 + // LLVM: @nestedStringPtr = global %struct.StringStructPtr { ptr @.str } + + cir.func @_Z11get_globalsv() { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} + %2 = cir.alloca !cir.ptr, cir.ptr >, ["ss", init] {alignment = 8 : i64} + %3 = cir.alloca !cir.ptr, cir.ptr >, ["si", init] {alignment = 8 : i64} + %4 = cir.alloca !cir.ptr, cir.ptr >, ["l", init] {alignment = 8 : i64} + %5 = cir.get_global @string : cir.ptr > + %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i8 + cir.store %6, %0 : !cir.ptr, cir.ptr > + %7 = cir.get_global @uint : cir.ptr > + %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 + cir.store %8, %1 : !cir.ptr, cir.ptr > + %9 = cir.get_global @sshort : cir.ptr > + %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i16 + cir.store %10, %2 : !cir.ptr, cir.ptr > + %11 = cir.get_global @sint : cir.ptr > + %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 + cir.store %12, %3 : !cir.ptr, cir.ptr > + %13 = cir.get_global @ll : cir.ptr > + %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i64 + cir.store %14, %4 : !cir.ptr, cir.ptr > + cir.return + } + cir.global external @flt = #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array + cir.global external @zeroInitFlt = #cir.const_array<[#cir.fp<0.000000e+00> : !cir.float, #cir.fp<0.000000e+00> : !cir.float]> : !cir.array + // MLIR: llvm.mlir.global external @flt(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> + // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> + cir.global "private" internal @staticVar = #cir.int<0> : !s32i + // MLIR: 
llvm.mlir.global internal @staticVar(0 : i32) {addr_space = 0 : i32} : i32 + cir.global external @nullPtr = #cir.ptr : !cir.ptr + // MLIR: llvm.mlir.global external @nullPtr() + // MLIR: %0 = llvm.mlir.zero : !llvm.ptr + // MLIR: llvm.return %0 : !llvm.ptr + // MLIR: } + cir.global external @zeroStruct = #cir.zero : !ty_22Bar22 + // MLIR: llvm.mlir.global external @zeroStruct() + // MLIR: %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: } + cir.global common @comm = #cir.int<0> : !s32i + // MLIR: llvm.mlir.global common @comm(0 : i32) {addr_space = 0 : i32} : i32 + + cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_22anon2E122]> : !cir.array + cir.func internal private @myfun(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.return + } + cir.func @foo(%arg0: !s32i, %arg1: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["flag", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + %2 = cir.get_global @Handlers : cir.ptr > + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr + %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> + %7 = cir.load %6 : cir.ptr >>, !cir.ptr> + %8 = cir.load %1 : cir.ptr , !s32i + cir.call %7(%8) : (!cir.ptr>, !s32i) -> () + cir.return + } + //MLIR: %[[RES4:.*]] = llvm.mlir.addressof @Handlers : !llvm.ptr + //MLIR: %[[RES5:.*]] = llvm.load {{.*}} : !llvm.ptr -> i32 + //MLIR: %[[RES6:.*]] = llvm.getelementptr %[[RES4]][0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES7:.*]] = llvm.getelementptr %[[RES6]][%[[RES5]]] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr + //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () + + cir.global external @zero_array = #cir.zero : !cir.array + cir.func @use_zero_array() { + %0 = cir.const(#cir.global_view<@zero_array> : !cir.ptr) : !cir.ptr + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.ptr_stride(%0 : !cir.ptr, %1 : !s32i), !cir.ptr + %3 = cir.load %2 : cir.ptr , !s32i + cir.return + } + // MLIR: %0 = llvm.mlir.addressof @zero_array + +} diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir new file mode 100644 index 000000000000..6dc2191c916e --- /dev/null +++ b/clang/test/CIR/Lowering/goto.cir @@ -0,0 +1,36 @@ +// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int + +module { + cir.func @foo() { + %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + cir.br ^bb2 + ^bb1: // no predecessors + %2 = cir.load %0 : cir.ptr , !u32i + %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %4 = cir.binop(add, %2, %3) : !u32i + cir.store %4, %0 : !u32i, cir.ptr + cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 + %5 = cir.load %0 : 
cir.ptr , !u32i + %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %7 = cir.binop(add, %5, %6) : !u32i + cir.store %7, %0 : !u32i, cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: +// MLIR: return + +// LLVM: br label %[[Value:[0-9]+]] +// LLVM-EMPTY: +// LLVM-NEXT: [[Value]]: ; preds = +// LLVM: ret void diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir new file mode 100644 index 000000000000..67fec18c2e8e --- /dev/null +++ b/clang/test/CIR/Lowering/hello.cir @@ -0,0 +1,34 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +!s8i = !cir.int +module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"} { + cir.func private @printf(!cir.ptr, ...) -> !s32i + cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.func @main() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.get_global @printf : cir.ptr , ...)>> + %2 = cir.get_global @".str" : cir.ptr > + %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %5, %0 : !s32i, cir.ptr + %6 = cir.load %0 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// CHECK: llvm.func @printf(!llvm.ptr, ...) 
-> i32 +// CHECK: llvm.mlir.global internal constant @".str"("Hello, world!\0A\00") {addr_space = 0 : i32} +// CHECK: llvm.func @main() -> i32 +// CHECK: %0 = llvm.mlir.constant(1 : index) : i64 +// CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// CHECK: %2 = llvm.mlir.addressof @".str" : !llvm.ptr +// CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr, i8 +// CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 +// CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 +// CHECK: llvm.store %5, %1 : i32, !llvm.ptr +// CHECK: %6 = llvm.load %1 : !llvm.ptr -> i32 +// CHECK: llvm.return %6 : i32 +// CHECK: } diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir new file mode 100644 index 000000000000..eac0b5e4467e --- /dev/null +++ b/clang/test/CIR/Lowering/if.cir @@ -0,0 +1,65 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } else { + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } + +// MLIR: llvm.func @foo(%arg0: i32) -> i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 +// MLIR-NEXT: llvm.cond_br %1, ^bb2, ^bb1 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %2 : i32 +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.return %3 : i32 +// MLIR-NEXT: ^bb3: // no predecessors +// MLIR-NEXT: llvm.return %arg0 : i32 +// MLIR-NEXT: } + +// LLVM: define i32 @foo(i32 %0) +// LLVM-NEXT: %2 = icmp ne i32 %0, 0 +// LLVM-NEXT: br i1 %2, label %4, label %3 +// LLVM-EMPTY: +// LLVM-NEXT: 3: +// LLVM-NEXT: ret i32 0 +// LLVM-EMPTY: +// LLVM-NEXT: 4: +// LLVM-NEXT: ret i32 1 +// LLVM-EMPTY: +// LLVM-NEXT: 5: +// LLVM-NEXT: ret i32 %0 +// LLVM-NEXT: } + + cir.func @onlyIf(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } + + // MLIR: llvm.func @onlyIf(%arg0: i32) -> i32 + // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 + // MLIR-NEXT: llvm.cond_br %1, ^bb1, ^bb2 + // MLIR-NEXT: ^bb1: // pred: ^bb0 + // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: llvm.return %2 : i32 + // MLIR-NEXT: ^bb2: // pred: ^bb0 + // MLIR-NEXT: llvm.return %arg0 : i32 + // MLIR-NEXT: } +} diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir new file mode 100644 index 000000000000..3de5ec85b526 --- /dev/null +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -0,0 +1,24 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.func @test(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<42> : !s32i) : !s32i + %3 = cir.binop(sub, %1, %2) nsw : !s32i + %4 = cir.binop(sub, %1, %2) nuw : !s32i + %5 = cir.binop(sub, %1, 
%2) : !s32i + cir.return + } +} + +// MLIR: llvm.sub {{.*}}, {{.*}} overflow : i32 +// MLIR-NEXT: llvm.sub {{.*}}, {{.*}} overflow : i32 +// MLIR-NEXT: llvm.sub {{.*}}, {{.*}} : i32 + +// LLVM: sub nsw i32 {{.*}}, {{.*}}, !dbg !9 +// LLVM-NEXT: sub nuw i32 {{.*}}, {{.*}}, !dbg !10 +// LLVM-NEXT: sub i32 {{.*}}, {{.*}}, !dbg !11 \ No newline at end of file diff --git a/clang/test/CIR/Lowering/intrinsics.cir b/clang/test/CIR/Lowering/intrinsics.cir new file mode 100644 index 000000000000..25b0b34738bc --- /dev/null +++ b/clang/test/CIR/Lowering/intrinsics.cir @@ -0,0 +1,23 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @test_unreachable() { + cir.unreachable + } + + // MLIR: llvm.func @test_unreachable() + // MLIR-NEXT: llvm.unreachable + + cir.func @test_trap() { + cir.trap + } + + // MLIR: llvm.func @test_trap() + // MLIR-NEXT: "llvm.intr.trap"() : () -> () + // MLIR-NEXT: llvm.unreachable + + // LLVM: define void @test_trap() + // LLVM-NEXT: call void @llvm.trap() + // LLVM-NEXT: unreachable +} diff --git a/clang/test/CIR/Lowering/libc.cir b/clang/test/CIR/Lowering/libc.cir new file mode 100644 index 000000000000..5be5d44cd3c6 --- /dev/null +++ b/clang/test/CIR/Lowering/libc.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!void = !cir.void +!u64i = !cir.int +module { + cir.func @shouldLowerLibcMemcpyBuiltin(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !u64i) { + cir.libc.memcpy %arg2 bytes from %arg0 to %arg1 : !u64i, !cir.ptr -> !cir.ptr + // CHECK: "llvm.intr.memcpy"(%{{.+}}, %{{.+}}, %{{.+}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i64) -> () + cir.return + } + + cir.func @shouldLowerLibcFAbsBuiltin(%arg0: !cir.double) -> !cir.double { + %0 = cir.fabs %arg0 : !cir.double + // CHECK: %0 = llvm.intr.fabs(%arg0) : (f64) -> f64 + cir.return %0 : !cir.double + } +} diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir new file mode 100644 index 000000000000..fc3f333db56d --- /dev/null +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -0,0 +1,39 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR +!u32i = !cir.int + +module { + cir.func @foo() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + cir.return %2 : !u32i + } + + cir.func @test_volatile() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store volatile %1, %0 : !u32i, cir.ptr + %2 = cir.load volatile %0 : cir.ptr , !u32i + cir.return %2 : !u32i + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo() -> i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 +// MLIR-NEXT: return %3 : i32 + + +// MLIR: func @test_volatile() -> i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.store volatile %2, %1 : i32, 
!llvm.ptr
+// MLIR-NEXT: %3 = llvm.load volatile %1 : !llvm.ptr -> i32
+// MLIR-NEXT: return %3 : i32 diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir new file mode 100644 index 000000000000..d15479a76a0d --- /dev/null +++ b/clang/test/CIR/Lowering/loop.cir @@ -0,0 +1,126 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir
+// RUN: FileCheck --input-file=%t.mlir %s
+#true = #cir.bool : !cir.bool
+!s32i = !cir.int
+
+
+module {
+
+ cir.func @testFor(%arg0 : !cir.bool) {
+ cir.for : cond {
+ cir.condition(%arg0)
+ } body {
+ cir.yield
+ } step {
+ cir.yield
+ }
+ cir.return
+ }
+
+// CHECK: @testFor
+// CHECK: llvm.br ^bb[[#COND:]]
+// CHECK: ^bb[[#COND]]:
+// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]]
+// CHECK: ^bb[[#BODY]]:
+// CHECK: llvm.br ^bb[[#STEP:]]
+// CHECK: ^bb[[#STEP]]:
+// CHECK: llvm.br ^bb[[#COND]]
+// CHECK: ^bb[[#EXIT]]:
+
+
+
+ // Test cir.while operation lowering.
+ cir.func @testWhile(%arg0 : !cir.bool) {
+ cir.while {
+ cir.condition(%arg0)
+ } do {
+ cir.yield
+ }
+ cir.return
+ }
+
+// CHECK: @testWhile
+// CHECK: llvm.br ^bb[[#COND:]]
+// CHECK: ^bb[[#COND]]:
+// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]]
+// CHECK: ^bb[[#BODY]]:
+// CHECK: llvm.br ^bb[[#COND]]
+// CHECK: ^bb[[#EXIT]]:
+
+
+
+ // Test cir.do (do-while) operation lowering.
+ cir.func @testDoWhile(%arg0 : !cir.bool) {
+ cir.do {
+ cir.yield
+ } while {
+ cir.condition(%arg0)
+ }
+ cir.return
+ }
+
+// CHECK: @testDoWhile
+// CHECK: llvm.br ^bb[[#BODY:]]
+// CHECK: ^bb[[#COND:]]:
+// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]]
+// CHECK: ^bb[[#BODY]]:
+// CHECK: llvm.br ^bb[[#COND]]
+// CHECK: ^bb[[#EXIT]]:
+
+
+
+ // Test corner case:
+ // while (1) {
+ // break;
+ // }
+ cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) {
+ cir.while {
+ cir.condition(%arg0)
+ } do {
+ cir.break
+ }
+ cir.return
+ }
+
+// CHECK: @testWhileWithBreakTerminatedBody
+// CHECK: llvm.br ^bb[[#COND:]]
+// CHECK: ^bb[[#COND]]:
+// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]]
+// CHECK: ^bb[[#BODY]]:
+// CHECK: llvm.br ^bb[[#EXIT]]
+// CHECK: ^bb[[#EXIT]]:
+
+
+
+ // Test a C-only corner case that must not fail during lowering:
+ // for (;;) {
+ // break;
+ // }
+ cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) {
+ cir.for : cond {
+ cir.condition(%arg0)
+ } body {
+ cir.scope { // FIXME(cir): Redundant scope emitted during C codegen.
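+ // The break below terminates the scope region, so the lowered scope entry
+ // block branches directly to the loop exit, leaving the scope's normal exit
+ // block (SCOPE_EXIT in the checks below) without predecessors.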
+ cir.break + } + cir.yield + } step { + cir.yield + } + cir.return + } + +// CHECK: @forWithBreakTerminatedScopeInBody +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND:]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#SCOPE_IN:]] +// CHECK: ^bb[[#SCOPE_IN]]: +// CHECK: llvm.br ^bb[[#EXIT]] +// CHECK: ^bb[[#SCOPE_EXIT:]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: llvm.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +} diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir new file mode 100644 index 000000000000..ee5238c5748a --- /dev/null +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -0,0 +1,278 @@ +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @testFor() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.for : cond { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + cir.scope { + cir.scope { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.if %5 { + cir.break + } + } + } + cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
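+ // Note: `cir.break` lowers to a plain branch to the loop's exit block: the
+ // true edge of the conditional above reaches EXIT through preEXIT1.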
+ // CHECK: } + + cir.func @testForNested() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.for : cond { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + cir.scope { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + cir.for : cond { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + cir.scope { + cir.scope { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.break + } + } + } + cir.yield + } step { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield + } + } + } + cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testForNested() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#COND_NESTED]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT2:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#EXIT_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
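+ // Note: the inner `cir.break` only leaves the nested loop: preEXIT2 branches
+ // to EXIT_NESTED, after which control falls through to the outer loop's
+ // BODY and STEP blocks.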
+ // CHECK: } + + cir.func @testWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } do { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.break + } + } + cir.yield + } + } + cir.return + } + + + // CHECK: llvm.func @testWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + +cir.func @testDoWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.do { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.break + } + } + cir.yield + } while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } + } + cir.return + } + + // CHECK: llvm.func @testDoWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
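+ // Note: the `cir.if` wrapping the break lowers to a conditional branch whose
+ // true edge reaches EXIT through preEXIT1 and preEXIT2, while the false edge
+ // rejoins the loop condition through preCOND0 and preCOND1.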
+ // CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir new file mode 100644 index 000000000000..9cfd3635d740 --- /dev/null +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -0,0 +1,274 @@ +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @testFor() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.for : cond { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + cir.scope { + cir.scope { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.if %5 { + cir.continue + } + } + } + cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
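+ // Note: in contrast to `cir.break`, `cir.continue` in a for loop lowers to a
+ // branch to the step block: the true edge above goes through preSTEP to STEP
+ // rather than to the loop exit.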
+ // CHECK: } + + + cir.func @testForNested() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.for : cond { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + cir.scope { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + cir.for : cond { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + cir.scope { + cir.scope { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.continue + } + } + } + cir.yield + } step { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield + } + } + } + cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testForNested() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#COND_NESTED]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP0:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP0]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#EXIT_NESTED]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
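+ // Note: the inner `cir.continue` targets the nested loop's own step block
+ // (preSTEP0 -> STEP_NESTED above); the outer loop's step is not involved.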
+ // CHECK: } + +cir.func @testWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } do { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.continue + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + + cir.func @testDoWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.do { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.continue + } + } + cir.yield + } while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } + } + cir.return + } + + + // CHECK: llvm.func @testDoWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] 
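+ // Note: while and do-while loops have no separate step block, so
+ // `cir.continue` lowers to a branch back to the condition block
+ // (preCOND0 -> COND above).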
+ // CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/openmp.cir b/clang/test/CIR/Lowering/openmp.cir new file mode 100644 index 000000000000..73b3155252cc --- /dev/null +++ b/clang/test/CIR/Lowering/openmp.cir @@ -0,0 +1,35 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + +!s32i = !cir.int +module { + cir.func @omp_parallel() { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + omp.parallel { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %6 = cir.binop(add, %4, %5) : !s32i + cir.store %6, %0 : !s32i, cir.ptr + } + omp.terminator + } + cir.return + } +} +// CHECK-LABEL: omp_parallel +// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call({{.*}}, ptr @omp_parallel..omp_par, +// CHECK: ret void +// CHECK-NEXT: } +// CHECK: define{{.*}} void @omp_parallel..omp_par(ptr +// CHECK: %[[YVar:.*]] = load ptr, ptr %{{.*}}, align 8 +// CHECK: %[[XVar:.*]] = alloca i32, i64 1, align 4 +// CHECK: store i32 1, ptr %[[XVar]], align 4 +// CHECK: %[[XVal:.*]] = load i32, ptr %[[XVar]], align 4 +// CHECK: %[[BinOp:.*]] = add i32 %[[XVal]], 1 +// CHECK: store i32 %[[BinOp]], ptr %[[YVar]], align 4 +// CHECK: ret diff --git a/clang/test/CIR/Lowering/ptrdiff.cir b/clang/test/CIR/Lowering/ptrdiff.cir new file mode 100644 index 000000000000..ff1248ddad66 --- /dev/null +++ b/clang/test/CIR/Lowering/ptrdiff.cir @@ -0,0 +1,18 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + +!s32i = !cir.int +!u64i = !cir.int + +module { + cir.func @foo(%arg0: !cir.ptr, %arg1: !cir.ptr) -> !s32i { + %1 = cir.ptr_diff(%arg0, %arg1) : !cir.ptr -> !u64i + %2 = cir.cast(integral, %1 : !u64i), !s32i + cir.return %2 : !s32i + } +} + +// CHECK: %3 = ptrtoint ptr %0 to i64 +// CHECK-NEXT: %4 = ptrtoint ptr %1 to i64 +// CHECK-NEXT: %5 = sub i64 %3, %4 +// CHECK-NEXT: %6 = udiv i64 %5, 4 +// CHECK-NEXT: %7 = trunc i64 %6 to i32 diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir new file mode 100644 index 000000000000..9c01fd7fde01 --- /dev/null +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -0,0 +1,28 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR + +!s32i = !cir.int +module { + cir.func @f(%arg0: !cir.ptr) { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + %1 = cir.load %0 : cir.ptr >, !cir.ptr + %2 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : !s32i), !cir.ptr + %4 = cir.load %3 : cir.ptr , !s32i + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @f(%arg0: !llvm.ptr) +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr +// MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %4 = llvm.getelementptr %2[%3] : (!llvm.ptr, i32) -> !llvm.ptr, i32 +// MLIR-NEXT: %5 = llvm.load %4 : !llvm.ptr -> i32 +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir new file 
mode 100644 index 000000000000..8afa84d0c247 --- /dev/null +++ b/clang/test/CIR/Lowering/scope.cir @@ -0,0 +1,78 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + } + cir.return + } + +// MLIR: llvm.func @foo() +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: +// MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 +// MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 +// MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store [[v1]], [[v3]] : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: +// MLIR-NEXT: llvm.return + + +// LLVM: define void @foo() +// LLVM-NEXT: br label %1 +// LLVM-EMPTY: +// LLVM-NEXT: 1: +// LLVM-NEXT: %2 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 4, ptr %2, align 4 +// LLVM-NEXT: br label %3 +// LLVM-EMPTY: +// LLVM-NEXT: 3: +// LLVM-NEXT: ret void +// LLVM-NEXT: } + + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } + // MLIR: llvm.func @empty_scope() + // MLIR-NEXT: llvm.return + // MLIR-NEXT: } + + + cir.func @scope_with_return() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.scope { + %2 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %2, %0 : !u32i, cir.ptr + %3 = cir.load %0 : cir.ptr , !u32i + cir.return %3 : !u32i + } + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + + // MLIR: llvm.func @scope_with_return() + // MLIR-NEXT: [[v0:%.*]] = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: [[v1:%.*]] = llvm.alloca [[v0]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: // pred: ^bb0 + // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: llvm.store [[v2]], [[v1]] : i32, !llvm.ptr + // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: llvm.return [[v3]] : i32 + // MLIR-NEXT: ^bb2: // no predecessors + // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: llvm.return [[v4]] : i32 + // MLIR-NEXT: } + + } diff --git a/clang/test/CIR/Lowering/shift.cir b/clang/test/CIR/Lowering/shift.cir new file mode 100644 index 000000000000..78a7f89e13d0 --- /dev/null +++ b/clang/test/CIR/Lowering/shift.cir @@ -0,0 +1,28 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +module { + cir.func @testShiftWithDifferentValueAndAmountTypes(%arg0: !s16i, %arg1: !s32i, %arg2: !s64i, %arg3: !u16i) { + // CHECK: testShiftWithDifferentValueAndAmountTypes + + // Should allow shift with larger amount type. + %1 = cir.shift(left, %arg1: !s32i, %arg2 : !s64i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.trunc %{{.+}} : i64 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + + // Should allow shift with signed smaller amount type. + %2 = cir.shift(left, %arg1 : !s32i, %arg0 : !s16i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.sext %{{.+}} : i16 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + + // Should allow shift with unsigned smaller amount type. 
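+ // The unsigned amount below is zero-extended to the value's width, which is
+ // also what the usual C promotions would produce for a hypothetical
+ //   int f(int x, unsigned short n) { return x << n; }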
+ %14 = cir.shift(left, %arg1 : !s32i, %arg3 : !u16i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.zext %{{.+}} : i16 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + cir.return + } +} diff --git a/clang/test/CIR/Lowering/stack-save-restore.cir b/clang/test/CIR/Lowering/stack-save-restore.cir new file mode 100644 index 000000000000..ad9dee66b53f --- /dev/null +++ b/clang/test/CIR/Lowering/stack-save-restore.cir @@ -0,0 +1,19 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!u8i = !cir.int + +module { + cir.func @stack_save() { + %0 = cir.stack_save : !cir.ptr + cir.stack_restore %0 : !cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @stack_save +// MLIR-NEXT: %0 = llvm.intr.stacksave : !llvm.ptr +// MLIR-NEXT: llvm.intr.stackrestore %0 : !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/struct-init.c b/clang/test/CIR/Lowering/struct-init.c new file mode 100644 index 000000000000..3c94cf9d5f50 --- /dev/null +++ b/clang/test/CIR/Lowering/struct-init.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +struct S { + int x; +}; + +// LLVM: define void @zeroInit +// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1 +// LLVM: store %struct.S zeroinitializer, ptr [[TMP0]] +void zeroInit() { + struct S s = {0}; +} diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir new file mode 100644 index 000000000000..e50bfc4991d5 --- /dev/null +++ b/clang/test/CIR/Lowering/struct.cir @@ -0,0 +1,96 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +!u8i = !cir.int +!u32i = !cir.int +!ty_22S22 = !cir.struct +!ty_22S2A22 = !cir.struct +!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S222 = !cir.struct +!ty_22S322 = !cir.struct + +module { + cir.func @test() { + %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} + // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> + cir.return + } + + cir.func @shouldConstInitLocalStructsWithConstStructAttr() { + %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + cir.store %1, %0 : !ty_22S2A22, cir.ptr + cir.return + } + // CHECK: llvm.func @shouldConstInitLocalStructsWithConstStructAttr() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.struct<"struct.S2A", (i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: llvm.store %4, %1 : !llvm.struct<"struct.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.return + // CHECK: } + + // Should lower basic #cir.const_struct initializer. 
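+ // A rough C analogue (field names assumed, not part of the test):
+ //   struct S1 { int i; float f; void *p; } s1 = {1, 0.1f, 0};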
+ cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 + // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %5 = llvm.mlir.zero : !llvm.ptr + // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: llvm.return %6 : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: } + + // Should lower nested #cir.const_struct initializer. + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: } + + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"struct.S3", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %5 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %6 = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %9 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %10 = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: llvm.return %12 : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: } + + cir.func @shouldLowerStructCopies() { + // CHECK: llvm.func @shouldLowerStructCopies() + %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x 
!llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + cir.copy %1 to %2 : !cir.ptr + // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 + // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () + cir.return + } +} diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir new file mode 100644 index 000000000000..5931d49de3a4 --- /dev/null +++ b/clang/test/CIR/Lowering/switch.cir @@ -0,0 +1,185 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +module { + cir.func @shouldLowerSwitchWithDefault(%arg0: !s8i) { + cir.switch (%arg0 : !s8i) [ + // CHECK: llvm.switch %arg0 : i8, ^bb[[#DEFAULT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1) { + cir.break + }, + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + case (default) { + cir.break + } + // CHECK: ^bb[[#DEFAULT]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithoutDefault(%arg0: !s32i) { + cir.switch (%arg0 : !s32i) [ + // Default block is the exit block: + // CHECK: llvm.switch %arg0 : i32, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1) { + cir.break + } + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithImplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1N2:]], + // CHECK: 2: ^bb[[#CASE1N2]] + // CHECK: ] + case (anyof, [1, 2] : !s64i) { // case 1 and 2 use same region + cir.break + } + // CHECK: ^bb[[#CASE1N2]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithExplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]], + // CHECK: 2: ^bb[[#CASE2:]] + // CHECK: ] + case (equal, 1 : !s64i) { // case 1 has its own region + cir.yield // fallthrough to case 2 + }, + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#CASE2]] + case (equal, 2 : !s64i) { + cir.break + } + // CHECK: ^bb[[#CASE2]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithFallthroughToExit(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1 : !s64i) { + cir.yield // fallthrough to exit + } + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldDropEmptySwitch(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + ] + // CHECK-NOT: llvm.switch + cir.return + } + + cir.func @shouldLowerMultiBlockCase(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + %1 = cir.load %0 : cir.ptr , !s32i + cir.switch (%1 : !s32i) [ + case (equal, 3) { + cir.return + ^bb1: // no predecessors + cir.break + } + ] + } + cir.return + } + // CHECK: llvm.func @shouldLowerMultiBlockCase + // CHECK: ^bb1: // pred: ^bb0 + // CHECK: llvm.switch {{.*}} : i32, ^bb4 [ + // CHECK: 3: ^bb2 + // CHECK: ] + // CHECK: ^bb2: // pred: ^bb1 + // CHECK: llvm.return + // CHECK: ^bb3: // no predecessors + // 
CHECK: llvm.br ^bb4 + // CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 + // CHECK: llvm.br ^bb5 + // CHECK: ^bb5: // pred: ^bb4 + // CHECK: llvm.return + // CHECK: } + + cir.func @shouldLowerNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + cir.scope { + %5 = cir.load %0 : cir.ptr , !s32i + cir.switch (%5 : !s32i) [ + case (equal, 0) { + cir.scope { + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.if %9 { + cir.break + } + } + cir.break + } + ] + } + %3 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + cir.return %4 : !s32i + } + // CHECK: llvm.func @shouldLowerNestedBreak + // CHECK: llvm.switch %6 : i32, ^bb7 [ + // CHECK: 0: ^bb2 + // CHECK: ] + // CHECK: ^bb2: // pred: ^bb1 + // CHECK: llvm.br ^bb3 + // CHECK: ^bb3: // pred: ^bb2 + // CHECK: llvm.cond_br {{%.*}}, ^bb4, ^bb5 + // CHECK: ^bb4: // pred: ^bb3 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb5: // pred: ^bb3 + // CHECK: llvm.br ^bb6 + // CHECK: ^bb6: // pred: ^bb5 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 + // CHECK: llvm.br ^bb8 + // CHECK: ^bb8: // pred: ^bb7 + // CHECK: llvm.return +} diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir new file mode 100644 index 000000000000..b80ff86c9bbc --- /dev/null +++ b/clang/test/CIR/Lowering/ternary.cir @@ -0,0 +1,49 @@ +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR + +!s32i = !cir.int + +module { +cir.func @_Z1xi(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %3 = llvm.alloca %2 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr +// MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr -> i32 +// MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 +// MLIR-NEXT: llvm.cond_br %6, ^bb1, ^bb2 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %7 = llvm.mlir.constant(3 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%7 : i32) +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %8 = llvm.mlir.constant(5 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%8 : i32) +// MLIR-NEXT: ^bb3(%9: i32): // 2 preds: ^bb1, ^bb2 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: 
^bb3 +// MLIR-NEXT: llvm.store %9, %3 : i32, !llvm.ptr +// MLIR-NEXT: %10 = llvm.load %3 : !llvm.ptr -> i32 +// MLIR-NEXT: llvm.return %10 : i32 +// MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/types.cir b/clang/test/CIR/Lowering/types.cir new file mode 100644 index 000000000000..12bb892bd4c4 --- /dev/null +++ b/clang/test/CIR/Lowering/types.cir @@ -0,0 +1,14 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!void = !cir.void +module { + cir.func @testTypeLowering() { + // Should lower void pointers as opaque pointers. + %0 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: llvm.mlir.zero : !llvm.ptr + %1 = cir.const(#cir.ptr : !cir.ptr>) : !cir.ptr> + // CHECK: llvm.mlir.zero : !llvm.ptr + cir.return + } +} diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir new file mode 100644 index 000000000000..a5ea94324b55 --- /dev/null +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -0,0 +1,63 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr + + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(dec, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr + cir.return + } + +// MLIR: = llvm.mlir.constant(1 : i32) +// MLIR: = llvm.add +// MLIR: = llvm.mlir.constant(1 : i32) +// MLIR: = llvm.sub + +// LLVM: = add i32 %[[#]], 1 +// LLVM: = sub i32 %[[#]], 1 + + cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { + // MLIR: llvm.func @floatingPoint + %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, cir.ptr + cir.store %arg1, %1 : !cir.double, cir.ptr + + %2 = cir.load %0 : cir.ptr , !cir.float + %3 = cir.unary(inc, %2) : !cir.float, !cir.float + cir.store %3, %0 : !cir.float, cir.ptr + // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32 + // MLIR: = llvm.fadd %[[#F_ONE]], %{{[0-9]+}} : f32 + + %4 = cir.load %0 : cir.ptr , !cir.float + %5 = cir.unary(dec, %4) : !cir.float, !cir.float + cir.store %5, %0 : !cir.float, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f32) : f32 + // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f32 + + %6 = cir.load %1 : cir.ptr , !cir.double + %7 = cir.unary(inc, %6) : !cir.double, !cir.double + cir.store %7, %1 : !cir.double, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 + + %8 = cir.load %1 : cir.ptr , !cir.double + %9 = cir.unary(dec, %8) : !cir.double, !cir.double + cir.store %9, %1 : !cir.double, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 + // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f64 + + cir.return + } +} diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir new file mode 100644 index 000000000000..21b12755ae02 --- /dev/null +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -0,0 +1,82 @@ +// RUN: cir-opt %s 
-cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int +module { + cir.func @foo() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %2, %1 : !s32i, cir.ptr + %3 = cir.load %1 : cir.ptr , !s32i + %4 = cir.unary(not, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !s32i + cir.return %5 : !s32i + } + +// MLIR: = llvm.load +// MLIR: = llvm.mlir.constant(-1 : i32) +// MLIR: = llvm.xor + +// LLVM: = xor i32 -1, %[[#]] + + + cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { + // MLIR: llvm.func @floatingPoint + %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, cir.ptr + cir.store %arg1, %1 : !cir.double, cir.ptr + %2 = cir.load %0 : cir.ptr , !cir.float + %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool + // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 + // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 + // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 + %4 = cir.unary(not, %3) : !cir.bool, !cir.bool + // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 + %5 = cir.load %1 : cir.ptr , !cir.double + %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool + // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 + // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 + // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 + cir.return + } + + cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: !cir.float) { + // MLIR: llvm.func @CStyleValueNegation + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %3 : !cir.float, cir.ptr + + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + %8 = cir.cast(bool_to_int, %7 : !cir.bool), !s32i + // MLIR: %[[#INT:]] = llvm.load %{{.+}} : !llvm.ptr + // MLIR: %[[#IZERO:]] = llvm.mlir.constant(0 : i32) : i32 + // MLIR: %[[#ICMP:]] = llvm.icmp "ne" %[[#INT]], %[[#IZERO]] : i32 + // MLIR: %[[#IEXT:]] = llvm.zext %[[#ICMP]] : i1 to i8 + // MLIR: %[[#IONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 + // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 + + %17 = cir.load %3 : cir.ptr , !cir.float + %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool + %19 = cir.unary(not, %18) : !cir.bool, !cir.bool + %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i + // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr + // MLIR: %[[#FZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 + // MLIR: %[[#FCMP:]] = llvm.fcmp "une" %[[#FLOAT]], %[[#FZERO]] : f32 + // MLIR: %[[#FEXT:]] = llvm.zext %[[#FCMP]] : i1 to i8 + // MLIR: %[[#FONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: %[[#FXOR:]] = llvm.xor %[[#FEXT]], %[[#FONE]] : i8 + // MLIR: = llvm.zext %[[#FXOR]] : 
i8 to i32 + + cir.return + } +} diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir new file mode 100644 index 000000000000..dbf71c2833bd --- /dev/null +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -0,0 +1,43 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr + + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(plus, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(minus, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr + cir.return + } + +// MLIR: %[[#INPUT_PLUS:]] = llvm.load +// MLIR: llvm.store %[[#INPUT_PLUS]] +// MLIR: %[[#INPUT_MINUS:]] = llvm.load +// MLIR: %[[ZERO:[a-z0-9_]+]] = llvm.mlir.constant(0 : i32) +// MLIR: llvm.sub %[[ZERO]], %[[#INPUT_MINUS]] + + cir.func @floatingPoints(%arg0: !cir.double) { + // MLIR: llvm.func @floatingPoints(%arg0: f64) + %0 = cir.alloca !cir.double, cir.ptr , ["X", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.double, cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.double + %2 = cir.unary(plus, %1) : !cir.double, !cir.double + // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr + // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 + %3 = cir.load %0 : cir.ptr , !cir.double + %4 = cir.unary(minus, %3) : !cir.double, !cir.double + // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 + // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 + cir.return + } +} diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir new file mode 100644 index 000000000000..dac1006cd8d2 --- /dev/null +++ b/clang/test/CIR/Lowering/unions.cir @@ -0,0 +1,42 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s16i = !cir.int +!s32i = !cir.int +#true = #cir.bool : !cir.bool +!ty_22U122 = !cir.struct +!ty_22U222 = !cir.struct +!ty_22U322 = !cir.struct +module { + // Should lower union to struct with only the largest member. + cir.global external @u1 = #cir.zero : !ty_22U122 + // CHECK: llvm.mlir.global external @u1() {addr_space = 0 : i32} : !llvm.struct<"union.U1", (i32)> + + // Should recursively find the largest member if there are nested unions. + cir.global external @u2 = #cir.zero : !ty_22U222 + cir.global external @u3 = #cir.zero : !ty_22U322 + // CHECK: llvm.mlir.global external @u2() {addr_space = 0 : i32} : !llvm.struct<"union.U2", (f64)> + // CHECK: llvm.mlir.global external @u3() {addr_space = 0 : i32} : !llvm.struct<"union.U3", (i32)> + + // CHECK: llvm.func @test + cir.func @test(%arg0: !cir.ptr) { + + // Should store directly to the union's base address. + %5 = cir.const(#true) : !cir.bool + %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + cir.store %5, %6 : !cir.bool, cir.ptr + // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 + // The bitcast is just to bypass the type checker. It will be replaced by an opaque pointer. + // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr + // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] : i8, !llvm.ptr + + // Should load directly from the union's base address.
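+    // As with the store above, the load below is expected to reuse the + // union's base address through a bitcast (no GEP), since every union + // member starts at offset zero.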
+    %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + %8 = cir.load %7 : cir.ptr , !cir.bool + // The bitcast is just to bypass the type checker. It will be replaced by an opaque pointer. + // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr + // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr -> i8 + + cir.return + } +} diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir new file mode 100644 index 000000000000..8e5cb670fa30 --- /dev/null +++ b/clang/test/CIR/Lowering/variadics.cir @@ -0,0 +1,40 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR + +!s32i = !cir.int +!u32i = !cir.int +!u8i = !cir.int + +!ty_22__va_list_tag22 = !cir.struct, !cir.ptr} #cir.record.decl.ast> + +module { + cir.func @average(%arg0: !s32i, ...) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["count", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.array, cir.ptr >, ["args"] {alignment = 16 : i64} + %3 = cir.alloca !cir.array, cir.ptr >, ["args_copy"] {alignment = 16 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.start %4 : !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vastart %{{[0-9]+}} : !llvm.ptr + %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr + %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> + // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vacopy %13 to %{{[0-9]+}} : !llvm.ptr, !llvm.ptr + %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.end %7 : !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr + %8 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %8, %1 : !s32i, cir.ptr + %9 = cir.load %1 : cir.ptr , !s32i + cir.return %9 : !s32i + } +} diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp new file mode 100644 index 000000000000..a7c2a55f3bc4 --- /dev/null +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -0,0 +1,338 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-opt %t.cir -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); +typedef long long vll2 __attribute__((vector_size(16))); +typedef unsigned short vus2 __attribute__((vector_size(4))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change when + // fully implemented.
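+  // For now each constant vector is built lane by lane: an llvm.mlir.undef + // of the vector type followed by one llvm.insertelement per element (see + // the CHECK lines below). A full implementation would presumably fold + // this into a single dense constant, roughly: + //   %c = llvm.mlir.constant(dense<[1, 2, 3, 4]> : vector<4xi32>) : vector<4xi32> + // (a hypothetical future form, not what is checked here).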
+ vi4 a = { 1, 2, 3, 4 }; + // CHECK: %[[#T30:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T31:]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %[[#T32:]] = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %[[#T33:]] = llvm.mlir.constant(4 : i32) : i32 + // CHECK: %[[#T34:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T35:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T36:]] = llvm.insertelement %[[#T30]], %[[#T34]][%[[#T35]] : i64] : vector<4xi32> + // CHECK: %[[#T37:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T38:]] = llvm.insertelement %[[#T31]], %[[#T36]][%[[#T37]] : i64] : vector<4xi32> + // CHECK: %[[#T39:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T40:]] = llvm.insertelement %[[#T32]], %[[#T38]][%[[#T39]] : i64] : vector<4xi32> + // CHECK: %[[#T41:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T42:]] = llvm.insertelement %[[#T33]], %[[#T40]][%[[#T41]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T42]], %[[#T3:]] : vector<4xi32>, !llvm.ptr + + // Non-const vector initialization. + vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %[[#T43:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T44:]] = llvm.mlir.constant(5 : i32) : i32 + // CHECK: %[[#T45:]] = llvm.mlir.constant(6 : i32) : i32 + // CHECK: %[[#T46:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] : i32 + // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T50:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T51:]] = llvm.insertelement %[[#T43]], %[[#T49]][%[[#T50]] : i64] : vector<4xi32> + // CHECK: %[[#T52:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T53:]] = llvm.insertelement %[[#T44]], %[[#T51]][%[[#T52]] : i64] : vector<4xi32> + // CHECK: %[[#T54:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T55:]] = llvm.insertelement %[[#T45]], %[[#T53]][%[[#T54]] : i64] : vector<4xi32> + // CHECK: %[[#T56:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T57:]] = llvm.insertelement %[[#T48]], %[[#T55]][%[[#T56]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T57]], %[[#T5:]] : vector<4xi32>, !llvm.ptr + + // Vector to vector conversion + vd2 bb = (vd2)b; + // CHECK: %[[#bval:]] = llvm.load %[[#bmem:]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#bbval:]] = llvm.bitcast %[[#bval]] : vector<4xi32> to vector<2xf64> + // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] : vector<2xf64>, !llvm.ptr + + // Scalar to vector conversion, a.k.a. vector splat. + b = a + 7; + // CHECK: %[[#undef:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#zeroInt:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#inserted:]] = llvm.insertelement %[[#seven:]], %[[#undef]][%[[#zeroInt]] : i64] : vector<4xi32> + // CHECK: %[[#shuffled:]] = llvm.shufflevector %[[#inserted]], %[[#undef]] [0, 0, 0, 0] : vector<4xi32> + + // Extract element. + int c = a[x]; + // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T59:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T60:]] = llvm.extractelement %[[#T58]][%[[#T59]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T60]], %[[#T7:]] : i32, !llvm.ptr + + // Insert element. 
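+  // Since vectors are lowered as SSA values, updating one lane is a + // whole-vector round trip: load the vector, llvm.insertelement the new + // lane, and store the result back (per the CHECK lines below).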
+ a[x] = x; + // CHECK: %[[#T61:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T62:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T63:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T64]], %[[#T3]] : vector<4xi32>, !llvm.ptr + + // Binary arithmetic operators. + vi4 d = a + b; + // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T66:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T67:]] = llvm.add %[[#T65]], %[[#T66]] : vector<4xi32> + // CHECK: llvm.store %[[#T67]], %[[#T9:]] : vector<4xi32>, !llvm.ptr + vi4 e = a - b; + // CHECK: %[[#T68:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T69:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T70:]] = llvm.sub %[[#T68]], %[[#T69]] : vector<4xi32> + // CHECK: llvm.store %[[#T70]], %[[#T11:]] : vector<4xi32>, !llvm.ptr + vi4 f = a * b; + // CHECK: %[[#T71:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T73:]] = llvm.mul %[[#T71]], %[[#T72]] : vector<4xi32> + // CHECK: llvm.store %[[#T73]], %[[#T13:]] : vector<4xi32>, !llvm.ptr + vi4 g = a / b; + // CHECK: %[[#T74:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T75:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T76:]] = llvm.sdiv %[[#T74]], %[[#T75]] : vector<4xi32> + // CHECK: llvm.store %[[#T76]], %[[#T15:]] : vector<4xi32>, !llvm.ptr + vi4 h = a % b; + // CHECK: %[[#T77:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T78:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T79:]] = llvm.srem %[[#T77]], %[[#T78]] : vector<4xi32> + // CHECK: llvm.store %[[#T79]], %[[#T17:]] : vector<4xi32>, !llvm.ptr + vi4 i = a & b; + // CHECK: %[[#T80:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T81:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T82:]] = llvm.and %[[#T80]], %[[#T81]] : vector<4xi32> + // CHECK: llvm.store %[[#T82]], %[[#T19:]] : vector<4xi32>, !llvm.ptr + vi4 j = a | b; + // CHECK: %[[#T83:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T84:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T85:]] = llvm.or %[[#T83]], %[[#T84]] : vector<4xi32> + // CHECK: llvm.store %[[#T85]], %[[#T21:]] : vector<4xi32>, !llvm.ptr + vi4 k = a ^ b; + // CHECK: %[[#T86:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T87:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T88:]] = llvm.xor %[[#T86]], %[[#T87]] : vector<4xi32> + // CHECK: llvm.store %[[#T88]], %[[#T23:]] : vector<4xi32>, !llvm.ptr + + // Unary arithmetic operators. 
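+  // Expected patterns, per the checks below: +a is a plain copy, -a is a + // subtraction from the zero vector, and ~a is an xor against an all-ones + // vector that is itself built lane by lane.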
+ vi4 l = +a; + // CHECK: %[[#T89:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: llvm.store %[[#T89]], %[[#T25:]] : vector<4xi32>, !llvm.ptr + vi4 m = -a; + // CHECK: %[[#T90:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T91:]] = llvm.mlir.zero : vector<4xi32> + // CHECK: %[[#T92:]] = llvm.sub %[[#T91]], %[[#T90]] : vector<4xi32> + // CHECK: llvm.store %[[#T92]], %[[#T27:]] : vector<4xi32>, !llvm.ptr + vi4 n = ~a; + // CHECK: %[[#T93:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T94:]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[#T95:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T96:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T97:]] = llvm.insertelement %[[#T94]], %[[#T95]][%[[#T96]] : i64] : vector<4xi32> + // CHECK: %[[#T98:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T99:]] = llvm.insertelement %[[#T94]], %[[#T97]][%[[#T98]] : i64] : vector<4xi32> + // CHECK: %[[#T100:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T101:]] = llvm.insertelement %[[#T94]], %[[#T99]][%[[#T100]] : i64] : vector<4xi32> + // CHECK: %[[#T102:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> + // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> + // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr + + // Ternary conditional operator + vi4 tc = a ? b : d; + // CHECK: %[[#Zero:]] = llvm.mlir.zero : vector<4xi32> + // CHECK: %[[#BitVec:]] = llvm.icmp "ne" %[[#A:]], %[[#Zero]] : vector<4xi32> + // CHECK: %[[#Res:]] = llvm.select %[[#BitVec]], %[[#B:]], %[[#D:]] : vector<4xi1>, vector<4xi32> + + // Comparisons + vi4 o = a == b; + // CHECK: %[[#T105:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T106:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T107:]] = llvm.icmp "eq" %[[#T105]], %[[#T106]] : vector<4xi32> + // CHECK: %[[#T108:]] = llvm.sext %[[#T107]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T108]], %[[#To:]] : vector<4xi32>, !llvm.ptr + vi4 p = a != b; + // CHECK: %[[#T109:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T110:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T111:]] = llvm.icmp "ne" %[[#T109]], %[[#T110]] : vector<4xi32> + // CHECK: %[[#T112:]] = llvm.sext %[[#T111]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T112]], %[[#Tp:]] : vector<4xi32>, !llvm.ptr + vi4 q = a < b; + // CHECK: %[[#T113:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T114:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T115:]] = llvm.icmp "slt" %[[#T113]], %[[#T114]] : vector<4xi32> + // CHECK: %[[#T116:]] = llvm.sext %[[#T115]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T116]], %[[#Tq:]] : vector<4xi32>, !llvm.ptr + vi4 r = a > b; + // CHECK: %[[#T117:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T118:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T119:]] = llvm.icmp "sgt" %[[#T117]], %[[#T118]] : vector<4xi32> + // CHECK: %[[#T120:]] = llvm.sext %[[#T119]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T120]], %[[#Tr:]] : vector<4xi32>, !llvm.ptr + vi4 s = a <= b; + // CHECK: %[[#T121:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T122:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T123:]] = llvm.icmp "sle" %[[#T121]], 
%[[#T122]] : vector<4xi32> + // CHECK: %[[#T124:]] = llvm.sext %[[#T123]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T124]], %[[#Ts:]] : vector<4xi32>, !llvm.ptr + vi4 t = a >= b; + // CHECK: %[[#T125:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T126:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T127:]] = llvm.icmp "sge" %[[#T125]], %[[#T126]] : vector<4xi32> + // CHECK: %[[#T128:]] = llvm.sext %[[#T127]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T128]], %[[#Tt:]] : vector<4xi32>, !llvm.ptr + + // __builtin_shufflevector + vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); + // CHECK: %[[#Tu:]] = llvm.shufflevector %[[#bsva:]], %[[#bsvb:]] [7, 5, 3, 1] : vector<4xi32> + vi4 v = __builtin_shufflevector(a, b); + // CHECK: %[[#sv_a:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv_b:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv0:]] = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %[[#sv1:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#sv2:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#sv3:]] = llvm.insertelement %[[#sv0]], %[[#sv1]][%[[#sv2]] : i64] : vector<4xi32> + // CHECK: %[[#sv4:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#sv5:]] = llvm.insertelement %[[#sv0]], %[[#sv3]][%[[#sv4]] : i64] : vector<4xi32> + // CHECK: %[[#sv6:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#sv7:]] = llvm.insertelement %[[#sv0]], %[[#sv5]][%[[#sv6]] : i64] : vector<4xi32> + // CHECK: %[[#sv8:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#sv9:]] = llvm.insertelement %[[#sv0]], %[[#sv7]][%[[#sv8]] : i64] : vector<4xi32> + // CHECK: %[[#svA:]] = llvm.and %[[#sv_b]], %[[#sv9]] : vector<4xi32> + // CHECK: %[[#svB:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#svC:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#svD:]] = llvm.extractelement %[[#svA]][%[[#svC]] : i64] : vector<4xi32> + // CHECK: %[[#svE:]] = llvm.extractelement %[[#sv_a]][%[[#svD]] : i32] : vector<4xi32> + // CHECK: %[[#svF:]] = llvm.insertelement %[[#svE]], %[[#svB]][%[[#svC]] : i64] : vector<4xi32> + // CHECK: %[[#svG:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#svH:]] = llvm.extractelement %[[#svA]][%[[#svG]] : i64] : vector<4xi32> + // CHECK: %[[#svI:]] = llvm.extractelement %[[#sv_a]][%[[#svH]] : i32] : vector<4xi32> + // CHECK: %[[#svJ:]] = llvm.insertelement %[[#svI]], %[[#svF]][%[[#svG]] : i64] : vector<4xi32> + // CHECK: %[[#svK:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#svL:]] = llvm.extractelement %[[#svA]][%[[#svK]] : i64] : vector<4xi32> + // CHECK: %[[#svM:]] = llvm.extractelement %[[#sv_a]][%[[#svL]] : i32] : vector<4xi32> + // CHECK: %[[#svN:]] = llvm.insertelement %[[#svM]], %[[#svJ]][%[[#svK]] : i64] : vector<4xi32> + // CHECK: %[[#svO:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#svP:]] = llvm.extractelement %[[#svA]][%[[#svO]] : i64] : vector<4xi32> + // CHECK: %[[#svQ:]] = llvm.extractelement %[[#sv_a]][%[[#svP:]] : i32] : vector<4xi32> + // CHECK: %[[#svR:]] = llvm.insertelement %[[#svQ]], %[[#svN]][%[[#svO]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#svR]], %[[#sv_v:]] : vector<4xi32>, !llvm.ptr +} + +void vector_double_test(int x, double y) { + + // Vector constant. Not yet implemented. Expected results will change when + // fully implemented. 
+ vd2 a = { 1.5, 2.5 }; + // CHECK: %[[#T22:]] = llvm.mlir.constant(1.500000e+00 : f64) : f64 + // CHECK: %[[#T23:]] = llvm.mlir.constant(2.500000e+00 : f64) : f64 + // CHECK: %[[#T24:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T25:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T26:]] = llvm.insertelement %[[#T22]], %[[#T24]][%[[#T25]] : i64] : vector<2xf64> + // CHECK: %[[#T27:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T28:]] = llvm.insertelement %[[#T23]], %[[#T26]][%[[#T27]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T28]], %[[#T5:]] : vector<2xf64>, !llvm.ptr + + // Non-const vector initialization. + vd2 b = { y, y + 1.0 }; + // CHECK: %[[#T29:]] = llvm.load %[[#T3:]] : !llvm.ptr -> f64 + // CHECK: %[[#T30:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T31:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // CHECK: %[[#T32:]] = llvm.fadd %[[#T30]], %[[#T31]] : f64 + // CHECK: %[[#T33:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T34:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T35:]] = llvm.insertelement %[[#T29]], %[[#T33]][%[[#T34]] : i64] : vector<2xf64> + // CHECK: %[[#T36:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T37:]] = llvm.insertelement %[[#T32]], %[[#T35]][%[[#T36]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T37]], %[[#T7:]] : vector<2xf64>, !llvm.ptr + + // Extract element. + double c = a[x]; + // CHECK: %[[#T38:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T39:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T40]], %[[#T9:]] : f64, !llvm.ptr + + // Insert element. + a[x] = y; + // CHECK: %[[#T41:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T43:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T44:]] = llvm.insertelement %[[#T41]], %[[#T43]][%[[#T42]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T44]], %[[#T5]] : vector<2xf64>, !llvm.ptr + + // Binary arithmetic operators. + vd2 d = a + b; + // CHECK: %[[#T45:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T46:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T47:]] = llvm.fadd %[[#T45]], %[[#T46]] : vector<2xf64> + // CHECK: llvm.store %[[#T47]], %[[#T11:]] : vector<2xf64>, !llvm.ptr + vd2 e = a - b; + // CHECK: %[[#T48:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T49:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T50:]] = llvm.fsub %[[#T48]], %[[#T49]] : vector<2xf64> + // CHECK: llvm.store %[[#T50]], %[[#T13:]] : vector<2xf64>, !llvm.ptr + vd2 f = a * b; + // CHECK: %[[#T51:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T52:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T53:]] = llvm.fmul %[[#T51]], %[[#T52]] : vector<2xf64> + // CHECK: llvm.store %[[#T53]], %[[#T15:]] : vector<2xf64>, !llvm.ptr + vd2 g = a / b; + // CHECK: %[[#T54:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T55:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T56:]] = llvm.fdiv %[[#T54]], %[[#T55]] : vector<2xf64> + // CHECK: llvm.store %[[#T56]], %[[#T17:]] : vector<2xf64>, !llvm.ptr + + // Unary arithmetic operators. 
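+  // Unlike the integer case above, floating-point negation is expected to + // map to a single llvm.fneg rather than a subtraction from zero.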
+ vd2 l = +a; + // CHECK: %[[#T57:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: llvm.store %[[#T57]], %[[#T19:]] : vector<2xf64>, !llvm.ptr + vd2 m = -a; + // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> + // CHECK: llvm.store %[[#T59]], %[[#T21:]] : vector<2xf64>, !llvm.ptr + + // Comparisons + vll2 o = a == b; + // CHECK: %[[#T60:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T61:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T62:]] = llvm.fcmp "oeq" %[[#T60]], %[[#T61]] : vector<2xf64> + // CHECK: %[[#T63:]] = llvm.sext %[[#T62]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T63]], %[[#To:]] : vector<2xi64>, !llvm.ptr + vll2 p = a != b; + // CHECK: %[[#T64:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T65:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T66:]] = llvm.fcmp "une" %[[#T64]], %[[#T65]] : vector<2xf64> + // CHECK: %[[#T67:]] = llvm.sext %[[#T66]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T67]], %[[#Tp:]] : vector<2xi64>, !llvm.ptr + vll2 q = a < b; + // CHECK: %[[#T68:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T69:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T70:]] = llvm.fcmp "olt" %[[#T68]], %[[#T69]] : vector<2xf64> + // CHECK: %[[#T71:]] = llvm.sext %[[#T70]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T71]], %[[#Tq:]] : vector<2xi64>, !llvm.ptr + vll2 r = a > b; + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T73:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T74:]] = llvm.fcmp "ogt" %[[#T72]], %[[#T73]] : vector<2xf64> + // CHECK: %[[#T75:]] = llvm.sext %[[#T74]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T75]], %[[#Tr:]] : vector<2xi64>, !llvm.ptr + vll2 s = a <= b; + // CHECK: %[[#T76:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T77:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T78:]] = llvm.fcmp "ole" %[[#T76]], %[[#T77]] : vector<2xf64> + // CHECK: %[[#T79:]] = llvm.sext %[[#T78]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T79]], %[[#Ts:]] : vector<2xi64>, !llvm.ptr + vll2 t = a >= b; + // CHECK: %[[#T80:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T81:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T82:]] = llvm.fcmp "oge" %[[#T80]], %[[#T81]] : vector<2xf64> + // CHECK: %[[#T83:]] = llvm.sext %[[#T82]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T83]], %[[#Tt:]] : vector<2xi64>, !llvm.ptr + + // __builtin_convertvector + vus2 w = __builtin_convertvector(a, vus2); + // CHECK: %[[#cv0:]] = llvm.fptoui %[[#cv1:]] : vector<2xf64> to vector<2xi16> +} diff --git a/clang/test/CIR/Transforms/Inputs/folly-coro.h b/clang/test/CIR/Transforms/Inputs/folly-coro.h new file mode 100644 index 000000000000..21e4b337eb22 --- /dev/null +++ b/clang/test/CIR/Transforms/Inputs/folly-coro.h @@ -0,0 +1,44 @@ +#include "std.h" + +namespace folly { +namespace coro { + +using std::suspend_always; +using std::suspend_never; +using std::coroutine_handle; + +using SemiFuture = int; + +template +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_value(T); + void unhandled_exception(); + auto 
yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + T await_resume(); +}; + +template<> +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_void() noexcept; + void unhandled_exception() noexcept; + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} + SemiFuture semi(); +}; + +} // coro +} // folly \ No newline at end of file diff --git a/clang/test/CIR/Transforms/Inputs/std.h b/clang/test/CIR/Transforms/Inputs/std.h new file mode 100644 index 000000000000..1bc2b8504784 --- /dev/null +++ b/clang/test/CIR/Transforms/Inputs/std.h @@ -0,0 +1,29 @@ +namespace std { + +template +struct coroutine_traits { using promise_type = typename Ret::promise_type; }; + +template +struct coroutine_handle { + static coroutine_handle from_address(void *) noexcept; +}; +template <> +struct coroutine_handle { + template + coroutine_handle(coroutine_handle) noexcept; + static coroutine_handle from_address(void *); +}; + +struct suspend_always { + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +struct suspend_never { + bool await_ready() noexcept { return true; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +} // namespace std \ No newline at end of file diff --git a/clang/test/CIR/Transforms/idiom-iter.cpp b/clang/test/CIR/Transforms/idiom-iter.cpp new file mode 100644 index 000000000000..5591baa04ff6 --- /dev/null +++ b/clang/test/CIR/Transforms/idiom-iter.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t.cir + +namespace std { +template struct array { + T arr[N]; + struct iterator { + T *p; + constexpr explicit iterator(T *p) : p(p) {} + constexpr bool operator!=(iterator o) { return p != o.p; } + constexpr iterator &operator++() { ++p; return *this; } + constexpr T &operator*() { return *p; } + }; + constexpr iterator begin() { return iterator(arr); } +}; +} + +void iter_test() +{ + std::array v2 = {1, 2, 3}; + (void)v2.begin(); // no remark should be produced. 
+} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp new file mode 100644 index 000000000000..7264444cd98f --- /dev/null +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -0,0 +1,50 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t.cir + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE + +// PASS_ENABLED: IR Dump After IdiomRecognizer (cir-idiom-recognizer) + +#include "std-cxx.h" + +int test_find(unsigned char n = 3) +{ + unsigned num_found = 0; + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + auto f = std::find(v.begin(), v.end(), n); // expected-remark {{found call to std::find()}} + // expected-remark@-1 {{found call to begin() iterator}} + // expected-remark@-2 {{found call to end() iterator}} + + // BEFORE-IDIOM: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv( + // AFTER-IDIOM: {{.*}} cir.iterator_begin(@_ZNSt5arrayIhLj9EE5beginEv, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv( + + // BEFORE-IDIOM: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv( + // AFTER-IDIOM: {{.*}} cir.iterator_end(@_ZNSt5arrayIhLj9EE3endEv, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv( + + // BEFORE-IDIOM: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // AFTER-IDIOM: {{.*}} cir.std.find(@_ZSt4findIPhhET_S1_S1_RKT0_, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + + if (f != v.end()) // expected-remark {{found call to end() iterator}} + num_found++; + return num_found; +} + +namespace yolo { +template struct array { + T arr[N]; + typedef T value_type; + typedef value_type* iterator; + constexpr iterator begin() { return iterator(arr); } +}; +} + +int iter_test() +{ + yolo::array v = {1, 2, 3}; + (void)v.begin(); // no remark should be produced. 
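+                    // Presumably only the known std:: declarations are + // matched by the recognizer, so this yolo::array begin() is ignored.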
+} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp new file mode 100644 index 000000000000..4812e72d8037 --- /dev/null +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -0,0 +1,66 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +int test1(unsigned char n = 3) +{ + // CHECK: test1 + unsigned num_found = 0; + // CHECK: %[[pattern_addr:.*]] = cir.alloca !u8i, cir.ptr , ["n" + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + + auto f = std::find(v.begin(), v.end(), n); + + // CHECK: %[[first:.*]] = cir.call @_ZNSt5arrayIhLj9EE5beginEv + // CHECK: %[[last:.*]] = cir.call @_ZNSt5arrayIhLj9EE3endEv + // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_addr]] : cir.ptr , !u8i + // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i + + // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // CHECK: %[[array_size:.*]] = cir.const(#cir.int<9> : !u64i) : !u64i + + // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) + // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool + // CHECK: cir.ternary(%[[cmp_res]], true { + // CHECK: cir.yield %[[last]] : !cir.ptr + // CHECK: }, false { + // CHECK: cir.yield %[[memchr_res]] : !cir.ptr + // CHECK: }) : (!cir.bool) -> !cir.ptr + + if (f != v.end()) + num_found++; + + return num_found; +} + +unsigned char* test2(unsigned char* first, unsigned char* last, unsigned char v) +{ + return std::find(first, last, v); + // CHECK: test2 + + // CHECK: %[[first_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["first", init] + // CHECK: %[[last_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["last", init] + // CHECK: %[[pattern_storage:.*]] = cir.alloca !u8i, cir.ptr , ["v", init] + // CHECK: %[[first:.*]] = cir.load %[[first_storage]] + // CHECK: %[[last:.*]] = cir.load %[[last_storage]] + // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_storage]] : cir.ptr , !u8i + // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i + + // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // CHECK: %[[array_size:.*]] = cir.ptr_diff(%[[last]], %[[first]]) : !cir.ptr -> !u64i + + // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) + // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool + // CHECK: cir.ternary(%[[cmp_res]], true { + // CHECK: cir.yield %[[last]] : !cir.ptr + // CHECK: }, false { + // CHECK: cir.yield %[[memchr_res]] : !cir.ptr + // CHECK: }) : (!cir.bool) -> !cir.ptr +} diff --git a/clang/test/CIR/Transforms/lib-opt.cpp b/clang/test/CIR/Transforms/lib-opt.cpp new file mode 100644 index 000000000000..17895e567645 --- /dev/null +++ 
b/clang/test/CIR/Transforms/lib-opt.cpp @@ -0,0 +1,3 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR + +// CIR: IR Dump After LibOpt (cir-lib-opt) \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp new file mode 100644 index 000000000000..fb89c0e6fd8f --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -0,0 +1,73 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef struct InfoRaw { + SType type; + const void* __attribute__((__may_alias__)) next; + unsigned int fa; + unsigned f; + unsigned s; + unsigned w; + unsigned h; + unsigned g; + unsigned a; +} InfoRaw; + +typedef unsigned long long FlagsPriv; +typedef struct InfoPriv { + SType type; + void* __attribute__((__may_alias__)) next; + FlagsPriv flags; +} InfoPriv; + +static const FlagsPriv PrivBit = 0x00000001; + +void escape_info(InfoRaw *info); +typedef SType ( *FnPtr)(unsigned s, const InfoRaw* i); +struct X { + struct entries { + FnPtr wildfn = nullptr; + }; + static entries e; +}; + +void exploded_fields(bool cond, int c) { + for (int i = 0; i < c; i++) { + InfoRaw info = {INFO_ENUM_0}; // expected-note {{invalidated here}} + if (cond) { + InfoPriv privTmp = {INFO_ENUM_1}; + privTmp.flags = PrivBit; + info.next = &privTmp; + } // expected-note {{pointee 'privTmp' invalidated at end of scope}} + + // If the 'if' above is taken, info.next is invalidated at the end of the scope; otherwise + // it's also invalid because it was initialized with 'nullptr'. This could be a noisy + // check if calls like `escape_info` are used to further initialize `info`. + + escape_info(&info); // expected-remark {{pset => { invalid, nullptr }}} + // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} + X::e.wildfn(0, &info); // expected-remark {{pset => { invalid, nullptr }}} + // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} + } +} + +void exploded_fields1(bool cond, unsigned t) { + { + InfoRaw info = {INFO_ENUM_0, &t}; + if (cond) { + InfoPriv privTmp = {INFO_ENUM_1}; + privTmp.flags = PrivBit; + info.next = &privTmp; + } + + // A warning is not emitted here: with no context to infer anything + // about `cond`, the check would be too noisy, given that `info.next` + // was not null-initialized.
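+    // (Contrast with exploded_fields above, where aggregate initialization + // leaves `info.next` null, so the invalid/nullptr pset is diagnosed on + // every path.)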
+ + escape_info(&info); // expected-remark {{pset => { t }}} + } +} diff --git a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp new file mode 100644 index 000000000000..cf101b790491 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -fclangir -fclangir-lifetime-check="history=all;remarks=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "folly-coro.h" + +folly::coro::Task go(int const& val); +folly::coro::Task go1() { + auto task = go(1); // expected-note {{coroutine bound to resource with expired lifetime}} + // expected-note@-1 {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} + +folly::coro::Task go1_lambda() { + auto task = [i = 3]() -> folly::coro::Task { // expected-note {{coroutine bound to lambda with expired lifetime}} + co_return i; + }(); // expected-note {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} + +folly::coro::Task go2_lambda() { + auto task = []() -> folly::coro::Task { // expected-note {{coroutine bound to lambda with expired lifetime}} + co_return 3; + }(); // expected-note {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} + +folly::coro::Task go3_lambda() { + auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; + auto task = fn(3); // expected-note {{coroutine bound to resource with expired lifetime}} + // expected-note@-1 {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp new file mode 100644 index 000000000000..617e18edf499 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -Wno-return-stack-address -fclangir -fclangir-lifetime-check="history=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +// Check also implements: +// EXP61-CPP. 
A lambda object must not outlive any of its reference captured objects + +// This can be diagnosed by clang with -Wreturn-stack-address +auto g() { + int i = 12; // expected-note {{declared here but invalid after enclosing function ends}} + return [&] { // expected-warning {{returned lambda captures local variable}} + i += 100; + return i; + }; +} + +// This cannot be diagnosed by -Wreturn-stack-address +auto g2() { + int i = 12; // expected-note {{declared here but invalid after enclosing function ends}} + auto lam = [&] { + i += 100; + return i; + }; + return lam; // expected-warning {{returned lambda captures local variable}} +} + +auto g3(int val) { + auto outer = [val] { + int i = val; // expected-note {{declared here but invalid after enclosing lambda ends}} + auto inner = [&] { + i += 30; + return i; + }; + return inner; // expected-warning {{returned lambda captures local variable}} + }; + return outer(); +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp new file mode 100644 index 000000000000..23643c821884 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=all;remarks=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +struct [[gsl::Owner(int)]] MyIntOwner { + int val; + MyIntOwner(int v) : val(v) {} + void changeInt(int i); + int &operator*(); + int read() const; +}; + +struct [[gsl::Pointer(int)]] MyIntPointer { + int *ptr; + MyIntPointer(int *p = nullptr) : ptr(p) {} + MyIntPointer(const MyIntOwner &); + int &operator*(); + MyIntOwner toOwner(); + int read() { return *ptr; } +}; + +void yolo() { + MyIntPointer p; + { + MyIntOwner o(1); + p = o; + *p = 3; // expected-remark {{pset => { o__1' }}} + } // expected-note {{pointee 'o' invalidated at end of scope}} + *p = 4; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} +} + +void yolo2() { + MyIntPointer p; + MyIntOwner o(1); + p = o; + (void)o.read(); + (void)p.read(); // expected-remark {{pset => { o__1' }}} + o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + p = o; + (void)p.read(); // expected-remark {{pset => { o__2' }}} + o.changeInt(33); // expected-note {{invalidated by non-const use of owner type}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} +} + +void yolo3() { + MyIntPointer p, q; + MyIntOwner o(1); + p = o; + q = o; + (void)q.read(); // expected-remark {{pset => { o__1' }}} + (void)p.read(); // expected-remark {{pset => { o__1' }}} + o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + (void)q.read(); // expected-warning {{use of invalid pointer 'q'}} + // expected-remark@-1 {{pset => { invalid }}} +} + +void yolo4() { + MyIntOwner o0(1); + MyIntOwner o1(2); + MyIntPointer p{o0}, q{o1}; + p.read(); // expected-remark {{pset => { o0__1' }}} + q.read(); // expected-remark {{pset => { o1__1' }}} + o0 = o1; // expected-note {{invalidated by non-const use of owner type}} + p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 
{{pset => { invalid }}} + q.read(); // expected-remark {{pset => { o1__1' }}} +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp new file mode 100644 index 000000000000..e9c6d62b6f64 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "std-cxx.h" + +// expected-no-diagnostics + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef struct InfoRaw { + SType type; + const void* __attribute__((__may_alias__)) next; + unsigned u; +} InfoRaw; + +void swappy(unsigned c) { + std::vector<InfoRaw> images(c); + for (auto& image : images) { + image = {INFO_ENUM_1}; + } + + std::vector<InfoRaw> images2(c); + for (unsigned i = 0; i < c; i++) { + images2[i] = {INFO_ENUM_1}; + } +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp new file mode 100644 index 000000000000..83cef25c54da --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +int *p0() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + return p; +} + +int *p1(bool b = true) { + int *p = nullptr; + if (b) { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid, nullptr }}} + return p; +} + +void p2(int b) { + int *p = nullptr; + switch (int x = 0; b) { + case 1: + p = &x; + case 2: + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr }}} + break; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp new file mode 100644 index 000000000000..383f3b5da626 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -0,0 +1,87 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +int strlen(char const *); + +struct [[gsl::Owner(char *)]] String { + long size; + long capacity; + const char *storage; + char operator[](int); + String() : size{0}, capacity{0} {} + String(char const *s) : size{strlen(s)}, capacity{size}, storage{s} {} +}; + +struct [[gsl::Pointer(int)]] StringView { + long size; + const char *storage; + char operator[](int); + StringView(const String &s) : size{s.size}, storage{s.storage} {} + StringView() : size{0}, storage{nullptr} {} + int getSize() const; +}; + +void sv0() { + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__1' }}} + name = "frobozz"; //
expected-note {{invalidated by non-const use of owner type}} + (void)sv.getSize(); // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} +} + +void sv1() { + StringView sv, sv_other; + String name = "abcdefghijklmnop"; + sv = name; + sv_other = sv; + (void)sv.getSize(); // expected-remark {{pset => { name__1' }}} + (void)sv_other.getSize(); // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + (void)sv.getSize(); // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + (void)sv_other.getSize(); // expected-warning {{use of invalid pointer 'sv_other'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} +} + +void sv2() { + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + char read0 = sv[0]; // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + char read1 = sv[0]; // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + char read2 = sv[0]; // expected-remark {{pset => { name__2' }}} + char read3 = name[1]; // expected-note {{invalidated by non-const use of owner type}} + char read4 = sv[1]; // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} +} + +class Stream { + public: + Stream& operator<<(char); + Stream& operator<<(const StringView &); + // FIXME: conservative for now, but do not invalidate const Owners? + Stream& operator<<(const String &); +}; + +void sv3() { + Stream cout; + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + cout << sv; // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + cout << sv[2]; // expected-warning {{use of invalid pointer 'sv'}} + sv = name; // expected-remark@-1 {{pset => { invalid }}} + cout << sv; // expected-remark {{pset => { name__2' }}} + cout << name; // expected-note {{invalidated by non-const use of owner type}} + cout << sv; // expected-warning {{passing invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp new file mode 100644 index 000000000000..017de9f6495d --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -0,0 +1,48 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +int *p0() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + return p; +} + +int *p1(bool b = true) { + int *p = nullptr; // expected-note {{invalidated here}} + if (b) { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + return p; +} + +void p2() { + int *p = nullptr; // expected-note {{invalidated here}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void p3() { + int *p; + p = nullptr; // expected-note {{invalidated here}} + *p = 
42; // expected-warning {{use of invalid pointer 'p'}} +} + +void p4() { + int *p; // expected-note {{uninitialized here}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void p5() { + int *p = nullptr; + { + int a[10]; + p = &a[0]; + } // expected-note {{pointee 'a' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/clang/test/CIR/Transforms/lifetime-fn-args.cpp b/clang/test/CIR/Transforms/lifetime-fn-args.cpp new file mode 100644 index 000000000000..6c1b297f1b32 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-fn-args.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +struct A { + void* ctx; + void setInfo(void** ctxPtr); +}; + +void A::setInfo(void** ctxPtr) { + if (ctxPtr != nullptr) { + *ctxPtr = ctx; // expected-remark {{pset => { fn_arg:1 }}} + } +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-invalid-option.cpp b/clang/test/CIR/Transforms/lifetime-invalid-option.cpp new file mode 100644 index 000000000000..64486b6aa166 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-invalid-option.cpp @@ -0,0 +1,3 @@ +// RUN: not %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="yolo=invalid,null" -emit-cir %s -o - 2>&1 | FileCheck %s + +// CHECK: clangir pass option 'yolo=invalid,null' not recognized \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp new file mode 100644 index 000000000000..e7ee7aca7cf3 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-always" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +// Loops that do not change psets + +// p1179r1: 2.4.9.1 +// No diagnostic needed, pset(p) = {a} before and after the loop +void valid0(bool b, int j) { + int a[10]; + int *p = &a[0]; + while (j) { + if (b) { + p = &a[j]; + } + j = j - 1; + } + *p = 12; // expected-remark {{pset => { a }}} +} + +// p1179r1: 2.4.9.2 +void valid1(bool b, int j) { + int a[4], c[5]; + int *p = &a[0]; + while (j) { + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-remark {{pset => { a, c }}} + + while (j) { + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-remark {{pset => { a, c }}} +} diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp new file mode 100644 index 000000000000..cf58ddf48f73 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -0,0 +1,56 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +void loop_basic_for() { + int *p = nullptr; // expected-note {{invalidated here}} + for (int i = 0; i < 10; i = i + 1) { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} + +void loop_basic_while() { + int *p = nullptr; // expected-note {{invalidated here}} + int i = 0; + while (i < 10) { + 
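 + // Same shape as loop_basic_for above: each iteration repoints 'p' at a
 + // local that dies when the iteration ends, so the pset observed after the
 + // loop joins the zero-trip path ({ nullptr }) with the looped path ({ invalid }).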
int x = 0; + p = &x; + *p = 42; + i = i + 1; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} + +void loop_basic_dowhile() { + int *p = nullptr; // expected-note {{invalidated here}} + int i = 0; + do { + int x = 0; + p = &x; + *p = 42; + i = i + 1; + } while (i < 10); // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} + +// p1179r1: 2.4.9.3 +void loop0(bool b, int j) { + int a[4], c[4]; + int *p = &a[0]; + while (j) { + // This access is invalidated after the first iteration + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { c, nullptr }}} + p = nullptr; // expected-note {{invalidated here}} + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { a, c, nullptr }}} +} diff --git a/clang/test/CIR/Transforms/lifetime-null-passing.cpp b/clang/test/CIR/Transforms/lifetime-null-passing.cpp new file mode 100644 index 000000000000..e26210b56234 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-null-passing.cpp @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +class _j {}; +typedef _j* jobj; + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef SType ( *FnPtr2)(unsigned session, jobj* surface); + +struct X { + struct entries { + FnPtr2 wildfn = nullptr; + }; + static entries e; +}; + +void nullpassing() { + jobj o = nullptr; + X::e.wildfn(0, &o); +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-switch.cpp b/clang/test/CIR/Transforms/lifetime-switch.cpp new file mode 100644 index 000000000000..ca56b95f71a0 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-switch.cpp @@ -0,0 +1,46 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +void s0(int b) { + int *p = nullptr; + switch (b) { + default: { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void s1(int b) { + int *p = nullptr; + switch (b) { + default: + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void s2(int b) { + int *p = nullptr; + switch (int x = 0; b) { + default: + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void s3(int b) { + int *p = nullptr; // expected-note {{invalidated here}} + switch (int x = 0; b) { + case 1: + p = &x; + case 2: + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + break; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/clang/test/CIR/Transforms/lifetime-this.cpp b/clang/test/CIR/Transforms/lifetime-this.cpp new file mode 100644 index 000000000000..8e18af8a9e16 --- /dev/null +++ 
b/clang/test/CIR/Transforms/lifetime-this.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "std-cxx.h" + +struct S { + S(int, int, const S* s); + void f(int a, int b); +}; + +void S::f(int a, int b) { + std::shared_ptr<S> l = std::make_shared<S>(a, b, this); // expected-remark {{pset => { this }}} +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir new file mode 100644 index 000000000000..17880efeac2a --- /dev/null +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -0,0 +1,120 @@ +// RUN: cir-opt %s -cir-merge-cleanups -o %t.out.cir +// RUN: FileCheck --input-file=%t.out.cir %s + +#false = #cir.bool<false> : !cir.bool +#true = #cir.bool<true> : !cir.bool +!s32i = !cir.int<s, 32> +module { + cir.func @sw1(%arg0: !s32i, %arg1: !s32i) { + %0 = cir.alloca !s32i, cir.ptr <!s32i>, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr <!s32i>, ["c", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr <!s32i> + cir.store %arg1, %1 : !s32i, cir.ptr <!s32i> + cir.scope { + %2 = cir.alloca !s32i, cir.ptr <!s32i>, ["b", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr <!s32i> + %4 = cir.load %0 : cir.ptr <!s32i>, !s32i + cir.switch (%4 : !s32i) [ + case (equal, 0 : !s32i) { + %5 = cir.load %2 : cir.ptr <!s32i>, !s32i + %6 = cir.const(#cir.int<1> : !s32i) : !s32i + %7 = cir.binop(add, %5, %6) : !s32i + cir.store %7, %2 : !s32i, cir.ptr <!s32i> + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + }, + case (equal, 1 : !s32i) { + cir.scope { + cir.scope { + %5 = cir.load %1 : cir.ptr <!s32i>, !s32i + %6 = cir.const(#cir.int<3> : !s32i) : !s32i + %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool + cir.if %7 { + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + } + } + cir.break + } + cir.yield + }, + case (equal, 2 : !s32i) { + cir.scope { + %5 = cir.alloca !s32i, cir.ptr <!s32i>, ["yolo", init] {alignment = 4 : i64} + %6 = cir.load %2 : cir.ptr <!s32i>, !s32i + %7 = cir.const(#cir.int<1> : !s32i) : !s32i + %8 = cir.binop(add, %6, %7) : !s32i + cir.store %8, %2 : !s32i, cir.ptr <!s32i> + %9 = cir.const(#cir.int<100> : !s32i) : !s32i + cir.store %9, %5 : !s32i, cir.ptr <!s32i> + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + } + cir.yield + } + ] + } + cir.return + } + +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, 0) { +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr <!s32i>, !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i +// CHECK-NEXT: cir.store %7, %2 : !s32i, cir.ptr <!s32i> +// CHECK-NEXT: cir.return +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr <!s32i>, !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool +// CHECK-NEXT: cir.if %7 { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.alloca !s32i, cir.ptr <!s32i>, ["yolo", init] {alignment = 4 : i64} +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr <!s32i>, !s32i +// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %8 =
cir.binop(add, %6, %7) : !s32i +// CHECK-NEXT: cir.store %8, %2 : !s32i, cir.ptr <!s32i> +// CHECK-NEXT: %9 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %5 : !s32i, cir.ptr <!s32i> +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: ] + + // Should remove empty scopes. + cir.func @removeEmptyScope() { + cir.scope { + } + cir.return + } + // CHECK: cir.func @removeEmptyScope + // CHECK-NEXT: cir.return + + // Should remove empty switch-case statements. + cir.func @removeEmptySwitch(%arg0: !s32i) { + // CHECK: cir.func @removeEmptySwitch + cir.switch (%arg0 : !s32i) [ + ] + // CHECK-NOT: cir.switch + cir.return + // CHECK: cir.return + } + +} diff --git a/clang/test/CIR/Translation/zeroinitializer.cir b/clang/test/CIR/Translation/zeroinitializer.cir new file mode 100644 index 000000000000..c6b92be604d5 --- /dev/null +++ b/clang/test/CIR/Translation/zeroinitializer.cir @@ -0,0 +1,27 @@ +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +module { + // Should zero-initialize global structs initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @bar() {addr_space = 0 : i32} : !llvm.struct<"struct.S", (i8, i32)> { + %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> + llvm.return %0 : !llvm.struct<"struct.S", (i8, i32)> + } + // CHECK: @bar = global %struct.S zeroinitializer + + // Should null-initialize global pointer initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @ptr() {addr_space = 0 : i32} : !llvm.ptr { + %0 = cir.llvmir.zeroinit : !llvm.ptr + llvm.return %0 : !llvm.ptr + } + // CHECK: @ptr = global ptr null + + // Should lower aggregate types whose elements are initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> { + %0 = llvm.mlir.undef : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + %1 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> + %2 = llvm.insertvalue %1, %0[0] : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + llvm.return %2 : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + } + // CHECK: @arr = global [1 x %struct.S] zeroinitializer +} diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c new file mode 100644 index 000000000000..c29c6943d6ff --- /dev/null +++ b/clang/test/CIR/cc1.c @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S %s -o %t.s +// RUN: FileCheck --input-file=%t.s %s -check-prefix=ASM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o +// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ + +void foo() {} + +// MLIR: func.func @foo() { +// MLIR-NEXT: return +// MLIR-NEXT: } + +// LLVM: define void @foo() +// LLVM-NEXT: ret void +// LLVM-NEXT: } + +// ASM: .globl foo +// ASM-NEXT: .p2align +// ASM-NEXT: .type foo,@function +// ASM-NEXT: foo: +// ASM: retq + +// OBJ: 0: c3 retq diff --git a/clang/test/CIR/cc1.cir b/clang/test/CIR/cc1.cir new file mode 100644 index 000000000000..9ea923faff0c --- /dev/null +++ b/clang/test/CIR/cc1.cir @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu
-fclangir -emit-llvm %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM + +module { + cir.func @foo() { + cir.return + } +} + +// LLVM: define void @foo() +// LLVM-NEXT: ret void +// LLVM-NEXT: } diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir new file mode 100644 index 000000000000..8351d5be3165 --- /dev/null +++ b/clang/test/CIR/cirtool.cir @@ -0,0 +1,20 @@ +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR +// RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +module { + cir.func @foo() { + cir.return + } +} + +// LLVM: define void @foo() +// LLVM-NEXT: ret void +// LLVM-NEXT: } + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo() { +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c new file mode 100644 index 000000000000..d1e0d7614489 --- /dev/null +++ b/clang/test/CIR/driver.c @@ -0,0 +1,28 @@ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -S -Xclang -emit-cir %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -Xclang -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -S -emit-llvm %s -o %t1.ll +// RUN: FileCheck --input-file=%t1.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -emit-llvm %s -o %t2.ll +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o +// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-verifier -S -Xclang -emit-cir %s -o %t.cir +// RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -Xclang -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR + +void foo(void) {} + +// CIR: module {{.*}} { +// CIR-NEXT: cir.func @foo() +// CIR-NEXT: cir.return +// CIR-NEXT: } +// CIR-NEXT: } + +// LLVM: define void @foo() +// LLVM-NEXT: ret void +// LLVM-NEXT: } + +// OBJ: 0: c3 retq diff --git a/clang/test/CIR/lit.local.cfg b/clang/test/CIR/lit.local.cfg new file mode 100644 index 000000000000..6afd60f47bff --- /dev/null +++ b/clang/test/CIR/lit.local.cfg @@ -0,0 +1,2 @@ +if not config.root.clang_enable_cir: + config.unsupported = True diff --git a/clang/test/CIR/mlirargs.c b/clang/test/CIR/mlirargs.c new file mode 100644 index 000000000000..cfb07197ef18 --- /dev/null +++ b/clang/test/CIR/mlirargs.c @@ -0,0 +1,12 @@ +// Clang returns 1 when wrong arguments are given. +// RUN: not %clang_cc1 -mmlir -mlir-disable-threadingd -mmlir -mlir-print-op-genericd 2>&1 | FileCheck %s --check-prefix=WRONG +// Test that the driver can pass mlir args to cc1. +// RUN: %clang -### -mmlir -mlir-disable-threading %s 2>&1 | FileCheck %s --check-prefix=CC1 + + +// WRONG: clang (MLIR option parsing): Unknown command line argument '-mlir-disable-threadingd'. Try: 'clang (MLIR option parsing) --help' +// WRONG: clang (MLIR option parsing): Did you mean '--mlir-disable-threading'? 
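+// (The "Did you mean" suggestions above and below come from MLIR's own option
+// parser; the clang driver simply forwards everything after -mmlir to it, as
+// the CC1 check at the bottom of this file confirms.)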
+// WRONG: clang (MLIR option parsing): Unknown command line argument '-mlir-print-op-genericd'. Try: 'clang (MLIR option parsing) --help' +// WRONG: clang (MLIR option parsing): Did you mean '--mlir-print-op-generic'? + +// CC1: "-mmlir" "-mlir-disable-threading" diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c new file mode 100644 index 000000000000..2f6fe5651f60 --- /dev/null +++ b/clang/test/CIR/mlirprint.c @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRFLAT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-before=cir-flatten-cfg %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CFGPASS + +int foo(void) { + int i = 3; + return i; +} + + +// CIR: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIR: cir.func @foo() -> !s32i +// CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) +// CIR: cir.func @foo() -> !s32i +// CIR-NOT: IR Dump After FlattenCFG +// CIR: IR Dump After DropAST (cir-drop-ast) +// CIR: cir.func @foo() -> !s32i +// CIRFLAT: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIRFLAT: cir.func @foo() -> !s32i +// CIRFLAT: IR Dump After LoweringPrepare (cir-lowering-prepare) +// CIRFLAT: cir.func @foo() -> !s32i +// CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) +// CIRFLAT: IR Dump After DropAST (cir-drop-ast) +// CIRFLAT: cir.func @foo() -> !s32i +// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-flat-to-llvm) +// LLVM: llvm.func @foo() -> i32 +// LLVM: IR Dump After +// LLVM: define i32 @foo() + +// CIRPASS-NOT: IR Dump After MergeCleanups +// CIRPASS: IR Dump After DropAST + +// CFGPASS: IR Dump Before FlattenCFG (cir-flatten-cfg) diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index df34a5707da3..0b1c38a110c3 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -80,7 +80,15 @@ list(APPEND CLANG_TEST_DEPS diagtool hmaptool ) - + +if(CLANG_ENABLE_CIR) + list(APPEND CLANG_TEST_DEPS + cir-opt + cir-translate + mlir-translate + ) +endif() + if(CLANG_ENABLE_STATIC_ANALYZER) list(APPEND CLANG_TEST_DEPS clang-check diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py index e5630a07424c..bcbb0f13dd69 100644 --- a/clang/test/lit.cfg.py +++ b/clang/test/lit.cfg.py @@ -29,6 +29,7 @@ ".c", ".cpp", ".i", + ".cir", ".cppm", ".m", ".mm", @@ -84,6 +85,8 @@ tools = [ "apinotes-test", "c-index-test", + "cir-opt", + "cir-translate", "clang-diff", "clang-format", "clang-repl", diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index bdd8004be3e0..602ed9afd64b 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -3,6 +3,11 @@ create_subdirectory_options(CLANG TOOL) add_clang_subdirectory(diagtool) add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) +if(CLANG_ENABLE_CIR) + add_clang_subdirectory(cir-opt) + add_clang_subdirectory(cir-translate) + 
add_clang_subdirectory(cir-lsp-server) +endif() add_clang_subdirectory(clang-diff) add_clang_subdirectory(clang-format) add_clang_subdirectory(clang-format-vs) diff --git a/clang/tools/cir-lsp-server/CMakeLists.txt b/clang/tools/cir-lsp-server/CMakeLists.txt new file mode 100644 index 000000000000..5154a08e7d47 --- /dev/null +++ b/clang/tools/cir-lsp-server/CMakeLists.txt @@ -0,0 +1,35 @@ +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) + +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +set(LIBS + ${dialect_libs} + ${conversion_libs} + ${test_libs} + clangCIR + clangCIRLoweringThroughMLIR + clangCIRLoweringDirectToLLVM + MLIRCIR + MLIRAffineAnalysis + MLIRAnalysis + MLIRDialect + MLIRLspServerLib + MLIRParser + MLIRPass + MLIRTransforms + MLIRTransformUtils + MLIRSupport + MLIRIR + ) + +add_mlir_tool(cir-lsp-server + cir-lsp-server.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-lsp-server PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-lsp-server) diff --git a/clang/tools/cir-lsp-server/cir-lsp-server.cpp b/clang/tools/cir-lsp-server/cir-lsp-server.cpp new file mode 100644 index 000000000000..bd823c13a42e --- /dev/null +++ b/clang/tools/cir-lsp-server/cir-lsp-server.cpp @@ -0,0 +1,20 @@ +//===- cir-lsp-server.cpp - CIR Language Server ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/Dialect.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/InitAllDialects.h" +#include "mlir/Tools/mlir-lsp-server/MlirLspServerMain.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +int main(int argc, char **argv) { + mlir::DialectRegistry registry; + mlir::registerAllDialects(registry); + registry.insert<mlir::cir::CIRDialect>(); + return failed(mlir::MlirLspServerMain(argc, argv, registry)); +} diff --git a/clang/tools/cir-opt/CMakeLists.txt b/clang/tools/cir-opt/CMakeLists.txt new file mode 100644 index 000000000000..741cdfa5950d --- /dev/null +++ b/clang/tools/cir-opt/CMakeLists.txt @@ -0,0 +1,35 @@ +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) + +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +set(LIBS + ${dialect_libs} + ${conversion_libs} + clangCIR + clangCIRLoweringThroughMLIR + clangCIRLoweringDirectToLLVM + MLIRAnalysis + MLIRCIR + MLIRCIRTransforms + MLIRDialect + MLIRIR + MLIRMemRefDialect + MLIROptLib + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRTransforms + MLIRTransformUtils +) + +add_clang_tool(cir-opt + cir-opt.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-opt PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-opt) diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp new file mode 100644 index 000000000000..064aa7241c8e --- /dev/null +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -0,0 +1,66 @@ +//===- cir-opt.cpp - CIR optimization and analysis driver -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Similar to MLIR/LLVM's "opt" tools but also deals with analysis and custom +// arguments. TODO: this is basically a copy from MlirOptMain.cpp, but capable +// of module emission as specified by the user. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/DLTI/DLTI.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" +#include "mlir/InitAllPasses.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Pass/PassRegistry.h" +#include "mlir/Tools/mlir-opt/MlirOptMain.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Passes.h" + +int main(int argc, char **argv) { + // TODO: register needed MLIR passes for CIR? + mlir::DialectRegistry registry; + registry.insert<mlir::cir::CIRDialect, mlir::arith::ArithDialect, mlir::DLTIDialect, mlir::func::FuncDialect, mlir::LLVM::LLVMDialect, mlir::memref::MemRefDialect, mlir::omp::OpenMPDialect>(); + + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertMLIRToLLVMPass(); + }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createMergeCleanupsPass(); + }); + + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToMLIRPass(); + }); + + mlir::PassPipelineRegistration<> pipeline( + "cir-to-llvm", "", [](mlir::OpPassManager &pm) { + cir::direct::populateCIRToLLVMPasses(pm); + }); + + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createFlattenCFGPass(); + }); + + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createReconcileUnrealizedCastsPass(); + }); + + mlir::registerTransformsPasses(); + + return failed(MlirOptMain( + argc, argv, "Clang IR analysis and optimization tool\n", registry)); +} diff --git a/clang/tools/cir-translate/CMakeLists.txt b/clang/tools/cir-translate/CMakeLists.txt new file mode 100644 index 000000000000..a5e22b02e505 --- /dev/null +++ b/clang/tools/cir-translate/CMakeLists.txt @@ -0,0 +1,36 @@ +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) +get_property(translation_libs GLOBAL PROPERTY MLIR_TRANSLATION_LIBS) + +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +set(LIBS + ${dialect_libs} + ${conversion_libs} + ${translation_libs} + clangCIR + clangCIRLoweringDirectToLLVM + MLIRAnalysis + MLIRCIR + MLIRCIRTransforms + MLIRDialect + MLIRIR + MLIROptLib + MLIRParser + MLIRPass + MLIRTransforms + MLIRTransformUtils + MLIRTranslateLib + MLIRSupport +) + +add_clang_tool(cir-translate + cir-translate.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-translate PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-translate) diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp new file mode 100644 index 000000000000..9ff379a26588 --- /dev/null +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -0,0 +1,57 @@ +//===- cir-translate.cpp - CIR Translate Driver ------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Converts CIR directly to LLVM IR, similar to mlir-translate or LLVM llc. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/DLTI/DLTI.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/InitAllTranslations.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Target/LLVMIR/Dialect/All.h" +#include "mlir/Tools/mlir-translate/MlirTranslateMain.h" +#include "mlir/Tools/mlir-translate/Translation.h" +#include "llvm/IR/Module.h" + +namespace cir { +namespace direct { +extern void registerCIRDialectTranslation(mlir::DialectRegistry &registry); +extern std::unique_ptr<llvm::Module> +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, + llvm::LLVMContext &llvmCtx, + bool disableVerifier = false); +} // namespace direct +} + +void registerToLLVMTranslation() { + mlir::TranslateFromMLIRRegistration registration( + "cir-to-llvmir", "Translate CIR to LLVMIR", + [](mlir::Operation *op, mlir::raw_ostream &output) { + llvm::LLVMContext llvmContext; + auto llvmModule = cir::direct::lowerDirectlyFromCIRToLLVMIR( + llvm::dyn_cast<mlir::ModuleOp>(op), llvmContext); + if (!llvmModule) + return mlir::failure(); + llvmModule->print(output, nullptr); + return mlir::success(); + }, + [](mlir::DialectRegistry &registry) { + registry.insert<mlir::DLTIDialect, mlir::func::FuncDialect>(); + mlir::registerAllToLLVMIRTranslations(registry); + cir::direct::registerCIRDialectTranslation(registry); + }); +} + +int main(int argc, char **argv) { + registerToLLVMTranslation(); + return failed( + mlir::mlirTranslateMain(argc, argv, "CIR Translation Tool")); +} diff --git a/mlir/docs/Passes.md b/mlir/docs/Passes.md index 6a18e06593e8..242b11a824c1 100644 --- a/mlir/docs/Passes.md +++ b/mlir/docs/Passes.md @@ -123,3 +123,7 @@ This document describes the available MLIR passes and their contracts. ## XeGPU Dialect Passes [include "XeGPUPasses.md"] + +## CIR Dialect Passes + +[include "CIRPasses.md"] diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h index 5333d7446df5..fa435cb3155e 100644 --- a/mlir/include/mlir/IR/OpImplementation.h +++ b/mlir/include/mlir/IR/OpImplementation.h @@ -700,6 +700,10 @@ class AsmParser { /// Parse a floating point value from the stream. virtual ParseResult parseFloat(double &result) = 0; + /// Parse a floating point value into APFloat from the stream. + virtual ParseResult parseFloat(const llvm::fltSemantics &semantics, + APFloat &result) = 0; + /// Parse an integer value from the stream. template <typename IntT> ParseResult parseInteger(IntT &result) { diff --git a/mlir/lib/AsmParser/AsmParserImpl.h b/mlir/lib/AsmParser/AsmParserImpl.h index 30c0079cda08..8b88a3a6650a 100644 --- a/mlir/lib/AsmParser/AsmParserImpl.h +++ b/mlir/lib/AsmParser/AsmParserImpl.h @@ -269,8 +269,11 @@ class AsmParserImpl : public BaseT { return success(); } - /// Parse a floating point value from the stream. - ParseResult parseFloat(double &result) override { + /// Parse a floating point value with given semantics from the stream. Since + /// this implementation parses the string as double precision and only then + /// converts the value to the requested semantics, precision may be lost.
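 + /// For example (illustrative numbers): requesting IEEEquad semantics for the
 + /// literal "2.1" first produces the double value
 + /// 2.100000000000000088817841970012..., and that value widened to quad is
 + /// what gets returned, not the closest quad to decimal 2.1.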
+ ParseResult parseFloat(const llvm::fltSemantics &semantics, + APFloat &result) override { bool isNegative = parser.consumeIf(Token::minus); Token curTok = parser.getToken(); SMLoc loc = curTok.getLoc(); @@ -281,7 +284,9 @@ class AsmParserImpl : public BaseT { if (!val) return emitError(loc, "floating point value too large"); parser.consumeToken(Token::floatliteral); - result = isNegative ? -*val : *val; + result = APFloat(isNegative ? -*val : *val); + bool losesInfo; + result.convert(semantics, APFloat::rmNearestTiesToEven, &losesInfo); return success(); } @@ -289,18 +294,28 @@ if (curTok.is(Token::integer)) { std::optional<APFloat> apResult; if (failed(parser.parseFloatFromIntegerLiteral( - apResult, curTok, isNegative, APFloat::IEEEdouble(), - /*typeSizeInBits=*/64))) + apResult, curTok, isNegative, semantics, + APFloat::semanticsSizeInBits(semantics)))) return failure(); + result = *apResult; parser.consumeToken(Token::integer); - result = apResult->convertToDouble(); return success(); } return emitError(loc, "expected floating point literal"); } + /// Parse a floating point value from the stream. + ParseResult parseFloat(double &result) override { + llvm::APFloat apResult(0.0); + if (parseFloat(APFloat::IEEEdouble(), apResult)) + return failure(); + + result = apResult.convertToDouble(); + return success(); + } + /// Parse an optional integer value from the stream. OptionalParseResult parseOptionalInteger(APInt &result) override { return parser.parseOptionalInteger(result); diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp index 00f2b0c0c2f1..1b8b4bac1821 100644 --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -326,19 +326,15 @@ ParseResult Parser::parseFloatFromIntegerLiteral( "leading minus"); } - std::optional<uint64_t> value = tok.getUInt64IntegerValue(); - if (!value) + APInt intValue; + tok.getSpelling().getAsInteger(isHex ? 0 : 10, intValue); + if (intValue.getActiveBits() > typeSizeInBits) return emitError(loc, "hexadecimal float constant out of range for type"); - if (&semantics == &APFloat::IEEEdouble()) { - result = APFloat(semantics, APInt(typeSizeInBits, *value)); - return success(); - } + APInt truncatedValue(typeSizeInBits, intValue.getNumWords(), + intValue.getRawData()); - APInt apInt(typeSizeInBits, *value); - if (apInt != *value) - return emitError(loc, "hexadecimal float constant out of range for type"); - result = APFloat(semantics, apInt); + result.emplace(semantics, truncatedValue); return success(); } diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index 4e06b9c127e7..5313f6bafff6 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -976,9 +976,9 @@ void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, FlatSymbolRefAttr callee, ValueRange args) { - assert(callee && "expected non-null callee in direct call builder"); + auto fargs = callee ?
args : args.drop_front(); build(builder, state, results, - TypeAttr::get(getLLVMFuncType(builder.getContext(), results, args)), + TypeAttr::get(getLLVMFuncType(builder.getContext(), results, fargs)), callee, args, /*fastmathFlags=*/nullptr, /*branch_weights=*/nullptr, /*CConv=*/nullptr, /*access_groups=*/nullptr, /*alias_scopes=*/nullptr, diff --git a/mlir/test/IR/custom-float-attr-roundtrip.mlir b/mlir/test/IR/custom-float-attr-roundtrip.mlir new file mode 100644 index 000000000000..e0913e58d295 --- /dev/null +++ b/mlir/test/IR/custom-float-attr-roundtrip.mlir @@ -0,0 +1,57 @@ +// RUN: mlir-opt %s -split-input-file -verify-diagnostics| FileCheck %s + +// CHECK-LABEL: @test_enum_attr_roundtrip +func.func @test_enum_attr_roundtrip() -> () { + // CHECK: attr = #test.custom_float<"float" : 2.000000e+00> + "test.op"() {attr = #test.custom_float<"float" : 2.>} : () -> () + // CHECK: attr = #test.custom_float<"double" : 2.000000e+00> + "test.op"() {attr =#test.custom_float<"double" : 2.>} : () -> () + // CHECK: attr = #test.custom_float<"fp80" : 2.000000e+00> + "test.op"() {attr =#test.custom_float<"fp80" : 2.>} : () -> () + // CHECK: attr = #test.custom_float<"float" : 0x7FC00000> + "test.op"() {attr =#test.custom_float<"float" : 0x7FC00000>} : () -> () + // CHECK: attr = #test.custom_float<"double" : 0x7FF0000001000000> + "test.op"() {attr =#test.custom_float<"double" : 0x7FF0000001000000>} : () -> () + // CHECK: attr = #test.custom_float<"fp80" : 0x7FFFC000000000100000> + "test.op"() {attr =#test.custom_float<"fp80" : 0x7FFFC000000000100000>} : () -> () + return +} + +// ----- + +// Verify literal must be hex or float + +// expected-error @below {{unexpected decimal integer literal for a floating point value}} +// expected-note @below {{add a trailing dot to make the literal a float}} +"test.op"() {attr =#test.custom_float<"float" : 42>} : () -> () + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +"test.op"() {attr =#test.custom_float<"float" : 0x7FC000000>} : () -> () + + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +"test.op"() {attr =#test.custom_float<"double" : 0x7FC000007FC0000000>} : () -> () + + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +"test.op"() {attr =#test.custom_float<"fp80" : 0x7FC0000007FC0000007FC000000>} : () -> () + +// ----- + +// Value must be a floating point literal or integer literal + +// expected-error @below {{expected floating point literal}} +"test.op"() {attr =#test.custom_float<"float" : "blabla">} : () -> () + diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir index bebbb876391d..020942e7f4c1 100644 --- a/mlir/test/IR/parser.mlir +++ b/mlir/test/IR/parser.mlir @@ -1105,6 +1105,30 @@ func.func @bfloat16_special_values() { return } +// CHECK-LABEL: @f80_special_values +func.func @f80_special_values() { + // F80 signaling NaNs. + // CHECK: arith.constant 0x7FFFE000000000000001 : f80 + %0 = arith.constant 0x7FFFE000000000000001 : f80 + // CHECK: arith.constant 0x7FFFB000000000000011 : f80 + %1 = arith.constant 0x7FFFB000000000000011 : f80 + + // F80 quiet NaNs. 
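 + // These 20-hex-digit (80-bit) literals only fit because the AsmParser change
 + // in this patch sizes the APInt to the float type instead of capping it at
 + // uint64_t.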
+ // CHECK: arith.constant 0x7FFFC000000000100000 : f80 + %2 = arith.constant 0x7FFFC000000000100000 : f80 + // CHECK: arith.constant 0x7FFFE000000001000000 : f80 + %3 = arith.constant 0x7FFFE000000001000000 : f80 + + // F80 positive infinity. + // CHECK: arith.constant 0x7FFF8000000000000000 : f80 + %4 = arith.constant 0x7FFF8000000000000000 : f80 + // F80 negative infinity. + // CHECK: arith.constant 0xFFFF8000000000000000 : f80 + %5 = arith.constant 0xFFFF8000000000000000 : f80 + + return +} + // We want to print floats in exponential notation with 6 significant digits, // but it may lead to precision loss when parsing back, in which case we print // the decimal form instead. diff --git a/mlir/test/lib/Dialect/Test/TestAttrDefs.td b/mlir/test/lib/Dialect/Test/TestAttrDefs.td index 40f035a3e3a4..12635e107bd4 100644 --- a/mlir/test/lib/Dialect/Test/TestAttrDefs.td +++ b/mlir/test/lib/Dialect/Test/TestAttrDefs.td @@ -340,4 +340,15 @@ def TestConditionalAliasAttr : Test_Attr<"TestConditionalAlias"> { }]; } +// Test AsmParser::parseFloat(const fltSemantics&, APFloat&) API through the +// custom parser and printer. +def TestCustomFloatAttr : Test_Attr<"TestCustomFloat"> { + let mnemonic = "custom_float"; + let parameters = (ins "mlir::StringAttr":$type_str, APFloatParameter<"">:$value); + + let assemblyFormat = [{ + `<` custom<CustomFloatAttr>($type_str, $value) `>` + }]; +} + #endif // TEST_ATTRDEFS diff --git a/mlir/test/lib/Dialect/Test/TestAttributes.cpp b/mlir/test/lib/Dialect/Test/TestAttributes.cpp index 2cc051e664be..d7e40d35238d 100644 --- a/mlir/test/lib/Dialect/Test/TestAttributes.cpp +++ b/mlir/test/lib/Dialect/Test/TestAttributes.cpp @@ -18,6 +18,7 @@ #include "mlir/IR/ExtensibleDialect.h" #include "mlir/IR/Types.h" #include "mlir/Support/LogicalResult.h" +#include "llvm/ADT/APFloat.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/TypeSwitch.h" @@ -240,6 +241,46 @@ static void printConditionalAlias(AsmPrinter &p, StringAttr value) { p.printKeywordOrString(value); } +//===----------------------------------------------------------------------===// +// Custom Float Attribute +//===----------------------------------------------------------------------===// + +static void printCustomFloatAttr(AsmPrinter &p, StringAttr typeStrAttr, + APFloat value) { + p << typeStrAttr << " : " << value; +} + +static ParseResult parseCustomFloatAttr(AsmParser &p, StringAttr &typeStrAttr, + FailureOr<APFloat> &value) { + + std::string str; + if (p.parseString(&str)) + return failure(); + + typeStrAttr = StringAttr::get(p.getContext(), str); + + if (p.parseColon()) + return failure(); + + const llvm::fltSemantics *semantics; + if (str == "float") + semantics = &llvm::APFloat::IEEEsingle(); + else if (str == "double") + semantics = &llvm::APFloat::IEEEdouble(); + else if (str == "fp80") + semantics = &llvm::APFloat::x87DoubleExtended(); + else + return p.emitError(p.getCurrentLocation(), "unknown float type, expected " + "'float', 'double' or 'fp80'"); + + APFloat parsedValue(0.0); + if (p.parseFloat(*semantics, parsedValue)) + return failure(); + + value.emplace(parsedValue); + return success(); +} + //===----------------------------------------------------------------------===// // Tablegen Generated Definitions //===----------------------------------------------------------------------===// diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt index fcc59c8fa1c3..449170e68de1 100644 --- a/runtimes/CMakeLists.txt +++ b/runtimes/CMakeLists.txt @@ -48,6 +48,8 @@
function(runtime_register_component name) endfunction() find_package(LLVM PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) +# TODO(CIR): Once CIR inclusion in clang builds is guarded by a flag, guard this with the same flag. +find_package(MLIR PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) find_package(Clang PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) set(LLVM_THIRD_PARTY_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../third-party")
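A minimal end-to-end sketch of how the pieces added here fit together (the file name is illustrative; the flags are the ones exercised by the tests above and assume a build configured with -DCLANG_ENABLE_CIR=ON):

    /* usage-sketch.c -- any small input works; mirrors clang/test/CIR/driver.c */
    void foo(void) {}

    /* Emit CIR, run a CIR-level pass, then lower to LLVM IR:
     *   clang -fclangir -S -Xclang -emit-cir usage-sketch.c -o usage-sketch.cir
     *   cir-opt usage-sketch.cir -cir-merge-cleanups -o usage-sketch.opt.cir
     *   cir-translate usage-sketch.opt.cir -cir-to-llvmir -o usage-sketch.ll
     */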