1,873 changes: 77 additions & 1,796 deletions clang/include/clang/Sema/Sema.h

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion clang/include/clang/Sema/SemaBase.h
@@ -146,7 +146,7 @@ class SemaBase {
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// But see DiagIfDeviceCode() and DiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }

304 changes: 304 additions & 0 deletions clang/include/clang/Sema/SemaCUDA.h
@@ -0,0 +1,304 @@
//===----- SemaCUDA.h ----- Semantic Analysis for CUDA constructs ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_SEMA_SEMACUDA_H
#define LLVM_CLANG_SEMA_SEMACUDA_H

#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Redeclarable.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaBase.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <string>

namespace clang {

enum class CUDAFunctionTarget;

class SemaCUDA : public SemaBase {
public:
SemaCUDA(Sema &S);

/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceHostDevice();

ExprResult ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<const FunctionDecl> FD;
SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<const FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (DiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentTarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder DiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as DiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder DiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CUDAFunctionTarget::Host if D is null.
CUDAFunctionTarget IdentifyTarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyTarget(const ParsedAttributesView &Attrs);

enum CUDAVariableTarget {
CVT_Device, /// Emitted on device side with a shadow variable on host side
CVT_Host, /// Emitted on host side only
CVT_Both, /// Emitted on both sides with different addresses
CVT_Unified, /// Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyTarget(const VarDecl *D);
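// For illustration only (not part of this header), typical CUDA declarations
// map to CUDAVariableTarget roughly as follows, assuming default settings:
//   __device__ int DevVar;      // CVT_Device: device copy plus host-side shadow
//   __constant__ float Coef[4]; // CVT_Device
//   __managed__ int Counter;    // CVT_Unified: one address valid on both sides
//   int HostOnly;               // CVT_Host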

/// Defines kinds of CUDA global host/device context where a function may be
/// called.
enum CUDATargetContextKind {
CTCK_Unknown, /// Unknown context
CTCK_InitGlobalVar, /// Function called during global variable
/// initialization
};

/// Define the current global CUDA host/device context where a function may be
/// called. Only used when a function is called outside of any functions.
struct CUDATargetContext {
CUDAFunctionTarget Target = CUDAFunctionTarget::HostDevice;
CUDATargetContextKind Kind = CTCK_Unknown;
Decl *D = nullptr;
} CurCUDATargetCtx;

struct CUDATargetContextRAII {
SemaCUDA &S;
SemaCUDA::CUDATargetContext SavedCtx;
CUDATargetContextRAII(SemaCUDA &S_, SemaCUDA::CUDATargetContextKind K,
Decl *D);
~CUDATargetContextRAII() { S.CurCUDATargetCtx = SavedCtx; }
};

/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentTarget() {
return IdentifyTarget(dyn_cast<FunctionDecl>(SemaRef.CurContext));
}

static bool isImplicitHostDeviceFunction(const FunctionDecl *D);

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCall(const FunctionDecl *Caller, const FunctionDecl *Callee) {
return IdentifyPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous);

/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddConstantAttr(VarDecl *VD);

/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCall(SourceLocation Loc, FunctionDecl *Callee);

void CheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are implicitly host device functions unless they carry an
/// explicit host or device attribute.
void SetLambdaAttrs(CXXMethodDecl *Method);

/// Record \p FD if it is a CUDA/HIP implicit host device function used on
/// device side in device compilation.
void RecordImplicitHostDeviceFuncUsedByDevice(const FunctionDecl *FD);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedMatches(
const FunctionDecl *Caller,
llvm::SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>>
&Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMemberKind CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS, bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedInitializer(VarDecl *VD);
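// Illustrative CUDA examples (not part of this header) of what
// checkAllowedInitializer accepts and rejects:
//   __device__ int A = 42;   // OK: constant initializer
//   __shared__ int B;        // OK: no initializer
//   struct W { W() { /* work */ } };
//   __device__ W C;          // error: dynamic initialization of a device variable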

/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkTargetOverload(FunctionDecl *NewFD, const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritTargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);

/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getConfigureFuncName() const;
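// For example (illustrative): a launch such as
//   kern<<<Grid, Block, SharedMem, Stream>>>(args);
// is checked against a call to the configure function named here, e.g.
// __cudaPushCallConfiguration on newer CUDA runtimes, cudaConfigureCall on
// older ones, or hipConfigureCall for HIP.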

private:
unsigned ForceHostDeviceDepth = 0;

friend class ASTReader;
friend class ASTWriter;
};

} // namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::SemaCUDA::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::SemaCUDA::FunctionDeclAndLoc;
using FDBaseInfo =
DenseMapInfo<clang::CanonicalDeclPtr<const clang::FunctionDecl>>;

static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}

static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}

static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}

static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
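// With the specialization above, hash-based containers keyed by
// FunctionDeclAndLoc (such as LocsWithCUDACallDiags) behave as expected,
// e.g. (illustrative only):
//   llvm::DenseSet<clang::SemaCUDA::FunctionDeclAndLoc> Seen;
//   bool FirstTime = Seen.insert({CanonicalFD, CallLoc}).second;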

#endif // LLVM_CLANG_SEMA_SEMACUDA_H
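To make the host/device machinery above concrete, here is a small CUDA sketch (illustrative only, not part of this change) of the call combinations that IdentifyPreference and CheckCall classify:

__device__ int dev_fn();     // CUDAFunctionTarget::Device
__host__ int host_fn();      // CUDAFunctionTarget::Host
__global__ void kern();      // CUDAFunctionTarget::Global

__host__ __device__ int hd_fn() {
  return dev_fn(); // CFP_SameSide when compiling for the device,
                   // CFP_WrongSide when compiling for the host; the
                   // wrong-side diagnostic is deferred until hd_fn()
                   // is actually codegen'ed for the host.
}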
27 changes: 23 additions & 4 deletions clang/include/clang/Sema/SemaHLSL.h
@@ -13,23 +13,42 @@
#ifndef LLVM_CLANG_SEMA_SEMAHLSL_H
#define LLVM_CLANG_SEMA_SEMAHLSL_H

#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaBase.h"
#include <initializer_list>

namespace clang {

class SemaHLSL : public SemaBase {
public:
SemaHLSL(Sema &S);

Decl *ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer,
SourceLocation KwLoc, IdentifierInfo *Ident,
SourceLocation IdentLoc, SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl *ActOnStartBuffer(Scope *BufferScope, bool CBuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
SourceLocation LBrace);
void ActOnFinishBuffer(Decl *Dcl, SourceLocation RBrace);
HLSLNumThreadsAttr *mergeNumThreadsAttr(Decl *D,
const AttributeCommonInfo &AL, int X,
int Y, int Z);
HLSLShaderAttr *mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL,
HLSLShaderAttr::ShaderType ShaderType);
HLSLParamModifierAttr *
mergeParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
HLSLParamModifierAttr::Spelling Spelling);
void ActOnTopLevelFunction(FunctionDecl *FD);
void CheckEntryPoint(FunctionDecl *FD);
void CheckSemanticAnnotation(FunctionDecl *EntryPoint, const Decl *Param,
const HLSLAnnotationAttr *AnnotationAttr);
void DiagnoseAttrStageMismatch(
const Attr *A, HLSLShaderAttr::ShaderType Stage,
std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages);
};

} // namespace clang
40 changes: 39 additions & 1 deletion clang/include/clang/Sema/SemaOpenACC.h
@@ -40,7 +40,12 @@ class SemaOpenACC : public SemaBase {
OpenACCDefaultClauseKind DefaultClauseKind;
};

std::variant<DefaultDetails> Details;
struct ConditionDetails {
Expr *ConditionExpr;
};

std::variant<std::monostate, DefaultDetails, ConditionDetails> Details =
std::monostate{};

public:
OpenACCParsedClause(OpenACCDirectiveKind DirKind,
@@ -63,6 +68,25 @@
return std::get<DefaultDetails>(Details).DefaultClauseKind;
}

const Expr *getConditionExpr() const {
return const_cast<OpenACCParsedClause *>(this)->getConditionExpr();
}

Expr *getConditionExpr() {
assert((ClauseKind == OpenACCClauseKind::If ||
(ClauseKind == OpenACCClauseKind::Self &&
DirKind != OpenACCDirectiveKind::Update)) &&
"Parsed clause kind does not have a condition expr");

// 'self' has an optional ConditionExpr, so be tolerant of that; without this
// check, std::get below would assert on the monostate.
if (ClauseKind == OpenACCClauseKind::Self &&
std::holds_alternative<std::monostate>(Details))
return nullptr;

return std::get<ConditionDetails>(Details).ConditionExpr;
}

void setLParenLoc(SourceLocation EndLoc) { LParenLoc = EndLoc; }
void setEndLoc(SourceLocation EndLoc) { ClauseRange.setEnd(EndLoc); }

@@ -71,6 +95,20 @@
"Parsed clause is not a default clause");
Details = DefaultDetails{DefKind};
}

void setConditionDetails(Expr *ConditionExpr) {
assert((ClauseKind == OpenACCClauseKind::If ||
(ClauseKind == OpenACCClauseKind::Self &&
DirKind != OpenACCDirectiveKind::Update)) &&
"Parsed clause kind does not have a condition expr");
// In C++ we can count on this being a 'bool', but in C this gets left as
// some sort of scalar that codegen will have to take care of converting.
assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() ||
ConditionExpr->getType()->isScalarType()) &&
"Condition expression type not scalar/dependent");

Details = ConditionDetails{ConditionExpr};
}
};
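// Illustrative OpenACC sources these condition details correspond to (not
// part of the patch):
//   #pragma acc parallel if(n > 0)      // 'if' always carries a condition
//   #pragma acc serial self             // 'self' may omit its condition...
//   #pragma acc kernels self(use_host)  // ...or supply one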

SemaOpenACC(Sema &S);
1,447 changes: 1,447 additions & 0 deletions clang/include/clang/Sema/SemaOpenMP.h

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions clang/include/clang/Serialization/ASTBitCodes.h
@@ -698,6 +698,10 @@ enum ASTRecordTypes {
/// Record code for an unterminated \#pragma clang assume_nonnull begin
/// recorded in a preamble.
PP_ASSUME_NONNULL_LOC = 67,

/// Record code for lexical and visible block for delayed namespace in
/// reduced BMI.
DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD = 68,
};

/// Record types used within a source manager block.
22 changes: 21 additions & 1 deletion clang/include/clang/Serialization/ASTReader.h
@@ -517,6 +517,20 @@ class ASTReader
/// in the chain.
DeclUpdateOffsetsMap DeclUpdateOffsets;

using DelayedNamespaceOffsetMapTy = llvm::DenseMap<
serialization::DeclID,
std::pair</*LexicalOffset*/ uint64_t, /*VisibleOffset*/ uint64_t>>;

/// Mapping from global declaration IDs to the lexical and visible block
/// offset for delayed namespace in reduced BMI.
///
/// We can't use the existing DeclUpdate mechanism since the DeclUpdate
/// may only be applied in an outermost read. However, we need to know
/// whether or not a DeclContext has external storage during the recursive
/// reading. So we need to apply the offset immediately after we read the
/// namespace, as if it were not delayed.
DelayedNamespaceOffsetMapTy DelayedNamespaceOffsetMap;

struct PendingUpdateRecord {
Decl *D;
serialization::GlobalDeclID ID;
@@ -859,7 +873,7 @@

/// Our current depth in #pragma cuda force_host_device begin/end
/// macros.
unsigned ForceCUDAHostDeviceDepth = 0;
unsigned ForceHostDeviceDepth = 0;

/// The IDs of the declarations Sema stores directly.
///
@@ -2443,6 +2457,12 @@ class BitsUnpacker {
uint32_t Value;
uint32_t CurrentBitsIndex = ~0;
};

inline bool shouldSkipCheckingODR(const Decl *D) {
return D->getASTContext().getLangOpts().SkipODRCheckInGMF &&
D->isFromExplicitGlobalModule();
}

} // namespace clang

#endif // LLVM_CLANG_SERIALIZATION_ASTREADER_H
26 changes: 25 additions & 1 deletion clang/include/clang/Serialization/ASTWriter.h
@@ -201,6 +201,16 @@ class ASTWriter : public ASTDeserializationListener,
/// The declarations and types to emit.
std::queue<DeclOrType> DeclTypesToEmit;

/// The delayed namespace to emit. Only meaningful for reduced BMI.
///
/// In reduced BMI, we want to elide the unreachable declarations in
/// the global module fragment. However, in ASTWriterDecl, when we see
/// a namespace, all the declarations in the namespace would be emitted.
/// So the optimization becomes meaningless. To solve the issue, we delay
/// recording the declarations in such namespaces until everything else has
/// been emitted. Then we can safely record only the declarations that were
/// actually reached.
llvm::SmallVector<NamespaceDecl *, 16> DelayedNamespace;
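// Illustrative scenario (module names invented): in a reduced BMI for
//   module;
//   #include "huge.h"     // namespace huge { /* thousands of decls */ }
//   export module m;
//   export int f();       // reaches nothing from 'huge'
// delaying the namespace record lets the writer keep only the declarations
// in 'huge' that were actually reached, instead of everything the namespace
// lexically contains.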

/// The first ID number we can use for our own declarations.
serialization::DeclID FirstDeclID = serialization::NUM_PREDEF_DECL_IDS;

@@ -529,7 +539,8 @@ class ASTWriter : public ASTDeserializationListener,
void WriteType(QualType T);

bool isLookupResultExternal(StoredDeclsList &Result, DeclContext *DC);
bool isLookupResultEntirelyExternal(StoredDeclsList &Result, DeclContext *DC);
bool isLookupResultEntirelyExternalOrUnreachable(StoredDeclsList &Result,
DeclContext *DC);

void GenerateNameLookupTable(const DeclContext *DC,
llvm::SmallVectorImpl<char> &LookupTable);
@@ -704,6 +715,15 @@ class ASTWriter : public ASTDeserializationListener,
/// declaration.
serialization::DeclID getDeclID(const Decl *D);

/// Whether or not the declaration got emitted. If it did not, it is never
/// going to be emitted.
///
/// This may only be called after we have finished writing the declarations
/// (marked by DoneWritingDeclsAndTypes).
///
/// A declaration may only be omitted in reduced BMI.
bool wasDeclEmitted(const Decl *D) const;

unsigned getAnonymousDeclarationNumber(const NamedDecl *D);

/// Add a string to the given record.
@@ -798,6 +818,10 @@ class ASTWriter : public ASTDeserializationListener,
return WritingModule && WritingModule->isNamedModule();
}

bool isGeneratingReducedBMI() const { return GeneratingReducedBMI; }

bool getDoneWritingDeclsAndTypes() const { return DoneWritingDeclsAndTypes; }

private:
// ASTDeserializationListener implementation
void ReaderInitialized(ASTReader *Reader) override;
1 change: 0 additions & 1 deletion clang/include/clang/Serialization/ModuleFileExtension.h
@@ -9,7 +9,6 @@
#ifndef LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H
#define LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "llvm/Support/HashBuilder.h"
#include "llvm/Support/MD5.h"
2 changes: 1 addition & 1 deletion clang/include/clang/Serialization/PCHContainerOperations.h
@@ -12,7 +12,7 @@
#include "clang/Basic/Module.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <memory>

namespace llvm {
@@ -142,6 +142,8 @@ class CachedFileSystemEntry {
CachedFileContents *Contents;
};

using CachedRealPath = llvm::ErrorOr<std::string>;

/// This class is a shared cache, that caches the 'stat' and 'open' calls to the
/// underlying real file system, and the scanned preprocessor directives of
/// files.
@@ -154,9 +156,11 @@ class DependencyScanningFilesystemSharedCache {
/// The mutex that needs to be locked before mutation of any member.
mutable std::mutex CacheLock;

/// Map from filenames to cached entries.
llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator>
EntriesByFilename;
/// Map from filenames to cached entries and real paths.
llvm::StringMap<
std::pair<const CachedFileSystemEntry *, const CachedRealPath *>,
llvm::BumpPtrAllocator>
CacheByFilename;

/// Map from unique IDs to cached entries.
llvm::DenseMap<llvm::sys::fs::UniqueID, const CachedFileSystemEntry *>
Expand All @@ -168,6 +172,9 @@ class DependencyScanningFilesystemSharedCache {
/// The backing storage for cached contents.
llvm::SpecificBumpPtrAllocator<CachedFileContents> ContentsStorage;

/// The backing storage for cached real paths.
llvm::SpecificBumpPtrAllocator<CachedRealPath> RealPathStorage;

/// Returns entry associated with the filename or nullptr if none is found.
const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const;

@@ -194,6 +201,17 @@ class DependencyScanningFilesystemSharedCache {
const CachedFileSystemEntry &
getOrInsertEntryForFilename(StringRef Filename,
const CachedFileSystemEntry &Entry);

/// Returns the real path associated with the filename or nullptr if none is
/// found.
const CachedRealPath *findRealPathByFilename(StringRef Filename) const;

/// Returns the real path associated with the filename if there is one.
/// Otherwise, constructs a new entry from the given real path, associates it
/// with the filename, and returns the result.
const CachedRealPath &
getOrEmplaceRealPathForFilename(StringRef Filename,
llvm::ErrorOr<StringRef> RealPath);
};

DependencyScanningFilesystemSharedCache();
@@ -210,14 +228,17 @@ class DependencyScanningFilesystemSharedCache {
/// This class is a local cache, that caches the 'stat' and 'open' calls to the
/// underlying real file system.
class DependencyScanningFilesystemLocalCache {
llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator> Cache;
llvm::StringMap<
std::pair<const CachedFileSystemEntry *, const CachedRealPath *>,
llvm::BumpPtrAllocator>
Cache;

public:
/// Returns entry associated with the filename or nullptr if none is found.
const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const {
assert(llvm::sys::path::is_absolute_gnu(Filename));
auto It = Cache.find(Filename);
return It == Cache.end() ? nullptr : It->getValue();
return It == Cache.end() ? nullptr : It->getValue().first;
}

/// Associates the given entry with the filename and returns the given entry
@@ -226,9 +247,40 @@ class DependencyScanningFilesystemLocalCache {
insertEntryForFilename(StringRef Filename,
const CachedFileSystemEntry &Entry) {
assert(llvm::sys::path::is_absolute_gnu(Filename));
const auto *InsertedEntry = Cache.insert({Filename, &Entry}).first->second;
assert(InsertedEntry == &Entry && "entry already present");
return *InsertedEntry;
auto [It, Inserted] = Cache.insert({Filename, {&Entry, nullptr}});
auto &[CachedEntry, CachedRealPath] = It->getValue();
if (!Inserted) {
// The file is already present in the local cache. If we got here, it only
// contains the real path. Let's make sure the entry is populated too.
assert((!CachedEntry && CachedRealPath) && "entry already present");
CachedEntry = &Entry;
}
return *CachedEntry;
}

/// Returns real path associated with the filename or nullptr if none is
/// found.
const CachedRealPath *findRealPathByFilename(StringRef Filename) const {
assert(llvm::sys::path::is_absolute_gnu(Filename));
auto It = Cache.find(Filename);
return It == Cache.end() ? nullptr : It->getValue().second;
}

/// Associates the given real path with the filename and returns the given
/// real path (for convenience).
const CachedRealPath &
insertRealPathForFilename(StringRef Filename,
const CachedRealPath &RealPath) {
assert(llvm::sys::path::is_absolute_gnu(Filename));
auto [It, Inserted] = Cache.insert({Filename, {nullptr, &RealPath}});
auto &[CachedEntry, CachedRealPath] = It->getValue();
if (!Inserted) {
// The file is already present in the local cache. If we got here, it only
// contains the entry. Let's make sure the real path is populated too.
assert((!CachedRealPath && CachedEntry) && "real path already present");
CachedRealPath = &RealPath;
}
return *CachedRealPath;
}
};

@@ -296,6 +348,9 @@ class DependencyScanningWorkerFilesystem
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
openFileForRead(const Twine &Path) override;

std::error_code getRealPath(const Twine &Path,
SmallVectorImpl<char> &Output) override;

std::error_code setCurrentWorkingDirectory(const Twine &Path) override;

/// Returns entry for the given filename.
@@ -310,6 +365,10 @@ class DependencyScanningWorkerFilesystem
/// false if not (i.e. this entry is not a file or its scan fails).
bool ensureDirectiveTokensArePopulated(EntryRef Entry);

/// Check whether \p Path exists. By default this checks the cached result of
/// \c status(), and falls back on the underlying FS if unable to do so.
bool exists(const Twine &Path) override;

private:
/// For a filename that's not yet associated with any entry in the caches,
/// uses the underlying filesystem to either look up the entry based in the
@@ -402,6 +461,10 @@ class DependencyScanningWorkerFilesystem
llvm::ErrorOr<std::string> WorkingDirForCacheLookup;

void updateWorkingDirForCacheLookup();

llvm::ErrorOr<StringRef>
tryGetFilenameForLookup(StringRef OriginalFilename,
llvm::SmallVectorImpl<char> &PathBuf) const;
};

} // end namespace dependencies
@@ -308,6 +308,11 @@ class ModuleDepCollector final : public DependencyCollector {
ModuleDeps &Deps);
};

/// Resets codegen options that don't affect modules/PCH.
void resetBenignCodeGenOptions(frontend::ActionKind ProgramAction,
const LangOptions &LangOpts,
CodeGenOptions &CGOpts);

} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
137 changes: 55 additions & 82 deletions clang/lib/APINotes/APINotesReader.cpp

Large diffs are not rendered by default.

13 changes: 13 additions & 0 deletions clang/lib/AST/ASTImporter.cpp
@@ -3947,6 +3947,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// decl and its redeclarations may be required.
}

StringLiteral *Msg = D->getDeletedMessage();
if (Msg) {
auto Imported = import(Msg);
if (!Imported)
return Imported.takeError();
Msg = *Imported;
}

ToFunction->setQualifierInfo(ToQualifierLoc);
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
@@ -3961,6 +3969,11 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setRangeEnd(ToEndLoc);
ToFunction->setDefaultLoc(ToDefaultLoc);

if (Msg)
ToFunction->setDefaultedOrDeletedInfo(
FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
Importer.getToContext(), {}, Msg));

// Set the parameters.
for (auto *Param : Parameters) {
Param->setOwningFunction(ToFunction);
71 changes: 53 additions & 18 deletions clang/lib/AST/Decl.cpp
@@ -3058,7 +3058,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsTrivialForCall = false;
FunctionDeclBits.IsDefaulted = false;
FunctionDeclBits.IsExplicitlyDefaulted = false;
FunctionDeclBits.HasDefaultedFunctionInfo = false;
FunctionDeclBits.HasDefaultedOrDeletedInfo = false;
FunctionDeclBits.IsIneligibleOrNotSelected = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
@@ -3092,30 +3092,65 @@ bool FunctionDecl::isVariadic() const {
return false;
}

FunctionDecl::DefaultedFunctionInfo *
FunctionDecl::DefaultedFunctionInfo::Create(ASTContext &Context,
ArrayRef<DeclAccessPair> Lookups) {
DefaultedFunctionInfo *Info = new (Context.Allocate(
totalSizeToAlloc<DeclAccessPair>(Lookups.size()),
std::max(alignof(DefaultedFunctionInfo), alignof(DeclAccessPair))))
DefaultedFunctionInfo;
FunctionDecl::DefaultedOrDeletedFunctionInfo *
FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
ASTContext &Context, ArrayRef<DeclAccessPair> Lookups,
StringLiteral *DeletedMessage) {
static constexpr size_t Alignment =
std::max({alignof(DefaultedOrDeletedFunctionInfo),
alignof(DeclAccessPair), alignof(StringLiteral *)});
size_t Size = totalSizeToAlloc<DeclAccessPair, StringLiteral *>(
Lookups.size(), DeletedMessage != nullptr);

DefaultedOrDeletedFunctionInfo *Info =
new (Context.Allocate(Size, Alignment)) DefaultedOrDeletedFunctionInfo;
Info->NumLookups = Lookups.size();
Info->HasDeletedMessage = DeletedMessage != nullptr;

std::uninitialized_copy(Lookups.begin(), Lookups.end(),
Info->getTrailingObjects<DeclAccessPair>());
if (DeletedMessage)
*Info->getTrailingObjects<StringLiteral *>() = DeletedMessage;
return Info;
}

void FunctionDecl::setDefaultedFunctionInfo(DefaultedFunctionInfo *Info) {
assert(!FunctionDeclBits.HasDefaultedFunctionInfo && "already have this");
void FunctionDecl::setDefaultedOrDeletedInfo(
DefaultedOrDeletedFunctionInfo *Info) {
assert(!FunctionDeclBits.HasDefaultedOrDeletedInfo && "already have this");
assert(!Body && "can't replace function body with defaulted function info");

FunctionDeclBits.HasDefaultedFunctionInfo = true;
DefaultedInfo = Info;
FunctionDeclBits.HasDefaultedOrDeletedInfo = true;
DefaultedOrDeletedInfo = Info;
}

void FunctionDecl::setDeletedAsWritten(bool D, StringLiteral *Message) {
FunctionDeclBits.IsDeleted = D;

if (Message) {
assert(isDeletedAsWritten() && "Function must be deleted");
if (FunctionDeclBits.HasDefaultedOrDeletedInfo)
DefaultedOrDeletedInfo->setDeletedMessage(Message);
else
setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo::Create(
getASTContext(), /*Lookups=*/{}, Message));
}
}
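// Illustrative source form handled here (the C++26 deleted-function-with-
// message syntax; example invented):
//   void oldAPI() = delete("use newAPI() instead");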

void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage(
StringLiteral *Message) {
// We should never get here with a DefaultedOrDeletedInfo that was populated
// without space for the deleted message: that would require recreating the
// info, and setDefaultedOrDeletedInfo() disallows overwriting an already
// existing DefaultedOrDeletedFunctionInfo.
assert(HasDeletedMessage &&
"No space to store a delete message in this DefaultedOrDeletedInfo");
*getTrailingObjects<StringLiteral *>() = Message;
}

FunctionDecl::DefaultedFunctionInfo *
FunctionDecl::getDefaultedFunctionInfo() const {
return FunctionDeclBits.HasDefaultedFunctionInfo ? DefaultedInfo : nullptr;
FunctionDecl::DefaultedOrDeletedFunctionInfo *
FunctionDecl::getDefalutedOrDeletedInfo() const {
return FunctionDeclBits.HasDefaultedOrDeletedInfo ? DefaultedOrDeletedInfo
: nullptr;
}

bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
@@ -3202,7 +3237,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
if (!hasBody(Definition))
return nullptr;

assert(!Definition->FunctionDeclBits.HasDefaultedFunctionInfo &&
assert(!Definition->FunctionDeclBits.HasDefaultedOrDeletedInfo &&
"definition should not have a body");
if (Definition->Body)
return Definition->Body.get(getASTContext().getExternalSource());
@@ -3211,7 +3246,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
}

void FunctionDecl::setBody(Stmt *B) {
FunctionDeclBits.HasDefaultedFunctionInfo = false;
FunctionDeclBits.HasDefaultedOrDeletedInfo = false;
Body = LazyDeclStmtPtr(B);
if (B)
EndRangeLoc = B->getEndLoc();
@@ -4499,7 +4534,7 @@ unsigned FunctionDecl::getODRHash() {
}

class ODRHash Hash;
Hash.AddFunctionDecl(this, /*SkipBody=*/shouldSkipCheckingODR());
Hash.AddFunctionDecl(this);
setHasODRHash(true);
ODRHash = Hash.CalculateHash();
return ODRHash;
5 changes: 0 additions & 5 deletions clang/lib/AST/DeclBase.cpp
@@ -1106,11 +1106,6 @@ bool Decl::isFromExplicitGlobalModule() const {
return getOwningModule() && getOwningModule()->isExplicitGlobalModule();
}

bool Decl::shouldSkipCheckingODR() const {
return getASTContext().getLangOpts().SkipODRCheckInGMF &&
isFromExplicitGlobalModule();
}

static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }

9 changes: 7 additions & 2 deletions clang/lib/AST/DeclPrinter.cpp
@@ -822,9 +822,14 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {

if (D->isPureVirtual())
Out << " = 0";
else if (D->isDeletedAsWritten())
else if (D->isDeletedAsWritten()) {
Out << " = delete";
else if (D->isExplicitlyDefaulted())
if (const StringLiteral *M = D->getDeletedMessage()) {
Out << "(";
M->outputString(Out);
Out << ")";
}
} else if (D->isExplicitlyDefaulted())
Out << " = default";
else if (D->doesThisDeclarationHaveABody()) {
if (!Policy.TerseOutput) {
70 changes: 63 additions & 7 deletions clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -262,7 +262,7 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->discard(SubExpr);

std::optional<PrimType> FromT = classify(SubExpr->getType());
std::optional<PrimType> ToT = classifyPrim(CE->getType());
std::optional<PrimType> ToT = classify(CE->getType());
if (!FromT || !ToT)
return false;

@@ -398,6 +398,35 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return true;
}

case CK_VectorSplat: {
assert(!classify(CE->getType()));
assert(classify(SubExpr->getType()));
assert(CE->getType()->isVectorType());

if (DiscardResult)
return this->discard(SubExpr);

assert(Initializing); // FIXME: Not always correct.
const auto *VT = CE->getType()->getAs<VectorType>();
PrimType ElemT = classifyPrim(SubExpr);
unsigned ElemOffset = allocateLocalPrimitive(
SubExpr, ElemT, /*IsConst=*/true, /*IsExtended=*/false);

if (!this->visit(SubExpr))
return false;
if (!this->emitSetLocal(ElemT, ElemOffset, CE))
return false;

for (unsigned I = 0; I != VT->getNumElements(); ++I) {
if (!this->emitGetLocal(ElemT, ElemOffset, CE))
return false;
if (!this->emitInitElem(ElemT, I, CE))
return false;
}

return true;
}

case CK_ToVoid:
return discard(SubExpr);

@@ -1251,17 +1280,46 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryExprOrTypeTraitExpr(
return this->emitConst(Size.getQuantity(), E);
}

if (Kind == UETT_VectorElements) {
if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>())
return this->emitConst(VT->getNumElements(), E);

// FIXME: Apparently we need to catch the fact that a sizeless vector type
// has been passed and diagnose that (at run time).
assert(E->getTypeOfArgument()->isSizelessVectorType());
}

return false;
}

template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitMemberExpr(const MemberExpr *E) {
// 'Base.Member'
const Expr *Base = E->getBase();
const ValueDecl *Member = E->getMemberDecl();

if (DiscardResult)
return this->discard(Base);

// MemberExprs are almost always lvalues, in which case we don't need to
// do the load. But sometimes they aren't.
const auto maybeLoadValue = [&]() -> bool {
if (E->isGLValue())
return true;
if (std::optional<PrimType> T = classify(E))
return this->emitLoadPop(*T, E);
return false;
};

if (const auto *VD = dyn_cast<VarDecl>(Member)) {
// I am almost confident in saying that a var decl must be static
// and therefore registered as a global variable. But this will probably
// turn out to be wrong some time in the future, as always.
if (auto GlobalIndex = P.getGlobal(VD))
return this->emitGetPtrGlobal(*GlobalIndex, E) && maybeLoadValue();
return false;
}

if (Initializing) {
if (!this->delegate(Base))
return false;
@@ -1271,16 +1329,14 @@ bool ByteCodeExprGen<Emitter>::VisitMemberExpr(const MemberExpr *E) {
}

// Base above gives us a pointer on the stack.
// TODO: Implement non-FieldDecl members.
const ValueDecl *Member = E->getMemberDecl();
if (const auto *FD = dyn_cast<FieldDecl>(Member)) {
const RecordDecl *RD = FD->getParent();
const Record *R = getRecord(RD);
const Record::Field *F = R->getField(FD);
// Leave a pointer to the field on the stack.
if (F->Decl->getType()->isReferenceType())
return this->emitGetFieldPop(PT_Ptr, F->Offset, E);
return this->emitGetPtrField(F->Offset, E);
return this->emitGetFieldPop(PT_Ptr, F->Offset, E) && maybeLoadValue();
return this->emitGetPtrField(F->Offset, E) && maybeLoadValue();
}

return false;
@@ -1615,7 +1671,7 @@ bool ByteCodeExprGen<Emitter>::VisitCompoundAssignOperator(
return false;
if (!this->emitLoad(*LT, E))
return false;
if (*LT != *LHSComputationT) {
if (LT != LHSComputationT) {
if (!this->emitCast(*LT, *LHSComputationT, E))
return false;
}
@@ -1671,7 +1727,7 @@ bool ByteCodeExprGen<Emitter>::VisitCompoundAssignOperator(
}

// And now cast from LHSComputationT to ResultT.
if (*ResultT != *LHSComputationT) {
if (ResultT != LHSComputationT) {
if (!this->emitCast(*LHSComputationT, *ResultT, E))
return false;
}
9 changes: 8 additions & 1 deletion clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -148,13 +148,20 @@ class ByteCodeExprGen : public ConstStmtVisitor<ByteCodeExprGen<Emitter>, bool>,
return Ctx.classify(Ty);
}

/// Classifies a known primitive type
/// Classifies a known primitive type.
PrimType classifyPrim(QualType Ty) const {
if (auto T = classify(Ty)) {
return *T;
}
llvm_unreachable("not a primitive type");
}
/// Classifies a known primitive expression.
PrimType classifyPrim(const Expr *E) const {
if (auto T = classify(E))
return *T;
llvm_unreachable("not a primitive type");
}

/// Evaluates an expression and places the result on the stack. If the
/// expression is of composite type, a local variable will be created
/// and a pointer to said variable will be placed on the stack.
18 changes: 17 additions & 1 deletion clang/lib/AST/Interp/Disasm.cpp
@@ -140,7 +140,7 @@ LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
const Descriptor *Desc = G->block()->getDescriptor();
Pointer GP = getPtrGlobal(GI);

OS << GI << ": " << (void *)G->block() << " ";
OS << GI << ": " << (const void *)G->block() << " ";
{
ColorScope SC(OS, true,
GP.isInitialized()
@@ -264,3 +264,19 @@ LLVM_DUMP_METHOD void Record::dump(llvm::raw_ostream &OS, unsigned Indentation,
++I;
}
}

LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
{
ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_BLUE, true});
OS << "Block " << (const void *)this << "\n";
}
unsigned NPointers = 0;
for (const Pointer *P = Pointers; P; P = P->Next) {
++NPointers;
}
OS << " Pointers: " << NPointers << "\n";
OS << " Dead: " << IsDead << "\n";
OS << " Static: " << IsStatic << "\n";
OS << " Extern: " << IsExtern << "\n";
OS << " Initialized: " << IsInitialized << "\n";
}
7 changes: 7 additions & 0 deletions clang/lib/AST/Interp/FunctionPointer.h
@@ -32,6 +32,13 @@ class FunctionPointer final {

const Function *getFunction() const { return Func; }
bool isZero() const { return !Func; }
bool isValid() const { return Valid; }
bool isWeak() const {
if (!Func || !Valid)
return false;

return Func->getDecl()->isWeak();
}

APValue toAPValue() const {
if (!Func)
6 changes: 5 additions & 1 deletion clang/lib/AST/Interp/Interp.h
@@ -758,7 +758,7 @@ inline bool CmpHelperEQ<FunctionPointer>(InterpState &S, CodePtr OpPC,

// We cannot compare against weak declarations at compile time.
for (const auto &FP : {LHS, RHS}) {
if (!FP.isZero() && FP.getFunction()->getDecl()->isWeak()) {
if (FP.isWeak()) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison)
<< FP.toDiagnosticString(S.getCtx());
@@ -2236,6 +2236,10 @@ inline bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize,
<< const_cast<Expr *>(E) << E->getSourceRange();
return false;
}

if (!FuncPtr.isValid())
return false;

assert(F);

// Check argument nullability state.
3 changes: 3 additions & 0 deletions clang/lib/AST/Interp/InterpBlock.h
@@ -118,6 +118,9 @@ class Block final {
IsInitialized = false;
}

void dump() const { dump(llvm::errs()); }
void dump(llvm::raw_ostream &OS) const;

protected:
friend class Pointer;
friend class DeadBlock;
118 changes: 118 additions & 0 deletions clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -977,6 +977,117 @@ static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
return true;
}

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const Function *Func,
const CallExpr *Call) {
unsigned BuiltinOp = Func->getBuiltinID();
unsigned CallSize = callArgSize(S, Call);

PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

if (Alignment < 0 || !Alignment.isPowerOf2()) {
S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
return false;
}
unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType());
APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
if (APSInt::compareValues(Alignment, MaxValue) > 0) {
S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
<< MaxValue << Call->getArg(0)->getType() << Alignment;
return false;
}

// The first parameter is either an integer or a pointer (but not a function
// pointer).
PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

if (isIntegralType(FirstArgT)) {
const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
if (BuiltinOp == Builtin::BI__builtin_align_up) {
APSInt AlignedVal =
APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
pushInteger(S, AlignedVal, Call->getType());
} else if (BuiltinOp == Builtin::BI__builtin_align_down) {
APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
pushInteger(S, AlignedVal, Call->getType());
} else {
assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
}
return true;
}

assert(FirstArgT == PT_Ptr);
const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

unsigned PtrOffset = Ptr.getByteOffset();
PtrOffset = Ptr.getIndex();
CharUnits BaseAlignment =
S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
CharUnits PtrAlign =
BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
if (PtrAlign.getQuantity() >= Alignment) {
S.Stk.push<Boolean>(true);
return true;
}
// If the alignment is not known to be sufficient, some cases could still
// be aligned at run time. However, if the requested alignment is less or
// equal to the base alignment and the offset is not aligned, we know that
// the run-time value can never be aligned.
if (BaseAlignment.getQuantity() >= Alignment &&
PtrAlign.getQuantity() < Alignment) {
S.Stk.push<Boolean>(false);
return true;
}

S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
<< Alignment;
return false;
}

assert(BuiltinOp == Builtin::BI__builtin_align_down ||
BuiltinOp == Builtin::BI__builtin_align_up);

// For align_up/align_down, we can return the same value if the alignment
// is known to be greater or equal to the requested value.
if (PtrAlign.getQuantity() >= Alignment) {
S.Stk.push<Pointer>(Ptr);
return true;
}

// The alignment could be greater than the minimum at run-time, so we cannot
// infer much about the resulting pointer value. One case is possible:
// For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
// can infer the correct index if the requested alignment is smaller than
// the base alignment so we can perform the computation on the offset.
if (BaseAlignment.getQuantity() >= Alignment) {
assert(Alignment.getBitWidth() <= 64 &&
"Cannot handle > 64-bit address-space");
uint64_t Alignment64 = Alignment.getZExtValue();
CharUnits NewOffset =
CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
? llvm::alignDown(PtrOffset, Alignment64)
: llvm::alignTo(PtrOffset, Alignment64));

S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
return true;
}

// Otherwise, we cannot constant-evaluate the result.
S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
return false;
}

bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
const CallExpr *Call) {
const InterpFrame *Frame = S.Current;
@@ -1291,6 +1402,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
return false;
break;

case Builtin::BI__builtin_is_aligned:
case Builtin::BI__builtin_align_up:
case Builtin::BI__builtin_align_down:
if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
return false;
break;
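// Constant evaluations now handled by the bytecode interpreter, e.g.
// (illustrative):
//   static_assert(__builtin_is_aligned(256, 32));
//   static_assert(__builtin_align_up(13, 8) == 16);
//   static_assert(__builtin_align_down(13, 8) == 8);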

default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
7 changes: 7 additions & 0 deletions clang/lib/AST/Interp/InterpFrame.cpp
@@ -152,6 +152,13 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
}

void InterpFrame::describe(llvm::raw_ostream &OS) const {
// We create frames for builtin functions as well, but we can't reliably
// diagnose them. The 'in call to' diagnostics for them add no value to the
// user _and_ it doesn't generally work since the argument types don't always
// match the function prototype. Just ignore them.
if (const auto *F = getFunction(); F && F->isBuiltin())
return;

const FunctionDecl *F = getCallee();
if (const auto *M = dyn_cast<CXXMethodDecl>(F);
M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
6 changes: 5 additions & 1 deletion clang/lib/AST/Interp/InterpState.h
@@ -89,7 +89,11 @@ class InterpState final : public State, public SourceMapper {

/// Delegates source mapping to the mapper.
SourceInfo getSource(const Function *F, CodePtr PC) const override {
return M ? M->getSource(F, PC) : F->getSource(PC);
if (M)
return M->getSource(F, PC);

assert(F && "Function cannot be null");
return F->getSource(PC);
}

Context &getContext() const { return Ctx; }
5 changes: 1 addition & 4 deletions clang/lib/AST/Interp/Pointer.h
@@ -241,13 +241,10 @@ class Pointer {

/// Checks if the pointer is null.
bool isZero() const {
if (Offset != 0)
return false;

if (isBlockPointer())
return asBlockPointer().Pointee == nullptr;
assert(isIntegralPointer());
return asIntPointer().Value == 0;
return asIntPointer().Value == 0 && Offset == 0;
}
/// Checks if the pointer is live.
bool isLive() const {
5 changes: 3 additions & 2 deletions clang/lib/AST/Interp/State.cpp
@@ -155,7 +155,8 @@ void State::addCallStack(unsigned Limit) {
SmallString<128> Buffer;
llvm::raw_svector_ostream Out(Buffer);
F->describe(Out);
addDiag(CallRange.getBegin(), diag::note_constexpr_call_here)
<< Out.str() << CallRange;
if (!Buffer.empty())
addDiag(CallRange.getBegin(), diag::note_constexpr_call_here)
<< Out.str() << CallRange;
}
}
11 changes: 11 additions & 0 deletions clang/lib/AST/JSONNodeDumper.cpp
@@ -975,6 +975,9 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) {
if (FD->isDefaulted())
JOS.attribute("explicitlyDefaulted",
FD->isDeleted() ? "deleted" : "default");

if (StringLiteral *Msg = FD->getDeletedMessage())
JOS.attribute("deletedMessage", Msg->getString());
}

void JSONNodeDumper::VisitEnumDecl(const EnumDecl *ED) {
@@ -1576,6 +1579,14 @@ void JSONNodeDumper::VisitMaterializeTemporaryExpr(
attributeOnlyIfTrue("boundToLValueRef", MTE->isBoundToLvalueReference());
}

void JSONNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) {
attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit());
}

void JSONNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) {
attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit());
}

void JSONNodeDumper::VisitCXXDependentScopeMemberExpr(
const CXXDependentScopeMemberExpr *DSME) {
JOS.attribute("isArrow", DSME->isArrow());
6 changes: 6 additions & 0 deletions clang/lib/AST/ODRHash.cpp
@@ -696,6 +696,12 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
AddBoolean(Function->isDeletedAsWritten());
AddBoolean(Function->isExplicitlyDefaulted());

StringLiteral *DeletedMessage = Function->getDeletedMessage();
AddBoolean(DeletedMessage);

if (DeletedMessage)
ID.AddString(DeletedMessage->getBytes());

AddDecl(Function);

AddQualType(Function->getReturnType());
68 changes: 66 additions & 2 deletions clang/lib/AST/OpenACCClause.cpp
@@ -13,6 +13,7 @@

#include "clang/AST/OpenACCClause.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"

using namespace clang;

@@ -27,10 +28,73 @@ OpenACCDefaultClause *OpenACCDefaultClause::Create(const ASTContext &C,
return new (Mem) OpenACCDefaultClause(K, BeginLoc, LParenLoc, EndLoc);
}

OpenACCIfClause *OpenACCIfClause::Create(const ASTContext &C,
SourceLocation BeginLoc,
SourceLocation LParenLoc,
Expr *ConditionExpr,
SourceLocation EndLoc) {
void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause));
return new (Mem) OpenACCIfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc);
}

OpenACCIfClause::OpenACCIfClause(SourceLocation BeginLoc,
SourceLocation LParenLoc, Expr *ConditionExpr,
SourceLocation EndLoc)
: OpenACCClauseWithCondition(OpenACCClauseKind::If, BeginLoc, LParenLoc,
ConditionExpr, EndLoc) {
assert(ConditionExpr && "if clause requires condition expr");
assert((ConditionExpr->isInstantiationDependent() ||
ConditionExpr->getType()->isScalarType()) &&
"Condition expression type not scalar/dependent");
}

OpenACCSelfClause *OpenACCSelfClause::Create(const ASTContext &C,
SourceLocation BeginLoc,
SourceLocation LParenLoc,
Expr *ConditionExpr,
SourceLocation EndLoc) {
void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause));
return new (Mem)
OpenACCSelfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc);
}

OpenACCSelfClause::OpenACCSelfClause(SourceLocation BeginLoc,
SourceLocation LParenLoc,
Expr *ConditionExpr, SourceLocation EndLoc)
: OpenACCClauseWithCondition(OpenACCClauseKind::Self, BeginLoc, LParenLoc,
ConditionExpr, EndLoc) {
assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() ||
ConditionExpr->getType()->isScalarType()) &&
"Condition expression type not scalar/dependent");
}

OpenACCClause::child_range OpenACCClause::children() {
switch (getClauseKind()) {
default:
assert(false && "Clause children function not implemented");
break;
#define VISIT_CLAUSE(CLAUSE_NAME) \
case OpenACCClauseKind::CLAUSE_NAME: \
return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children();

#include "clang/Basic/OpenACCClauses.def"
}
return child_range(child_iterator(), child_iterator());
}

//===----------------------------------------------------------------------===//
// OpenACC clauses printing methods
//===----------------------------------------------------------------------===//
void OpenACCClausePrinter::VisitOpenACCDefaultClause(
const OpenACCDefaultClause &C) {
void OpenACCClausePrinter::VisitDefaultClause(const OpenACCDefaultClause &C) {
OS << "default(" << C.getDefaultClauseKind() << ")";
}

void OpenACCClausePrinter::VisitIfClause(const OpenACCIfClause &C) {
OS << "if(" << C.getConditionExpr() << ")";
}

void OpenACCClausePrinter::VisitSelfClause(const OpenACCSelfClause &C) {
OS << "self";
if (const Expr *CondExpr = C.getConditionExpr())
OS << "(" << CondExpr << ")";
}
46 changes: 40 additions & 6 deletions clang/lib/AST/StmtProfile.cpp
@@ -2071,13 +2071,31 @@ StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
}

CXXRecordDecl *Lambda = S->getLambdaClass();
ID.AddInteger(Lambda->getODRHash());

for (const auto &Capture : Lambda->captures()) {
ID.AddInteger(Capture.getCaptureKind());
if (Capture.capturesVariable())
VisitDecl(Capture.getCapturedVar());
}

// Profiling the body of the lambda may be dangerous during deserialization.
// So we only profile the signature here.
ODRHash Hasher;
// FIXME: We can't get the operator call easily by
// `CXXRecordDecl::getLambdaCallOperator()` if we're in deserialization.
// So we have to do something raw here.
for (auto *SubDecl : Lambda->decls()) {
FunctionDecl *Call = nullptr;
if (auto *FTD = dyn_cast<FunctionTemplateDecl>(SubDecl))
Call = FTD->getTemplatedDecl();
else if (auto *FD = dyn_cast<FunctionDecl>(SubDecl))
Call = FD;

if (!Call)
continue;

Hasher.AddFunctionDecl(Call, /*SkipBody=*/true);
}
ID.AddInteger(Hasher.CalculateHash());
}

void
@@ -2445,9 +2463,10 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
namespace {
class OpenACCClauseProfiler
: public OpenACCClauseVisitor<OpenACCClauseProfiler> {
StmtProfiler &Profiler;

public:
OpenACCClauseProfiler() = default;
OpenACCClauseProfiler(StmtProfiler &P) : Profiler(P) {}

void VisitOpenACCClauseList(ArrayRef<const OpenACCClause *> Clauses) {
for (const OpenACCClause *Clause : Clauses) {
@@ -2456,20 +2475,35 @@ class OpenACCClauseProfiler
Visit(Clause);
}
}
void VisitOpenACCDefaultClause(const OpenACCDefaultClause &Clause);

#define VISIT_CLAUSE(CLAUSE_NAME) \
void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause);

#include "clang/Basic/OpenACCClauses.def"
};

/// Nothing to do here, there are no sub-statements.
void OpenACCClauseProfiler::VisitOpenACCDefaultClause(
void OpenACCClauseProfiler::VisitDefaultClause(
const OpenACCDefaultClause &Clause) {}

void OpenACCClauseProfiler::VisitIfClause(const OpenACCIfClause &Clause) {
assert(Clause.hasConditionExpr() &&
"if clause requires a valid condition expr");
Profiler.VisitStmt(Clause.getConditionExpr());
}

void OpenACCClauseProfiler::VisitSelfClause(const OpenACCSelfClause &Clause) {
if (Clause.hasConditionExpr())
Profiler.VisitStmt(Clause.getConditionExpr());
}
} // namespace

void StmtProfiler::VisitOpenACCComputeConstruct(
const OpenACCComputeConstruct *S) {
// VisitStmt handles children, so the AssociatedStmt is handled.
VisitStmt(S);

OpenACCClauseProfiler P;
OpenACCClauseProfiler P{*this};
P.VisitOpenACCClauseList(S->clauses());
}

23 changes: 11 additions & 12 deletions clang/lib/AST/TextNodeDumper.cpp
@@ -397,6 +397,12 @@ void TextNodeDumper::Visit(const OpenACCClause *C) {
case OpenACCClauseKind::Default:
OS << '(' << cast<OpenACCDefaultClause>(C)->getDefaultClauseKind() << ')';
break;
case OpenACCClauseKind::If:
case OpenACCClauseKind::Self:
// The condition expression will be printed as a part of the 'children',
// but print 'clause' here so it is clear what is happening from the dump.
OS << " clause";
break;
default:
// Nothing to do here.
break;
@@ -1450,23 +1456,13 @@ void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) {
}

void TextNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) {
if (Node->hasRewrittenInit()) {
if (Node->hasRewrittenInit())
OS << " has rewritten init";
AddChild([=] {
ColorScope Color(OS, ShowColors, StmtColor);
Visit(Node->getExpr());
});
}
}

void TextNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) {
if (Node->hasRewrittenInit()) {
if (Node->hasRewrittenInit())
OS << " has rewritten init";
AddChild([=] {
ColorScope Color(OS, ShowColors, StmtColor);
Visit(Node->getExpr());
});
}
}

void TextNodeDumper::VisitMaterializeTemporaryExpr(
@@ -1966,6 +1962,9 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isTrivial())
OS << " trivial";

if (const StringLiteral *M = D->getDeletedMessage())
AddChild("delete message", [=] { Visit(M); });

if (D->isIneligibleOrNotSelected())
OS << (isa<CXXDestructorDecl>(D) ? " not_selected" : " ineligible");

249 changes: 249 additions & 0 deletions clang/lib/Analysis/FlowSensitive/ASTOps.cpp
@@ -0,0 +1,249 @@
//===-- ASTOps.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Operations on AST nodes that are used in flow-sensitive analysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <iterator>
#include <vector>

#define DEBUG_TYPE "dataflow"

namespace clang::dataflow {

const Expr &ignoreCFGOmittedNodes(const Expr &E) {
const Expr *Current = &E;
if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
Current = EWC->getSubExpr();
assert(Current != nullptr);
}
Current = Current->IgnoreParens();
assert(Current != nullptr);
return *Current;
}

const Stmt &ignoreCFGOmittedNodes(const Stmt &S) {
if (auto *E = dyn_cast<Expr>(&S))
return ignoreCFGOmittedNodes(*E);
return S;
}

// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
// field decl will be modeled for all instances of the inherited field.
static void getFieldsFromClassHierarchy(QualType Type, FieldSet &Fields) {
if (Type->isIncompleteType() || Type->isDependentType() ||
!Type->isRecordType())
return;

for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
Fields.insert(Field);
if (auto *CXXRecord = Type->getAsCXXRecordDecl())
for (const CXXBaseSpecifier &Base : CXXRecord->bases())
getFieldsFromClassHierarchy(Base.getType(), Fields);
}

/// Gets the set of all fields in the type.
FieldSet getObjectFields(QualType Type) {
FieldSet Fields;
getFieldsFromClassHierarchy(Type, Fields);
return Fields;
}

bool containsSameFields(const FieldSet &Fields,
const RecordStorageLocation::FieldToLoc &FieldLocs) {
if (Fields.size() != FieldLocs.size())
return false;
for ([[maybe_unused]] auto [Field, Loc] : FieldLocs)
if (!Fields.contains(cast_or_null<FieldDecl>(Field)))
return false;
return true;
}

/// Returns the fields of a `RecordDecl` that are initialized by an
/// `InitListExpr`, in the order in which they appear in
/// `InitListExpr::inits()`.
/// `Init->getType()` must be a record type.
static std::vector<const FieldDecl *>
getFieldsForInitListExpr(const InitListExpr *InitList) {
const RecordDecl *RD = InitList->getType()->getAsRecordDecl();
assert(RD != nullptr);

std::vector<const FieldDecl *> Fields;

if (InitList->getType()->isUnionType()) {
Fields.push_back(InitList->getInitializedFieldInUnion());
return Fields;
}

// Unnamed bitfields are only used for padding and do not appear in
// `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
// field list, and we thus need to remove them before mapping inits to
// fields to avoid mapping inits to the wrong fields.
llvm::copy_if(
RD->fields(), std::back_inserter(Fields),
[](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
return Fields;
}

RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList) {
auto *RD = InitList->getType()->getAsCXXRecordDecl();
assert(RD != nullptr);

std::vector<const FieldDecl *> Fields = getFieldsForInitListExpr(InitList);
ArrayRef<Expr *> Inits = InitList->inits();

// Unions initialized with an empty initializer list need special treatment.
// For structs/classes initialized with an empty initializer list, Clang
// puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
// it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
SmallVector<Expr *> InitsForUnion;
if (InitList->getType()->isUnionType() && Inits.empty()) {
assert(Fields.size() == 1);
ImplicitValueInitForUnion.emplace(Fields.front()->getType());
InitsForUnion.push_back(&*ImplicitValueInitForUnion);
Inits = InitsForUnion;
}

size_t InitIdx = 0;

assert(Fields.size() + RD->getNumBases() == Inits.size());
for (const CXXBaseSpecifier &Base : RD->bases()) {
assert(InitIdx < Inits.size());
Expr *Init = Inits[InitIdx++];
BaseInits.emplace_back(&Base, Init);
}

assert(Fields.size() == Inits.size() - InitIdx);
for (const FieldDecl *Field : Fields) {
assert(InitIdx < Inits.size());
Expr *Init = Inits[InitIdx++];
FieldInits.emplace_back(Field, Init);
}
}

static void insertIfGlobal(const Decl &D,
llvm::DenseSet<const VarDecl *> &Globals) {
if (auto *V = dyn_cast<VarDecl>(&D))
if (V->hasGlobalStorage())
Globals.insert(V);
}

static void insertIfFunction(const Decl &D,
llvm::DenseSet<const FunctionDecl *> &Funcs) {
if (auto *FD = dyn_cast<FunctionDecl>(&D))
Funcs.insert(FD);
}

static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) {
// Use getCalleeDecl instead of getMethodDecl in order to handle
// pointer-to-member calls.
const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(C.getCalleeDecl());
if (!MethodDecl)
return nullptr;
auto *Body = dyn_cast_or_null<CompoundStmt>(MethodDecl->getBody());
if (!Body || Body->size() != 1)
return nullptr;
if (auto *RS = dyn_cast<ReturnStmt>(*Body->body_begin()))
if (auto *Return = RS->getRetValue())
return dyn_cast<MemberExpr>(Return->IgnoreParenImpCasts());
return nullptr;
}

static void getReferencedDecls(const Decl &D, ReferencedDecls &Referenced) {
insertIfGlobal(D, Referenced.Globals);
insertIfFunction(D, Referenced.Functions);
if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D))
for (const auto *B : Decomp->bindings())
if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding()))
// FIXME: should we be using `E->getFoundDecl()`?
if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
Referenced.Fields.insert(FD);
}

/// Traverses `S` and inserts into `Referenced` any declarations that are
/// declared in or referenced from sub-statements.
static void getReferencedDecls(const Stmt &S, ReferencedDecls &Referenced) {
for (auto *Child : S.children())
if (Child != nullptr)
getReferencedDecls(*Child, Referenced);
if (const auto *DefaultArg = dyn_cast<CXXDefaultArgExpr>(&S))
getReferencedDecls(*DefaultArg->getExpr(), Referenced);
if (const auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(&S))
getReferencedDecls(*DefaultInit->getExpr(), Referenced);

if (auto *DS = dyn_cast<DeclStmt>(&S)) {
if (DS->isSingleDecl())
getReferencedDecls(*DS->getSingleDecl(), Referenced);
else
for (auto *D : DS->getDeclGroup())
getReferencedDecls(*D, Referenced);
} else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
insertIfGlobal(*E->getDecl(), Referenced.Globals);
insertIfFunction(*E->getDecl(), Referenced.Functions);
} else if (const auto *C = dyn_cast<CXXMemberCallExpr>(&S)) {
// If this is a method that returns a member variable but does nothing else,
// model the field of the return value.
if (MemberExpr *E = getMemberForAccessor(*C))
if (const auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl()))
Referenced.Fields.insert(FD);
} else if (auto *E = dyn_cast<MemberExpr>(&S)) {
// FIXME: should we be using `E->getFoundDecl()`?
const ValueDecl *VD = E->getMemberDecl();
insertIfGlobal(*VD, Referenced.Globals);
insertIfFunction(*VD, Referenced.Functions);
if (const auto *FD = dyn_cast<FieldDecl>(VD))
Referenced.Fields.insert(FD);
} else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
if (InitList->getType()->isRecordType())
for (const auto *FD : getFieldsForInitListExpr(InitList))
Referenced.Fields.insert(FD);
}
}

ReferencedDecls getReferencedDecls(const FunctionDecl &FD) {
ReferencedDecls Result;
// Look for global variable and field references in the
// constructor-initializers.
if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(&FD)) {
for (const auto *Init : CtorDecl->inits()) {
if (Init->isMemberInitializer()) {
Result.Fields.insert(Init->getMember());
} else if (Init->isIndirectMemberInitializer()) {
for (const auto *I : Init->getIndirectMember()->chain())
Result.Fields.insert(cast<FieldDecl>(I));
}
const Expr *E = Init->getInit();
assert(E != nullptr);
getReferencedDecls(*E, Result);
}
// Add all fields mentioned in default member initializers.
for (const FieldDecl *F : CtorDecl->getParent()->fields())
if (const auto *I = F->getInClassInitializer())
getReferencedDecls(*I, Result);
}
getReferencedDecls(*FD.getBody(), Result);

return Result;
}

} // namespace clang::dataflow
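
A minimal usage sketch for the entry points collected in this new file, mirroring the call site in DataflowEnvironment.cpp further down. Assumptions: ReferencedDecls and getReferencedDecls are declared in ASTOps.h as used above, and FD has a body; the loop bodies are placeholders.

#include "clang/AST/Decl.h"
#include "clang/Analysis/FlowSensitive/ASTOps.h"

static void seedFromFunction(const clang::FunctionDecl &FD) {
  using namespace clang::dataflow;
  ReferencedDecls Refs = getReferencedDecls(FD); // FD must have a body
  for (const clang::FieldDecl *Field : Refs.Fields)
    (void)Field;   // model these fields before creating record storage
  for (const clang::VarDecl *Global : Refs.Globals)
    (void)Global;  // create storage locations for referenced globals
  for (const clang::FunctionDecl *Fn : Refs.Functions)
    (void)Fn;      // create storage locations for referenced functions
}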
1 change: 1 addition & 0 deletions clang/lib/Analysis/FlowSensitive/CMakeLists.txt
@@ -1,6 +1,7 @@
add_clang_library(clangAnalysisFlowSensitive
AdornedCFG.cpp
Arena.cpp
ASTOps.cpp
DataflowAnalysisContext.cpp
DataflowEnvironment.cpp
Formula.cpp
53 changes: 1 addition & 52 deletions clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -14,6 +14,7 @@

#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/Analysis/FlowSensitive/DebugSupport.h"
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Logger.h"
@@ -359,55 +360,3 @@ DataflowAnalysisContext::~DataflowAnalysisContext() = default;

} // namespace dataflow
} // namespace clang

using namespace clang;

const Expr &clang::dataflow::ignoreCFGOmittedNodes(const Expr &E) {
const Expr *Current = &E;
if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
Current = EWC->getSubExpr();
assert(Current != nullptr);
}
Current = Current->IgnoreParens();
assert(Current != nullptr);
return *Current;
}

const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
if (auto *E = dyn_cast<Expr>(&S))
return ignoreCFGOmittedNodes(*E);
return S;
}

// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
// field decl will be modeled for all instances of the inherited field.
static void getFieldsFromClassHierarchy(QualType Type,
clang::dataflow::FieldSet &Fields) {
if (Type->isIncompleteType() || Type->isDependentType() ||
!Type->isRecordType())
return;

for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
Fields.insert(Field);
if (auto *CXXRecord = Type->getAsCXXRecordDecl())
for (const CXXBaseSpecifier &Base : CXXRecord->bases())
getFieldsFromClassHierarchy(Base.getType(), Fields);
}

/// Gets the set of all fields in the type.
clang::dataflow::FieldSet clang::dataflow::getObjectFields(QualType Type) {
FieldSet Fields;
getFieldsFromClassHierarchy(Type, Fields);
return Fields;
}

bool clang::dataflow::containsSameFields(
const clang::dataflow::FieldSet &Fields,
const clang::dataflow::RecordStorageLocation::FieldToLoc &FieldLocs) {
if (Fields.size() != FieldLocs.size())
return false;
for ([[maybe_unused]] auto [Field, Loc] : FieldLocs)
if (!Fields.contains(cast_or_null<FieldDecl>(Field)))
return false;
return true;
}
182 changes: 10 additions & 172 deletions clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
@@ -304,93 +305,6 @@ widenKeyToValueMap(const llvm::MapVector<Key, Value *> &CurMap,
return WidenedMap;
}

/// Initializes a global storage value.
static void insertIfGlobal(const Decl &D,
llvm::DenseSet<const VarDecl *> &Vars) {
if (auto *V = dyn_cast<VarDecl>(&D))
if (V->hasGlobalStorage())
Vars.insert(V);
}

static void insertIfFunction(const Decl &D,
llvm::DenseSet<const FunctionDecl *> &Funcs) {
if (auto *FD = dyn_cast<FunctionDecl>(&D))
Funcs.insert(FD);
}

static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) {
// Use getCalleeDecl instead of getMethodDecl in order to handle
// pointer-to-member calls.
const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(C.getCalleeDecl());
if (!MethodDecl)
return nullptr;
auto *Body = dyn_cast_or_null<CompoundStmt>(MethodDecl->getBody());
if (!Body || Body->size() != 1)
return nullptr;
if (auto *RS = dyn_cast<ReturnStmt>(*Body->body_begin()))
if (auto *Return = RS->getRetValue())
return dyn_cast<MemberExpr>(Return->IgnoreParenImpCasts());
return nullptr;
}

static void
getFieldsGlobalsAndFuncs(const Decl &D, FieldSet &Fields,
llvm::DenseSet<const VarDecl *> &Vars,
llvm::DenseSet<const FunctionDecl *> &Funcs) {
insertIfGlobal(D, Vars);
insertIfFunction(D, Funcs);
if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D))
for (const auto *B : Decomp->bindings())
if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding()))
// FIXME: should we be using `E->getFoundDecl()`?
if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
Fields.insert(FD);
}

/// Traverses `S` and inserts into `Fields`, `Vars` and `Funcs` any fields,
/// global variables and functions that are declared in or referenced from
/// sub-statements.
static void
getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields,
llvm::DenseSet<const VarDecl *> &Vars,
llvm::DenseSet<const FunctionDecl *> &Funcs) {
for (auto *Child : S.children())
if (Child != nullptr)
getFieldsGlobalsAndFuncs(*Child, Fields, Vars, Funcs);
if (const auto *DefaultArg = dyn_cast<CXXDefaultArgExpr>(&S))
getFieldsGlobalsAndFuncs(*DefaultArg->getExpr(), Fields, Vars, Funcs);
if (const auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(&S))
getFieldsGlobalsAndFuncs(*DefaultInit->getExpr(), Fields, Vars, Funcs);

if (auto *DS = dyn_cast<DeclStmt>(&S)) {
if (DS->isSingleDecl())
getFieldsGlobalsAndFuncs(*DS->getSingleDecl(), Fields, Vars, Funcs);
else
for (auto *D : DS->getDeclGroup())
getFieldsGlobalsAndFuncs(*D, Fields, Vars, Funcs);
} else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
insertIfGlobal(*E->getDecl(), Vars);
insertIfFunction(*E->getDecl(), Funcs);
} else if (const auto *C = dyn_cast<CXXMemberCallExpr>(&S)) {
// If this is a method that returns a member variable but does nothing else,
// model the field of the return value.
if (MemberExpr *E = getMemberForAccessor(*C))
if (const auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl()))
Fields.insert(FD);
} else if (auto *E = dyn_cast<MemberExpr>(&S)) {
// FIXME: should we be using `E->getFoundDecl()`?
const ValueDecl *VD = E->getMemberDecl();
insertIfGlobal(*VD, Vars);
insertIfFunction(*VD, Funcs);
if (const auto *FD = dyn_cast<FieldDecl>(VD))
Fields.insert(FD);
} else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
if (InitList->getType()->isRecordType())
for (const auto *FD : getFieldsForInitListExpr(InitList))
Fields.insert(FD);
}
}

namespace {

// Visitor that builds a map from record prvalues to result objects.
@@ -508,6 +422,11 @@ class ResultObjectVisitor : public RecursiveASTVisitor<ResultObjectVisitor> {
isa<CXXStdInitializerListExpr>(E)) {
return;
}
if (auto *Op = dyn_cast<BinaryOperator>(E);
Op && Op->getOpcode() == BO_Cmp) {
// Builtin `<=>` returns a `std::strong_ordering` object.
return;
}

if (auto *InitList = dyn_cast<InitListExpr>(E)) {
if (!InitList->isSemanticForm())
@@ -648,36 +567,13 @@ void Environment::initialize() {
void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) {
assert(FuncDecl->doesThisDeclarationHaveABody());

FieldSet Fields;
llvm::DenseSet<const VarDecl *> Vars;
llvm::DenseSet<const FunctionDecl *> Funcs;

// Look for global variable and field references in the
// constructor-initializers.
if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(FuncDecl)) {
for (const auto *Init : CtorDecl->inits()) {
if (Init->isMemberInitializer()) {
Fields.insert(Init->getMember());
} else if (Init->isIndirectMemberInitializer()) {
for (const auto *I : Init->getIndirectMember()->chain())
Fields.insert(cast<FieldDecl>(I));
}
const Expr *E = Init->getInit();
assert(E != nullptr);
getFieldsGlobalsAndFuncs(*E, Fields, Vars, Funcs);
}
// Add all fields mentioned in default member initializers.
for (const FieldDecl *F : CtorDecl->getParent()->fields())
if (const auto *I = F->getInClassInitializer())
getFieldsGlobalsAndFuncs(*I, Fields, Vars, Funcs);
}
getFieldsGlobalsAndFuncs(*FuncDecl->getBody(), Fields, Vars, Funcs);
ReferencedDecls Referenced = getReferencedDecls(*FuncDecl);

// These have to be added before the lines that follow to ensure that
// `create*` work correctly for structs.
DACtx->addModeledFields(Fields);
DACtx->addModeledFields(Referenced.Fields);

for (const VarDecl *D : Vars) {
for (const VarDecl *D : Referenced.Globals) {
if (getStorageLocation(*D) != nullptr)
continue;

@@ -689,7 +585,7 @@ void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) {
setStorageLocation(*D, createObject(*D, nullptr));
}

for (const FunctionDecl *FD : Funcs) {
for (const FunctionDecl *FD : Referenced.Functions) {
if (getStorageLocation(*FD) != nullptr)
continue;
auto &Loc = createStorageLocation(*FD);
@@ -1349,64 +1245,6 @@ RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
return Env.get<RecordStorageLocation>(*Base);
}

std::vector<const FieldDecl *>
getFieldsForInitListExpr(const InitListExpr *InitList) {
const RecordDecl *RD = InitList->getType()->getAsRecordDecl();
assert(RD != nullptr);

std::vector<const FieldDecl *> Fields;

if (InitList->getType()->isUnionType()) {
Fields.push_back(InitList->getInitializedFieldInUnion());
return Fields;
}

// Unnamed bitfields are only used for padding and do not appear in
// `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
// field list, and we thus need to remove them before mapping inits to
// fields to avoid mapping inits to the wrongs fields.
llvm::copy_if(
RD->fields(), std::back_inserter(Fields),
[](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
return Fields;
}

RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList) {
auto *RD = InitList->getType()->getAsCXXRecordDecl();
assert(RD != nullptr);

std::vector<const FieldDecl *> Fields = getFieldsForInitListExpr(InitList);
ArrayRef<Expr *> Inits = InitList->inits();

// Unions initialized with an empty initializer list need special treatment.
// For structs/classes initialized with an empty initializer list, Clang
// puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
// it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
SmallVector<Expr *> InitsForUnion;
if (InitList->getType()->isUnionType() && Inits.empty()) {
assert(Fields.size() == 1);
ImplicitValueInitForUnion.emplace(Fields.front()->getType());
InitsForUnion.push_back(&*ImplicitValueInitForUnion);
Inits = InitsForUnion;
}

size_t InitIdx = 0;

assert(Fields.size() + RD->getNumBases() == Inits.size());
for (const CXXBaseSpecifier &Base : RD->bases()) {
assert(InitIdx < Inits.size());
Expr *Init = Inits[InitIdx++];
BaseInits.emplace_back(&Base, Init);
}

assert(Fields.size() == Inits.size() - InitIdx);
for (const FieldDecl *Field : Fields) {
assert(InitIdx < Inits.size());
Expr *Init = Inits[InitIdx++];
FieldInits.emplace_back(Field, Init);
}
}

RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
auto &NewVal = Env.create<RecordValue>(Loc);
Env.setValue(Loc, NewVal);
2 changes: 2 additions & 0 deletions clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -20,7 +20,9 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
#include "clang/Analysis/FlowSensitive/RecordOps.h"
6 changes: 3 additions & 3 deletions clang/lib/Basic/Cuda.cpp
@@ -86,7 +86,7 @@ static const CudaArchToStringMap arch_names[] = {
// clang-format off
{CudaArch::UNUSED, "", ""},
SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
SM(30), SM(32), SM(35), SM(37), // Kepler
SM(30), {CudaArch::SM_32_, "sm_32", "compute_32"}, SM(35), SM(37), // Kepler
SM(50), SM(52), SM(53), // Maxwell
SM(60), SM(61), SM(62), // Pascal
SM(70), SM(72), // Volta
@@ -186,7 +186,7 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
case CudaArch::SM_20:
case CudaArch::SM_21:
case CudaArch::SM_30:
case CudaArch::SM_32:
case CudaArch::SM_32_:
case CudaArch::SM_35:
case CudaArch::SM_37:
case CudaArch::SM_50:
@@ -231,7 +231,7 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
case CudaArch::SM_21:
return CudaVersion::CUDA_80;
case CudaArch::SM_30:
case CudaArch::SM_32:
case CudaArch::SM_32_:
return CudaVersion::CUDA_102;
case CudaArch::SM_35:
case CudaArch::SM_37:
4 changes: 4 additions & 0 deletions clang/lib/Basic/Module.cpp
@@ -305,6 +305,10 @@ bool Module::directlyUses(const Module *Requested) {
if (Requested->fullModuleNameIs({"_Builtin_stddef", "max_align_t"}) ||
Requested->fullModuleNameIs({"_Builtin_stddef_wint_t"}))
return true;
// Darwin is allowed to use our builtin 'ptrauth.h' and its accompanying
// module.
if (!Requested->Parent && Requested->Name == "ptrauth")
return true;

if (NoUndeclaredIncludes)
UndeclaredUses.insert(Requested);
2 changes: 1 addition & 1 deletion clang/lib/Basic/Targets/NVPTX.cpp
@@ -239,7 +239,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "210";
case CudaArch::SM_30:
return "300";
case CudaArch::SM_32:
case CudaArch::SM_32_:
return "320";
case CudaArch::SM_35:
return "350";
10 changes: 5 additions & 5 deletions clang/lib/Basic/Targets/SPIR.h
@@ -259,7 +259,7 @@ class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024");
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}

void getTargetDefines(const LangOptions &Opts,
@@ -276,7 +276,7 @@ class LLVM_LIBRARY_VISIBILITY SPIR64TargetInfo : public SPIRTargetInfo {
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024");
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}

void getTargetDefines(const LangOptions &Opts,
@@ -315,7 +315,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRVTargetInfo : public BaseSPIRVTargetInfo {
// SPIR-V IDs are represented with a single 32-bit word.
SizeType = TargetInfo::UnsignedInt;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024");
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}

void getTargetDefines(const LangOptions &Opts,
@@ -336,7 +336,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRV32TargetInfo : public BaseSPIRVTargetInfo {
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024");
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}

void getTargetDefines(const LangOptions &Opts,
@@ -357,7 +357,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRV64TargetInfo : public BaseSPIRVTargetInfo {
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024");
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}

void getTargetDefines(const LangOptions &Opts,
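
The "-G1" component appended to these data layout strings sets the default address space for globals to 1 (the SPIR/SPIR-V global / CrossWorkgroup space). A hedged illustration of the intended effect; the IR below is paraphrased, not taken from the patch's tests:

// For a file-scope definition such as:
int g;
// these targets would now be expected to place the global in addrspace(1),
// along the lines of:  @g = addrspace(1) global i32 0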
Empty file added clang/lib/CIR/CMakeLists.txt
4 changes: 4 additions & 0 deletions clang/lib/CMakeLists.txt
@@ -31,3 +31,7 @@ if(CLANG_INCLUDE_TESTS)
endif()
add_subdirectory(Interpreter)
add_subdirectory(Support)

if(CLANG_ENABLE_CIR)
add_subdirectory(CIR)
endif()
6 changes: 5 additions & 1 deletion clang/lib/CodeGen/CGAtomic.cpp
@@ -1806,7 +1806,11 @@ void AtomicInfo::EmitAtomicUpdateOp(
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
Address NewAtomicIntAddr =
shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
? castToAtomicIntPointer(NewAtomicAddr)
: NewAtomicAddr;

if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
16 changes: 12 additions & 4 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -18194,7 +18194,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Value *Op0 = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
/*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()),
Intrinsic::dx_any, ArrayRef<Value *>{Op0}, nullptr, "dx.any");
CGM.getHLSLRuntime().getAnyIntrinsic(), ArrayRef<Value *>{Op0}, nullptr,
"hlsl.any");
}
case Builtin::BI__builtin_hlsl_elementwise_clamp: {
Value *OpX = EmitScalarExpr(E->getArg(0));
@@ -18303,9 +18304,16 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Value *Op0 = EmitScalarExpr(E->getArg(0));
if (!E->getArg(0)->getType()->hasFloatingRepresentation())
llvm_unreachable("rcp operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), Intrinsic::dx_rcp,
ArrayRef<Value *>{Op0}, nullptr, "dx.rcp");
llvm::Type *Ty = Op0->getType();
llvm::Type *EltTy = Ty->getScalarType();
Constant *One =
Ty->isVectorTy()
? ConstantVector::getSplat(
ElementCount::getFixed(
dyn_cast<FixedVectorType>(Ty)->getNumElements()),
ConstantFP::get(EltTy, 1.0))
: ConstantFP::get(EltTy, 1.0);
return Builder.CreateFDiv(One, Op0, "hlsl.rcp");
}
case Builtin::BI__builtin_hlsl_elementwise_rsqrt: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
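
A small semantic model of the change above: rcp is no longer emitted via the dx.rcp intrinsic and instead lowers to a divide of a (splatted, for vectors) 1.0 by the operand. Sketch only; the IR is paraphrased, not taken from the patch's tests.

// Scalar semantics the codegen implements:
static inline float rcp_model(float X) { return 1.0f / X; }
// Expected shape of the emitted IR, roughly:
//   %hlsl.rcp = fdiv float 1.000000e+00, %x
//   %hlsl.rcp = fdiv <4 x float> <float 1.0, ...>, %v   ; vector operand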
12 changes: 2 additions & 10 deletions clang/lib/CodeGen/CGCleanup.cpp
@@ -667,8 +667,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {

// - whether there's a fallthrough
llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
bool HasFallthrough =
FallthroughSource != nullptr && (IsActive || HasExistingBranches);
bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

// Branch-through fall-throughs leave the insertion point set to the
// end of the last cleanup, which points to the current scope. The
@@ -693,11 +692,7 @@

// If we have a prebranched fallthrough into an inactive normal
// cleanup, rewrite it so that it leads to the appropriate place.
if (Scope.isNormalCleanup() && HasPrebranchedFallthrough &&
!RequiresNormalCleanup) {
// FIXME: Come up with a program which would need forwarding prebranched
// fallthrough and add tests. Otherwise delete this and assert against it.
assert(!IsActive);
if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
llvm::BasicBlock *prebranchDest;

// If the prebranch is semantically branching through the next
@@ -770,7 +765,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitSehCppScopeEnd();
}
destroyOptimisticNormalEntry(*this, Scope);
Scope.MarkEmitted();
EHStack.popCleanup();
} else {
// If we have a fallthrough and no other need for the cleanup,
@@ -787,7 +781,6 @@
}

destroyOptimisticNormalEntry(*this, Scope);
Scope.MarkEmitted();
EHStack.popCleanup();

EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
@@ -923,7 +916,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}

// IV. Pop the cleanup and emit it.
Scope.MarkEmitted();
EHStack.popCleanup();
assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

57 changes: 1 addition & 56 deletions clang/lib/CodeGen/CGCleanup.h
@@ -16,11 +16,8 @@
#include "EHScopeStack.h"

#include "Address.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

namespace llvm {
class BasicBlock;
@@ -269,51 +266,6 @@ class alignas(8) EHCleanupScope : public EHScope {
};
mutable struct ExtInfo *ExtInfo;

/// Erases auxillary allocas and their usages for an unused cleanup.
/// Cleanups should mark these allocas as 'used' if the cleanup is
/// emitted, otherwise these instructions would be erased.
struct AuxillaryAllocas {
SmallVector<llvm::Instruction *, 1> AuxAllocas;
bool used = false;

// Records a potentially unused instruction to be erased later.
void Add(llvm::AllocaInst *Alloca) { AuxAllocas.push_back(Alloca); }

// Mark all recorded instructions as used. These will not be erased later.
void MarkUsed() {
used = true;
AuxAllocas.clear();
}

~AuxillaryAllocas() {
if (used)
return;
llvm::SetVector<llvm::Instruction *> Uses;
for (auto *Inst : llvm::reverse(AuxAllocas))
CollectUses(Inst, Uses);
// Delete uses in the reverse order of insertion.
for (auto *I : llvm::reverse(Uses))
I->eraseFromParent();
}

private:
void CollectUses(llvm::Instruction *I,
llvm::SetVector<llvm::Instruction *> &Uses) {
if (!I || !Uses.insert(I))
return;
for (auto *User : I->users())
CollectUses(cast<llvm::Instruction>(User), Uses);
}
};
mutable struct AuxillaryAllocas *AuxAllocas;

AuxillaryAllocas &getAuxillaryAllocas() {
if (!AuxAllocas) {
AuxAllocas = new struct AuxillaryAllocas();
}
return *AuxAllocas;
}

/// The number of fixups required by enclosing scopes (not including
/// this one). If this is the top cleanup scope, all the fixups
/// from this index onwards belong to this scope.
@@ -346,7 +298,7 @@ class alignas(8) EHCleanupScope : public EHScope {
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
ActiveFlag(Address::invalid()), ExtInfo(nullptr), AuxAllocas(nullptr),
ActiveFlag(Address::invalid()), ExtInfo(nullptr),
FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
@@ -360,15 +312,8 @@
}

void Destroy() {
if (AuxAllocas)
delete AuxAllocas;
delete ExtInfo;
}
void AddAuxAllocas(llvm::SmallVector<llvm::AllocaInst *> Allocas) {
for (auto *Alloca : Allocas)
getAuxillaryAllocas().Add(Alloca);
}
void MarkEmitted() { getAuxillaryAllocas().MarkUsed(); }
// Objects of EHCleanupScope are not destructed. Use Destroy().
~EHCleanupScope() = delete;

58 changes: 19 additions & 39 deletions clang/lib/CodeGen/CGDecl.cpp
@@ -19,7 +19,6 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
@@ -2202,24 +2201,6 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
destroyer, useEHCleanupForArray);
}

// Pushes a destroy and defers its deactivation until its
// CleanupDeactivationScope is exited.
void CodeGenFunction::pushDestroyAndDeferDeactivation(
QualType::DestructionKind dtorKind, Address addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");

CleanupKind cleanupKind = getCleanupKind(dtorKind);
pushDestroyAndDeferDeactivation(
cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroyAndDeferDeactivation(
CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushCleanupAndDeferDeactivation<DestroyObject>(
cleanupKind, addr, type, destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
@@ -2236,19 +2217,16 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
// If we're not in a conditional branch, we don't need to bother generating a
// conditional cleanup.
if (!isInConditionalBranch()) {
// Push an EH-only cleanup for the object now.
// FIXME: When popping normal cleanups, we need to keep this EH cleanup
// around in case a temporary's destructor throws an exception.
if (cleanupKind & EHCleanup)
EHStack.pushCleanup<DestroyObject>(
static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
destroyer, useEHCleanupForArray);

// Add the cleanup to the EHStack. After the full-expr, this would be
// deactivated before being popped from the stack.
pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
useEHCleanupForArray);

// Since this is lifetime-extended, push it once again to the EHStack after
// the full expression.
return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
cleanupKind, Address::invalid(), addr, type, destroyer,
useEHCleanupForArray);
cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray);
}

// Otherwise, we should only destroy the object if it's been initialized.
@@ -2263,12 +2241,13 @@
Address ActiveFlag = createCleanupActiveFlag();
SavedType SavedAddr = saveValueInCond(addr);

pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
initFullExprCleanupWithFlag(ActiveFlag);
if (cleanupKind & EHCleanup) {
EHStack.pushCleanup<ConditionalCleanupType>(
static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
destroyer, useEHCleanupForArray);
initFullExprCleanupWithFlag(ActiveFlag);
}

// Since this is lifetime-extended, push it once again to the EHStack after
// the full expression.
pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
useEHCleanupForArray);
@@ -2463,9 +2442,9 @@ namespace {
};
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
/// destroy already-constructed elements of the given array. The cleanup may be
/// popped with DeactivateCleanupBlock or PopCleanupBlock.
/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
@@ -2474,9 +2453,10 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
QualType elementType,
CharUnits elementAlign,
Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(
NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
elementAlign, destroyer);
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
elementType, elementAlign,
destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
12 changes: 3 additions & 9 deletions clang/lib/CodeGen/CGExpr.cpp
@@ -115,16 +115,10 @@ RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name,
llvm::Value *ArraySize) {
llvm::AllocaInst *Alloca;
if (ArraySize)
Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
else
Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
ArraySize, Name, AllocaInsertPt);
if (Allocas) {
Allocas->Add(Alloca);
}
return Alloca;
return Builder.CreateAlloca(Ty, ArraySize, Name);
return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the