Skip to content

Commit

Permalink
Reland [Metadata] Add a resize capability to MDNodes and add a push_back interface to MDNode
Browse files Browse the repository at this point in the history

Fixed a bug with double destruction of operands and corrected a test issue.

Note that this patch leads to a slight increase in compile time (I measured
about .3%) and a slight increase in memory usage. The increased memory usage
should be offset once resizing is used to a larger extent.

Reviewed By: dexonsmith

Differential Revision: https://reviews.llvm.org/D125998
  • Loading branch information
Wolfgang Pieb committed Jun 27, 2022
1 parent 14d3021 commit a630ea3
Show file tree
Hide file tree
Showing 3 changed files with 301 additions and 25 deletions.
110 changes: 97 additions & 13 deletions llvm/include/llvm/IR/Metadata.h
Expand Up @@ -934,38 +934,101 @@ struct TempMDNodeDeleter {
/// If an unresolved node is part of a cycle, \a resolveCycles() needs
/// to be called on some member of the cycle once all temporary nodes have been
/// replaced.
///
/// MDNodes can be large or small, as well as resizable or non-resizable.
/// Large MDNodes' operands are allocated in a separate storage vector,
/// whereas small MDNodes' operands are co-allocated. Distinct and temporary
/// MDNodes are resizable, but only MDTuples support this capability.
///
/// Clients can add operands to resizable MDNodes using push_back().
class MDNode : public Metadata {
friend class ReplaceableMetadataImpl;
friend class LLVMContextImpl;
friend class DIArgList;

/// The header that is coallocated with an MDNode, along with the operands.
/// It is located immediately before the main body of the node. The operands
/// are in turn located immediately before the header.
/// The header that is coallocated with an MDNode along with its "small"
/// operands. It is located immediately before the main body of the node.
/// The operands are in turn located immediately before the header.
/// For resizable MDNodes, the space for the storage vector is also allocated
/// immediately before the header, overlapping with the operands.
struct Header {
unsigned NumOperands;
bool IsResizable : 1;
bool IsLarge : 1;
size_t SmallSize : 4;
size_t SmallNumOps : 4;
size_t : sizeof(size_t) * CHAR_BIT - 10;

unsigned NumUnresolved = 0;
using LargeStorageVector = SmallVector<MDOperand, 0>;

static constexpr size_t NumOpsFitInVector =
sizeof(LargeStorageVector) / sizeof(MDOperand);
static_assert(
NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector),
"sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)");

static constexpr size_t MaxSmallSize = 15;

static constexpr size_t getOpSize(unsigned NumOps) {
return sizeof(MDOperand) * NumOps;
}
static constexpr size_t getAllocSize(unsigned NumOps) {
return getOpSize(NumOps) + sizeof(Header);
/// Returns the number of operands the node has space for based on its
/// allocation characteristics.
static size_t getSmallSize(size_t NumOps, bool IsResizable, bool IsLarge) {
return IsLarge ? NumOpsFitInVector
: std::max(NumOps, NumOpsFitInVector * IsResizable);
}
/// Returns the number of bytes allocated for operands and header.
static size_t getAllocSize(StorageType Storage, size_t NumOps) {
return getOpSize(
getSmallSize(NumOps, isResizable(Storage), isLarge(NumOps))) +
sizeof(Header);
}

/// Only temporary and distinct nodes are resizable.
static bool isResizable(StorageType Storage) { return Storage != Uniqued; }
static bool isLarge(size_t NumOps) { return NumOps > MaxSmallSize; }

size_t getAllocSize() const {
return getOpSize(SmallSize) + sizeof(Header);
}
void *getAllocation() {
return reinterpret_cast<char *>(this + 1) -
alignTo(getAllocSize(NumOperands), alignof(uint64_t));
alignTo(getAllocSize(), alignof(uint64_t));
}

void *getLargePtr() const;
void *getSmallPtr();

LargeStorageVector &getLarge() {
assert(IsLarge);
return *reinterpret_cast<LargeStorageVector *>(getLargePtr());
}

explicit Header(unsigned NumOperands);
const LargeStorageVector &getLarge() const {
assert(IsLarge);
return *reinterpret_cast<const LargeStorageVector *>(getLargePtr());
}

void resizeSmall(size_t NumOps);
void resizeSmallToLarge(size_t NumOps);
void resize(size_t NumOps);

explicit Header(size_t NumOps, StorageType Storage);
~Header();

MutableArrayRef<MDOperand> operands() {
if (IsLarge)
return getLarge();
return makeMutableArrayRef(
reinterpret_cast<MDOperand *>(this) - NumOperands, NumOperands);
reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps);
}

ArrayRef<MDOperand> operands() const {
return makeArrayRef(
reinterpret_cast<const MDOperand *>(this) - NumOperands, NumOperands);
if (IsLarge)
return getLarge();
return makeArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
SmallNumOps);
}
};

Expand All @@ -982,7 +1045,7 @@ class MDNode : public Metadata {
ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
~MDNode() = default;

// Allocate/free an MDNode together with its co-allocated Header and operand
// (or storage-vector) area. Defined in Metadata.cpp.
void *operator new(size_t Size, size_t NumOps, StorageType Storage);
void operator delete(void *Mem);

/// Required by std, but never called.
Expand Down Expand Up @@ -1146,6 +1209,17 @@ class MDNode : public Metadata {
static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
template <class T> static T *storeImpl(T *N, StorageType Storage);

/// Resize the node to hold \a NumOps operands.
///
/// \pre \a isTemporary() or \a isDistinct()
/// \pre MetadataID == MDTupleKind
void resize(size_t NumOps) {
  // Uniqued nodes are hashed by content and must stay immutable.
  assert(!isUniqued() && "Resizing is not supported for uniqued nodes");
  // Currently only MDTuple opts into resizing (see class-level comment).
  assert(getMetadataID() == MDTupleKind &&
         "Resizing is not supported for this node kind");
  // Delegate to the header, which picks small vs. large storage handling.
  getHeader().resize(NumOps);
}

private:
void handleChangedOperand(void *Ref, Metadata *New);

Expand Down Expand Up @@ -1207,7 +1281,7 @@ class MDNode : public Metadata {
}

/// Return number of MDNode operands.
///
/// Reads the size through Header::operands() so it is correct for both
/// small (co-allocated) and large (vector-backed) nodes.
unsigned getNumOperands() const { return getHeader().operands().size(); }

/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Metadata *MD) {
Expand Down Expand Up @@ -1292,6 +1366,16 @@ class MDTuple : public MDNode {
/// Return a (temporary) clone of this.
TempMDTuple clone() const { return cloneImpl(); }

/// Append an element to the tuple. This will resize the node.
///
/// \pre \a isTemporary() or \a isDistinct() (uniqued nodes are not resizable).
void push_back(Metadata *MD) {
  size_t NumOps = getNumOperands();
  // Grow by one, then store \p MD in the newly created trailing slot.
  resize(NumOps + 1);
  setOperand(NumOps, MD);
}

/// Shrink the operands by 1.
///
/// \pre getNumOperands() > 0
/// \pre \a isTemporary() or \a isDistinct() (uniqued nodes are not resizable).
void pop_back() {
  // Guard the size_t underflow: resize(0 - 1) would request SIZE_MAX operands.
  assert(getNumOperands() > 0 && "Cannot pop_back from an empty MDNode");
  resize(getNumOperands() - 1);
}

/// Support for isa/cast/dyn_cast: an MDTuple is identified solely by its
/// metadata kind ID.
static bool classof(const Metadata *MD) {
  return MD->getMetadataID() == MDTupleKind;
}
Expand Down
92 changes: 80 additions & 12 deletions llvm/lib/IR/Metadata.cpp
Expand Up @@ -521,13 +521,13 @@ StringRef MDString::getString() const {
"Alignment is insufficient after objects prepended to " #CLASS);
#include "llvm/IR/Metadata.def"

void *MDNode::operator new(size_t Size, unsigned NumOps,
StorageType /* Storage */) {
void *MDNode::operator new(size_t Size, size_t NumOps, StorageType Storage) {
// uint64_t is the most aligned type we need support (ensured by static_assert
// above)
size_t AllocSize = alignTo(Header::getAllocSize(NumOps), alignof(uint64_t));
size_t AllocSize =
alignTo(Header::getAllocSize(Storage, NumOps), alignof(uint64_t));
char *Mem = reinterpret_cast<char *>(::operator new(AllocSize + Size));
Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps);
Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps, Storage);
return reinterpret_cast<void *>(H + 1);
}

Expand Down Expand Up @@ -566,17 +566,85 @@ TempMDNode MDNode::clone() const {
}
}

/// Initialize the header and the operand storage that precedes it in memory.
MDNode::Header::Header(size_t NumOps, StorageType Storage) {
  IsLarge = isLarge(NumOps);
  IsResizable = isResizable(Storage);
  SmallSize = getSmallSize(NumOps, IsResizable, IsLarge);
  if (IsLarge) {
    // Large node: operands live in a heap-backed vector whose object is
    // placement-constructed immediately before this header.
    SmallNumOps = 0;
    new (getLargePtr()) LargeStorageVector();
    getLarge().resize(NumOps);
    return;
  }
  // Small node: default-construct every one of the SmallSize co-allocated
  // slots; only the first SmallNumOps are the node's visible operands.
  SmallNumOps = NumOps;
  MDOperand *O = reinterpret_cast<MDOperand *>(this) - SmallSize;
  for (MDOperand *E = O + SmallSize; O != E;)
    (void)new (O++) MDOperand();
}

/// Tear down the operand storage; the raw allocation is released by
/// MDNode::operator delete.
MDNode::Header::~Header() {
  if (IsLarge) {
    // Destroying the vector destroys all operands it holds.
    getLarge().~LargeStorageVector();
    return;
  }
  // Small storage: destroy all SmallSize constructed slots (not just the
  // visible SmallNumOps), walking backwards from the header.
  MDOperand *O = reinterpret_cast<MDOperand *>(this);
  for (MDOperand *E = O - SmallSize; O != E; --O)
    (void)(O - 1)->~MDOperand();
}

// Address of the LargeStorageVector object, which is co-allocated
// immediately before the header (overlapping the small-operand area).
void *MDNode::Header::getLargePtr() const {
  static_assert(alignof(LargeStorageVector) <= alignof(Header),
                "LargeStorageVector too strongly aligned");
  return reinterpret_cast<char *>(const_cast<Header *>(this)) -
         sizeof(LargeStorageVector);
}

// Address of the first co-allocated ("small") operand slot, which sits
// SmallSize operands before the header.
void *MDNode::Header::getSmallPtr() {
  static_assert(alignof(MDOperand) <= alignof(Header),
                "MDOperand too strongly aligned");
  // No const_cast needed: this member function is non-const, so `this` is
  // already a Header *.
  return reinterpret_cast<char *>(this) - sizeof(MDOperand) * SmallSize;
}

// Resize the node to hold NumOps operands, dispatching to the strategy that
// matches the current storage mode.
void MDNode::Header::resize(size_t NumOps) {
  assert(IsResizable && "Node is not resizable");
  if (operands().size() == NumOps)
    return;

  if (IsLarge)
    // Already vector-backed: delegate to the vector.
    getLarge().resize(NumOps);
  else if (NumOps <= SmallSize)
    // Fits in the co-allocated slots: adjust in place.
    resizeSmall(NumOps);
  else
    // Exceeds the co-allocated capacity: switch to large storage.
    resizeSmallToLarge(NumOps);
}

// Resize within the co-allocated ("small") operand area. All SmallSize slots
// were constructed by the Header constructor; only SmallNumOps of them are
// visible, so resizing resets slots and updates the count -- no slot is
// constructed or destroyed here.
void MDNode::Header::resizeSmall(size_t NumOps) {
  assert(!IsLarge && "Expected a small MDNode");
  assert(NumOps <= SmallSize && "NumOps too large for small resize");

  MutableArrayRef<MDOperand> ExistingOps = operands();
  assert(NumOps != ExistingOps.size() && "Expected a different size");

  // Positive when growing, negative when shrinking.
  int NumNew = (int)NumOps - (int)ExistingOps.size();
  MDOperand *O = ExistingOps.end();
  // Growing: reset each newly exposed slot (clears any stale value).
  for (int I = 0, E = NumNew; I < E; ++I)
    (O++)->reset();
  // Shrinking: reset each dropped slot so it no longer holds its operand.
  for (int I = 0, E = NumNew; I > E; --I)
    (--O)->reset();
  SmallNumOps = NumOps;
  // Exactly one of the two loops ran; O must have landed on the new end.
  assert(O == operands().end() && "Operands not (un)initialized until the end");
}

// Transition a small node to large (vector-backed) storage when NumOps
// exceeds the co-allocated capacity.
void MDNode::Header::resizeSmallToLarge(size_t NumOps) {
  assert(!IsLarge && "Expected a small MDNode");
  assert(NumOps > SmallSize && "Expected NumOps to be larger than allocation");
  LargeStorageVector NewOps;
  NewOps.resize(NumOps);
  // Move the existing operands out, then clear the small slots BEFORE the
  // vector is placement-constructed: the vector object overlaps the operand
  // area (see Header comment), so live operands there must be emptied first.
  llvm::move(operands(), NewOps.begin());
  resizeSmall(0);
  new (getLargePtr()) LargeStorageVector(std::move(NewOps));
  IsLarge = true;
}

static bool isOperandUnresolved(Metadata *Op) {
Expand Down

0 comments on commit a630ea3

Please sign in to comment.