[Alignment][NFC] Deprecate untyped CreateAlignedLoad
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
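For callers the change is mechanical. A minimal sketch of the migration (assuming a live IRBuilder<> named Builder, an i32 type Int32Ty, and a pointer value Ptr; the names are illustrative, not taken from the patch):

  // Deprecated: alignment passed as a raw unsigned.
  LoadInst *Old = Builder.CreateAlignedLoad(Int32Ty, Ptr, 4, "val");
  // Preferred: alignment carried by the dedicated type.
  LoadInst *New = Builder.CreateAlignedLoad(Int32Ty, Ptr, MaybeAlign(4), "val");

Align models a known power-of-two alignment, while MaybeAlign adds an "unspecified" state; the deprecated unsigned overloads forward through MaybeAlign(Align), which maps 0 to that unspecified state.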

Reviewers: courbet

Subscribers: arsenm, jvesely, nhaehnle, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73260
gchatelet committed Jan 23, 2020
1 parent 1e0174a commit 279fa8e
Showing 21 changed files with 98 additions and 89 deletions.
25 changes: 16 additions & 9 deletions llvm/include/llvm/IR/IRBuilder.h
@@ -1759,8 +1759,10 @@ class IRBuilder : public IRBuilderBase, public Inserter {
/// parameter.
/// FIXME: Remove this function once transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const char *Name) {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
const char *Name),
"Use the version that takes NaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1771,8 +1773,10 @@ class IRBuilder : public IRBuilderBase, public Inserter {
}
/// FIXME: Remove this function once transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const Twine &Name = "") {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
const Twine &Name = ""),
"Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1783,8 +1787,11 @@ class IRBuilder : public IRBuilderBase, public Inserter {
}
/// FIXME: Remove this function once transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
bool isVolatile, const Twine &Name = "") {
LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
unsigned Align,
bool isVolatile,
const Twine &Name = ""),
"Use the version that takes MaybeAlign instead") {
return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
}
LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1797,19 +1804,19 @@ class IRBuilder : public IRBuilderBase, public Inserter {
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, Name);
MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
const Twine &Name = "") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, Name);
MaybeAlign(Align), Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
const Twine &Name = "") {
return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
Align, isVolatile, Name);
MaybeAlign(Align), isVolatile, Name);
}
// Deprecated [opaque pointer types]
LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
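LLVM_ATTRIBUTE_DEPRECATED takes the whole declaration plus the message because the attribute has to be spliced in at a compiler-specific position. Roughly, as a simplified sketch of the macro in llvm/Support/Compiler.h (not its verbatim definition):

  #if __has_attribute(deprecated)
  #define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
    decl __attribute__((deprecated(message)))
  #elif defined(_MSC_VER)
  #define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
    __declspec(deprecated(message)) decl
  #else
  #define LLVM_ATTRIBUTE_DEPRECATED(decl, message) decl
  #endif

Each deprecated overload above thus still compiles everywhere but warns at every call site.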
6 changes: 5 additions & 1 deletion llvm/include/llvm/IR/Instructions.h
Expand Up @@ -109,8 +109,12 @@ class AllocaInst : public UnaryInstruction {

/// Return the alignment of the memory that is being allocated by the
/// instruction.
MaybeAlign getAlign() const {
return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
}
// FIXME: Remove this once the transition to Align is over.
unsigned getAlignment() const {
if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
if (const auto MA = getAlign())
return MA->value();
return 0;
}
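The new getAlign() works because AllocaInst stores its alignment in five subclass-data bits as log2(alignment) + 1, with 0 meaning "not specified". Schematically, a sketch of the decoder it calls (mirroring decodeMaybeAlign in llvm/Support/Alignment.h):

  // 0 encodes "no alignment"; otherwise the stored value is log2(align) + 1,
  // so five bits cover alignments up to 1 << 30.
  inline MaybeAlign decodeMaybeAlign(unsigned Value) {
    if (Value == 0)
      return MaybeAlign();
    return Align(1ULL << (Value - 1));
  }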
8 changes: 4 additions & 4 deletions llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1622,7 +1622,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
const llvm::Align AllocaAlignment(DL.getPrefTypeAlignment(SizedIntTy));

// TODO: the "order" argument type is "int", not int32. So
// getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
@@ -1712,7 +1712,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// 'expected' argument, if present.
if (CASExpected) {
AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
AllocaCASExpected->setAlignment(MaybeAlign(AllocaAlignment));
AllocaCASExpected->setAlignment(AllocaAlignment);
unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

AllocaCASExpected_i8 =
@@ -1731,7 +1731,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
Args.push_back(IntValue);
} else {
AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
AllocaValue->setAlignment(MaybeAlign(AllocaAlignment));
AllocaValue->setAlignment(AllocaAlignment);
AllocaValue_i8 =
Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
@@ -1743,7 +1743,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// 'ret' argument.
if (!CASExpected && HasResult && !UseSizedLibcall) {
AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
AllocaResult->setAlignment(MaybeAlign(AllocaAlignment));
AllocaResult->setAlignment(AllocaAlignment);
unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
AllocaResult_i8 =
Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -1220,7 +1220,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
"interleaved.wide.ptrcast");

// Create the wide load and update the MemorySSA.
auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
"interleaved.wide.load");
auto MSSAU = MemorySSAUpdater(&MSSA);
MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -46,7 +46,7 @@ static bool lowerLoadRelative(Function &F) {
Value *OffsetPtr =
B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, Align(4));

Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);

23 changes: 12 additions & 11 deletions llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -130,7 +130,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
Value *Mask = CI->getArgOperand(2);
Value *Src0 = CI->getArgOperand(3);

unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
VectorType *VecType = cast<VectorType>(CI->getType());

Type *EltTy = VecType->getElementType();
@@ -151,7 +151,8 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
}

// Adjust alignment for the scalar instruction.
AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
const Align AdjustedAlignVal =
commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
// Bitcast %addr from i8* to EltTy*
Type *NewPtrType =
EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
@@ -166,7 +167,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
VResult = Builder.CreateInsertElement(VResult, Load, Idx);
}
CI->replaceAllUsesWith(VResult);
@@ -210,7 +211,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);

Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

// Create "else" block, fill it in the next iteration
@@ -414,8 +415,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(
EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
VResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
}
@@ -459,8 +460,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);

Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
"Load" + Twine(Idx));
Value *NewVResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));

@@ -624,8 +625,8 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
LoadInst *Load =
Builder.CreateAlignedLoad(EltTy, NewPtr, 1, "Load" + Twine(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
"Load" + Twine(Idx));
VResult =
Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
++MemIndex;
@@ -670,7 +671,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
"cond.load");
Builder.SetInsertPoint(InsertPt);

LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, 1);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

// Move the pointer if there are more blocks to come.
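Note the switch from MinAlign to commonAlignment above: commonAlignment is the Align-typed counterpart, yielding the largest alignment simultaneously guaranteed at the base pointer and at a byte offset from it. A small usage sketch (values illustrative):

  Align Base(16);
  Align A = commonAlignment(Base, 4);  // Align(4): a 4-byte offset from a 16-byte base
  Align B = commonAlignment(Base, 32); // Align(16): the offset preserves the base alignment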
5 changes: 3 additions & 2 deletions llvm/lib/IR/AutoUpgrade.cpp
@@ -2308,7 +2308,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Type *VT = VectorType::get(EltTy, NumSrcElts);
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
PointerType::getUnqual(VT));
Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
if (NumSrcElts == 2)
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
{ 0, 1, 0, 1 });
@@ -3054,7 +3054,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Convert the type of the pointer to a pointer to the stored type.
Value *BC =
Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
LoadInst *LI =
Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8));
LI->setMetadata(M->getMDKindID("nontemporal"), Node);
Rep = LI;
} else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -160,7 +160,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
ArgPtr->getName() + ".cast");
LoadInst *Load =
Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign.value());
Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

MDBuilder MDB(Ctx);
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -251,10 +251,10 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
// 32-bit and extract sequence is already present, and it is probably easier
// to CSE this. The loads should be mergable later anyway.
Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

MDNode *MD = MDNode::get(Mod->getContext(), None);
LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
3 changes: 1 addition & 2 deletions llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -772,8 +772,7 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
LoadTy->getPointerTo(AddrSpace));
LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
Base->getAlignment());
LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());

// Make sure everything is in the correct order in the basic block.
MoveBefore(Base->getPointerOperand(), VecPtr);
2 changes: 1 addition & 1 deletion llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -27474,7 +27474,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// Finally we can emit the atomic load.
LoadInst *Loaded =
Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
AI->getType()->getPrimitiveSizeInBits());
Align(AI->getType()->getPrimitiveSizeInBits()));
Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
2 changes: 1 addition & 1 deletion llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -216,7 +216,7 @@ void X86InterleavedAccessGroup::decompose(
Value *NewBasePtr =
Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
Instruction *NewLoad =
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
DecomposedVectors.push_back(NewLoad);
}
}
11 changes: 6 additions & 5 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1056,7 +1056,8 @@ static Value *simplifyX86vpermv(const IntrinsicInst &II,
// * Narrow width by halfs excluding zero/undef lanes
Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
Value *LoadPtr = II.getArgOperand(0);
unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
const Align Alignment =
cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

// If the mask is all ones or undefs, this is a plain vector load of the 1st
// argument.
@@ -1066,9 +1067,9 @@ Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {

// If we can unconditionally load from this address, replace with a
// load/select idiom. TODO: use DT for context sensitive query
if (isDereferenceableAndAlignedPointer(
LoadPtr, II.getType(), MaybeAlign(Alignment),
II.getModule()->getDataLayout(), &II, nullptr)) {
if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
II.getModule()->getDataLayout(), &II,
nullptr)) {
Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
"unmaskedload");
return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
@@ -1459,7 +1460,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,

auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
PointerType::get(II.getType(), 0));
return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
24 changes: 10 additions & 14 deletions llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -462,12 +462,11 @@ LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
NewPtr->getType()->getPointerAddressSpace() == AS))
NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

unsigned Align = LI.getAlignment();
if (!Align)
// If old load did not have an explicit alignment specified,
// manually preserve the implied (ABI) alignment of the load.
// Else we may inadvertently incorrectly over-promise alignment.
Align = getDataLayout().getABITypeAlignment(LI.getType());
const auto Align =
getDataLayout().getValueOrABITypeAlignment(LI.getAlign(), LI.getType());

LoadInst *NewLoad = Builder.CreateAlignedLoad(
NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
@@ -674,9 +673,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
if (SL->hasPadding())
return nullptr;

auto Align = LI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(ST);
const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), ST);

auto *Addr = LI.getPointerOperand();
auto *IdxType = Type::getInt32Ty(T->getContext());
Expand All @@ -690,9 +687,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
EltAlign, Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(
ST->getElementType(i), Ptr,
commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
@@ -725,9 +722,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {

const DataLayout &DL = IC.getDataLayout();
auto EltSize = DL.getTypeAllocSize(ET);
auto Align = LI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(T);
const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), T);

auto *Addr = LI.getPointerOperand();
auto *IdxType = Type::getInt64Ty(T->getContext());
@@ -742,8 +737,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
Name + ".elt");
auto *L = IC.Builder.CreateAlignedLoad(
AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
commonAlignment(Align, Offset),
Name + ".unpack");
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
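The recurring pattern the old code spelled out by hand, "use the explicit alignment if present, else fall back to the type's ABI alignment", is now folded into DataLayout::getValueOrABITypeAlignment. In effect (a behavioral sketch, not the exact implementation):

  Align DataLayout::getValueOrABITypeAlignment(MaybeAlign Alignment,
                                               Type *Ty) const {
    return Alignment ? *Alignment : Align(getABITypeAlignment(Ty));
  }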
