
7 changes: 4 additions & 3 deletions llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -168,8 +168,9 @@ void RuntimeDyldMachO::resolveRelocation(const RelocationEntry &RE,
case Triple::thumb:
resolveARMRelocation(RE, Value);
break;
+ case Triple::aarch64:
case Triple::arm64:
- resolveARM64Relocation(RE, Value);
+ resolveAArch64Relocation(RE, Value);
break;
}
}
@@ -289,8 +290,8 @@ bool RuntimeDyldMachO::resolveARMRelocation(const RelocationEntry &RE,
return false;
}

- bool RuntimeDyldMachO::resolveARM64Relocation(const RelocationEntry &RE,
- uint64_t Value) {
+ bool RuntimeDyldMachO::resolveAArch64Relocation(const RelocationEntry &RE,
+ uint64_t Value) {
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t* LocalAddress = Section.Address + RE.Offset;

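A note on the resolveRelocation() hunk above: while Triple::arm64 and Triple::aarch64 remain distinct enumerators, both must reach the renamed resolver. A minimal standalone sketch of that fall-through dispatch (simplified enum and stub resolver, not the actual RuntimeDyld API):

#include <cstdint>

enum class ArchType { arm, thumb, arm64, aarch64, unknown };

// Stand-in for RuntimeDyldMachO::resolveAArch64Relocation().
static bool resolveAArch64Relocation(uint64_t Value) {
  return Value != 0; // placeholder for the real fixup logic
}

static bool resolveRelocation(ArchType Arch, uint64_t Value) {
  switch (Arch) {
  case ArchType::arm64:   // legacy spelling, kept for compatibility
  case ArchType::aarch64: // canonical spelling after the merge
    return resolveAArch64Relocation(Value); // one resolver serves both
  default:
    return false;
  }
}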
2 changes: 1 addition & 1 deletion llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -41,7 +41,7 @@ class RuntimeDyldMachO : public RuntimeDyldImpl {
bool resolveI386Relocation(const RelocationEntry &RE, uint64_t Value);
bool resolveX86_64Relocation(const RelocationEntry &RE, uint64_t Value);
bool resolveARMRelocation(const RelocationEntry &RE, uint64_t Value);
- bool resolveARM64Relocation(const RelocationEntry &RE, uint64_t Value);
+ bool resolveAArch64Relocation(const RelocationEntry &RE, uint64_t Value);

// Populate stubs in __jump_table section.
void populateJumpTable(MachOObjectFile &Obj, const SectionRef &JTSection,
3 changes: 2 additions & 1 deletion llvm/lib/LTO/LTOCodeGenerator.cpp
@@ -312,7 +312,8 @@ bool LTOCodeGenerator::determineTarget(std::string &errMsg) {
MCpu = "core2";
else if (Triple.getArch() == llvm::Triple::x86)
MCpu = "yonah";
- else if (Triple.getArch() == llvm::Triple::arm64)
+ else if (Triple.getArch() == llvm::Triple::arm64 ||
+ Triple.getArch() == llvm::Triple::aarch64)
MCpu = "cyclone";
}

3 changes: 2 additions & 1 deletion llvm/lib/LTO/LTOModule.cpp
@@ -168,7 +168,8 @@ LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
CPU = "core2";
else if (Triple.getArch() == llvm::Triple::x86)
CPU = "yonah";
- else if (Triple.getArch() == llvm::Triple::arm64)
+ else if (Triple.getArch() == llvm::Triple::arm64 ||
+ Triple.getArch() == llvm::Triple::aarch64)
CPU = "cyclone";
}

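LTOCodeGenerator.cpp and LTOModule.cpp gain the same fallback. As a standalone sketch of the intended defaulting behavior (the enum stands in for llvm::Triple::ArchType; the CPU names are the ones visible in the hunks):

#include <string>

enum class ArchType { x86, x86_64, arm64, aarch64, other };

static std::string defaultDarwinCPU(ArchType Arch) {
  if (Arch == ArchType::x86_64)
    return "core2";
  if (Arch == ArchType::x86)
    return "yonah";
  if (Arch == ArchType::arm64 || Arch == ArchType::aarch64)
    return "cyclone"; // either spelling of the 64-bit ARM arch
  return "";          // leave the CPU unset for other targets
}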
10 changes: 6 additions & 4 deletions llvm/lib/MC/MCObjectFileInfo.cpp
@@ -23,7 +23,8 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
IsFunctionEHFrameSymbolPrivate = false;
SupportsWeakOmittedEHFrame = false;

- if (T.isOSDarwin() && T.getArch() == Triple::arm64)
+ if (T.isOSDarwin() &&
+ (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64))
SupportsCompactUnwindWithoutEHFrame = true;

PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel
@@ -151,15 +152,16 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
COFFDebugSymbolsSection = nullptr;

if ((T.isMacOSX() && !T.isMacOSXVersionLT(10, 6)) ||
- (T.isOSDarwin() && T.getArch() == Triple::arm64)) {
+ (T.isOSDarwin() &&
+ (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64))) {
CompactUnwindSection =
Ctx->getMachOSection("__LD", "__compact_unwind",
MachO::S_ATTR_DEBUG,
SectionKind::getReadOnly());

if (T.getArch() == Triple::x86_64 || T.getArch() == Triple::x86)
CompactUnwindDwarfEHFrameOnly = 0x04000000;
- else if (T.getArch() == Triple::arm64)
+ else if (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64)
CompactUnwindDwarfEHFrameOnly = 0x03000000;
}

@@ -785,7 +787,7 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
// cellspu-apple-darwin. Perhaps we should fix in Triple?
if ((Arch == Triple::x86 || Arch == Triple::x86_64 ||
Arch == Triple::arm || Arch == Triple::thumb ||
- Arch == Triple::arm64 ||
+ Arch == Triple::arm64 || Arch == Triple::aarch64 ||
Arch == Triple::ppc || Arch == Triple::ppc64 ||
Arch == Triple::UnknownArch) &&
(T.isOSDarwin() || T.isOSBinFormatMachO())) {
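The two magic numbers above are the per-architecture "this frame can only be described by DWARF" escape values in the compact-unwind encoding. A hedged sketch of how such a value might be tested — the 0x0F000000 mode mask is an assumption about the encoding layout, not something this patch states:

#include <cstdint>

constexpr uint32_t X86DwarfEHFrameOnly     = 0x04000000; // x86 / x86_64
constexpr uint32_t AArch64DwarfEHFrameOnly = 0x03000000; // arm64 / aarch64

// Hypothetical predicate: does this encoding defer to the DWARF EH frame?
static bool needsDwarf(uint32_t Encoding, uint32_t DwarfOnlyMode) {
  return (Encoding & 0x0F000000u) == DwarfOnlyMode; // assumed mode field
}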
49 changes: 49 additions & 0 deletions llvm/lib/Target/AArch64/AArch64.h
@@ -0,0 +1,49 @@
//==-- AArch64.h - Top-level interface for AArch64 --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the LLVM
// AArch64 back-end.
//
//===----------------------------------------------------------------------===//

#ifndef TARGET_AArch64_H
#define TARGET_AArch64_H

#include "Utils/AArch64BaseInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class AArch64TargetMachine;
class FunctionPass;
class MachineFunctionPass;

FunctionPass *createAArch64DeadRegisterDefinitions();
FunctionPass *createAArch64ConditionalCompares();
FunctionPass *createAArch64AdvSIMDScalar();
FunctionPass *createAArch64BranchRelaxation();
FunctionPass *createAArch64ISelDag(AArch64TargetMachine &TM,
CodeGenOpt::Level OptLevel);
FunctionPass *createAArch64StorePairSuppressPass();
FunctionPass *createAArch64ExpandPseudoPass();
FunctionPass *createAArch64LoadStoreOptimizationPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64AddressTypePromotionPass();
/// \brief Creates an AArch64-specific Target Transformation Info pass.
ImmutablePass *
createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM);

FunctionPass *createAArch64CleanupLocalDynamicTLSPass();

FunctionPass *createAArch64CollectLOHPass();
} // end namespace llvm

#endif
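Each create* declaration above follows the legacy pass-manager convention that the .cpp files later in this patch implement: an anonymous-namespace pass class, a static ID, and a factory function. A minimal skeleton of that pattern, with a hypothetical pass name and the era-appropriate getPassName() signature:

#include "llvm/Pass.h"

namespace llvm {
FunctionPass *createAArch64DemoPass(); // would be declared in AArch64.h
}

using namespace llvm;

namespace {
struct AArch64DemoPass : public FunctionPass {
  static char ID;
  AArch64DemoPass() : FunctionPass(ID) {}
  const char *getPassName() const override { return "AArch64 Demo"; }
  bool runOnFunction(Function &) override { return false; } // changes nothing
};
}

char AArch64DemoPass::ID = 0;

FunctionPass *llvm::createAArch64DemoPass() { return new AArch64DemoPass(); }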
@@ -1,4 +1,4 @@
- //===- ARM64.td - Describe the ARM64 Target Machine --------*- tablegen -*-===//
+ //=- AArch64.td - Describe the AArch64 Target Machine --------*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -17,7 +17,7 @@
include "llvm/Target/Target.td"

//===----------------------------------------------------------------------===//
- // ARM64 Subtarget features.
+ // AArch64 Subtarget features.
//

def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true",
@@ -44,23 +44,23 @@ def FeatureZCZeroing : SubtargetFeature<"zcz", "HasZeroCycleZeroing", "true",
// Register File Description
//===----------------------------------------------------------------------===//

include "ARM64RegisterInfo.td"
include "ARM64CallingConvention.td"
include "AArch64RegisterInfo.td"
include "AArch64CallingConvention.td"

//===----------------------------------------------------------------------===//
// Instruction Descriptions
//===----------------------------------------------------------------------===//

include "ARM64Schedule.td"
include "ARM64InstrInfo.td"
include "AArch64Schedule.td"
include "AArch64InstrInfo.td"

- def ARM64InstrInfo : InstrInfo;
+ def AArch64InstrInfo : InstrInfo;

//===----------------------------------------------------------------------===//
- // ARM64 Processors supported.
+ // AArch64 Processors supported.
//
include "ARM64SchedA53.td"
include "ARM64SchedCyclone.td"
include "AArch64SchedA53.td"
include "AArch64SchedCyclone.td"

def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
"Cortex-A53 ARM processors",
@@ -109,7 +109,7 @@ def AppleAsmParserVariant : AsmParserVariant {
//===----------------------------------------------------------------------===//
// Assembly printer
//===----------------------------------------------------------------------===//
- // ARM64 Uses the MC printer for asm output, so make sure the TableGen
+ // AArch64 uses the MC printer for asm output, so make sure the TableGen
// AsmWriter bits get associated with the correct class.
def GenericAsmWriter : AsmWriter {
string AsmWriterClassName = "InstPrinter";
@@ -127,8 +127,8 @@ def AppleAsmWriter : AsmWriter {
// Target Declaration
//===----------------------------------------------------------------------===//

- def ARM64 : Target {
- let InstructionSet = ARM64InstrInfo;
+ def AArch64 : Target {
+ let InstructionSet = AArch64InstrInfo;
let AssemblyParserVariants = [GenericAsmParserVariant, AppleAsmParserVariant];
let AssemblyWriters = [GenericAsmWriter, AppleAsmWriter];
}
@@ -1,5 +1,4 @@

- //===-- ARM64AddressTypePromotion.cpp --- Promote type for addr accesses -===//
+ //===-- AArch64AddressTypePromotion.cpp --- Promote type for addr accesses -==//
//
// The LLVM Compiler Infrastructure
//
@@ -29,7 +28,7 @@
// FIXME: This pass may be useful for other targets too.
// ===---------------------------------------------------------------------===//

#include "ARM64.h"
#include "AArch64.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -45,38 +44,38 @@

using namespace llvm;

#define DEBUG_TYPE "arm64-type-promotion"
#define DEBUG_TYPE "aarch64-type-promotion"

static cl::opt<bool>
EnableAddressTypePromotion("arm64-type-promotion", cl::Hidden,
EnableAddressTypePromotion("aarch64-type-promotion", cl::Hidden,
cl::desc("Enable the type promotion pass"),
cl::init(true));
static cl::opt<bool>
EnableMerge("arm64-type-promotion-merge", cl::Hidden,
EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
cl::desc("Enable merging of redundant sexts when one is dominating"
" the other."),
cl::init(true));

//===----------------------------------------------------------------------===//
- // ARM64AddressTypePromotion
+ // AArch64AddressTypePromotion
//===----------------------------------------------------------------------===//

namespace llvm {
- void initializeARM64AddressTypePromotionPass(PassRegistry &);
+ void initializeAArch64AddressTypePromotionPass(PassRegistry &);
}

namespace {
- class ARM64AddressTypePromotion : public FunctionPass {
+ class AArch64AddressTypePromotion : public FunctionPass {

public:
static char ID;
- ARM64AddressTypePromotion()
+ AArch64AddressTypePromotion()
: FunctionPass(ID), Func(nullptr), ConsideredSExtType(nullptr) {
- initializeARM64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
+ initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
}

const char *getPassName() const override {
return "ARM64 Address Type Promotion";
return "AArch64 Address Type Promotion";
}

/// Iterate over the functions and promote the computation of interesting
@@ -140,19 +139,19 @@ class ARM64AddressTypePromotion : public FunctionPass {
};
} // end anonymous namespace.

- char ARM64AddressTypePromotion::ID = 0;
+ char AArch64AddressTypePromotion::ID = 0;

- INITIALIZE_PASS_BEGIN(ARM64AddressTypePromotion, "arm64-type-promotion",
- "ARM64 Type Promotion Pass", false, false)
+ INITIALIZE_PASS_BEGIN(AArch64AddressTypePromotion, "aarch64-type-promotion",
+ "AArch64 Type Promotion Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
- INITIALIZE_PASS_END(ARM64AddressTypePromotion, "arm64-type-promotion",
- "ARM64 Type Promotion Pass", false, false)
+ INITIALIZE_PASS_END(AArch64AddressTypePromotion, "aarch64-type-promotion",
+ "AArch64 Type Promotion Pass", false, false)

- FunctionPass *llvm::createARM64AddressTypePromotionPass() {
- return new ARM64AddressTypePromotion();
+ FunctionPass *llvm::createAArch64AddressTypePromotionPass() {
+ return new AArch64AddressTypePromotion();
}

- bool ARM64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
+ bool AArch64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
if (isa<SExtInst>(Inst))
return true;

@@ -175,7 +174,7 @@ bool ARM64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
return false;
}

- bool ARM64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
+ bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
// If the type of the sext is the same as the considered one, this sext
// will become useless.
// Otherwise, we will have to do something to preserve the original value,
@@ -211,7 +210,7 @@ static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
}

bool
- ARM64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
+ AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
if (SExt->getType() != ConsideredSExtType)
return false;

@@ -249,7 +248,7 @@ ARM64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
// = a
// Iterate on 'c'.
bool
- ARM64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
+ AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
DEBUG(dbgs() << "*** Propagate Sign Extension ***\n");

bool LocalChange = false;
@@ -375,8 +374,8 @@ ARM64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
return LocalChange;
}

- void ARM64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove) {
+ void AArch64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
+ SetOfInstructions &ToRemove) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();

for (auto &Entry : ValToSExtendedUses) {
@@ -414,7 +413,7 @@ void ARM64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
}
}

- void ARM64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
+ void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
DEBUG(dbgs() << "*** Analyze Sign Extensions ***\n");

DenseMap<Value *, Instruction *> SeenChains;
@@ -479,7 +478,7 @@ void ARM64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
}
}

- bool ARM64AddressTypePromotion::runOnFunction(Function &F) {
+ bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
if (!EnableAddressTypePromotion || F.isDeclaration())
return false;
Func = &F;
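What the pass chases, reduced to C++: if a 32-bit add is known not to overflow, the sign extension can be hoisted above it, so the constant folds into 64-bit address arithmetic instead of needing a separate extend. A sketch — the two functions are equivalent only under the no-overflow (nsw) assumption the pass verifies at the IR level:

#include <cstdint>

// Before: the add happens in 32 bits, then the result is sign extended.
char loadBefore(const char *P, int32_t I) {
  return P[(int64_t)(I + 1)];
}

// After: the extension moved up; the +1 becomes 64-bit address math.
char loadAfter(const char *P, int32_t I) {
  return P[(int64_t)I + 1];
}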
@@ -1,4 +1,4 @@
- //===-- ARM64AdvSIMDScalar.cpp - Replace dead defs w/ zero reg --===//
+ //===-- AArch64AdvSIMDScalar.cpp - Replace dead defs w/ zero reg --===//
//
// The LLVM Compiler Infrastructure
//
@@ -33,9 +33,9 @@
// solution.
//===----------------------------------------------------------------------===//

#include "ARM64.h"
#include "ARM64InstrInfo.h"
#include "ARM64RegisterInfo.h"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -47,12 +47,12 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "arm64-simd-scalar"
#define DEBUG_TYPE "aarch64-simd-scalar"

// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
TransformAll("arm64-simd-scalar-force-all",
TransformAll("aarch64-simd-scalar-force-all",
cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
cl::init(false), cl::Hidden);

@@ -61,9 +61,9 @@ STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");

namespace {
- class ARM64AdvSIMDScalar : public MachineFunctionPass {
+ class AArch64AdvSIMDScalar : public MachineFunctionPass {
MachineRegisterInfo *MRI;
- const ARM64InstrInfo *TII;
+ const AArch64InstrInfo *TII;

private:
// isProfitableToTransform - Predicate function to determine whether an
@@ -81,7 +81,7 @@ class ARM64AdvSIMDScalar : public MachineFunctionPass {

public:
static char ID; // Pass identification, replacement for typeid.
- explicit ARM64AdvSIMDScalar() : MachineFunctionPass(ID) {}
+ explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {}

bool runOnMachineFunction(MachineFunction &F) override;

@@ -94,28 +94,28 @@
MachineFunctionPass::getAnalysisUsage(AU);
}
};
- char ARM64AdvSIMDScalar::ID = 0;
+ char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace

static bool isGPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (SubReg)
return false;
if (TargetRegisterInfo::isVirtualRegister(Reg))
- return MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::GPR64RegClass);
- return ARM64::GPR64RegClass.contains(Reg);
+ return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
+ return AArch64::GPR64RegClass.contains(Reg);
}

static bool isFPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
- return (MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::FPR64RegClass) &&
+ return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
SubReg == 0) ||
- (MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::FPR128RegClass) &&
- SubReg == ARM64::dsub);
+ (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
+ SubReg == AArch64::dsub);
// Physical register references just check the register class directly.
- return (ARM64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
- (ARM64::FPR128RegClass.contains(Reg) && SubReg == ARM64::dsub);
+ return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
+ (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}

// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
@@ -125,17 +125,18 @@ static unsigned getSrcFromCopy(const MachineInstr *MI,
unsigned &SubReg) {
SubReg = 0;
// The "FMOV Xd, Dn" instruction is the typical form.
- if (MI->getOpcode() == ARM64::FMOVDXr || MI->getOpcode() == ARM64::FMOVXDr)
+ if (MI->getOpcode() == AArch64::FMOVDXr ||
+ MI->getOpcode() == AArch64::FMOVXDr)
return MI->getOperand(1).getReg();
// A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
// these at this stage, but it's easy to check for.
- if (MI->getOpcode() == ARM64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
- SubReg = ARM64::dsub;
+ if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
+ SubReg = AArch64::dsub;
return MI->getOperand(1).getReg();
}
// Or just a plain COPY instruction. This can be directly to/from FPR64,
// or it can be a dsub subreg reference to an FPR128.
- if (MI->getOpcode() == ARM64::COPY) {
+ if (MI->getOpcode() == AArch64::COPY) {
if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
MRI) &&
isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
@@ -161,10 +162,10 @@ static int getTransformOpcode(unsigned Opc) {
default:
break;
// FIXME: Lots more possibilities.
- case ARM64::ADDXrr:
- return ARM64::ADDv1i64;
- case ARM64::SUBXrr:
- return ARM64::SUBv1i64;
+ case AArch64::ADDXrr:
+ return AArch64::ADDv1i64;
+ case AArch64::SUBXrr:
+ return AArch64::SUBv1i64;
}
// No AdvSIMD equivalent, so just return the original opcode.
return Opc;
@@ -178,7 +179,8 @@ static bool isTransformable(const MachineInstr *MI) {
// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
- bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
+ bool
+ AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
// If this instruction isn't eligible to be transformed (no SIMD equivalent),
// early exit since that's the common case.
if (!isTransformable(MI))
@@ -238,8 +240,8 @@ bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
// preferable to have it use the FPR64 in most cases, as if the source
// vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
// Ditto for a lane insert.
- else if (Use->getOpcode() == ARM64::INSERT_SUBREG ||
- Use->getOpcode() == ARM64::INSvi64gpr)
+ else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
+ Use->getOpcode() == AArch64::INSvi64gpr)
;
else
AllUsesAreCopies = false;
@@ -259,10 +261,10 @@ bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
return TransformAll;
}

- static MachineInstr *insertCopy(const ARM64InstrInfo *TII, MachineInstr *MI,
+ static MachineInstr *insertCopy(const AArch64InstrInfo *TII, MachineInstr *MI,
unsigned Dst, unsigned Src, bool IsKill) {
MachineInstrBuilder MIB =
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(ARM64::COPY),
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
Dst)
.addReg(Src, getKillRegState(IsKill));
DEBUG(dbgs() << " adding copy: " << *MIB);
@@ -273,7 +275,7 @@ static MachineInstr *insertCopy(const ARM64InstrInfo *TII, MachineInstr *MI,
// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
- void ARM64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
+ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
DEBUG(dbgs() << "Scalar transform: " << *MI);

MachineBasicBlock *MBB = MI->getParent();
Expand Down Expand Up @@ -316,19 +318,19 @@ void ARM64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
// copy.
if (!Src0) {
SubReg0 = 0;
- Src0 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
+ Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
insertCopy(TII, MI, Src0, OrigSrc0, true);
}
if (!Src1) {
SubReg1 = 0;
- Src1 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
+ Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
insertCopy(TII, MI, Src1, OrigSrc1, true);
}

// Create a vreg for the destination.
// FIXME: No need to do this if the ultimate user expects an FPR64.
// Check for that and avoid the copy if possible.
- unsigned Dst = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
+ unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

// For now, all of the new instructions have the same simple three-register
// form, so no need to special case based on what instruction we're
@@ -349,7 +351,7 @@
}

// processMachineBasicBlock - Main optimization loop.
- bool ARM64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
+ bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
MachineInstr *MI = I;
@@ -363,13 +365,13 @@ bool ARM64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
}

// runOnMachineFunction - Pass entry point from PassManager.
- bool ARM64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
+ bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
bool Changed = false;
DEBUG(dbgs() << "***** ARM64AdvSIMDScalar *****\n");
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

const TargetMachine &TM = mf.getTarget();
MRI = &mf.getRegInfo();
- TII = static_cast<const ARM64InstrInfo *>(TM.getInstrInfo());
+ TII = static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());

// Just check things on a one-block-at-a-time basis.
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
@@ -378,8 +380,8 @@ bool ARM64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
return Changed;
}

- // createARM64AdvSIMDScalar - Factory function used by ARM64TargetMachine
+ // createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the PassManager.
- FunctionPass *llvm::createARM64AdvSIMDScalar() {
- return new ARM64AdvSIMDScalar();
+ FunctionPass *llvm::createAArch64AdvSIMDScalar() {
+ return new AArch64AdvSIMDScalar();
}
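The profitability logic above boils down to copy accounting: rewriting add Xd, Xn, Xm as add Dd, Dn, Dm only pays if it eliminates at least as many GPR<->FPR fmov copies as it inserts. A toy version of that trade-off (an illustration of the idea, not the pass's exact rule):

// Before: fmov x0, d0 ; fmov x1, d1 ; add x2, x0, x1 ; fmov d2, x2
// After:  add d2, d0, d1      (three cross-class copies disappear)
static bool worthTransforming(unsigned CopiesRemoved, unsigned CopiesInserted,
                              bool ForceAll) {
  if (ForceAll) // mirrors the aarch64-simd-scalar-force-all stress flag
    return true;
  return CopiesRemoved >= CopiesInserted; // break even or better
}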


@@ -1,4 +1,4 @@
- //===-- ARM64BranchRelaxation.cpp - ARM64 branch relaxation ---------------===//
+ //===-- AArch64BranchRelaxation.cpp - AArch64 branch relaxation -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,9 +9,9 @@
//
//===----------------------------------------------------------------------===//

#include "ARM64.h"
#include "ARM64InstrInfo.h"
#include "ARM64MachineFunctionInfo.h"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -23,29 +23,29 @@
#include "llvm/Support/CommandLine.h"
using namespace llvm;

#define DEBUG_TYPE "arm64-branch-relax"
#define DEBUG_TYPE "aarch64-branch-relax"

static cl::opt<bool>
BranchRelaxation("arm64-branch-relax", cl::Hidden, cl::init(true),
BranchRelaxation("aarch64-branch-relax", cl::Hidden, cl::init(true),
cl::desc("Relax out of range conditional branches"));

static cl::opt<unsigned>
TBZDisplacementBits("arm64-tbz-offset-bits", cl::Hidden, cl::init(14),
TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
CBZDisplacementBits("arm64-cbz-offset-bits", cl::Hidden, cl::init(19),
CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
BCCDisplacementBits("arm64-bcc-offset-bits", cl::Hidden, cl::init(19),
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
cl::desc("Restrict range of Bcc instructions (DEBUG)"));

STATISTIC(NumSplit, "Number of basic blocks split");
STATISTIC(NumRelaxed, "Number of conditional branches relaxed");

namespace {
- class ARM64BranchRelaxation : public MachineFunctionPass {
+ class AArch64BranchRelaxation : public MachineFunctionPass {
/// BasicBlockInfo - Information about the offset and size of a single
/// basic block.
struct BasicBlockInfo {
Expand Down Expand Up @@ -77,7 +77,7 @@ class ARM64BranchRelaxation : public MachineFunctionPass {
SmallVector<BasicBlockInfo, 16> BlockInfo;

MachineFunction *MF;
- const ARM64InstrInfo *TII;
+ const AArch64InstrInfo *TII;

bool relaxBranchInstructions();
void scanFunction();
@@ -92,19 +92,19 @@

public:
static char ID;
- ARM64BranchRelaxation() : MachineFunctionPass(ID) {}
+ AArch64BranchRelaxation() : MachineFunctionPass(ID) {}

bool runOnMachineFunction(MachineFunction &MF) override;

const char *getPassName() const override {
return "ARM64 branch relaxation pass";
return "AArch64 branch relaxation pass";
}
};
- char ARM64BranchRelaxation::ID = 0;
+ char AArch64BranchRelaxation::ID = 0;
}

/// verify - check BBOffsets, BBSizes, alignment of islands
- void ARM64BranchRelaxation::verify() {
+ void AArch64BranchRelaxation::verify() {
#ifndef NDEBUG
unsigned PrevNum = MF->begin()->getNumber();
for (MachineBasicBlock &MBB : *MF) {
@@ -118,7 +118,7 @@
}

/// print block size and offset information - debugging
- void ARM64BranchRelaxation::dumpBBs() {
+ void AArch64BranchRelaxation::dumpBBs() {
for (auto &MBB : *MF) {
const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()];
dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset)
@@ -145,7 +145,7 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {

/// scanFunction - Do the initial scan of the function, building up
/// information about each block.
- void ARM64BranchRelaxation::scanFunction() {
+ void AArch64BranchRelaxation::scanFunction() {
BlockInfo.clear();
BlockInfo.resize(MF->getNumBlockIDs());

@@ -162,7 +162,7 @@

/// computeBlockSize - Compute the size for MBB.
/// This function updates BlockInfo directly.
- void ARM64BranchRelaxation::computeBlockSize(const MachineBasicBlock &MBB) {
+ void AArch64BranchRelaxation::computeBlockSize(const MachineBasicBlock &MBB) {
unsigned Size = 0;
for (const MachineInstr &MI : MBB)
Size += TII->GetInstSizeInBytes(&MI);
@@ -172,7 +172,7 @@
/// getInstrOffset - Return the current offset of the specified machine
/// instruction from the start of the function. This offset changes as stuff is
/// moved around inside the function.
- unsigned ARM64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
+ unsigned AArch64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
MachineBasicBlock *MBB = MI->getParent();

// The offset is composed of two things: the sum of the sizes of all MBB's
@@ -188,7 +188,7 @@
return Offset;
}

- void ARM64BranchRelaxation::adjustBlockOffsets(MachineBasicBlock &Start) {
+ void AArch64BranchRelaxation::adjustBlockOffsets(MachineBasicBlock &Start) {
unsigned PrevNum = Start.getNumber();
for (auto &MBB : make_range(MachineFunction::iterator(Start), MF->end())) {
unsigned Num = MBB.getNumber();
@@ -209,7 +209,7 @@
/// and must be updated by the caller! Other transforms follow using this
/// utility function, so no point updating now rather than waiting.
MachineBasicBlock *
- ARM64BranchRelaxation::splitBlockBeforeInstr(MachineInstr *MI) {
+ AArch64BranchRelaxation::splitBlockBeforeInstr(MachineInstr *MI) {
MachineBasicBlock *OrigBB = MI->getParent();

// Create a new MBB for the code after the OrigBB.
@@ -226,7 +226,7 @@
// Note the new unconditional branch is not being recorded.
// There doesn't seem to be meaningful DebugInfo available; this doesn't
// correspond to anything in the source.
- BuildMI(OrigBB, DebugLoc(), TII->get(ARM64::B)).addMBB(NewBB);
+ BuildMI(OrigBB, DebugLoc(), TII->get(AArch64::B)).addMBB(NewBB);

// Insert an entry into BlockInfo to align it properly with the block numbers.
BlockInfo.insert(BlockInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
@@ -252,9 +252,9 @@

/// isBlockInRange - Returns true if the distance between specific MI and
/// specific BB can fit in MI's displacement field.
- bool ARM64BranchRelaxation::isBlockInRange(MachineInstr *MI,
- MachineBasicBlock *DestBB,
- unsigned Bits) {
+ bool AArch64BranchRelaxation::isBlockInRange(MachineInstr *MI,
+ MachineBasicBlock *DestBB,
+ unsigned Bits) {
unsigned MaxOffs = ((1 << (Bits - 1)) - 1) << 2;
unsigned BrOffset = getInstrOffset(MI);
unsigned DestOffset = BlockInfo[DestBB->getNumber()].Offset;
@@ -275,15 +275,15 @@ static bool isConditionalBranch(unsigned Opc) {
switch (Opc) {
default:
return false;
- case ARM64::TBZW:
- case ARM64::TBNZW:
- case ARM64::TBZX:
- case ARM64::TBNZX:
- case ARM64::CBZW:
- case ARM64::CBNZW:
- case ARM64::CBZX:
- case ARM64::CBNZX:
- case ARM64::Bcc:
+ case AArch64::TBZW:
+ case AArch64::TBNZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZX:
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ case AArch64::Bcc:
return true;
}
}
@@ -292,16 +292,16 @@ static MachineBasicBlock *getDestBlock(MachineInstr *MI) {
switch (MI->getOpcode()) {
default:
assert(0 && "unexpected opcode!");
- case ARM64::TBZW:
- case ARM64::TBNZW:
- case ARM64::TBZX:
- case ARM64::TBNZX:
+ case AArch64::TBZW:
+ case AArch64::TBNZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZX:
return MI->getOperand(2).getMBB();
- case ARM64::CBZW:
- case ARM64::CBNZW:
- case ARM64::CBZX:
- case ARM64::CBNZX:
- case ARM64::Bcc:
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ case AArch64::Bcc:
return MI->getOperand(1).getMBB();
}
}
@@ -310,48 +310,48 @@ static unsigned getOppositeConditionOpcode(unsigned Opc) {
switch (Opc) {
default:
assert(0 && "unexpected opcode!");
- case ARM64::TBNZW: return ARM64::TBZW;
- case ARM64::TBNZX: return ARM64::TBZX;
- case ARM64::TBZW: return ARM64::TBNZW;
- case ARM64::TBZX: return ARM64::TBNZX;
- case ARM64::CBNZW: return ARM64::CBZW;
- case ARM64::CBNZX: return ARM64::CBZX;
- case ARM64::CBZW: return ARM64::CBNZW;
- case ARM64::CBZX: return ARM64::CBNZX;
- case ARM64::Bcc: return ARM64::Bcc; // Condition is an operand for Bcc.
+ case AArch64::TBNZW: return AArch64::TBZW;
+ case AArch64::TBNZX: return AArch64::TBZX;
+ case AArch64::TBZW: return AArch64::TBNZW;
+ case AArch64::TBZX: return AArch64::TBNZX;
+ case AArch64::CBNZW: return AArch64::CBZW;
+ case AArch64::CBNZX: return AArch64::CBZX;
+ case AArch64::CBZW: return AArch64::CBNZW;
+ case AArch64::CBZX: return AArch64::CBNZX;
+ case AArch64::Bcc: return AArch64::Bcc; // Condition is an operand for Bcc.
}
}

static unsigned getBranchDisplacementBits(unsigned Opc) {
switch (Opc) {
default:
assert(0 && "unexpected opcode!");
- case ARM64::TBNZW:
- case ARM64::TBZW:
- case ARM64::TBNZX:
- case ARM64::TBZX:
+ case AArch64::TBNZW:
+ case AArch64::TBZW:
+ case AArch64::TBNZX:
+ case AArch64::TBZX:
return TBZDisplacementBits;
- case ARM64::CBNZW:
- case ARM64::CBZW:
- case ARM64::CBNZX:
- case ARM64::CBZX:
+ case AArch64::CBNZW:
+ case AArch64::CBZW:
+ case AArch64::CBNZX:
+ case AArch64::CBZX:
return CBZDisplacementBits;
- case ARM64::Bcc:
+ case AArch64::Bcc:
return BCCDisplacementBits;
}
}

static inline void invertBccCondition(MachineInstr *MI) {
- assert(MI->getOpcode() == ARM64::Bcc && "Unexpected opcode!");
- ARM64CC::CondCode CC = (ARM64CC::CondCode)MI->getOperand(0).getImm();
- CC = ARM64CC::getInvertedCondCode(CC);
+ assert(MI->getOpcode() == AArch64::Bcc && "Unexpected opcode!");
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(0).getImm();
+ CC = AArch64CC::getInvertedCondCode(CC);
MI->getOperand(0).setImm((int64_t)CC);
}

/// fixupConditionalBranch - Fix up a conditional branch whose destination is
/// too far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
- bool ARM64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
+ bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
MachineBasicBlock *DestBB = getDestBlock(MI);

// Add an unconditional branch to the destination and invert the branch
@@ -372,7 +372,7 @@ bool ARM64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
if (BMI != MI) {
if (std::next(MachineBasicBlock::iterator(MI)) ==
std::prev(MBB->getLastNonDebugInstr()) &&
- BMI->getOpcode() == ARM64::B) {
+ BMI->getOpcode() == AArch64::B) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
// beq L1
@@ -386,14 +386,15 @@
DEBUG(dbgs() << " Invert condition and swap its destination with "
<< *BMI);
BMI->getOperand(0).setMBB(DestBB);
- unsigned OpNum =
- (MI->getOpcode() == ARM64::TBZW || MI->getOpcode() == ARM64::TBNZW ||
- MI->getOpcode() == ARM64::TBZX || MI->getOpcode() == ARM64::TBNZX)
- ? 2
- : 1;
+ unsigned OpNum = (MI->getOpcode() == AArch64::TBZW ||
+ MI->getOpcode() == AArch64::TBNZW ||
+ MI->getOpcode() == AArch64::TBZX ||
+ MI->getOpcode() == AArch64::TBNZX)
+ ? 2
+ : 1;
MI->getOperand(OpNum).setMBB(NewDest);
MI->setDesc(TII->get(getOppositeConditionOpcode(MI->getOpcode())));
- if (MI->getOpcode() == ARM64::Bcc)
+ if (MI->getOpcode() == AArch64::Bcc)
invertBccCondition(MI);
return true;
}
@@ -429,14 +430,14 @@ bool ARM64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
MachineInstrBuilder MIB = BuildMI(
MBB, DebugLoc(), TII->get(getOppositeConditionOpcode(MI->getOpcode())))
.addOperand(MI->getOperand(0));
- if (MI->getOpcode() == ARM64::TBZW || MI->getOpcode() == ARM64::TBNZW ||
- MI->getOpcode() == ARM64::TBZX || MI->getOpcode() == ARM64::TBNZX)
+ if (MI->getOpcode() == AArch64::TBZW || MI->getOpcode() == AArch64::TBNZW ||
+ MI->getOpcode() == AArch64::TBZX || MI->getOpcode() == AArch64::TBNZX)
MIB.addOperand(MI->getOperand(1));
- if (MI->getOpcode() == ARM64::Bcc)
+ if (MI->getOpcode() == AArch64::Bcc)
invertBccCondition(MIB);
MIB.addMBB(NextBB);
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
- BuildMI(MBB, DebugLoc(), TII->get(ARM64::B)).addMBB(DestBB);
+ BuildMI(MBB, DebugLoc(), TII->get(AArch64::B)).addMBB(DestBB);
BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());

// Remove the old conditional branch. It may or may not still be in MBB.
@@ -448,7 +449,7 @@
return true;
}

- bool ARM64BranchRelaxation::relaxBranchInstructions() {
+ bool AArch64BranchRelaxation::relaxBranchInstructions() {
bool Changed = false;
// Relaxing branches involves creating new basic blocks, so re-eval
// end() for termination.
@@ -465,16 +466,16 @@
return Changed;
}

- bool ARM64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
+ bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;

// If the pass is disabled, just bail early.
if (!BranchRelaxation)
return false;

DEBUG(dbgs() << "***** ARM64BranchRelaxation *****\n");
DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n");

- TII = (const ARM64InstrInfo *)MF->getTarget().getInstrInfo();
+ TII = (const AArch64InstrInfo *)MF->getTarget().getInstrInfo();

// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
@@ -502,8 +503,8 @@ bool ARM64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
return MadeChange;
}

- /// createARM64BranchRelaxation - returns an instance of the constpool
+ /// createAArch64BranchRelaxation - returns an instance of the constpool
/// island pass.
- FunctionPass *llvm::createARM64BranchRelaxation() {
- return new ARM64BranchRelaxation();
+ FunctionPass *llvm::createAArch64BranchRelaxation() {
+ return new AArch64BranchRelaxation();
}
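The three cl::opt defaults near the top of this file (14, 19, and 19 bits) bound how far each branch kind can reach: the displacement is a signed, word-scaled immediate, so an N-bit field covers ((1 << (N - 1)) - 1) << 2 bytes forward — exactly the computation in isBlockInRange(). A quick standalone check of those limits:

#include <cstdio>

static unsigned maxForwardBytes(unsigned Bits) {
  return ((1u << (Bits - 1)) - 1) << 2; // signed field, scaled by 4
}

int main() {
  std::printf("TB[N]Z (14 bits): %u bytes\n", maxForwardBytes(14)); // 32764
  std::printf("CB[N]Z (19 bits): %u bytes\n", maxForwardBytes(19)); // 1048572
  std::printf("B.cond (19 bits): %u bytes\n", maxForwardBytes(19)); // 1048572
}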
@@ -1,4 +1,4 @@
- //=== ARM64CallingConv.h - Custom Calling Convention Routines -*- C++ -*-===//
+ //=== AArch64CallingConv.h - Custom Calling Convention Routines -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,38 +7,38 @@
//
//===----------------------------------------------------------------------===//
//
- // This file contains the custom routines for the ARM64 Calling Convention that
+ // This file contains the custom routines for the AArch64 Calling Convention that
// aren't done by tablegen.
//
//===----------------------------------------------------------------------===//

- #ifndef ARM64CALLINGCONV_H
- #define ARM64CALLINGCONV_H
+ #ifndef AArch64CALLINGCONV_H
+ #define AArch64CALLINGCONV_H

#include "ARM64InstrInfo.h"
#include "AArch64InstrInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/Target/TargetInstrInfo.h"

namespace llvm {

- /// CC_ARM64_Custom_i1i8i16_Reg - customized handling of passing i1/i8/i16 via
+ /// CC_AArch64_Custom_i1i8i16_Reg - customized handling of passing i1/i8/i16 via
/// register. Here, ValVT can be i1/i8/i16 or i32 depending on whether the
/// argument is already promoted and LocVT is i1/i8/i16. We only promote the
/// argument to i32 if we are sure this argument will be passed in register.
- static bool CC_ARM64_Custom_i1i8i16_Reg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ static bool CC_AArch64_Custom_i1i8i16_Reg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags,
CCState &State,
bool IsWebKitJS = false) {
- static const MCPhysReg RegList1[] = { ARM64::W0, ARM64::W1, ARM64::W2,
- ARM64::W3, ARM64::W4, ARM64::W5,
- ARM64::W6, ARM64::W7 };
- static const MCPhysReg RegList2[] = { ARM64::X0, ARM64::X1, ARM64::X2,
- ARM64::X3, ARM64::X4, ARM64::X5,
- ARM64::X6, ARM64::X7 };
- static const MCPhysReg WebKitRegList1[] = { ARM64::W0 };
- static const MCPhysReg WebKitRegList2[] = { ARM64::X0 };
+ static const MCPhysReg RegList1[] = { AArch64::W0, AArch64::W1, AArch64::W2,
+ AArch64::W3, AArch64::W4, AArch64::W5,
+ AArch64::W6, AArch64::W7 };
+ static const MCPhysReg RegList2[] = { AArch64::X0, AArch64::X1, AArch64::X2,
+ AArch64::X3, AArch64::X4, AArch64::X5,
+ AArch64::X6, AArch64::X7 };
+ static const MCPhysReg WebKitRegList1[] = { AArch64::W0 };
+ static const MCPhysReg WebKitRegList2[] = { AArch64::X0 };

const MCPhysReg *List1 = IsWebKitJS ? WebKitRegList1 : RegList1;
const MCPhysReg *List2 = IsWebKitJS ? WebKitRegList2 : RegList2;
@@ -63,22 +63,22 @@ static bool CC_ARM64_Custom_i1i8i16_Reg(unsigned ValNo, MVT ValVT, MVT LocVT,
return false;
}

- /// CC_ARM64_WebKit_JS_i1i8i16_Reg - customized handling of passing i1/i8/i16
- /// via register. This behaves the same as CC_ARM64_Custom_i1i8i16_Reg, but only
+ /// CC_AArch64_WebKit_JS_i1i8i16_Reg - customized handling of passing i1/i8/i16
+ /// via register. This behaves the same as CC_AArch64_Custom_i1i8i16_Reg, but only
/// uses the first register.
- static bool CC_ARM64_WebKit_JS_i1i8i16_Reg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ static bool CC_AArch64_WebKit_JS_i1i8i16_Reg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags,
CCState &State) {
- return CC_ARM64_Custom_i1i8i16_Reg(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ return CC_AArch64_Custom_i1i8i16_Reg(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
State, true);
}

- /// CC_ARM64_Custom_i1i8i16_Stack: customized handling of passing i1/i8/i16 on
+ /// CC_AArch64_Custom_i1i8i16_Stack: customized handling of passing i1/i8/i16 on
/// stack. Here, ValVT can be i1/i8/i16 or i32 depending on whether the argument
/// is already promoted and LocVT is i1/i8/i16. If ValVT is already promoted,
/// it will be truncated back to i1/i8/i16.
- static bool CC_ARM64_Custom_i1i8i16_Stack(unsigned ValNo, MVT ValVT, MVT LocVT,
+ static bool CC_AArch64_Custom_i1i8i16_Stack(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags,
CCState &State) {
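Stripped of the CCState machinery, the custom handler above does two things: promote i1/i8/i16 to i32, then hand out one of eight W registers — or just W0 under the WebKit JS convention — before falling back to the stack. A standalone sketch of that allocation order (register ids are placeholders):

#include <cstddef>

static const unsigned WRegs[8] = {0, 1, 2, 3, 4, 5, 6, 7}; // stands in for W0..W7

// Returns true and sets Reg while registers remain; false tells the caller
// to use a stack slot. NumRegs is 8 for AAPCS-style conventions and 1 for
// the WebKit JS convention.
static bool assignSmallInt(size_t &NextFree, size_t NumRegs, unsigned &Reg) {
  if (NextFree >= NumRegs)
    return false;
  Reg = WRegs[NextFree++]; // the value was already promoted to i32
  return true;
}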
@@ -1,4 +1,4 @@
- //===- ARM64CallingConv.td - Calling Conventions for ARM64 -*- tablegen -*-===//
+ //=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
- // This describes the calling conventions for ARM64 architecture.
+ // This describes the calling conventions for AArch64 architecture.
//
//===----------------------------------------------------------------------===//

@@ -22,7 +22,7 @@ class CCIfBigEndian<CCAction A> :
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

- def CC_ARM64_AAPCS : CallingConv<[
+ def CC_AArch64_AAPCS : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

@@ -42,7 +42,7 @@ def CC_ARM64_AAPCS : CallingConv<[

// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
// up to eight each of GPR and FPR.
CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Reg">>,
CCIfType<[i1, i8, i16], CCCustom<"CC_AArch64_Custom_i1i8i16_Reg">>,
CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
[X0, X1, X2, X3, X4, X5, X6, X7]>>,
// i128 is split to two i64s, we can't fit half to register X7.
@@ -73,7 +73,7 @@ def CC_ARM64_AAPCS : CallingConv<[
CCAssignToStack<16, 16>>
]>;

- def RetCC_ARM64_AAPCS : CallingConv<[
+ def RetCC_AArch64_AAPCS : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

@@ -104,7 +104,7 @@ def RetCC_ARM64_AAPCS : CallingConv<[
// from the standard one at this level:
// + i128s (i.e. split i64s) don't need even registers.
// + Stack slots are sized as needed rather than being at least 64-bit.
- def CC_ARM64_DarwinPCS : CallingConv<[
+ def CC_AArch64_DarwinPCS : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

@@ -117,7 +117,7 @@ def CC_ARM64_DarwinPCS : CallingConv<[

// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
// up to eight each of GPR and FPR.
CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Reg">>,
CCIfType<[i1, i8, i16], CCCustom<"CC_AArch64_Custom_i1i8i16_Reg">>,
CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
[X0, X1, X2, X3, X4, X5, X6, X7]>>,
// i128 is split to two i64s, we can't fit half to register X7.
@@ -140,14 +140,14 @@ def CC_ARM64_DarwinPCS : CallingConv<[
CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

// If more than will fit in registers, pass them on the stack instead.
CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_Custom_i1i8i16_Stack">>,
CCIfType<[i1, i8, i16], CCCustom<"CC_AArch64_Custom_i1i8i16_Stack">>,
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
CCAssignToStack<8, 8>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
]>;

- def CC_ARM64_DarwinPCS_VarArg : CallingConv<[
+ def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

@@ -166,9 +166,9 @@ def CC_ARM64_DarwinPCS_VarArg : CallingConv<[
// in register and the remaining arguments on stack. We allow 32bit stack slots,
// so that WebKit can write partial values in the stack and define the other
// 32bit quantity as undef.
- def CC_ARM64_WebKit_JS : CallingConv<[
+ def CC_AArch64_WebKit_JS : CallingConv<[
// Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
CCIfType<[i1, i8, i16], CCCustom<"CC_ARM64_WebKit_JS_i1i8i16_Reg">>,
CCIfType<[i1, i8, i16], CCCustom<"CC_AArch64_WebKit_JS_i1i8i16_Reg">>,
CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

@@ -178,7 +178,7 @@ def CC_ARM64_WebKit_JS : CallingConv<[
CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

- def RetCC_ARM64_WebKit_JS : CallingConv<[
+ def RetCC_AArch64_WebKit_JS : CallingConv<[
CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
[X0, X1, X2, X3, X4, X5, X6, X7]>>,
CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
@@ -197,7 +197,7 @@ def RetCC_ARM64_WebKit_JS : CallingConv<[
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
- def CSR_ARM64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
+ def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
X23, X24, X25, X26, X27, X28,
D8, D9, D10, D11,
D12, D13, D14, D15)>;
@@ -210,24 +210,24 @@ def CSR_ARM64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
- def CSR_ARM64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_ARM64_AAPCS, X0)>;
+ def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
- def CSR_ARM64_TLS_Darwin
+ def CSR_AArch64_TLS_Darwin
: CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
FP,
(sequence "Q%u", 0, 31))>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
- def CSR_ARM64_TLS_ELF
+ def CSR_AArch64_TLS_ELF
: CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
(sequence "Q%u", 0, 31))>;

- def CSR_ARM64_AllRegs
+ def CSR_AArch64_AllRegs
: CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
(sequence "X%u", 0, 28), FP, LR, SP,
(sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
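The AAPCS list above pins down X19-X28 plus FP and LR on the integer side and D8-D15 on the vector side. A quick size sanity check for a full spill of that set, at eight bytes per register:

constexpr unsigned NumCSRGprs = 10 + 2; // X19..X28, FP, LR
constexpr unsigned NumCSRFprs = 8;      // D8..D15
static_assert((NumCSRGprs + NumCSRFprs) * 8 == 160,
              "a full callee-saved spill area is 160 bytes");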
@@ -1,4 +1,4 @@
- //===-- ARM64CleanupLocalDynamicTLSPass.cpp -----------------------*- C++ -*-=//
+ //===-- AArch64CleanupLocalDynamicTLSPass.cpp ---------------------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -22,10 +22,10 @@
// pass looks through a function and performs such combinations.
//
//===----------------------------------------------------------------------===//
#include "ARM64.h"
#include "ARM64InstrInfo.h"
#include "ARM64MachineFunctionInfo.h"
#include "ARM64TargetMachine.h"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -39,7 +39,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
LDTLSCleanup() : MachineFunctionPass(ID) {}

bool runOnMachineFunction(MachineFunction &MF) override {
- ARM64FunctionInfo *AFI = MF.getInfo<ARM64FunctionInfo>();
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
if (AFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there aren't at least two.
return false;
@@ -62,7 +62,7 @@
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
++I) {
switch (I->getOpcode()) {
- case ARM64::TLSDESC_BLR:
+ case AArch64::TLSDESC_BLR:
// Make sure it's a local dynamic access.
if (!I->getOperand(1).isSymbol() ||
strcmp(I->getOperand(1).getSymbolName(), "_TLS_MODULE_BASE_"))
@@ -92,15 +92,15 @@ struct LDTLSCleanup : public MachineFunctionPass {
MachineInstr *replaceTLSBaseAddrCall(MachineInstr *I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const ARM64TargetMachine *TM =
- static_cast<const ARM64TargetMachine *>(&MF->getTarget());
- const ARM64InstrInfo *TII = TM->getInstrInfo();
+ const AArch64TargetMachine *TM =
+ static_cast<const AArch64TargetMachine *>(&MF->getTarget());
+ const AArch64InstrInfo *TII = TM->getInstrInfo();

// Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
// code sequence assumes the address will be.
- MachineInstr *Copy =
- BuildMI(*I->getParent(), I, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY), ARM64::X0).addReg(TLSBaseAddrReg);
+ MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ AArch64::X0).addReg(TLSBaseAddrReg);

// Erase the TLS_base_addr instruction.
I->eraseFromParent();
@@ -112,19 +112,19 @@
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *setRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
- const ARM64TargetMachine *TM =
- static_cast<const ARM64TargetMachine *>(&MF->getTarget());
- const ARM64InstrInfo *TII = TM->getInstrInfo();
+ const AArch64TargetMachine *TM =
+ static_cast<const AArch64TargetMachine *>(&MF->getTarget());
+ const AArch64InstrInfo *TII = TM->getInstrInfo();

// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- *TLSBaseAddrReg = RegInfo.createVirtualRegister(&ARM64::GPR64RegClass);
+ *TLSBaseAddrReg = RegInfo.createVirtualRegister(&AArch64::GPR64RegClass);

// Insert a copy from X0 to TLSBaseAddrReg for later.
MachineInstr *Next = I->getNextNode();
MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
TII->get(TargetOpcode::COPY),
- *TLSBaseAddrReg).addReg(ARM64::X0);
+ *TLSBaseAddrReg).addReg(AArch64::X0);

return Copy;
}
@@ -142,6 +142,6 @@
}

char LDTLSCleanup::ID = 0;
- FunctionPass *llvm::createARM64CleanupLocalDynamicTLSPass() {
+ FunctionPass *llvm::createAArch64CleanupLocalDynamicTLSPass() {
return new LDTLSCleanup();
}
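The transformation, restated at the source level: the first descriptor call's result is parked in a virtual register and every later access becomes a plain copy, so only one TLSDESC_BLR survives per function. Conceptually (tlsBaseAddr() is a hypothetical stand-in for the expensive call):

#include <cstdint>

static int Slot; // stand-in for the thread-local storage

static uint64_t tlsBaseAddr() { return (uint64_t)(uintptr_t)&Slot; }

uint64_t sumBefore() {                // two descriptor calls
  return tlsBaseAddr() + tlsBaseAddr();
}

uint64_t sumAfter() {                 // one call, result reused via a copy
  uint64_t Base = tlsBaseAddr();
  return Base + Base;
}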



@@ -1,4 +1,4 @@
- //===-- ARM64DeadRegisterDefinitions.cpp - Replace dead defs w/ zero reg --===//
+ //==-- AArch64DeadRegisterDefinitions.cpp - Replace dead defs w/ zero reg --==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
// hardware's register renamer.
//===----------------------------------------------------------------------===//

#include "ARM64.h"
#include "ARM64RegisterInfo.h"
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -21,20 +21,20 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "arm64-dead-defs"
#define DEBUG_TYPE "aarch64-dead-defs"

STATISTIC(NumDeadDefsReplaced, "Number of dead definitions replaced");

namespace {
- class ARM64DeadRegisterDefinitions : public MachineFunctionPass {
+ class AArch64DeadRegisterDefinitions : public MachineFunctionPass {
private:
const TargetRegisterInfo *TRI;
bool implicitlyDefinesOverlappingReg(unsigned Reg, const MachineInstr &MI);
bool processMachineBasicBlock(MachineBasicBlock &MBB);
bool usesFrameIndex(const MachineInstr &MI);
public:
static char ID; // Pass identification, replacement for typeid.
- explicit ARM64DeadRegisterDefinitions() : MachineFunctionPass(ID) {}
+ explicit AArch64DeadRegisterDefinitions() : MachineFunctionPass(ID) {}

virtual bool runOnMachineFunction(MachineFunction &F) override;

@@ -45,10 +45,10 @@ class ARM64DeadRegisterDefinitions : public MachineFunctionPass {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
- char ARM64DeadRegisterDefinitions::ID = 0;
+ char AArch64DeadRegisterDefinitions::ID = 0;
} // end anonymous namespace

- bool ARM64DeadRegisterDefinitions::implicitlyDefinesOverlappingReg(
+ bool AArch64DeadRegisterDefinitions::implicitlyDefinesOverlappingReg(
unsigned Reg, const MachineInstr &MI) {
for (const MachineOperand &MO : MI.implicit_operands())
if (MO.isReg() && MO.isDef())
@@ -57,15 +57,15 @@ bool ARM64DeadRegisterDefinitions::implicitlyDefinesOverlappingReg(
return false;
}

- bool ARM64DeadRegisterDefinitions::usesFrameIndex(const MachineInstr &MI) {
+ bool AArch64DeadRegisterDefinitions::usesFrameIndex(const MachineInstr &MI) {
for (const MachineOperand &Op : MI.uses())
if (Op.isFI())
return true;
return false;
}

- bool
- ARM64DeadRegisterDefinitions::processMachineBasicBlock(MachineBasicBlock &MBB) {
+ bool AArch64DeadRegisterDefinitions::processMachineBasicBlock(
+ MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineInstr &MI : MBB) {
if (usesFrameIndex(MI)) {
@@ -99,11 +99,11 @@ ARM64DeadRegisterDefinitions::processMachineBasicBlock(MachineBasicBlock &MBB) {
default:
DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
- case ARM64::GPR32RegClassID:
- NewReg = ARM64::WZR;
+ case AArch64::GPR32RegClassID:
+ NewReg = AArch64::WZR;
break;
- case ARM64::GPR64RegClassID:
- NewReg = ARM64::XZR;
+ case AArch64::GPR64RegClassID:
+ NewReg = AArch64::XZR;
break;
}
DEBUG(dbgs() << " Replacing with zero register. New:\n ");
@@ -118,17 +118,17 @@

// Scan the function for instructions that have a dead definition of a
// register. Replace that register with the zero register when possible.
- bool ARM64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
+ bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getTarget().getRegisterInfo();
bool Changed = false;
DEBUG(dbgs() << "***** ARM64DeadRegisterDefinitions *****\n");
DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");

for (auto &MBB : MF)
if (processMachineBasicBlock(MBB))
Changed = true;
return Changed;
}

- FunctionPass *llvm::createARM64DeadRegisterDefinitions() {
- return new ARM64DeadRegisterDefinitions();
+ FunctionPass *llvm::createAArch64DeadRegisterDefinitions() {
+ return new AArch64DeadRegisterDefinitions();
}
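
For orientation, the factory above is what the target's pass configuration calls when assembling the codegen pipeline. The sketch below shows that wiring under assumed names; AArch64PassConfig and its hook follow LLVM convention but are not part of the rendered hunks:

```cpp
// Sketch only: scheduling the renamed pass from the target pass config.
// AArch64PassConfig, its TM member, and the hook are assumptions.
bool AArch64PassConfig::addPostRegAlloc() {
  // Rewrite dead GPR definitions to WZR/XZR when optimizing.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64DeadRegisterDefinitions());
  return true;
}
```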

@@ -1,4 +1,4 @@
//===-- ARM64FrameLowering.h - TargetFrameLowering for ARM64 ----*- C++ -*-===//
//==-- AArch64FrameLowering.h - TargetFrameLowering for AArch64 --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,22 +11,22 @@
//
//===----------------------------------------------------------------------===//

#ifndef ARM64_FRAMELOWERING_H
#define ARM64_FRAMELOWERING_H
#ifndef AArch64_FRAMELOWERING_H
#define AArch64_FRAMELOWERING_H

#include "llvm/Target/TargetFrameLowering.h"

namespace llvm {

class ARM64Subtarget;
class ARM64TargetMachine;
class AArch64Subtarget;
class AArch64TargetMachine;

class ARM64FrameLowering : public TargetFrameLowering {
const ARM64TargetMachine &TM;
class AArch64FrameLowering : public TargetFrameLowering {
const AArch64TargetMachine &TM;

public:
explicit ARM64FrameLowering(const ARM64TargetMachine &TM,
const ARM64Subtarget &STI)
explicit AArch64FrameLowering(const AArch64TargetMachine &TM,
const AArch64Subtarget &STI)
: TargetFrameLowering(StackGrowsDown, 16, 0, 16,
false /*StackRealignable*/),
TM(TM) {}
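
The base-class arguments above encode the AArch64 stack model: downward growth, 16-byte stack alignment, a zero local-area offset, and 16-byte transient stack alignment. A minimal sketch of reading those properties back through the generic interface; the MachineFunction reference MF is assumed:

```cpp
// Sketch: the renamed class is normally reached via the generic hook.
const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
unsigned StackAlign = TFL->getStackAlignment(); // 16, per the ctor above
bool GrowsDown =
    TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
```
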
@@ -1,4 +1,4 @@
//==-- ARM64ISelLowering.h - ARM64 DAG Lowering Interface --------*- C++ -*-==//
//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM64 uses to lower LLVM code into a
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM64_ISELLOWERING_H
#define LLVM_TARGET_ARM64_ISELLOWERING_H
#ifndef LLVM_TARGET_AArch64_ISELLOWERING_H
#define LLVM_TARGET_AArch64_ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
@@ -22,7 +22,7 @@

namespace llvm {

namespace ARM64ISD {
namespace AArch64ISD {

enum {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
@@ -188,16 +188,16 @@ enum {
ST4LANEpost
};

} // end namespace ARM64ISD
} // end namespace AArch64ISD

class ARM64Subtarget;
class ARM64TargetMachine;
class AArch64Subtarget;
class AArch64TargetMachine;

class ARM64TargetLowering : public TargetLowering {
class AArch64TargetLowering : public TargetLowering {
bool RequireStrictAlign;

public:
explicit ARM64TargetLowering(ARM64TargetMachine &TM);
explicit AArch64TargetLowering(AArch64TargetMachine &TM);

/// Selects the correct CCAssignFn for the given CallingConvention
/// value.
@@ -325,9 +325,9 @@ class ARM64TargetLowering : public TargetLowering {
bool shouldExpandAtomicInIR(Instruction *Inst) const override;

private:
/// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
/// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
/// make the right decision when generating code for different targets.
const ARM64Subtarget *Subtarget;
const AArch64Subtarget *Subtarget;

void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
@@ -454,11 +454,11 @@ class ARM64TargetLowering : public TargetLowering {
SelectionDAG &DAG) const override;
};

namespace ARM64 {
namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
} // end namespace ARM64
} // end namespace AArch64

} // end namespace llvm

#endif // LLVM_TARGET_ARM64_ISELLOWERING_H
#endif // LLVM_TARGET_AArch64_ISELLOWERING_H
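
The AArch64::createFastISel factory declared above is conventionally forwarded from the TargetLowering override; a sketch of that forwarding, assumed rather than taken from the rendered hunks:

```cpp
// Sketch: forwarding the generic FastISel hook to the renamed factory.
FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                      const TargetLibraryInfo *LibInfo) const {
  return AArch64::createFastISel(FuncInfo, LibInfo);
}
```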
@@ -1,4 +1,4 @@
//===- ARM64InstrAtomics.td - ARM64 Atomic codegen support -*- tablegen -*-===//
//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
// ARM64 Atomic operand code-gen constructs.
// AArch64 Atomic operand code-gen constructs.
//
//===----------------------------------------------------------------------===//

@@ -117,7 +117,7 @@ class releasing_store<PatFrag base>
return Ordering == Release || Ordering == SequentiallyConsistent;
}]>;

// An atomic store operation that doesn't actually need to be atomic on ARM64.
// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
@@ -202,19 +202,19 @@ def : Pat<(relaxed_store<atomic_store_64>

// Load-exclusives.

def ldxr_1 : PatFrag<(ops node:$ptr), (int_arm64_ldxr node:$ptr), [{
def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldxr_2 : PatFrag<(ops node:$ptr), (int_arm64_ldxr node:$ptr), [{
def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldxr_4 : PatFrag<(ops node:$ptr), (int_arm64_ldxr node:$ptr), [{
def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldxr_8 : PatFrag<(ops node:$ptr), (int_arm64_ldxr node:$ptr), [{
def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

@@ -235,19 +235,19 @@ def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),

// Load-exclusives.

def ldaxr_1 : PatFrag<(ops node:$ptr), (int_arm64_ldaxr node:$ptr), [{
def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldaxr_2 : PatFrag<(ops node:$ptr), (int_arm64_ldaxr node:$ptr), [{
def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldaxr_4 : PatFrag<(ops node:$ptr), (int_arm64_ldaxr node:$ptr), [{
def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldaxr_8 : PatFrag<(ops node:$ptr), (int_arm64_ldaxr node:$ptr), [{
def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

@@ -269,22 +269,22 @@ def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
// Store-exclusives.

def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stxr node:$val, node:$ptr), [{
(int_aarch64_stxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stxr node:$val, node:$ptr), [{
(int_aarch64_stxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stxr node:$val, node:$ptr), [{
(int_aarch64_stxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stxr node:$val, node:$ptr), [{
(int_aarch64_stxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

@@ -315,22 +315,22 @@ def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
// Store-release-exclusives.

def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stlxr node:$val, node:$ptr), [{
(int_aarch64_stlxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stlxr node:$val, node:$ptr), [{
(int_aarch64_stlxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stlxr node:$val, node:$ptr), [{
(int_aarch64_stlxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
(int_arm64_stlxr node:$val, node:$ptr), [{
(int_aarch64_stlxr node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

@@ -361,4 +361,4 @@ def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),

// And clear exclusive.

def : Pat<(int_arm64_clrex), (CLREX 0xf)>;
def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
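
Every pattern in this file now keys off intrinsics spelled int_aarch64_*, i.e. llvm.aarch64.* at the IR level. A hedged sketch of emitting one such call from C++; Module *M, IRBuilder<> Builder, and Value *Ptr are assumed to exist in the caller:

```cpp
// Sketch: creating the renamed exclusive-load intrinsic with IRBuilder.
Function *Ldxr = Intrinsic::getDeclaration(M, Intrinsic::aarch64_ldxr,
                                           Builder.getInt8PtrTy());
// The ldxr_1 PatFrag above matches this call when the memory type is i8.
Value *Loaded = Builder.CreateCall(Ldxr, Ptr, "loaded");
```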

@@ -1,4 +1,4 @@
//===- ARM64InstrInfo.h - ARM64 Instruction Information ---------*- C++ -*-===//
//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,44 +7,44 @@
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM64 implementation of the TargetInstrInfo class.
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM64INSTRINFO_H
#define LLVM_TARGET_ARM64INSTRINFO_H
#ifndef LLVM_TARGET_AArch64INSTRINFO_H
#define LLVM_TARGET_AArch64INSTRINFO_H

#include "ARM64.h"
#include "ARM64RegisterInfo.h"
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "ARM64GenInstrInfo.inc"
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class ARM64Subtarget;
class ARM64TargetMachine;
class AArch64Subtarget;
class AArch64TargetMachine;

class ARM64InstrInfo : public ARM64GenInstrInfo {
class AArch64InstrInfo : public AArch64GenInstrInfo {
// Reserve bits in the MachineMemOperand target hint flags, starting at 1.
// They will be shifted into MOTargetHintStart when accessed.
enum TargetMemOperandFlags {
MOSuppressPair = 1
};

const ARM64RegisterInfo RI;
const ARM64Subtarget &Subtarget;
const AArch64RegisterInfo RI;
const AArch64Subtarget &Subtarget;

public:
explicit ARM64InstrInfo(const ARM64Subtarget &STI);
explicit AArch64InstrInfo(const AArch64Subtarget &STI);

/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
const ARM64RegisterInfo &getRegisterInfo() const { return RI; }
const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

const ARM64Subtarget &getSubTarget() const { return Subtarget; }
const AArch64Subtarget &getSubTarget() const { return Subtarget; }

unsigned GetInstSizeInBytes(const MachineInstr *MI) const;

@@ -60,8 +60,8 @@ class ARM64InstrInfo : public ARM64GenInstrInfo {
/// is non-zero.
bool hasShiftedReg(const MachineInstr *MI) const;

/// Returns true if there is an extendable register and that the extending value
/// is non-zero.
/// Returns true if there is an extendable register and the extending
/// value is non-zero.
bool hasExtendedReg(const MachineInstr *MI) const;

/// \brief Does this instruction set its full destination register to zero?
@@ -168,63 +168,63 @@ class ARM64InstrInfo : public ARM64GenInstrInfo {
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
const ARM64InstrInfo *TII,
const AArch64InstrInfo *TII,
MachineInstr::MIFlag = MachineInstr::NoFlags,
bool SetNZCV = false);

/// rewriteARM64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteARM64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARM64InstrInfo *TII);
const AArch64InstrInfo *TII);

/// \brief Use to report the frame offset status in isARM64FrameOffsetLegal.
enum ARM64FrameOffsetStatus {
ARM64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
ARM64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
ARM64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
/// \brief Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
};

/// \brief Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by ARM64FrameOffsetStatus for that.
/// If result == ARM64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & ARM64FrameOffsetIsLegal, @p Offset can completely be
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & ARM64FrameOffsetCanUpdate, @p Offset contains the
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain the whether @p MI should be
/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isARM64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
bool *OutUseUnscaledOp = nullptr,
unsigned *OutUnscaledOp = nullptr,
int *EmittableOffset = nullptr);

static inline bool isUncondBranchOpcode(int Opc) { return Opc == ARM64::B; }
static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
switch (Opc) {
case ARM64::Bcc:
case ARM64::CBZW:
case ARM64::CBZX:
case ARM64::CBNZW:
case ARM64::CBNZX:
case ARM64::TBZW:
case ARM64::TBZX:
case ARM64::TBNZW:
case ARM64::TBNZX:
case AArch64::Bcc:
case AArch64::CBZW:
case AArch64::CBZX:
case AArch64::CBNZW:
case AArch64::CBNZX:
case AArch64::TBZW:
case AArch64::TBZX:
case AArch64::TBNZW:
case AArch64::TBNZX:
return true;
default:
return false;
}
}

static inline bool isIndirectBranchOpcode(int Opc) { return Opc == ARM64::BR; }
static inline bool isIndirectBranchOpcode(int Opc) { return Opc == AArch64::BR; }

} // end namespace llvm

@@ -1,4 +1,4 @@
//===-- ARM64MCInstLower.cpp - Convert ARM64 MachineInstr to an MCInst---===//
//==-- AArch64MCInstLower.cpp - Convert AArch64 MachineInstr to an MCInst --==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,14 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower ARM64 MachineInstrs to their corresponding
// This file contains code to lower AArch64 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "ARM64MCInstLower.h"
#include "MCTargetDesc/ARM64MCExpr.h"
#include "Utils/ARM64BaseInfo.h"
#include "AArch64MCInstLower.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -25,46 +25,46 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

ARM64MCInstLower::ARM64MCInstLower(MCContext &ctx, Mangler &mang,
AsmPrinter &printer)
AArch64MCInstLower::AArch64MCInstLower(MCContext &ctx, Mangler &mang,
AsmPrinter &printer)
: Ctx(ctx), Printer(printer), TargetTriple(printer.getTargetTriple()) {}

MCSymbol *
ARM64MCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
AArch64MCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
return Printer.getSymbol(MO.getGlobal());
}

MCSymbol *
ARM64MCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const {
AArch64MCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const {
return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
}

MCOperand ARM64MCInstLower::lowerSymbolOperandDarwin(const MachineOperand &MO,
MCSymbol *Sym) const {
MCOperand AArch64MCInstLower::lowerSymbolOperandDarwin(const MachineOperand &MO,
MCSymbol *Sym) const {
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
if ((MO.getTargetFlags() & ARM64II::MO_GOT) != 0) {
if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_PAGE)
if ((MO.getTargetFlags() & AArch64II::MO_GOT) != 0) {
if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
RefKind = MCSymbolRefExpr::VK_GOTPAGE;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) ==
ARM64II::MO_PAGEOFF)
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
AArch64II::MO_PAGEOFF)
RefKind = MCSymbolRefExpr::VK_GOTPAGEOFF;
else
assert(0 && "Unexpected target flags with MO_GOT on GV operand");
} else if ((MO.getTargetFlags() & ARM64II::MO_TLS) != 0) {
if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_PAGE)
} else if ((MO.getTargetFlags() & AArch64II::MO_TLS) != 0) {
if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
RefKind = MCSymbolRefExpr::VK_TLVPPAGE;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) ==
ARM64II::MO_PAGEOFF)
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
AArch64II::MO_PAGEOFF)
RefKind = MCSymbolRefExpr::VK_TLVPPAGEOFF;
else
llvm_unreachable("Unexpected target flags with MO_TLS on GV operand");
} else {
if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_PAGE)
if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
RefKind = MCSymbolRefExpr::VK_PAGE;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) ==
ARM64II::MO_PAGEOFF)
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
AArch64II::MO_PAGEOFF)
RefKind = MCSymbolRefExpr::VK_PAGEOFF;
}
const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
@@ -74,13 +74,13 @@ MCOperand ARM64MCInstLower::lowerSymbolOperandDarwin(const MachineOperand &MO,
return MCOperand::CreateExpr(Expr);
}

MCOperand ARM64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
MCSymbol *Sym) const {
MCOperand AArch64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
MCSymbol *Sym) const {
uint32_t RefFlags = 0;

if (MO.getTargetFlags() & ARM64II::MO_GOT)
RefFlags |= ARM64MCExpr::VK_GOT;
else if (MO.getTargetFlags() & ARM64II::MO_TLS) {
if (MO.getTargetFlags() & AArch64II::MO_GOT)
RefFlags |= AArch64MCExpr::VK_GOT;
else if (MO.getTargetFlags() & AArch64II::MO_TLS) {
TLSModel::Model Model;
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
@@ -93,64 +93,65 @@ MCOperand ARM64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
}
switch (Model) {
case TLSModel::InitialExec:
RefFlags |= ARM64MCExpr::VK_GOTTPREL;
RefFlags |= AArch64MCExpr::VK_GOTTPREL;
break;
case TLSModel::LocalExec:
RefFlags |= ARM64MCExpr::VK_TPREL;
RefFlags |= AArch64MCExpr::VK_TPREL;
break;
case TLSModel::LocalDynamic:
RefFlags |= ARM64MCExpr::VK_DTPREL;
RefFlags |= AArch64MCExpr::VK_DTPREL;
break;
case TLSModel::GeneralDynamic:
RefFlags |= ARM64MCExpr::VK_TLSDESC;
RefFlags |= AArch64MCExpr::VK_TLSDESC;
break;
}
} else {
// No modifier means this is a generic reference, classified as absolute for
// the cases where it matters (:abs_g0: etc).
RefFlags |= ARM64MCExpr::VK_ABS;
RefFlags |= AArch64MCExpr::VK_ABS;
}

if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_PAGE)
RefFlags |= ARM64MCExpr::VK_PAGE;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_PAGEOFF)
RefFlags |= ARM64MCExpr::VK_PAGEOFF;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_G3)
RefFlags |= ARM64MCExpr::VK_G3;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_G2)
RefFlags |= ARM64MCExpr::VK_G2;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_G1)
RefFlags |= ARM64MCExpr::VK_G1;
else if ((MO.getTargetFlags() & ARM64II::MO_FRAGMENT) == ARM64II::MO_G0)
RefFlags |= ARM64MCExpr::VK_G0;

if (MO.getTargetFlags() & ARM64II::MO_NC)
RefFlags |= ARM64MCExpr::VK_NC;
if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
RefFlags |= AArch64MCExpr::VK_PAGE;
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
AArch64II::MO_PAGEOFF)
RefFlags |= AArch64MCExpr::VK_PAGEOFF;
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G3)
RefFlags |= AArch64MCExpr::VK_G3;
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G2)
RefFlags |= AArch64MCExpr::VK_G2;
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G1)
RefFlags |= AArch64MCExpr::VK_G1;
else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G0)
RefFlags |= AArch64MCExpr::VK_G0;

if (MO.getTargetFlags() & AArch64II::MO_NC)
RefFlags |= AArch64MCExpr::VK_NC;

const MCExpr *Expr =
MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, Ctx);
if (!MO.isJTI() && MO.getOffset())
Expr = MCBinaryExpr::CreateAdd(
Expr, MCConstantExpr::Create(MO.getOffset(), Ctx), Ctx);

ARM64MCExpr::VariantKind RefKind;
RefKind = static_cast<ARM64MCExpr::VariantKind>(RefFlags);
Expr = ARM64MCExpr::Create(Expr, RefKind, Ctx);
AArch64MCExpr::VariantKind RefKind;
RefKind = static_cast<AArch64MCExpr::VariantKind>(RefFlags);
Expr = AArch64MCExpr::Create(Expr, RefKind, Ctx);

return MCOperand::CreateExpr(Expr);
}

MCOperand ARM64MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
MCSymbol *Sym) const {
MCOperand AArch64MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
MCSymbol *Sym) const {
if (TargetTriple.isOSDarwin())
return lowerSymbolOperandDarwin(MO, Sym);

assert(TargetTriple.isOSBinFormatELF() && "Expect Darwin or ELF target");
return lowerSymbolOperandELF(MO, Sym);
}

bool ARM64MCInstLower::lowerOperand(const MachineOperand &MO,
MCOperand &MCOp) const {
bool AArch64MCInstLower::lowerOperand(const MachineOperand &MO,
MCOperand &MCOp) const {
switch (MO.getType()) {
default:
assert(0 && "unknown operand type");
@@ -190,7 +191,7 @@ bool ARM64MCInstLower::lowerOperand(const MachineOperand &MO,
return true;
}

void ARM64MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
void AArch64MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());

for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
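
For orientation (the remainder of Lower() is truncated in this rendering): the lowering object is driven from the target AsmPrinter. A sketch of that call site; the AArch64AsmPrinter scaffolding is renamed in a portion of the patch not rendered here, so the surrounding code is an assumption:

```cpp
// Sketch: how the AsmPrinter override would use the renamed lowering class.
void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  AArch64MCInstLower MCInstLowering(OutContext, *Mang, *this);
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst); // lowers each operand via lowerOperand()
  EmitToStreamer(OutStreamer, TmpInst);
}
```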
@@ -1,4 +1,4 @@
//===-- ARM64MCInstLower.h - Lower MachineInstr to MCInst ----------------===//
//===-- AArch64MCInstLower.h - Lower MachineInstr to MCInst ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef ARM64_MCINSTLOWER_H
#define ARM64_MCINSTLOWER_H
#ifndef AArch64_MCINSTLOWER_H
#define AArch64_MCINSTLOWER_H

#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
@@ -25,15 +25,15 @@ class MachineModuleInfoMachO;
class MachineOperand;
class Mangler;

/// ARM64MCInstLower - This class is used to lower an MachineInstr
/// AArch64MCInstLower - This class is used to lower a MachineInstr
/// into an MCInst.
class LLVM_LIBRARY_VISIBILITY ARM64MCInstLower {
class LLVM_LIBRARY_VISIBILITY AArch64MCInstLower {
MCContext &Ctx;
AsmPrinter &Printer;
Triple TargetTriple;

public:
ARM64MCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer);
AArch64MCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer);

bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
@@ -1,4 +1,4 @@
//===- ARM64MachineFuctionInfo.h - ARM64 machine function info --*- C++ -*-===//
//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
// This file declares ARM64-specific per-machine-function information.
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef ARM64MACHINEFUNCTIONINFO_H
#define ARM64MACHINEFUNCTIONINFO_H
#ifndef AArch64MACHINEFUNCTIONINFO_H
#define AArch64MACHINEFUNCTIONINFO_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -21,9 +21,9 @@

namespace llvm {

/// ARM64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private ARM64-specific information for each MachineFunction.
class ARM64FunctionInfo : public MachineFunctionInfo {
/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private AArch64-specific information for each MachineFunction.
class AArch64FunctionInfo : public MachineFunctionInfo {

/// Number of bytes of arguments this function has on the stack. If the callee
/// is expected to restore the argument stack this should be a multiple of 16,
@@ -73,12 +73,12 @@ class ARM64FunctionInfo : public MachineFunctionInfo {
unsigned VarArgsFPRSize;

public:
ARM64FunctionInfo()
AArch64FunctionInfo()
: BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0) {}

explicit ARM64FunctionInfo(MachineFunction &MF)
explicit AArch64FunctionInfo(MachineFunction &MF)
: BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0) {
@@ -160,4 +160,4 @@ class ARM64FunctionInfo : public MachineFunctionInfo {
};
} // End llvm namespace

#endif // ARM64MACHINEFUNCTIONINFO_H
#endif // AArch64MACHINEFUNCTIONINFO_H
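
Per-function state like this is always fetched through MachineFunction::getInfo, so callers only change the template argument with the rename. A minimal sketch; the accessor name is inferred from the fields above and is an assumption:

```cpp
// Sketch: retrieving the renamed function info inside a machine pass.
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
unsigned ArgBytes = AFI->getBytesInStackArgArea(); // assumed accessor
```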
@@ -1,4 +1,4 @@
//===-- ARM64PerfectShuffle.h - AdvSIMD Perfect Shuffle Table -------------===//
//===-- AArch64PerfectShuffle.h - AdvSIMD Perfect Shuffle Table -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -1,5 +1,4 @@

//===-- ARM64PromoteConstant.cpp --- Promote constant to global for ARM64 -===//
//=- AArch64PromoteConstant.cpp --- Promote constant to global for AArch64 -==//
//
// The LLVM Compiler Infrastructure
//
@@ -8,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARM64PromoteConstant pass which promotes constants
// This file implements the AArch64PromoteConstant pass which promotes constants
// to global variables when this is likely to be more efficient. Currently only
// types related to constant vector (i.e., constant vector, array of constant
// vectors, constant structure with a constant vector field, etc.) are promoted
@@ -21,7 +20,7 @@
// FIXME: This pass may be useful for other targets too.
//===----------------------------------------------------------------------===//

#include "ARM64.h"
#include "AArch64.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -41,17 +40,17 @@

using namespace llvm;

#define DEBUG_TYPE "arm64-promote-const"
#define DEBUG_TYPE "aarch64-promote-const"

// Stress testing mode - disable heuristics.
static cl::opt<bool> Stress("arm64-stress-promote-const", cl::Hidden,
static cl::opt<bool> Stress("aarch64-stress-promote-const", cl::Hidden,
cl::desc("Promote all vector constants"));

STATISTIC(NumPromoted, "Number of promoted constants");
STATISTIC(NumPromotedUses, "Number of promoted constants uses");

//===----------------------------------------------------------------------===//
// ARM64PromoteConstant
// AArch64PromoteConstant
//===----------------------------------------------------------------------===//

namespace {
@@ -81,13 +80,13 @@ namespace {
///
/// Therefore the final assembly has 4 different loads. With this pass
/// enabled, only one load is issued for the constants.
class ARM64PromoteConstant : public ModulePass {
class AArch64PromoteConstant : public ModulePass {

public:
static char ID;
ARM64PromoteConstant() : ModulePass(ID) {}
AArch64PromoteConstant() : ModulePass(ID) {}

const char *getPassName() const override { return "ARM64 Promote Constant"; }
const char *getPassName() const override { return "AArch64 Promote Constant"; }

/// Iterate over the functions and promote the interesting constants into
/// global variables with module scope.
@@ -202,20 +201,20 @@ class ARM64PromoteConstant : public ModulePass {
};
} // end anonymous namespace

char ARM64PromoteConstant::ID = 0;
char AArch64PromoteConstant::ID = 0;

namespace llvm {
void initializeARM64PromoteConstantPass(PassRegistry &);
void initializeAArch64PromoteConstantPass(PassRegistry &);
}

INITIALIZE_PASS_BEGIN(ARM64PromoteConstant, "arm64-promote-const",
"ARM64 Promote Constant Pass", false, false)
INITIALIZE_PASS_BEGIN(AArch64PromoteConstant, "aarch64-promote-const",
"AArch64 Promote Constant Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ARM64PromoteConstant, "arm64-promote-const",
"ARM64 Promote Constant Pass", false, false)
INITIALIZE_PASS_END(AArch64PromoteConstant, "aarch64-promote-const",
"AArch64 Promote Constant Pass", false, false)

ModulePass *llvm::createARM64PromoteConstantPass() {
return new ARM64PromoteConstant();
ModulePass *llvm::createAArch64PromoteConstantPass() {
return new AArch64PromoteConstant();
}

/// Check if the given type uses a vector type.
@@ -330,7 +329,7 @@ static bool shouldConvert(const Constant *Cst) {
}

Instruction *
ARM64PromoteConstant::findInsertionPoint(Value::user_iterator &Use) {
AArch64PromoteConstant::findInsertionPoint(Value::user_iterator &Use) {
// If this user is a phi, the insertion point is in the related
// incoming basic block.
PHINode *PhiInst = dyn_cast<PHINode>(*Use);
@@ -344,9 +343,9 @@ ARM64PromoteConstant::findInsertionPoint(Value::user_iterator &Use) {
return InsertionPoint;
}

bool ARM64PromoteConstant::isDominated(Instruction *NewPt,
Value::user_iterator &UseIt,
InsertionPoints &InsertPts) {
bool AArch64PromoteConstant::isDominated(Instruction *NewPt,
Value::user_iterator &UseIt,
InsertionPoints &InsertPts) {

DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
*NewPt->getParent()->getParent()).getDomTree();
@@ -371,9 +370,9 @@ bool ARM64PromoteConstant::isDominated(Instruction *NewPt,
return false;
}

bool ARM64PromoteConstant::tryAndMerge(Instruction *NewPt,
Value::user_iterator &UseIt,
InsertionPoints &InsertPts) {
bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
Value::user_iterator &UseIt,
InsertionPoints &InsertPts) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
*NewPt->getParent()->getParent()).getDomTree();
BasicBlock *NewBB = NewPt->getParent();
@@ -422,7 +421,7 @@ bool ARM64PromoteConstant::tryAndMerge(Instruction *NewPt,
return false;
}

void ARM64PromoteConstant::computeInsertionPoints(
void AArch64PromoteConstant::computeInsertionPoints(
Constant *Val, InsertionPointsPerFunc &InsPtsPerFunc) {
DEBUG(dbgs() << "** Compute insertion points **\n");
for (Value::user_iterator UseIt = Val->user_begin(),
@@ -464,9 +463,8 @@ void ARM64PromoteConstant::computeInsertionPoints(
}
}

bool
ARM64PromoteConstant::insertDefinitions(Constant *Cst,
InsertionPointsPerFunc &InsPtsPerFunc) {
bool AArch64PromoteConstant::insertDefinitions(
Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc) {
// We will create one global variable per Module.
DenseMap<Module *, GlobalVariable *> ModuleToMergedGV;
bool HasChanged = false;
@@ -533,13 +531,13 @@ ARM64PromoteConstant::insertDefinitions(Constant *Cst,
return HasChanged;
}

bool ARM64PromoteConstant::computeAndInsertDefinitions(Constant *Val) {
bool AArch64PromoteConstant::computeAndInsertDefinitions(Constant *Val) {
InsertionPointsPerFunc InsertPtsPerFunc;
computeInsertionPoints(Val, InsertPtsPerFunc);
return insertDefinitions(Val, InsertPtsPerFunc);
}

bool ARM64PromoteConstant::promoteConstant(Constant *Cst) {
bool AArch64PromoteConstant::promoteConstant(Constant *Cst) {
assert(Cst && "Given variable is not a valid constant.");

if (!shouldConvert(Cst))
@@ -553,7 +551,7 @@ bool ARM64PromoteConstant::promoteConstant(Constant *Cst) {
return computeAndInsertDefinitions(Cst);
}

bool ARM64PromoteConstant::runOnFunction(Function &F) {
bool AArch64PromoteConstant::runOnFunction(Function &F) {
// Look for instructions using constant vector. Promote that constant to a
// global variable. Create as few loads of this variable as possible and
// update the uses accordingly.
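
To make the pass's effect concrete: a constant vector with several users becomes one internal constant global plus a load at each computed insertion point. A hedged sketch of that rewrite for a single use; Module &M, Constant *Cst, Instruction *InsertPt, and User *U are assumed from the surrounding pass:

```cpp
// Sketch of what insertDefinitions() effectively does per use, not a
// verbatim excerpt: promote the constant, then load it near its user.
GlobalVariable *PromotedGV =
    new GlobalVariable(M, Cst->getType(), /*isConstant=*/true,
                       GlobalValue::InternalLinkage, Cst, "PromotedConst");
LoadInst *LoadedCst = new LoadInst(PromotedGV, "", InsertPt);
cast<Instruction>(U)->replaceUsesOfWith(Cst, LoadedCst);
```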