llvm/lib/Target/BPF/BPF.td (+4, -0)
@@ -27,6 +27,10 @@ def ALU32 : SubtargetFeature<"alu32", "HasAlu32", "true",
def DwarfRIS: SubtargetFeature<"dwarfris", "UseDwarfRIS", "true",
"Disable MCAsmInfo DwarfUsesRelocationsAcrossSections">;

def MisalignedMemAccess : SubtargetFeature<"allows-misaligned-mem-access",
"AllowsMisalignedMemAccess", "true",
"Allows misaligned memory access">;

def : Proc<"generic", []>;
def : Proc<"v1", []>;
def : Proc<"v2", []>;
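For context, the feature is enabled like any other BPF subtarget feature, e.g. through llc's -mattr option, mirroring the RUN lines of the test added at the end of this patch; input.ll and output.s below are placeholder file names:

llc -mtriple=bpfel -mattr=+allows-misaligned-mem-access -verify-machineinstrs input.ll -o output.s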
llvm/lib/Target/BPF/BPFISelLowering.cpp (+20, -0)
@@ -206,6 +206,26 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
HasJmp32 = STI.getHasJmp32();
HasJmpExt = STI.getHasJmpExt();
HasMovsx = STI.hasMovsx();

AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
}

bool BPFTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
MachineMemOperand::Flags,
unsigned *Fast) const {
// Misaligned accesses are only legal when the allows-misaligned-mem-access
// subtarget feature is enabled.
if (!AllowsMisalignedMemAccess)
return false;

// Only allow misaligned accesses for simple value types.
if (!VT.isSimple())
return false;

// When misaligned accesses are allowed, always report them to the caller as fast.
if (Fast)
*Fast = true;

return true;
}

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
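For background on why overriding this hook changes the emitted code: when SelectionDAG legalization sees a load or store whose alignment is below the type's natural alignment, it asks the target through allowsMisalignedMemoryAccesses whether the wide access may be kept. The sketch below is a simplified illustration, not the exact LLVM source, and the helper name is made up:

#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Hypothetical helper, roughly mirroring how target-independent legalization
// consults the hook: if the target accepts the misaligned access, the wide
// load/store is kept; otherwise it is expanded into smaller, naturally
// aligned accesses (the byte-by-byte sequences checked in the test below).
static bool canKeepMisalignedAccess(const TargetLowering &TLI, EVT VT,
                                    unsigned AddrSpace, Align Alignment) {
  // IsFast receives the target's "this access is fast" answer; the BPF
  // override added in this patch always sets it when the feature is on.
  unsigned IsFast = 0;
  return TLI.allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment,
                                            MachineMemOperand::MONone,
                                            &IsFast);
}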
llvm/lib/Target/BPF/BPFISelLowering.h (+7, -0)
@@ -32,6 +32,10 @@ class BPFTargetLowering : public TargetLowering {
// with the given GlobalAddress is legal.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
MachineMemOperand::Flags,
unsigned *) const override;

BPFTargetLowering::ConstraintType
getConstraintType(StringRef Constraint) const override;

@@ -61,6 +65,9 @@ class BPFTargetLowering : public TargetLowering {
bool HasJmpExt;
bool HasMovsx;

// Whether misaligned memory accesses are allowed.
bool AllowsMisalignedMemAccess;

SDValue LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
llvm/lib/Target/BPF/BPFSubtarget.cpp (+1, -0)
@@ -69,6 +69,7 @@ void BPFSubtarget::initializeEnvironment() {
HasStoreImm = false;
HasLoadAcqStoreRel = false;
HasGotox = false;
AllowsMisalignedMemAccess = false;
}

void BPFSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
llvm/lib/Target/BPF/BPFSubtarget.h (+6, -0)
@@ -63,6 +63,9 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
// whether we should enable MCAsmInfo DwarfUsesRelocationsAcrossSections
bool UseDwarfRIS;

// whether we allow misaligned memory accesses
bool AllowsMisalignedMemAccess;

// whether cpu v4 insns are enabled.
bool HasLdsx, HasMovsx, HasBswap, HasSdivSmod, HasGotol, HasStoreImm,
HasLoadAcqStoreRel, HasGotox;
@@ -87,6 +90,9 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
bool getHasJmp32() const { return HasJmp32; }
bool getHasAlu32() const { return HasAlu32; }
bool getUseDwarfRIS() const { return UseDwarfRIS; }
bool getAllowsMisalignedMemAccess() const {
return AllowsMisalignedMemAccess;
}
bool hasLdsx() const { return HasLdsx; }
bool hasMovsx() const { return HasMovsx; }
bool hasBswap() const { return HasBswap; }
llvm/test/CodeGen/BPF/unaligned_load_store.ll (+196, -0, new file)
@@ -0,0 +1,196 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5

; RUN: llc -mtriple=bpfel -mattr=+allows-misaligned-mem-access -verify-machineinstrs %s -o - \
; RUN: | FileCheck --check-prefixes=ALL,MISALIGN %s
; RUN: llc -mtriple=bpfeb -mattr=+allows-misaligned-mem-access -verify-machineinstrs %s -o - \
; RUN: | FileCheck --check-prefixes=ALL,MISALIGN %s

; RUN: llc -mtriple=bpfel -verify-machineinstrs %s -o - \
; RUN: | FileCheck --check-prefixes=ALL,ALIGN %s
; RUN: llc -mtriple=bpfeb -verify-machineinstrs %s -o - \
; RUN: | FileCheck --check-prefixes=ALL,ALIGN %s
; NOTE:
; This test verifies that the new +allows-misaligned-mem-access
; feature allows the BPF backend to emit direct unaligned load/store
; instructions instead of byte-by-byte emulation sequences.

; ---------------------------------------------------------------------
; i8 load
; ---------------------------------------------------------------------
define i8 @test_load_i8(i8* %p) {
; ALL-LABEL: test_load_i8:
; ALL: # %bb.0:
; ALL-NEXT: w{{[0-9]+}} = *(u8 *)(r1 + 0)
; ALL-NEXT: exit
%v = load i8, i8* %p, align 1
ret i8 %v
}

; ---------------------------------------------------------------------
; i8 store
; ---------------------------------------------------------------------
define void @test_store_i8(i8* %p, i8 %v) {
; ALL-LABEL: test_store_i8:
; ALL: # %bb.0:
; ALL-NEXT: *(u8 *)(r1 + 0) = w{{[0-9]+}}
; ALL-NEXT: exit
store i8 %v, i8* %p, align 1
ret void
}

; ---------------------------------------------------------------------
; i16 load
; ---------------------------------------------------------------------
define i16 @test_load_i16(i16* %p) {
; MISALIGN-LABEL: test_load_i16:
; MISALIGN: # %bb.0:
; MISALIGN: w{{[0-9]+}} = *(u16 *)(r1 + 0)
; MISALIGN: exit
;
; ALIGN-LABEL: test_load_i16:
; ALIGN: # %bb.0:
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
; ALIGN-DAG: w{{[0-9]+}} <<= 8
; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
; ALIGN: exit
%v = load i16, i16* %p, align 1
ret i16 %v
}

; ---------------------------------------------------------------------
; i16 store
; ---------------------------------------------------------------------
define void @test_store_i16(i16* %p, i16 %v) {
; MISALIGN-LABEL: test_store_i16:
; MISALIGN: # %bb.0:
; MISALIGN: *(u16 *)(r1 + 0) = w{{[0-9]+}}
; MISALIGN: exit
;
; ALIGN-LABEL: test_store_i16:
; ALIGN: # %bb.0:
; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} >>= 8
; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
; ALIGN: exit
store i16 %v, i16* %p, align 1
ret void
}

; ---------------------------------------------------------------------
; i32 load
; ---------------------------------------------------------------------

define i32 @test_load_i32(i32* %p) {
; MISALIGN-LABEL: test_load_i32:
; MISALIGN: # %bb.0:
; MISALIGN: w{{[0-9]+}} = *(u32 *)(r1 + 0)
; MISALIGN: exit
;
; ALIGN-LABEL: test_load_i32:
; ALIGN: # %bb.0:
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
; ALIGN-DAG: w{{[0-9]+}} <<= 8
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 2)
; ALIGN-DAG: w{{[0-9]+}} <<= 16
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 3)
; ALIGN-DAG: w{{[0-9]+}} <<= 24
; ALIGN: exit
%v = load i32, i32* %p, align 1
ret i32 %v
}

; ---------------------------------------------------------------------
; i32 store
; ---------------------------------------------------------------------

define void @test_store_i32(i32* %p, i32 %v) {
; MISALIGN-LABEL: test_store_i32:
; MISALIGN: # %bb.0:
; MISALIGN: *(u32 *)(r1 + 0) = w{{[0-9]+}}
; MISALIGN: exit
;
; ALIGN-LABEL: test_store_i32:
; ALIGN: # %bb.0:
; ALIGN-DAG: w{{[0-9]+}} = w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} >>= 24
; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} = w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} >>= 16
; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
; ALIGN-DAG: *(u8 *)(r1 + 2) = w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} >>= 8
; ALIGN-DAG: *(u8 *)(r1 + 3) = w{{[0-9]+}}
; ALIGN: exit
store i32 %v, i32* %p, align 1
ret void
}

; ---------------------------------------------------------------------
; i64 load
; ---------------------------------------------------------------------

define i64 @test_load_i64(i64* %p) {
; MISALIGN-LABEL: test_load_i64:
; MISALIGN: # %bb.0:
; MISALIGN: r0 = *(u64 *)(r1 + 0)
; MISALIGN: exit
;
; ALIGN-LABEL: test_load_i64:
; ALIGN: # %bb.0:
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
; ALIGN-DAG: r{{[0-9]+}} <<= 8
; ALIGN-DAG: r{{[0-9]+}} |= r{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 2)
; ALIGN-DAG: r{{[0-9]+}} <<= 16
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 3)
; ALIGN-DAG: r{{[0-9]+}} <<= 24
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 4)
; ALIGN-DAG: w{{[0-9]+}} <<= 8
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 5)
; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 6)
; ALIGN-DAG: w{{[0-9]+}} <<= 16
; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 7)
; ALIGN-DAG: w{{[0-9]+}} <<= 24
; ALIGN-DAG: r{{[0-9]+}} <<= 32
; ALIGN: exit
%v = load i64, i64* %p, align 1
ret i64 %v
}

; ---------------------------------------------------------------------
; i64 store
; ---------------------------------------------------------------------

define void @test_store_i64(i64* %p, i64 %v) {
; MISALIGN-LABEL: test_store_i64:
; MISALIGN: # %bb.0:
; MISALIGN: *(u64 *)(r1 + 0) = r2
; MISALIGN: exit
;
; ALIGN-LABEL: test_store_i64:
; ALIGN: # %bb.0:
; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} = r{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 56
; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 48
; ALIGN-DAG: *(u8 *)(r1 + 2) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 40
; ALIGN-DAG: *(u8 *)(r1 + 3) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 32
; ALIGN-DAG: *(u8 *)(r1 + 4) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 24
; ALIGN-DAG: *(u8 *)(r1 + 5) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 16
; ALIGN-DAG: *(u8 *)(r1 + 6) = w{{[0-9]+}}
; ALIGN-DAG: r{{[0-9]+}} >>= 8
; ALIGN-DAG: *(u8 *)(r1 + 7) = w{{[0-9]+}}
; ALIGN: exit
store i64 %v, i64* %p, align 1
ret void
}
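For illustration only (not part of this patch), packed structs are a common source of the align-1 accesses this test exercises; a hypothetical C snippet that yields such IR when compiled for the BPF target:

/* Hypothetical example, not from the patch: the packed attribute places
 * 'value' at offset 1, so the field access becomes an align-1 i32 load,
 * i.e. exactly the kind of access checked above. */
struct __attribute__((packed)) hdr {
  unsigned char tag;
  unsigned int value; /* 4 bytes at offset 1: misaligned */
};

unsigned int read_value(const struct hdr *h) {
  return h->value; /* lowers to: load i32, ptr ..., align 1 */
}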