2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/pr56110.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem | FileCheck %s

define void @foo_set(ptr nocapture noundef %a, i32 noundef %v) {
; CHECK-LABEL: foo_set:
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll
@@ -36,7 +36,7 @@ entry:
}

; CHECK-NOT: .option push
-define void @test5() "target-features"="+fast-unaligned-access" {
+define void @test5() "target-features"="+unaligned-scalar-mem" {
; CHECK-LABEL: test5
; CHECK-NOT: .option pop
entry:
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+fast-unaligned-access -target-abi=ilp32 \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+unaligned-vector-mem -target-abi=ilp32 \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+fast-unaligned-access -target-abi=lp64 \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-vector-mem -target-abi=lp64 \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define void @constant_forward_stride(ptr %s, ptr %d) {
(file name not rendered for this diff)
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV64
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+fast-unaligned-access -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+unaligned-vector-mem -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN

; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh,+zve64f,+zvl128b,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,ZVE64F

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=SLOW,RV32-SLOW
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=SLOW,RV64-SLOW
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=FAST,RV32-FAST
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=FAST,RV64-FAST

define <4 x i32> @load_v4i32_align1(ptr %ptr) {
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+v \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST

; ----------------------------------------------------------------------
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
@@ -3,9 +3,9 @@
; RUN: -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v < %s \
; RUN: -verify-machineinstrs | FileCheck %s
-; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s
-; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s


4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -3,9 +3,9 @@
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV64I %s
-; RUN: llc -mtriple=riscv32 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
-; RUN: llc -mtriple=riscv64 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s

; A collection of cases showing codegen for unaligned loads and stores
30 changes: 15 additions & 15 deletions llvm/unittests/ProfileData/MemProfTest.cpp
@@ -183,13 +183,13 @@ TEST(MemProf, FillsValue) {
// We expect 4 records. We attach alloc site data to foo and bar, i.e.
// all frames bottom up until we find a non-inline frame. We attach call site
// data to bar, xyz and abc.
-ASSERT_EQ(Records.size(), 4U);
+ASSERT_THAT(Records, SizeIs(4));

// Check the memprof record for foo.
const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
ASSERT_EQ(Records.count(FooId), 1U);
const MemProfRecord &Foo = Records[FooId];
-ASSERT_EQ(Foo.AllocSites.size(), 1U);
+ASSERT_THAT(Foo.AllocSites, SizeIs(1));
EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
FrameContains("foo", 5U, 30U, true));
@@ -205,7 +205,7 @@ TEST(MemProf, FillsValue) {
const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
ASSERT_EQ(Records.count(BarId), 1U);
const MemProfRecord &Bar = Records[BarId];
-ASSERT_EQ(Bar.AllocSites.size(), 1U);
+ASSERT_THAT(Bar.AllocSites, SizeIs(1));
EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
FrameContains("foo", 5U, 30U, true));
@@ -216,17 +216,17 @@ TEST(MemProf, FillsValue) {
EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
FrameContains("abc", 5U, 30U, false));

-ASSERT_EQ(Bar.CallSites.size(), 1U);
-ASSERT_EQ(Bar.CallSites[0].size(), 2U);
+ASSERT_THAT(Bar.CallSites, SizeIs(1));
+ASSERT_THAT(Bar.CallSites[0], SizeIs(2));
EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));

// Check the memprof record for xyz.
const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
ASSERT_EQ(Records.count(XyzId), 1U);
const MemProfRecord &Xyz = Records[XyzId];
-ASSERT_EQ(Xyz.CallSites.size(), 1U);
-ASSERT_EQ(Xyz.CallSites[0].size(), 2U);
+ASSERT_THAT(Xyz.CallSites, SizeIs(1));
+ASSERT_THAT(Xyz.CallSites[0], SizeIs(2));
// Expect the entire frame even though in practice we only need the first
// entry here.
EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
@@ -237,8 +237,8 @@ TEST(MemProf, FillsValue) {
ASSERT_EQ(Records.count(AbcId), 1U);
const MemProfRecord &Abc = Records[AbcId];
EXPECT_TRUE(Abc.AllocSites.empty());
-ASSERT_EQ(Abc.CallSites.size(), 1U);
-ASSERT_EQ(Abc.CallSites[0].size(), 2U);
+ASSERT_THAT(Abc.CallSites, SizeIs(1));
+ASSERT_THAT(Abc.CallSites[0], SizeIs(2));
EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
}
@@ -393,9 +393,9 @@ TEST(MemProf, SymbolizationFilter) {
Records.push_back(KeyRecordPair.second);
}

-ASSERT_EQ(Records.size(), 1U);
-ASSERT_EQ(Records[0].AllocSites.size(), 1U);
-ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 1U);
+ASSERT_THAT(Records, SizeIs(1));
+ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
+ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(1));
EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
FrameContains("foo", 5U, 30U, false));
}
@@ -427,9 +427,9 @@ TEST(MemProf, BaseMemProfReader) {
Records.push_back(KeyRecordPair.second);
}

-ASSERT_EQ(Records.size(), 1U);
-ASSERT_EQ(Records[0].AllocSites.size(), 1U);
-ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 2U);
+ASSERT_THAT(Records, SizeIs(1));
+ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
+ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
FrameContains("foo", 20U, 5U, true));
EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
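A note on the assertion style adopted in these MemProfTest updates: `ASSERT_THAT(Container, SizeIs(N))` produces richer failure output than `ASSERT_EQ(Container.size(), N)`, because the matcher prints the container's elements when the size check fails. A minimal self-contained sketch of the difference, using standard GoogleTest/GoogleMock APIs (the test name and data below are hypothetical, not part of the patch):

```cpp
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::SizeIs;

// Hypothetical example; link against gtest_main (or provide your own main).
TEST(AssertionStyle, SizeIsPrintsContainerOnFailure) {
  std::vector<int> Records = {10, 20, 30};

  // Old style: on failure the message only shows the two size values.
  ASSERT_EQ(Records.size(), 3U);

  // New style: on failure the message also prints {10, 20, 30},
  // which is what makes the matcher form preferable in these tests.
  ASSERT_THAT(Records, SizeIs(3));
}
```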
12 changes: 10 additions & 2 deletions llvm/utils/TableGen/RISCVTargetDefEmitter.cpp
@@ -60,11 +60,19 @@ static void EmitRISCVTargetDef(RecordKeeper &RK, raw_ostream &OS) {
if (MArch.empty())
MArch = getMArch(*Rec);

-const bool FastUnalignedAccess =
+bool FastScalarUnalignedAccess =
any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
-return Feature->getValueAsString("Name") == "fast-unaligned-access";
+return Feature->getValueAsString("Name") == "unaligned-scalar-mem";
});
+
+bool FastVectorUnalignedAccess =
+any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
+return Feature->getValueAsString("Name") == "unaligned-vector-mem";
+});
+
+bool FastUnalignedAccess =
+FastScalarUnalignedAccess && FastVectorUnalignedAccess;

OS << "PROC(" << Rec->getName() << ", "
<< "{\"" << Rec->getValueAsString("Name") << "\"}, "
<< "{\"" << MArch << "\"}, " << FastUnalignedAccess << ")\n";
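For context on the emitter change above: after the feature split, `FastUnalignedAccess` is the conjunction of the two renamed features, so a processor is reported as fast for unaligned access only when it supports both the scalar and the vector variant. A standalone sketch of that logic follows (plain C++ with hypothetical data, not the TableGen emitter itself):

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Returns true if Name appears in the feature list, mirroring the
// any_of scan the emitter performs over each processor record.
static bool hasFeature(const std::vector<std::string> &Features,
                       const std::string &Name) {
  return std::any_of(Features.begin(), Features.end(),
                     [&](const std::string &F) { return F == Name; });
}

int main() {
  // Hypothetical feature list for one processor record.
  std::vector<std::string> Features = {"unaligned-scalar-mem",
                                       "unaligned-vector-mem"};

  // Fast unaligned access is claimed only when both variants are present;
  // dropping either feature flips the result to false.
  bool FastUnalignedAccess = hasFeature(Features, "unaligned-scalar-mem") &&
                             hasFeature(Features, "unaligned-vector-mem");
  std::cout << "FastUnalignedAccess = " << FastUnalignedAccess << "\n";
}
```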
324 changes: 174 additions & 150 deletions mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td

Large diffs are not rendered by default.