Skip to content

Commit

Permalink
[7/11][POC][Clang][RISCV] Define tuple type variant of vssseg2e32
Browse files Browse the repository at this point in the history
For the cover letter of this patch-set, please check out D146872.

Depends on D147912.

This is the 7th patch of the patch-set.

This patch is a proof-of-concept and will be extended to full coverage
in the future. Currently, the old non-tuple strided segment store is
not removed, and only signed integer strided segment store of NF=2,
EEW=32 is defined here.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D147913
  • Loading branch information
eopXD committed May 22, 2023
1 parent bf2511b commit f2ffdaa
Show file tree
Hide file tree
Showing 2 changed files with 82 additions and 0 deletions.
46 changes: 46 additions & 0 deletions clang/include/clang/Basic/riscv_vector.td
Expand Up @@ -1699,6 +1699,51 @@ multiclass RVVStridedSegLoadTuple<string op> {
}
}

// Defines tuple-type variants of the strided segment store builtins
// (vssseg<nf>e<eew>): the NF vector values are passed as a single tuple
// operand instead of NF separate vector arguments.
// Proof-of-concept only: covers signed integer, NF=2, EEW=32; the old
// non-tuple builtins are intentionally left in place (see commit message).
multiclass RVVStridedSegStoreTuple<string op> {
// POC: only the signed-integer base type "i" for now.
foreach type = ["i"] in {
// POC: a single EEW (32) per base type.
defvar eew = !cond(!eq(type, "i") : "32");
// nf = number of fields per segment; POC covers NF=2 only.
foreach nf = [2] in {
// Builtin name e.g. vssseg2e32_v_tuple; overloaded name drops the "_v";
// the underlying IR intrinsics are vssseg<nf> / vssseg<nf>_mask.
let Name = op # nf # "e" # eew # "_v_tuple",
OverloadedName = op # nf # "e" # eew # "_tuple",
IRName = op # nf,
MaskedIRName = op # nf # "_mask",
NF = nf,
// Stores produce no result, so there is no maskedoff operand and no
// policy variants (policy support is TODO, per the comment below).
HasMaskedOffOperand = false,
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
// Masked
// Builtin: (mask, ptr, stride, v_tuple, vl)
// Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
// Unmasked
// Builtin: (ptr, stride, v_tuple, vl)
// Intrinsic: (val0, val1, ..., ptr, stride, vl)
// Offset skips the leading mask operand in the masked form.
unsigned Offset = IsMasked ? 1 : 0;
// The tuple value being stored (an IR struct of NF scalable vectors).
llvm::Value *VTupleOperand = Ops[Offset + 2];

SmallVector<llvm::Value*, 12> Operands;
// Flatten the tuple into NF separate vector operands, as the IR
// intrinsic takes the fields individually.
for (unsigned I = 0; I < NF; ++I) {
llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
Operands.push_back(V);
}
Operands.push_back(Ops[Offset]); // Ptr
Operands.push_back(Ops[Offset + 1]); // Stride
if (IsMasked)
Operands.push_back(Ops[0]); // Mask
Operands.push_back(Ops[Offset + 3]); // VL

// The intrinsic is overloaded on the field vector type and the VL
// (XLen) type, e.g. llvm.riscv.vssseg2.nxv2i32.i64.
IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
}] in {
// Prototype "0Pet(Tuple:nf)v": void return, pointer-to-element,
// ptrdiff_t stride, tuple-of-vectors value (per the RVVBuiltin
// prototype string codes — mask/vl are appended automatically).
defvar T = "(Tuple:" # nf # ")";
def : RVVBuiltin<"v", "0Pet" # T # "v", type>;
}
}
}
}

// TODO: Extend for policy
let UnMaskedPolicyScheme = NonePolicy,
MaskedPolicyScheme = NonePolicy,
Expand All @@ -1712,6 +1757,7 @@ let UnMaskedPolicyScheme = NonePolicy,
MaskedPolicyScheme = NonePolicy,
IsTuple = true in {
defm : RVVUnitStridedSegStoreTuple<"vsseg">;
defm : RVVStridedSegStoreTuple<"vssseg">;
}


Expand Down
@@ -0,0 +1,36 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_tuple_i32m1
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
// Unmasked tuple-form strided segment store: per the CHECK lines above, the
// two fields of v_tuple are extracted and passed individually to
// @llvm.riscv.vssseg2.nxv2i32.i64 along with base, bstride, and vl.
// NOTE(review): parameter names are load-bearing for the autogenerated
// FileCheck assertions above — do not rename without re-running UTC.
void test_vssseg2e32_v_tuple_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
return __riscv_vssseg2e32_v_tuple_i32m1(base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_tuple_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
// Masked tuple-form strided segment store: per the CHECK lines above, this
// lowers to @llvm.riscv.vssseg2.mask.nxv2i32.i64 with the tuple fields
// passed individually and the mask inserted between stride and vl.
// NOTE(review): parameter names are load-bearing for the autogenerated
// FileCheck assertions above — do not rename without re-running UTC.
void test_vssseg2e32_v_tuple_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
return __riscv_vssseg2e32_v_tuple_i32m1_m(mask, base, bstride, v_tuple, vl);
}

0 comments on commit f2ffdaa

Please sign in to comment.