-
Notifications
You must be signed in to change notification settings - Fork 10.8k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[RISCV][GISel] First mask argument placed in v0 according to RISCV Vector CC #79343
Conversation
@llvm/pr-subscribers-llvm-globalisel @llvm/pr-subscribers-backend-risc-v Author: Michael Maitland (michaelmaitland) Changes: First mask argument placed in v0 according to RISCV Vector CC. Full diff: https://github.com/llvm/llvm-project/pull/79343.diff 3 Files Affected:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 26eac17ed24c9f3..89ce5b393e692a8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,6 +34,9 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
// Whether this is assigning args for a return.
bool IsRet;
+ // true if assignArg has been called for a mask argument, false otherwise.
+ bool AssignedFirstMaskArg = false;
+
public:
RISCVOutgoingValueAssigner(
RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -48,10 +51,17 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
const DataLayout &DL = MF.getDataLayout();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+ std::optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+ ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+ FirstMaskArgument = std::make_optional(ValNo);
+ AssignedFirstMaskArg = true;
+ }
+
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
*Subtarget.getTargetLowering(),
- /*FirstMaskArgument=*/std::nullopt))
+ FirstMaskArgument))
return true;
StackSize = State.getStackSize();
@@ -172,6 +182,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
// Whether this is assigning args from a return.
bool IsRet;
+ // true if assignArg has been called for a mask argument, false otherwise.
+ bool AssignedFirstMaskArg = false;
+
public:
RISCVIncomingValueAssigner(
RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -189,10 +202,16 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
if (LocVT.isScalableVector())
MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+ std::optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+ ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+ FirstMaskArgument = std::make_optional(ValNo);
+ AssignedFirstMaskArg = true;
+ }
+
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(),
- /*FirstMaskArgument=*/std::nullopt))
+ *Subtarget.getTargetLowering(), FirstMaskArgument))
return true;
StackSize = State.getStackSize();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
index 4df0a8f48cc8d0b..3c4cfaef4d5841d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -407,16 +407,16 @@ entry:
define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-LABEL: name: test_args_nxv64i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv64i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -425,16 +425,16 @@ entry:
define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-LABEL: name: test_args_nxv32i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv32i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -443,16 +443,16 @@ entry:
define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-LABEL: name: test_args_nxv16i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv16i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -461,16 +461,16 @@ entry:
define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-LABEL: name: test_args_nxv8i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv8i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -479,16 +479,16 @@ entry:
define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-LABEL: name: test_args_nxv4i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv4i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -497,16 +497,16 @@ entry:
define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-LABEL: name: test_args_nxv2i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv2i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -515,16 +515,16 @@ entry:
define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
; RV32-LABEL: name: test_args_nxv1i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv1i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -907,3 +907,63 @@ define void @test_args_nxv32b16(<vscale x 32 x bfloat> %a) {
entry:
ret void
}
+
+define void @test_args_nxv1i1_nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i1_nxv1i1
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i1_nxv1i1
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
+
+define void @test_args_nxv1i1_nxv1i32(<vscale x 1 x i1> %a, <vscale x 1 x i32> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i1_nxv1i32
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i1_nxv1i32
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
+
+define void @test_args_nxv1i32_nxv1i1(<vscale x 1 x i32> %a, <vscale x 1 x i1> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i32_nxv1i1
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i32_nxv1i1
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
index eec9969063c87a5..6385baa38aecfb3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -364,14 +364,14 @@ define <vscale x 64 x i1> @test_ret_nxv64i1() {
; RV32-LABEL: name: test_ret_nxv64i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv64i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 64 x i1> undef
}
@@ -380,14 +380,14 @@ define <vscale x 32 x i1> @test_ret_nxv32i1() {
; RV32-LABEL: name: test_ret_nxv32i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv32i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 32 x i1> undef
}
@@ -396,14 +396,14 @@ define <vscale x 16 x i1> @test_ret_nxv16i1() {
; RV32-LABEL: name: test_ret_nxv16i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv16i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 16 x i1> undef
}
@@ -412,14 +412,14 @@ define <vscale x 8 x i1> @test_ret_nxv8i1() {
; RV32-LABEL: name: test_ret_nxv8i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv8i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 8 x i1> undef
}
@@ -428,14 +428,14 @@ define <vscale x 4 x i1> @test_ret_nxv4i1() {
; RV32-LABEL: name: test_ret_nxv4i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv4i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 4 x i1> undef
}
@@ -444,14 +444,14 @@ define <vscale x 2 x i1> @test_ret_nxv2i1() {
; RV32-LABEL: name: test_ret_nxv2i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv2i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 2 x i1> undef
}
@@ -460,14 +460,14 @@ define <vscale x 1 x i1> @test_ret_nxv1i1() {
; RV32-LABEL: name: test_ret_nxv1i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv1i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 1 x i1> undef
}
|
✅ With the latest revision this PR passed the C/C++ code formatter.
std::optional<unsigned> FirstMaskArgument;
if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
    ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
  FirstMaskArgument = std::make_optional(ValNo);
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why do we need std::make_optional? Can't we do `FirstMaskArgument = ValNo`?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
updated.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
[RISCV][GISel] First mask argument placed in v0 according to RISCV Vector CC.