diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2d63774c75e37..84df98b8a613c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -10500,14 +10500,14 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   CLI.Ins.clear();
   Type *OrigRetTy = CLI.RetTy;
   SmallVector<EVT, 4> RetTys;
-  SmallVector<uint64_t, 4> Offsets;
+  SmallVector<TypeSize, 4> Offsets;
   auto &DL = CLI.DAG.getDataLayout();
-  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
+  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
 
   if (CLI.IsPostTypeLegalization) {
     // If we are lowering a libcall after legalization, split the return type.
     SmallVector<EVT, 4> OldRetTys;
-    SmallVector<uint64_t, 4> OldOffsets;
+    SmallVector<TypeSize, 4> OldOffsets;
     RetTys.swap(OldRetTys);
     Offsets.swap(OldOffsets);
 
@@ -10519,7 +10519,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
       RetTys.append(NumRegs, RegisterVT);
       for (unsigned j = 0; j != NumRegs; ++j)
-        Offsets.push_back(Offset + j * RegisterVTByteSZ);
+        Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
     }
   }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78385a80b47eb..78e8700a9feff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -86,3 +86,79 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x i32> %x) {
   %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
   ret <vscale x 32 x i32> %a
 }
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    call callee_tuple_return
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    vmv2r.v v8, v10
+; RV32-NEXT:    vmv2r.v v10, v12
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    call callee_tuple_return
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    vmv2r.v v8, v10
+; RV64-NEXT:    vmv2r.v v10, v12
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+  %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
+  %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    vmv2r.v v8, v10
+; RV32-NEXT:    vmv2r.v v10, v12
+; RV32-NEXT:    call callee_tuple_argument
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    vmv2r.v v8, v10
+; RV64-NEXT:    vmv2r.v v10, v12
+; RV64-NEXT:    call callee_tuple_argument
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
+  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
+  %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
+  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
+  call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+  ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
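
Why the type change matters (a sketch, not part of the patch): for an aggregate
of scalable vectors such as {<vscale x 4 x i32>, <vscale x 4 x i32>}, the second
element sits at a byte offset of vscale x 16, which a plain uint64_t cannot
represent, so ComputeValueVTs must hand back TypeSize offsets. A minimal
illustration, assuming only llvm/Support/TypeSize.h (the function names below
are made up for the example):

    #include "llvm/Support/TypeSize.h"
    #include <cassert>
    using namespace llvm;

    // Byte offset of element 1 in {<vscale x 4 x i32>, <vscale x 4 x i32>}:
    // element 0 occupies vscale x 16 bytes, so element 1 starts there.
    TypeSize secondElementOffset() { return TypeSize::getScalable(16); }

    void demo() {
      TypeSize Off = secondElementOffset();
      assert(Off.isScalable());
      // Off.getFixedValue() would assert here: a scalable size has no single
      // fixed byte value, which is why SmallVector<uint64_t, 4> could not
      // hold these offsets. The post-type-legalization libcall path still
      // deals in fixed register sizes, hence the explicit
      // TypeSize::getFixed(...) in the patch.
    }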