22 changes: 11 additions & 11 deletions llvm/test/CodeGen/RISCV/inline-asm.ll
@@ -24,7 +24,7 @@ define i32 @constraint_r(i32 %a) nounwind {
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
%1 = load i32, i32* @gi
%1 = load i32, ptr @gi
%2 = tail call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %1)
ret i32 %2
}
@@ -43,12 +43,12 @@ define i32 @constraint_i(i32 %a) nounwind {
; RV64I-NEXT: addi a0, a0, 113
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
%1 = load i32, i32* @gi
%1 = load i32, ptr @gi
%2 = tail call i32 asm "addi $0, $1, $2", "=r,r,i"(i32 %a, i32 113)
ret i32 %2
}

define void @constraint_m(i32* %a) nounwind {
define void @constraint_m(ptr %a) nounwind {
; RV32I-LABEL: constraint_m:
; RV32I: # %bb.0:
; RV32I-NEXT: #APP
@@ -60,11 +60,11 @@ define void @constraint_m(i32* %a) nounwind {
; RV64I-NEXT: #APP
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
call void asm sideeffect "", "=*m"(i32* elementtype(i32) %a)
call void asm sideeffect "", "=*m"(ptr elementtype(i32) %a)
ret void
}

define i32 @constraint_m2(i32* %a) nounwind {
define i32 @constraint_m2(ptr %a) nounwind {
; RV32I-LABEL: constraint_m2:
; RV32I: # %bb.0:
; RV32I-NEXT: #APP
@@ -78,7 +78,7 @@ define i32 @constraint_m2(i32* %a) nounwind {
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
%1 = tail call i32 asm "lw $0, $1", "=r,*m"(i32* elementtype(i32) %a)
%1 = tail call i32 asm "lw $0, $1", "=r,*m"(ptr elementtype(i32) %a)
ret i32 %1
}

@@ -150,7 +150,7 @@ define void @constraint_K() nounwind {
ret void
}

define void @constraint_A(i8* %a) nounwind {
define void @constraint_A(ptr %a) nounwind {
; RV32I-LABEL: constraint_A:
; RV32I: # %bb.0:
; RV32I-NEXT: #APP
@@ -170,8 +170,8 @@ define void @constraint_A(i8* %a) nounwind {
; RV64I-NEXT: lb s1, 0(a0)
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
tail call void asm sideeffect "sb s0, $0", "*A"(i8* elementtype(i8) %a)
tail call void asm sideeffect "lb s1, $0", "*A"(i8* elementtype(i8) %a)
tail call void asm sideeffect "sb s0, $0", "*A"(ptr elementtype(i8) %a)
tail call void asm sideeffect "lb s1, $0", "*A"(ptr elementtype(i8) %a)
ret void
}

@@ -261,7 +261,7 @@ define void @operand_global() nounwind {
; RV64I-NEXT: .8byte gi
; RV64I-NEXT: #NO_APP
; RV64I-NEXT: ret
tail call void asm sideeffect ".8byte $0", "i"(i32* @gi)
tail call void asm sideeffect ".8byte $0", "i"(ptr @gi)
ret void
}

@@ -283,7 +283,7 @@ define void @operand_block_address() nounwind {
; RV64I-NEXT: .Ltmp0: # Block address taken
; RV64I-NEXT: # %bb.1: # %bb
; RV64I-NEXT: ret
call void asm sideeffect "j $0", "i"(i8* blockaddress(@operand_block_address, %bb))
call void asm sideeffect "j $0", "i"(ptr blockaddress(@operand_block_address, %bb))
br label %bb
bb:
ret void
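With opaque pointers the pointee type lives solely in the elementtype attribute on indirect ("*m", "*A") asm operands; the ptr itself says nothing about what it addresses. A minimal sketch of the pattern these constraint tests exercise (hypothetical function, not part of the diff):

define i32 @load_via_asm(ptr %p) {
  ; elementtype(i32) tells the backend the indirect operand addresses an i32
  %v = call i32 asm "lw $0, $1", "=r,*m"(ptr elementtype(i32) %p)
  ret i32 %v
}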
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
@@ -60,7 +60,7 @@ define dso_local void @handler() nounwind {
; CHECK-RV32-FD-NEXT: tail write@plt
entry:
%call = tail call i32 @read(i32 8196)
tail call void bitcast (void (...)* @callee to void ()*)()
tail call void @callee()
tail call void @write(i32 %call)
ret void
}
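The handler change above is the other recurring rewrite in this series: a call through a mismatched prototype no longer needs a constant bitcast, because every function pointer is just ptr. The same shape in isolation (assumed declaration):

declare void @callee(...)

define void @invoke() {
  ; formerly: tail call void bitcast (void (...)* @callee to void ()*)()
  tail call void @callee()
  ret void
}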
36 changes: 18 additions & 18 deletions llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
@@ -73,10 +73,10 @@ define void @foo_i32() nounwind #0 {
; CHECK-RV32IFD-NEXT: lw a1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 16
; CHECK-RV32IFD-NEXT: mret
%1 = load i32, i32* @a
%2 = load i32, i32* @b
%1 = load i32, ptr @a
%2 = load i32, ptr @b
%add = add nsw i32 %2, %1
store i32 %add, i32* @c
store i32 %add, ptr @c
ret void
}

@@ -150,10 +150,10 @@ define void @foo_fp_i32() nounwind #1 {
; CHECK-RV32IFD-NEXT: lw a1, 0(sp) # 4-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 16
; CHECK-RV32IFD-NEXT: mret
%1 = load i32, i32* @a
%2 = load i32, i32* @b
%1 = load i32, ptr @a
%2 = load i32, ptr @b
%add = add nsw i32 %2, %1
store i32 %add, i32* @c
store i32 %add, ptr @c
ret void
}

@@ -244,10 +244,10 @@ define void @foo_float() nounwind #0 {
; CHECK-RV32IFD-NEXT: fld ft1, 8(sp) # 8-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 32
; CHECK-RV32IFD-NEXT: mret
%1 = load float, float* @e
%2 = load float, float* @f
%1 = load float, ptr @e
%2 = load float, ptr @f
%add = fadd float %1, %2
store float %add, float* @d
store float %add, ptr @d
ret void
}

@@ -350,10 +350,10 @@ define void @foo_fp_float() nounwind #1 {
; CHECK-RV32IFD-NEXT: fld ft1, 0(sp) # 8-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 32
; CHECK-RV32IFD-NEXT: mret
%1 = load float, float* @e
%2 = load float, float* @f
%1 = load float, ptr @e
%2 = load float, ptr @f
%add = fadd float %1, %2
store float %add, float* @d
store float %add, ptr @d
ret void
}

@@ -540,10 +540,10 @@ define void @foo_double() nounwind #0 {
; CHECK-RV32IFD-NEXT: fld ft1, 8(sp) # 8-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 32
; CHECK-RV32IFD-NEXT: mret
%1 = load double, double* @h
%2 = load double, double* @i
%1 = load double, ptr @h
%2 = load double, ptr @i
%add = fadd double %1, %2
store double %add, double* @g
store double %add, ptr @g
ret void
}

@@ -740,10 +740,10 @@ define void @foo_fp_double() nounwind #1 {
; CHECK-RV32IFD-NEXT: fld ft1, 0(sp) # 8-byte Folded Reload
; CHECK-RV32IFD-NEXT: addi sp, sp, 32
; CHECK-RV32IFD-NEXT: mret
%1 = load double, double* @h
%2 = load double, double* @i
%1 = load double, ptr @h
%2 = load double, ptr @i
%add = fadd double %1, %2
store double %add, double* @g
store double %add, ptr @g
ret void
}

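For context, the #0/#1 attribute groups these foo_* tests reference are elided from the diff; the mret returns in the CHECK lines come from the RISC-V interrupt function attribute. An assumed sketch of such a handler (the exact attribute string is a guess at the elided groups):

@counter = global i32 0

define void @handler() #0 {
  store i32 1, ptr @counter
  ret void                    ; lowered to mret for interrupt functions
}

attributes #0 = { "interrupt"="machine" }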
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/interrupt-attr.ll
@@ -540,7 +540,7 @@ define void @foo_with_call() #2 {
; CHECK-RV64-FD-NEXT: fld fs11, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: addi sp, sp, 384
; CHECK-RV64-FD-NEXT: mret
%call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)()
%call = call i32 @otherfoo()
ret void
}

@@ -1056,7 +1056,7 @@ define void @foo_fp_with_call() #3 {
; CHECK-RV64-FD-NEXT: fld fs11, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: addi sp, sp, 400
; CHECK-RV64-FD-NEXT: mret
%call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)()
%call = call i32 @otherfoo()
ret void
}

14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/isel-optnone.ll
@@ -1,21 +1,21 @@
; REQUIRES: asserts
; RUN: llc < %s -O0 -mtriple=riscv64 -debug-only=isel 2>&1 | FileCheck %s

define i32* @fooOptnone(i32* %p, i32* %q, i32** %z) #0 {
define ptr @fooOptnone(ptr %p, ptr %q, ptr %z) #0 {
; CHECK-NOT: Changing optimization level for Function fooOptnone
; CHECK-NOT: Restoring optimization level for Function fooOptnone

entry:
%r = load i32, i32* %p
%s = load i32, i32* %q
%y = load i32*, i32** %z
%r = load i32, ptr %p
%s = load i32, ptr %q
%y = load ptr, ptr %z

%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
%t2 = getelementptr i32, i32* %y, i32 1
%t3 = getelementptr i32, i32* %t2, i32 %t1
%t2 = getelementptr i32, ptr %y, i32 1
%t3 = getelementptr i32, ptr %t2, i32 %t1

ret i32* %t3
ret ptr %t3

}

24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/jumptable.ll
@@ -8,7 +8,7 @@
; RUN: llc -mtriple=riscv64 -code-model=medium -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I-MEDIUM

define void @below_threshold(i32 %in, i32* %out) nounwind {
define void @below_threshold(i32 %in, ptr %out) nounwind {
; RV32I-SMALL-LABEL: below_threshold:
; RV32I-SMALL: # %bb.0: # %entry
; RV32I-SMALL-NEXT: li a2, 2
@@ -146,22 +146,22 @@ entry:
i32 4, label %bb4
]
bb1:
store i32 4, i32* %out
store i32 4, ptr %out
br label %exit
bb2:
store i32 3, i32* %out
store i32 3, ptr %out
br label %exit
bb3:
store i32 2, i32* %out
store i32 2, ptr %out
br label %exit
bb4:
store i32 1, i32* %out
store i32 1, ptr %out
br label %exit
exit:
ret void
}

define void @above_threshold(i32 %in, i32* %out) nounwind {
define void @above_threshold(i32 %in, ptr %out) nounwind {
; RV32I-SMALL-LABEL: above_threshold:
; RV32I-SMALL: # %bb.0: # %entry
; RV32I-SMALL-NEXT: addi a0, a0, -1
@@ -311,22 +311,22 @@ entry:
i32 6, label %bb6
]
bb1:
store i32 4, i32* %out
store i32 4, ptr %out
br label %exit
bb2:
store i32 3, i32* %out
store i32 3, ptr %out
br label %exit
bb3:
store i32 2, i32* %out
store i32 2, ptr %out
br label %exit
bb4:
store i32 1, i32* %out
store i32 1, ptr %out
br label %exit
bb5:
store i32 100, i32* %out
store i32 100, ptr %out
br label %exit
bb6:
store i32 200, i32* %out
store i32 200, ptr %out
br label %exit
exit:
ret void
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/large-stack.ll
@@ -115,7 +115,7 @@ define void @test_emergency_spill_slot(i32 %a) {
; RV32I-WITHFP-NEXT: addi sp, sp, 2032
; RV32I-WITHFP-NEXT: ret
%data = alloca [ 100000 x i32 ] , align 4
%ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %data, i32 0, i32 80000
%ptr = getelementptr inbounds [100000 x i32], ptr %data, i32 0, i32 80000
%1 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm sideeffect "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r"()
%asmresult0 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 0
%asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 1
@@ -132,7 +132,7 @@ define void @test_emergency_spill_slot(i32 %a) {
%asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 12
%asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 13
%asmresult14 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 14
store volatile i32 %a, i32* %ptr
store volatile i32 %a, ptr %ptr
tail call void asm sideeffect "nop", "r,r,r,r,r,r,r,r,r,r,r,r,r,r,r"(i32 %asmresult0, i32 %asmresult1, i32 %asmresult2, i32 %asmresult3, i32 %asmresult4, i32 %asmresult5, i32 %asmresult6, i32 %asmresult7, i32 %asmresult8, i32 %asmresult9, i32 %asmresult10, i32 %asmresult11, i32 %asmresult12, i32 %asmresult13, i32 %asmresult14)
ret void
}
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/RISCV/legalize-fneg.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64 %s

define void @test1(float* %a, float* %b) nounwind {
define void @test1(ptr %a, ptr %b) nounwind {
; RV32-LABEL: test1:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a1, 0(a1)
@@ -21,13 +21,13 @@ define void @test1(float* %a, float* %b) nounwind {
; RV64-NEXT: sw a1, 0(a0)
; RV64-NEXT: ret
entry:
%0 = load float, float* %b
%0 = load float, ptr %b
%neg = fneg float %0
store float %neg, float* %a
store float %neg, ptr %a
ret void
}

define void @test2(double* %a, double* %b) nounwind {
define void @test2(ptr %a, ptr %b) nounwind {
; RV32-LABEL: test2:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a2, 4(a1)
@@ -47,13 +47,13 @@ define void @test2(double* %a, double* %b) nounwind {
; RV64-NEXT: sd a1, 0(a0)
; RV64-NEXT: ret
entry:
%0 = load double, double* %b
%0 = load double, ptr %b
%neg = fneg double %0
store double %neg, double* %a
store double %neg, ptr %a
ret void
}

define void @test3(fp128* %a, fp128* %b) nounwind {
define void @test3(ptr %a, ptr %b) nounwind {
; RV32-LABEL: test3:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a2, 4(a1)
@@ -79,8 +79,8 @@ define void @test3(fp128* %a, fp128* %b) nounwind {
; RV64-NEXT: sd a2, 8(a0)
; RV64-NEXT: ret
entry:
%0 = load fp128, fp128* %b
%0 = load fp128, ptr %b
%neg = fneg fp128 %0
store fp128 %neg, fp128* %a
store fp128 %neg, ptr %a
ret void
}
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
@@ -506,7 +506,7 @@ define i64 @llround_f64(double %a) nounwind {

; Atomics libcalls:

define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32-ALL-LABEL: atomic_load_i8_unordered:
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
@@ -526,11 +526,11 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
%1 = load atomic i8, i8* %a unordered, align 1
%1 = load atomic i8, ptr %a unordered, align 1
ret i8 %1
}

define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV32-ALL-LABEL: atomicrmw_add_i16_release:
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
@@ -550,11 +550,11 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b release
%1 = atomicrmw add ptr %a, i16 %b release
ret i16 %1
}

define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32-ALL-LABEL: atomicrmw_xor_i32_acq_rel:
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
@@ -574,11 +574,11 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b acq_rel
%1 = atomicrmw xor ptr %a, i32 %b acq_rel
ret i32 %1
}

define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32-ALL-LABEL: atomicrmw_nand_i64_seq_cst:
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
@@ -598,7 +598,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b seq_cst
%1 = atomicrmw nand ptr %a, i64 %b seq_cst
ret i64 %1
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
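In all four atomics tests the value type is named on the instruction itself, so the untyped ptr operand needs no cast. A minimal sketch of the two shapes exercised above (hypothetical functions):

define i8 @unordered_load(ptr %a) {
  %v = load atomic i8, ptr %a unordered, align 1
  ret i8 %v
}

define i32 @rmw_xor(ptr %a, i32 %b) {
  %old = atomicrmw xor ptr %a, i32 %b acq_rel
  ret i32 %old
}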
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
@@ -44,7 +44,7 @@ define void @use_frame_base_reg() {
%va = alloca i8, align 4
%va1 = alloca i8, align 4
%large = alloca [ 100000 x i8 ]
%argp.cur = load volatile i8, i8* %va, align 4
%argp.next = load volatile i8, i8* %va1, align 4
%argp.cur = load volatile i8, ptr %va, align 4
%argp.next = load volatile i8, ptr %va1, align 4
ret void
}
@@ -28,7 +28,7 @@
; } while (j < 8193);
; }

@flags2 = internal global [8193 x i8] zeroinitializer, align 32 ; <[8193 x i8]*> [#uses=1]
@flags2 = internal global [8193 x i8] zeroinitializer, align 32 ; <ptr> [#uses=1]

define void @test(i32 signext %i) nounwind {
; RV32-LABEL: test:
@@ -78,8 +78,8 @@ bb:
%tmp.15 = mul i32 %indvar, %i
%tmp.16 = add i32 %tmp.15, %k_addr.012
%gep.upgrd.1 = zext i32 %tmp.16 to i64
%tmp = getelementptr [8193 x i8], [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1
store i8 0, i8* %tmp
%tmp = getelementptr [8193 x i8], ptr @flags2, i32 0, i64 %gep.upgrd.1
store i8 0, ptr %tmp
%tmp.17 = add i32 %tmp.16, %i
%tmp.upgrd.2 = icmp sgt i32 %tmp.17, 8192
%indvar.next = add i32 %indvar, 1
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
@@ -35,10 +35,10 @@ entry:
for.body:
%i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%sub = add nsw i32 %i.08, -2048
%arrayidx = getelementptr inbounds [4096 x i32], [4096 x i32]* @a, i32 0, i32 %i.08
store i32 %sub, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds [4096 x i32], [4096 x i32]* @b, i32 0, i32 %i.08
store i32 %i.08, i32* %arrayidx1, align 4
%arrayidx = getelementptr inbounds [4096 x i32], ptr @a, i32 0, i32 %i.08
store i32 %sub, ptr %arrayidx, align 4
%arrayidx1 = getelementptr inbounds [4096 x i32], ptr @b, i32 0, i32 %i.08
store i32 %i.08, ptr %arrayidx1, align 4
%inc = add nuw nsw i32 %i.08, 1
%exitcond = icmp eq i32 %inc, 4096
br i1 %exitcond, label %for.end, label %for.body
120 changes: 60 additions & 60 deletions llvm/test/CodeGen/RISCV/machine-cse.ll

Large diffs are not rendered by default.

20 changes: 9 additions & 11 deletions llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
@@ -24,10 +24,9 @@ define i32 @func1(i32 %x) #0 {
entry:
%mul = mul i32 %x, %x
%add = add i32 %mul, 1
%exception = tail call i8* @__cxa_allocate_exception(i64 4)
%0 = bitcast i8* %exception to i32*
store i32 %add, i32* %0
tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
%exception = tail call ptr @__cxa_allocate_exception(i64 4)
store i32 %add, ptr %exception
tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
unreachable
}

@@ -52,15 +51,14 @@ define i32 @func2(i32 %x) #0 {
entry:
%mul = mul i32 %x, %x
%add = add i32 %mul, 1
%exception = tail call i8* @__cxa_allocate_exception(i64 4)
%0 = bitcast i8* %exception to i32*
store i32 %add, i32* %0
tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
%exception = tail call ptr @__cxa_allocate_exception(i64 4)
store i32 %add, ptr %exception
tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
unreachable
}

@_ZTIi = external constant i8*
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
@_ZTIi = external constant ptr
declare ptr @__cxa_allocate_exception(i64)
declare void @__cxa_throw(ptr, ptr, ptr)

attributes #0 = { minsize noreturn }
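The store now goes straight through the ptr returned by __cxa_allocate_exception; the old i8*-to-i32* bitcast has no opaque-pointer counterpart. The same shape in isolation, reusing the declarations above (a sketch, not the full test):

define void @throw_int(i32 %v) {
  %e = tail call ptr @__cxa_allocate_exception(i64 4)
  store i32 %v, ptr %e        ; no intermediate bitcast needed
  tail call void @__cxa_throw(ptr %e, ptr @_ZTIi, ptr null)
  unreachable
}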
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
@@ -39,7 +39,7 @@ entry:

loop:
%i = phi i32 [ %inc, %loop ], [ 0, %entry ]
%0 = load volatile i32, i32* @l, align 4
%0 = load volatile i32, ptr @l, align 4
%inc = add nuw nsw i32 %i, 1
%cmp = icmp slt i32 %inc, %n
br i1 %cmp, label %loop, label %ret
@@ -83,7 +83,7 @@ entry:

loop:
%i = phi i32 [ %inc, %loop ], [ 0, %entry ]
%0 = load volatile i32, i32* @g, align 4
%0 = load volatile i32, ptr @g, align 4
%inc = add nuw nsw i32 %i, 1
%cmp = icmp slt i32 %inc, %n
br i1 %cmp, label %loop, label %ret
@@ -129,7 +129,7 @@ entry:

loop:
%i = phi i32 [ %inc, %loop ], [ 0, %entry ]
%0 = load volatile i32, i32* @ie, align 4
%0 = load volatile i32, ptr @ie, align 4
%inc = add nuw nsw i32 %i, 1
%cmp = icmp slt i32 %inc, %n
br i1 %cmp, label %loop, label %ret
@@ -199,7 +199,7 @@ entry:

loop:
%i = phi i32 [ %inc, %loop ], [ 0, %entry ]
%0 = load volatile i32, i32* @gd, align 4
%0 = load volatile i32, ptr @gd, align 4
%inc = add nuw nsw i32 %i, 1
%cmp = icmp slt i32 %inc, %n
br i1 %cmp, label %loop, label %ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
@@ -21,11 +21,11 @@ define void @foo(i32 signext %0, i32 signext %1) {
; FUSION-NEXT: addi a0, a0, %lo(.L.str)
; FUSION-NEXT: tail bar@plt
%3 = sitofp i32 %1 to float
tail call void @bar(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), float %3)
tail call void @bar(ptr @.str, float %3)
ret void
}

declare void @bar(i8*, float)
declare void @bar(ptr, float)

; Test that we prefer lui+addiw over li+slli.
define i32 @test_matint() {
174 changes: 87 additions & 87 deletions llvm/test/CodeGen/RISCV/mem.ll
@@ -4,121 +4,121 @@

; Check indexed and unindexed, sext, zext and anyext loads

define dso_local i32 @lb(i8 *%a) nounwind {
define dso_local i32 @lb(ptr %a) nounwind {
; RV32I-LABEL: lb:
; RV32I: # %bb.0:
; RV32I-NEXT: lb a1, 1(a0)
; RV32I-NEXT: lb a0, 0(a0)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = getelementptr i8, i8* %a, i32 1
%2 = load i8, i8* %1
%1 = getelementptr i8, ptr %a, i32 1
%2 = load i8, ptr %1
%3 = sext i8 %2 to i32
; the unused load will produce an anyext for selection
%4 = load volatile i8, i8* %a
%4 = load volatile i8, ptr %a
ret i32 %3
}

define dso_local i32 @lh(i16 *%a) nounwind {
define dso_local i32 @lh(ptr %a) nounwind {
; RV32I-LABEL: lh:
; RV32I: # %bb.0:
; RV32I-NEXT: lh a1, 4(a0)
; RV32I-NEXT: lh a0, 0(a0)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = getelementptr i16, i16* %a, i32 2
%2 = load i16, i16* %1
%1 = getelementptr i16, ptr %a, i32 2
%2 = load i16, ptr %1
%3 = sext i16 %2 to i32
; the unused load will produce an anyext for selection
%4 = load volatile i16, i16* %a
%4 = load volatile i16, ptr %a
ret i32 %3
}

define dso_local i32 @lw(i32 *%a) nounwind {
define dso_local i32 @lw(ptr %a) nounwind {
; RV32I-LABEL: lw:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a1, 12(a0)
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = getelementptr i32, i32* %a, i32 3
%2 = load i32, i32* %1
%3 = load volatile i32, i32* %a
%1 = getelementptr i32, ptr %a, i32 3
%2 = load i32, ptr %1
%3 = load volatile i32, ptr %a
ret i32 %2
}

define dso_local i32 @lbu(i8 *%a) nounwind {
define dso_local i32 @lbu(ptr %a) nounwind {
; RV32I-LABEL: lbu:
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 4(a0)
; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
%1 = getelementptr i8, i8* %a, i32 4
%2 = load i8, i8* %1
%1 = getelementptr i8, ptr %a, i32 4
%2 = load i8, ptr %1
%3 = zext i8 %2 to i32
%4 = load volatile i8, i8* %a
%4 = load volatile i8, ptr %a
%5 = zext i8 %4 to i32
%6 = add i32 %3, %5
ret i32 %6
}

define dso_local i32 @lhu(i16 *%a) nounwind {
define dso_local i32 @lhu(ptr %a) nounwind {
; RV32I-LABEL: lhu:
; RV32I: # %bb.0:
; RV32I-NEXT: lhu a1, 10(a0)
; RV32I-NEXT: lhu a0, 0(a0)
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
%1 = getelementptr i16, i16* %a, i32 5
%2 = load i16, i16* %1
%1 = getelementptr i16, ptr %a, i32 5
%2 = load i16, ptr %1
%3 = zext i16 %2 to i32
%4 = load volatile i16, i16* %a
%4 = load volatile i16, ptr %a
%5 = zext i16 %4 to i32
%6 = add i32 %3, %5
ret i32 %6
}

; Check indexed and unindexed stores

define dso_local void @sb(i8 *%a, i8 %b) nounwind {
define dso_local void @sb(ptr %a, i8 %b) nounwind {
; RV32I-LABEL: sb:
; RV32I: # %bb.0:
; RV32I-NEXT: sb a1, 0(a0)
; RV32I-NEXT: sb a1, 6(a0)
; RV32I-NEXT: ret
store i8 %b, i8* %a
%1 = getelementptr i8, i8* %a, i32 6
store i8 %b, i8* %1
store i8 %b, ptr %a
%1 = getelementptr i8, ptr %a, i32 6
store i8 %b, ptr %1
ret void
}

define dso_local void @sh(i16 *%a, i16 %b) nounwind {
define dso_local void @sh(ptr %a, i16 %b) nounwind {
; RV32I-LABEL: sh:
; RV32I: # %bb.0:
; RV32I-NEXT: sh a1, 0(a0)
; RV32I-NEXT: sh a1, 14(a0)
; RV32I-NEXT: ret
store i16 %b, i16* %a
%1 = getelementptr i16, i16* %a, i32 7
store i16 %b, i16* %1
store i16 %b, ptr %a
%1 = getelementptr i16, ptr %a, i32 7
store i16 %b, ptr %1
ret void
}

define dso_local void @sw(i32 *%a, i32 %b) nounwind {
define dso_local void @sw(ptr %a, i32 %b) nounwind {
; RV32I-LABEL: sw:
; RV32I: # %bb.0:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a1, 32(a0)
; RV32I-NEXT: ret
store i32 %b, i32* %a
%1 = getelementptr i32, i32* %a, i32 8
store i32 %b, i32* %1
store i32 %b, ptr %a
%1 = getelementptr i32, ptr %a, i32 8
store i32 %b, ptr %1
ret void
}

; Check load and store to an i1 location
define dso_local i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
define dso_local i32 @load_sext_zext_anyext_i1(ptr %a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1:
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 1(a0)
@@ -127,20 +127,20 @@ define dso_local i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV32I-NEXT: sub a0, a2, a1
; RV32I-NEXT: ret
; sextload i1
%1 = getelementptr i1, i1* %a, i32 1
%2 = load i1, i1* %1
%1 = getelementptr i1, ptr %a, i32 1
%2 = load i1, ptr %1
%3 = sext i1 %2 to i32
; zextload i1
%4 = getelementptr i1, i1* %a, i32 2
%5 = load i1, i1* %4
%4 = getelementptr i1, ptr %a, i32 2
%5 = load i1, ptr %4
%6 = zext i1 %5 to i32
%7 = add i32 %3, %6
; extload i1 (anyext). Produced as the load is unused.
%8 = load volatile i1, i1* %a
%8 = load volatile i1, ptr %a
ret i32 %7
}

define dso_local i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
define dso_local i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 1(a0)
@@ -149,16 +149,16 @@ define dso_local i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV32I-NEXT: sub a0, a2, a1
; RV32I-NEXT: ret
; sextload i1
%1 = getelementptr i1, i1* %a, i32 1
%2 = load i1, i1* %1
%1 = getelementptr i1, ptr %a, i32 1
%2 = load i1, ptr %1
%3 = sext i1 %2 to i16
; zextload i1
%4 = getelementptr i1, i1* %a, i32 2
%5 = load i1, i1* %4
%4 = getelementptr i1, ptr %a, i32 2
%5 = load i1, ptr %4
%6 = zext i1 %5 to i16
%7 = add i16 %3, %6
; extload i1 (anyext). Produced as the load is unused.
%8 = load volatile i1, i1* %a
%8 = load volatile i1, ptr %a
ret i16 %7
}

@@ -176,11 +176,11 @@ define dso_local i32 @lw_sw_global(i32 %a) nounwind {
; RV32I-NEXT: sw a0, 36(a3)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = load volatile i32, i32* @G
store i32 %a, i32* @G
%2 = getelementptr i32, i32* @G, i32 9
%3 = load volatile i32, i32* %2
store i32 %a, i32* %2
%1 = load volatile i32, ptr @G
store i32 %a, ptr @G
%2 = getelementptr i32, ptr @G, i32 9
%3 = load volatile i32, ptr %2
store i32 %a, ptr %2
ret i32 %1
}

@@ -193,120 +193,120 @@ define dso_local i32 @lw_sw_constant(i32 %a) nounwind {
; RV32I-NEXT: sw a0, -273(a2)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
%1 = inttoptr i32 3735928559 to i32*
%2 = load volatile i32, i32* %1
store i32 %a, i32* %1
%1 = inttoptr i32 3735928559 to ptr
%2 = load volatile i32, ptr %1
store i32 %a, ptr %1
ret i32 %2
}

define i32 @lw_near_local(i32* %a) {
define i32 @lw_near_local(ptr %a) {
; RV32I-LABEL: lw_near_local:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, 2047
; RV32I-NEXT: lw a0, 5(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 513
%2 = load volatile i32, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 513
%2 = load volatile i32, ptr %1
ret i32 %2
}

define void @st_near_local(i32* %a, i32 %b) {
define void @st_near_local(ptr %a, i32 %b) {
; RV32I-LABEL: st_near_local:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, 2047
; RV32I-NEXT: sw a1, 5(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 513
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 513
store i32 %b, ptr %1
ret void
}

define i32 @lw_sw_near_local(i32* %a, i32 %b) {
define i32 @lw_sw_near_local(ptr %a, i32 %b) {
; RV32I-LABEL: lw_sw_near_local:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a2, a0, 2047
; RV32I-NEXT: lw a0, 5(a2)
; RV32I-NEXT: sw a1, 5(a2)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 513
%2 = load volatile i32, i32* %1
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 513
%2 = load volatile i32, ptr %1
store i32 %b, ptr %1
ret i32 %2
}

define i32 @lw_far_local(i32* %a) {
define i32 @lw_far_local(ptr %a) {
; RV32I-LABEL: lw_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 4
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw a0, -4(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 4095
%2 = load volatile i32, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 4095
%2 = load volatile i32, ptr %1
ret i32 %2
}

define void @st_far_local(i32* %a, i32 %b) {
define void @st_far_local(ptr %a, i32 %b) {
; RV32I-LABEL: st_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 4
; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: sw a1, -4(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 4095
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 4095
store i32 %b, ptr %1
ret void
}

define i32 @lw_sw_far_local(i32* %a, i32 %b) {
define i32 @lw_sw_far_local(ptr %a, i32 %b) {
; RV32I-LABEL: lw_sw_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 4
; RV32I-NEXT: add a2, a0, a2
; RV32I-NEXT: lw a0, -4(a2)
; RV32I-NEXT: sw a1, -4(a2)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i64 4095
%2 = load volatile i32, i32* %1
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i64 4095
%2 = load volatile i32, ptr %1
store i32 %b, ptr %1
ret i32 %2
}

define i32 @lw_really_far_local(i32* %a) {
define i32 @lw_really_far_local(ptr %a) {
; RV32I-LABEL: lw_really_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw a0, -2048(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i32 536870400
%2 = load volatile i32, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i32 536870400
%2 = load volatile i32, ptr %1
ret i32 %2
}

define void @st_really_far_local(i32* %a, i32 %b) {
define void @st_really_far_local(ptr %a, i32 %b) {
; RV32I-LABEL: st_really_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: sw a1, -2048(a0)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i32 536870400
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i32 536870400
store i32 %b, ptr %1
ret void
}

define i32 @lw_sw_really_far_local(i32* %a, i32 %b) {
define i32 @lw_sw_really_far_local(ptr %a, i32 %b) {
; RV32I-LABEL: lw_sw_really_far_local:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: add a2, a0, a2
; RV32I-NEXT: lw a0, -2048(a2)
; RV32I-NEXT: sw a1, -2048(a2)
; RV32I-NEXT: ret
%1 = getelementptr inbounds i32, i32* %a, i32 536870400
%2 = load volatile i32, i32* %1
store i32 %b, i32* %1
%1 = getelementptr inbounds i32, ptr %a, i32 536870400
%2 = load volatile i32, ptr %1
store i32 %b, ptr %1
ret i32 %2
}

@@ -330,11 +330,11 @@ define void @addi_fold_crash(i32 %arg) nounwind {
; RV32I-NEXT: ret
bb:
%tmp = alloca %struct.quux, align 4
%tmp1 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i32 0, i32 1
%tmp2 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i32 0, i32 1, i32 %arg
store i8 0, i8* %tmp2, align 1
call void @snork([0 x i8]* %tmp1)
%tmp1 = getelementptr inbounds %struct.quux, ptr %tmp, i32 0, i32 1
%tmp2 = getelementptr inbounds %struct.quux, ptr %tmp, i32 0, i32 1, i32 %arg
store i8 0, ptr %tmp2, align 1
call void @snork(ptr %tmp1)
ret void
}

declare void @snork([0 x i8]*)
declare void @snork(ptr)
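Nearly every test in mem.ll follows one pattern: a constant getelementptr whose byte offset should fold into the immediate of the load or store. A reduced sketch of that pattern (hypothetical function):

define i32 @load_third(ptr %p) {
  %q = getelementptr inbounds i32, ptr %p, i64 3   ; byte offset 12
  %v = load i32, ptr %q                            ; expected: lw a0, 12(a0)
  ret i32 %v
}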
192 changes: 96 additions & 96 deletions llvm/test/CodeGen/RISCV/mem64.ll

Large diffs are not rendered by default.

34 changes: 16 additions & 18 deletions llvm/test/CodeGen/RISCV/memcpy-inline.ll
@@ -53,11 +53,11 @@ define i32 @t0() {
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
call void @llvm.memcpy.p0.p0.i32(ptr align 8 @dst, ptr align 8 @src, i32 11, i1 false)
ret i32 0
}

define void @t1(i8* nocapture %C) nounwind {
define void @t1(ptr nocapture %C) nounwind {
; RV32-LABEL: t1:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a1, %hi(.L.str1)
@@ -91,11 +91,11 @@ define void @t1(i8* nocapture %C) nounwind {
; RV64UNALIGNED-NEXT: sd a1, 8(a0)
; RV64UNALIGNED-NEXT: ret
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false)
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str1, i64 31, i1 false)
ret void
}

define void @t2(i8* nocapture %C) nounwind {
define void @t2(ptr nocapture %C) nounwind {
; RV32-LABEL: t2:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a1, %hi(.L.str2)
@@ -127,11 +127,11 @@ define void @t2(i8* nocapture %C) nounwind {
; RV64UNALIGNED-NEXT: sd a1, 8(a0)
; RV64UNALIGNED-NEXT: ret
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false)
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str2, i64 36, i1 false)
ret void
}

define void @t3(i8* nocapture %C) nounwind {
define void @t3(ptr nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t3:
; RV32ALIGNED: # %bb.0: # %entry
; RV32ALIGNED-NEXT: lui a1, %hi(.L.str3)
@@ -180,11 +180,11 @@ define void @t3(i8* nocapture %C) nounwind {
; RV64UNALIGNED-NEXT: sd a1, 8(a0)
; RV64UNALIGNED-NEXT: ret
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false)
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str3, i64 24, i1 false)
ret void
}

define void @t4(i8* nocapture %C) nounwind {
define void @t4(ptr nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t4:
; RV32ALIGNED: # %bb.0: # %entry
; RV32ALIGNED-NEXT: lui a1, %hi(.L.str4)
@@ -229,11 +229,11 @@ define void @t4(i8* nocapture %C) nounwind {
; RV64UNALIGNED-NEXT: sd a1, 8(a0)
; RV64UNALIGNED-NEXT: ret
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false)
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str4, i64 18, i1 false)
ret void
}

define void @t5(i8* nocapture %C) nounwind {
define void @t5(ptr nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t5:
; RV32ALIGNED: # %bb.0: # %entry
; RV32ALIGNED-NEXT: sb zero, 6(a0)
@@ -290,7 +290,7 @@ define void @t5(i8* nocapture %C) nounwind {
; RV64UNALIGNED-NEXT: sw a1, 0(a0)
; RV64UNALIGNED-NEXT: ret
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false)
tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str5, i64 7, i1 false)
ret void
}

@@ -352,13 +352,13 @@ define void @t6() nounwind {
; RV64UNALIGNED-NEXT: sw a0, %lo(spool.splbuf+8)(a1)
; RV64UNALIGNED-NEXT: ret
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr @spool.splbuf, ptr @.str6, i64 14, i1 false)
ret void
}

%struct.Foo = type { i32, i32, i32, i32 }

define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
define void @t7(ptr nocapture %a, ptr nocapture %b) nounwind {
; RV32-LABEL: t7:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a2, 12(a1)
@@ -391,11 +391,9 @@ define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
; RV64UNALIGNED-NEXT: sd a1, 0(a0)
; RV64UNALIGNED-NEXT: ret
entry:
%0 = bitcast %struct.Foo* %a to i8*
%1 = bitcast %struct.Foo* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false)
tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %a, ptr align 4 %b, i32 16, i1 false)
ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
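Note that the intrinsic names change too: the pointee types in the mangling suffix (p0i8) collapse to bare address spaces (p0), and alignment is carried by parameter attributes rather than by the pointer type. A sketch of a call against the new declaration (the globals here are hypothetical):

@dst0 = global [8 x i8] zeroinitializer
@src0 = constant [8 x i8] c"abcdefg\00"

define void @copy8() {
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 @dst0, ptr align 1 @src0, i64 8, i1 false)
  ret void
}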
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/mir-target-flags.ll
@@ -55,12 +55,12 @@ define i32 @caller(i32 %a) nounwind {
; RV32-MED-NEXT: target-flags(riscv-tprel-lo) @t_le
; RV32-MED: target-flags(riscv-plt) @callee
;
%b = load i32, i32* @g_e
%c = load i32, i32* @g_i
%d = load i32, i32* @t_un
%e = load i32, i32* @t_ld
%f = load i32, i32* @t_ie
%g = load i32, i32* @t_le
%b = load i32, ptr @g_e
%c = load i32, ptr @g_i
%d = load i32, ptr @t_un
%e = load i32, ptr @t_ld
%f = load i32, ptr @t_ie
%g = load i32, ptr @t_le
%sum = bitcast i32 0 to i32
%sum.a = add i32 %sum, %a
%sum.b = add i32 %sum.a, %b
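The riscv-tls-* and riscv-tprel-* target flags above are driven by each global's TLS model. A sketch of how the thread-local globals are presumably declared (their definitions are elided from this diff):

@t_ie = thread_local(initialexec) global i32 0
@t_le = thread_local(localexec) global i32 0

define i32 @read_tls() {
  %a = load i32, ptr @t_ie    ; initial-exec model
  %b = load i32, ptr @t_le    ; local-exec model yields the riscv-tprel-* flags
  %s = add i32 %a, %b
  ret i32 %s
}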
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/musttail-call.ll
@@ -11,10 +11,10 @@

%struct.A = type { i32 }

declare void @callee_musttail(%struct.A* sret(%struct.A) %a)
define void @caller_musttail(%struct.A* sret(%struct.A) %a) {
declare void @callee_musttail(ptr sret(%struct.A) %a)
define void @caller_musttail(ptr sret(%struct.A) %a) {
; CHECK: LLVM ERROR: failed to perform tail call elimination on a call site marked musttail
entry:
musttail call void @callee_musttail(%struct.A* sret(%struct.A) %a)
musttail call void @callee_musttail(ptr sret(%struct.A) %a)
ret void
}
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/neg-abs.ll
@@ -157,7 +157,7 @@ define i64 @select_neg_abs64(i64 %x) {
ret i64 %3
}

define i32 @neg_abs32_multiuse(i32 %x, i32* %y) {
define i32 @neg_abs32_multiuse(i32 %x, ptr %y) {
; RV32I-LABEL: neg_abs32_multiuse:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a2, a0, 31
@@ -193,12 +193,12 @@ define i32 @neg_abs32_multiuse(i32 %x, i32* %y) {
; RV64ZBB-NEXT: sw a2, 0(a1)
; RV64ZBB-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
store i32 %abs, i32* %y
store i32 %abs, ptr %y
%neg = sub nsw i32 0, %abs
ret i32 %neg
}

define i64 @neg_abs64_multiuse(i64 %x, i64* %y) {
define i64 @neg_abs64_multiuse(i64 %x, ptr %y) {
; RV32I-LABEL: neg_abs64_multiuse:
; RV32I: # %bb.0:
; RV32I-NEXT: bgez a1, .LBB5_2
@@ -252,7 +252,7 @@ define i64 @neg_abs64_multiuse(i64 %x, i64* %y) {
; RV64ZBB-NEXT: sd a2, 0(a1)
; RV64ZBB-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
store i64 %abs, i64* %y
store i64 %abs, ptr %y
%neg = sub nsw i64 0, %abs
ret i64 %neg
}
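Both multiuse tests are built on llvm.abs with its int_min_poison flag set to true, which is what licenses the nsw on the negation. A minimal sketch (declaration as used by the test file):

declare i32 @llvm.abs.i32(i32, i1)

define i32 @neg_abs(i32 %x) {
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ; true: poison on INT_MIN
  %neg = sub nsw i32 0, %abs
  ret i32 %neg
}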
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/option-nopic.ll
@@ -13,6 +13,6 @@ define i32 @get_symbol() nounwind {
; CHECK: lw a0, 0(a0)
; CHECK: lw a0, 0(a0)
tail call void asm sideeffect ".option nopic", ""()
%v = load i32, i32* @symbol
%v = load i32, ptr @symbol
ret i32 %v
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/option-pic.ll
@@ -12,6 +12,6 @@ define i32 @get_symbol() nounwind {
; CHECK: lui a0, 0
; CHECK: lw a0, 0(a0)
tail call void asm sideeffect ".option pic", ""()
%v = load i32, i32* @symbol
%v = load i32, ptr @symbol
ret i32 %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/optnone-store-no-combine.ll
@@ -3,14 +3,14 @@

; This test verifies that a repeated store is not eliminated with optnone (improves debugging).

define void @foo(i32* %p) noinline optnone {
define void @foo(ptr %p) noinline optnone {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 8
; CHECK-NEXT: sw a1, 0(a0)
; CHECK-NEXT: sw a1, 0(a0)
; CHECK-NEXT: ret
store i32 8, i32* %p, align 4
store i32 8, i32* %p, align 4
store i32 8, ptr %p, align 4
store i32 8, ptr %p, align 4
ret void
}
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll
@@ -1,6 +1,6 @@
; RUN: llc %s -mtriple=riscv32 -o - | FileCheck %s

define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
define i1 @no__mulodi4(i32 %a, i64 %b, ptr %c) {
; CHECK-LABEL: no__mulodi4
; CHECK-NOT: call __mulodi4@plt
; CHECK-NOT: call __multi3@plt
@@ -13,7 +13,7 @@ entry:
%5 = sext i32 %4 to i64
%6 = icmp ne i64 %3, %5
%7 = or i1 %2, %6
store i32 %4, i32* %c, align 4
store i32 %4, ptr %c, align 4
ret i1 %7
}

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/pic-models.ll
@@ -17,7 +17,7 @@

; external address

define i32* @f1() nounwind {
define ptr @f1() nounwind {
; RV32-STATIC-LABEL: f1:
; RV32-STATIC: # %bb.0: # %entry
; RV32-STATIC-NEXT: lui a0, %hi(external_var)
@@ -44,13 +44,13 @@ define i32* @f1() nounwind {
; RV64-PIC-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
; RV64-PIC-NEXT: ret
entry:
ret i32* @external_var
ret ptr @external_var
}


; internal address

define i32* @f2() nounwind {
define ptr @f2() nounwind {
; RV32-STATIC-LABEL: f2:
; RV32-STATIC: # %bb.0: # %entry
; RV32-STATIC-NEXT: lui a0, %hi(internal_var)
@@ -77,5 +77,5 @@ define i32* @f2() nounwind {
; RV64-PIC-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi1)
; RV64-PIC-NEXT: ret
entry:
ret i32* @internal_var
ret ptr @internal_var
}
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/pr51206.ll
@@ -38,22 +38,22 @@ define signext i32 @wobble() nounwind {
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
bb:
%tmp = load i8, i8* @global, align 1
%tmp = load i8, ptr @global, align 1
%tmp1 = zext i8 %tmp to i32
%tmp2 = add nuw nsw i32 %tmp1, 1
store i32 %tmp2, i32* @global.1, align 4
%tmp3 = load i8, i8* @global.2, align 1
store i32 %tmp2, ptr @global.1, align 4
%tmp3 = load i8, ptr @global.2, align 1
%tmp4 = zext i8 %tmp3 to i32
%tmp5 = mul nuw nsw i32 %tmp2, %tmp4
%tmp6 = trunc i32 %tmp5 to i16
%tmp7 = udiv i16 %tmp6, 5
%tmp8 = zext i16 %tmp7 to i32
store i32 %tmp8, i32* @global.3, align 4
store i32 %tmp8, ptr @global.3, align 4
%tmp9 = icmp ult i32 %tmp5, 5
br i1 %tmp9, label %bb12, label %bb10

bb10: ; preds = %bb
%tmp11 = tail call signext i32 bitcast (i32 (...)* @quux to i32 ()*)()
%tmp11 = tail call signext i32 @quux()
br label %bb12

bb12: ; preds = %bb10, %bb
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/pr58025.ll
@@ -16,6 +16,6 @@ BB:
br label %BB1

BB1: ; preds = %BB
store <1 x float> %B, <1 x float>* %PTR
store <1 x float> %B, ptr %PTR
ret void
}
116 changes: 58 additions & 58 deletions llvm/test/CodeGen/RISCV/pr58286.ll
@@ -97,38 +97,38 @@ define void @func() {
%stackspace = alloca [1024 x i32], align 4

;; Load values to increase register pressure.
%v0 = load volatile i32, i32* @var
%v1 = load volatile i32, i32* @var
%v2 = load volatile i32, i32* @var
%v3 = load volatile i32, i32* @var
%v4 = load volatile i32, i32* @var
%v5 = load volatile i32, i32* @var
%v6 = load volatile i32, i32* @var
%v7 = load volatile i32, i32* @var
%v8 = load volatile i32, i32* @var
%v9 = load volatile i32, i32* @var
%v10 = load volatile i32, i32* @var
%v11 = load volatile i32, i32* @var
%v12 = load volatile i32, i32* @var
%v13 = load volatile i32, i32* @var
%v0 = load volatile i32, ptr @var
%v1 = load volatile i32, ptr @var
%v2 = load volatile i32, ptr @var
%v3 = load volatile i32, ptr @var
%v4 = load volatile i32, ptr @var
%v5 = load volatile i32, ptr @var
%v6 = load volatile i32, ptr @var
%v7 = load volatile i32, ptr @var
%v8 = load volatile i32, ptr @var
%v9 = load volatile i32, ptr @var
%v10 = load volatile i32, ptr @var
%v11 = load volatile i32, ptr @var
%v12 = load volatile i32, ptr @var
%v13 = load volatile i32, ptr @var

store volatile i32 %v0, i32* %space
store volatile i32 %v0, ptr %space

;; store values so they are used.
store volatile i32 %v0, i32* @var
store volatile i32 %v1, i32* @var
store volatile i32 %v2, i32* @var
store volatile i32 %v3, i32* @var
store volatile i32 %v4, i32* @var
store volatile i32 %v5, i32* @var
store volatile i32 %v6, i32* @var
store volatile i32 %v7, i32* @var
store volatile i32 %v8, i32* @var
store volatile i32 %v9, i32* @var
store volatile i32 %v10, i32* @var
store volatile i32 %v11, i32* @var
store volatile i32 %v12, i32* @var
store volatile i32 %v13, i32* @var
store volatile i32 %v0, ptr @var
store volatile i32 %v1, ptr @var
store volatile i32 %v2, ptr @var
store volatile i32 %v3, ptr @var
store volatile i32 %v4, ptr @var
store volatile i32 %v5, ptr @var
store volatile i32 %v6, ptr @var
store volatile i32 %v7, ptr @var
store volatile i32 %v8, ptr @var
store volatile i32 %v9, ptr @var
store volatile i32 %v10, ptr @var
store volatile i32 %v11, ptr @var
store volatile i32 %v12, ptr @var
store volatile i32 %v13, ptr @var

ret void
}
@@ -238,38 +238,38 @@ define void @shrink_wrap(i1 %c) {
bar:

;; Load values to increase register pressure.
%v0 = load volatile i32, i32* @var
%v1 = load volatile i32, i32* @var
%v2 = load volatile i32, i32* @var
%v3 = load volatile i32, i32* @var
%v4 = load volatile i32, i32* @var
%v5 = load volatile i32, i32* @var
%v6 = load volatile i32, i32* @var
%v7 = load volatile i32, i32* @var
%v8 = load volatile i32, i32* @var
%v9 = load volatile i32, i32* @var
%v10 = load volatile i32, i32* @var
%v11 = load volatile i32, i32* @var
%v12 = load volatile i32, i32* @var
%v13 = load volatile i32, i32* @var
%v0 = load volatile i32, ptr @var
%v1 = load volatile i32, ptr @var
%v2 = load volatile i32, ptr @var
%v3 = load volatile i32, ptr @var
%v4 = load volatile i32, ptr @var
%v5 = load volatile i32, ptr @var
%v6 = load volatile i32, ptr @var
%v7 = load volatile i32, ptr @var
%v8 = load volatile i32, ptr @var
%v9 = load volatile i32, ptr @var
%v10 = load volatile i32, ptr @var
%v11 = load volatile i32, ptr @var
%v12 = load volatile i32, ptr @var
%v13 = load volatile i32, ptr @var

store volatile i32 %v0, i32* %space
store volatile i32 %v0, ptr %space

;; store values so they are used.
store volatile i32 %v0, i32* @var
store volatile i32 %v1, i32* @var
store volatile i32 %v2, i32* @var
store volatile i32 %v3, i32* @var
store volatile i32 %v4, i32* @var
store volatile i32 %v5, i32* @var
store volatile i32 %v6, i32* @var
store volatile i32 %v7, i32* @var
store volatile i32 %v8, i32* @var
store volatile i32 %v9, i32* @var
store volatile i32 %v10, i32* @var
store volatile i32 %v11, i32* @var
store volatile i32 %v12, i32* @var
store volatile i32 %v13, i32* @var
store volatile i32 %v0, ptr @var
store volatile i32 %v1, ptr @var
store volatile i32 %v2, ptr @var
store volatile i32 %v3, ptr @var
store volatile i32 %v4, ptr @var
store volatile i32 %v5, ptr @var
store volatile i32 %v6, ptr @var
store volatile i32 %v7, ptr @var
store volatile i32 %v8, ptr @var
store volatile i32 %v9, ptr @var
store volatile i32 %v10, ptr @var
store volatile i32 %v11, ptr @var
store volatile i32 %v12, ptr @var
store volatile i32 %v13, ptr @var
br label %foo

foo:
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/prefetch.ll
@@ -4,16 +4,16 @@
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s

declare void @llvm.prefetch(i8*, i32, i32, i32)
declare void @llvm.prefetch(ptr, i32, i32, i32)

define void @test_prefetch(i8* %a) nounwind {
define void @test_prefetch(ptr %a) nounwind {
; RV32I-LABEL: test_prefetch:
; RV32I: # %bb.0:
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_prefetch:
; RV64I: # %bb.0:
; RV64I-NEXT: ret
call void @llvm.prefetch(i8* %a, i32 0, i32 2, i32 1)
call void @llvm.prefetch(ptr %a, i32 0, i32 2, i32 1)
ret void
}
@@ -150,7 +150,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi sp, sp, 32
; SUBREGLIVENESS-NEXT: ret
entry:
%i = call { <vscale x 16 x half>, <vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( <vscale x 16 x half> undef, <vscale x 16 x half> undef, half* nonnull poison, <vscale x 16 x i32> poison, i64 55)
%i = call { <vscale x 16 x half>, <vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( <vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr nonnull poison, <vscale x 16 x i32> poison, i64 55)
%i1 = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } %i, 0
%i2 = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> zeroinitializer, i64 36, i64 0)
call void @func()
@@ -163,7 +163,7 @@ entry:
}

declare void @func()
declare { <vscale x 16 x half>, <vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( <vscale x 16 x half>, <vscale x 16 x half>, half* nocapture, <vscale x 16 x i32>, i64)
declare { <vscale x 16 x half>, <vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( <vscale x 16 x half>, <vscale x 16 x half>, ptr nocapture, <vscale x 16 x i32>, i64)
declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i64, i64 immarg)
declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64 immarg)
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x half>, i64)
52 changes: 26 additions & 26 deletions llvm/test/CodeGen/RISCV/remat.ll
@@ -129,70 +129,70 @@ define i32 @test() nounwind {
; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: ret
entry:
%.pr = load i32, i32* @a, align 4
%.pr = load i32, ptr @a, align 4
%tobool14 = icmp eq i32 %.pr, 0
br i1 %tobool14, label %for.end, label %for.body

for.body: ; preds = %entry, %for.inc
%0 = phi i32 [ %dec, %for.inc ], [ %.pr, %entry ]
%1 = load i32, i32* @l, align 4
%1 = load i32, ptr @l, align 4
%tobool1 = icmp eq i32 %1, 0
br i1 %tobool1, label %if.end, label %if.then

if.then: ; preds = %for.body
%2 = load i32, i32* @b, align 4
%3 = load i32, i32* @c, align 4
%4 = load i32, i32* @d, align 4
%5 = load i32, i32* @e, align 4
%2 = load i32, ptr @b, align 4
%3 = load i32, ptr @c, align 4
%4 = load i32, ptr @d, align 4
%5 = load i32, ptr @e, align 4
%call = tail call i32 @foo(i32 %0, i32 %2, i32 %3, i32 %4, i32 %5, i32 32)
br label %if.end

if.end: ; preds = %for.body, %if.then
%6 = load i32, i32* @k, align 4
%6 = load i32, ptr @k, align 4
%tobool2 = icmp eq i32 %6, 0
br i1 %tobool2, label %if.end5, label %if.then3

if.then3: ; preds = %if.end
%7 = load i32, i32* @b, align 4
%8 = load i32, i32* @c, align 4
%9 = load i32, i32* @d, align 4
%10 = load i32, i32* @e, align 4
%11 = load i32, i32* @f, align 4
%7 = load i32, ptr @b, align 4
%8 = load i32, ptr @c, align 4
%9 = load i32, ptr @d, align 4
%10 = load i32, ptr @e, align 4
%11 = load i32, ptr @f, align 4
%call4 = tail call i32 @foo(i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 64)
br label %if.end5

if.end5: ; preds = %if.end, %if.then3
%12 = load i32, i32* @j, align 4
%12 = load i32, ptr @j, align 4
%tobool6 = icmp eq i32 %12, 0
br i1 %tobool6, label %if.end9, label %if.then7

if.then7: ; preds = %if.end5
%13 = load i32, i32* @c, align 4
%14 = load i32, i32* @d, align 4
%15 = load i32, i32* @e, align 4
%16 = load i32, i32* @f, align 4
%17 = load i32, i32* @g, align 4
%13 = load i32, ptr @c, align 4
%14 = load i32, ptr @d, align 4
%15 = load i32, ptr @e, align 4
%16 = load i32, ptr @f, align 4
%17 = load i32, ptr @g, align 4
%call8 = tail call i32 @foo(i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 32)
br label %if.end9

if.end9: ; preds = %if.end5, %if.then7
%18 = load i32, i32* @i, align 4
%18 = load i32, ptr @i, align 4
%tobool10 = icmp eq i32 %18, 0
br i1 %tobool10, label %for.inc, label %if.then11

if.then11: ; preds = %if.end9
%19 = load i32, i32* @d, align 4
%20 = load i32, i32* @e, align 4
%21 = load i32, i32* @f, align 4
%22 = load i32, i32* @g, align 4
%23 = load i32, i32* @h, align 4
%19 = load i32, ptr @d, align 4
%20 = load i32, ptr @e, align 4
%21 = load i32, ptr @f, align 4
%22 = load i32, ptr @g, align 4
%23 = load i32, ptr @h, align 4
%call12 = tail call i32 @foo(i32 %19, i32 %20, i32 %21, i32 %22, i32 %23, i32 32)
br label %for.inc

for.inc: ; preds = %if.end9, %if.then11
%24 = load i32, i32* @a, align 4
%24 = load i32, ptr @a, align 4
%dec = add nsw i32 %24, -1
store i32 %dec, i32* @a, align 4
store i32 %dec, ptr @a, align 4
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %for.end, label %for.body

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/reserved-regs.ll
@@ -64,8 +64,8 @@
@var = global [32 x i64] zeroinitializer

define void @foo() {
%1 = load volatile [32 x i64], [32 x i64]* @var
store volatile [32 x i64] %1, [32 x i64]* @var
%1 = load volatile [32 x i64], ptr @var
store volatile [32 x i64] %1, ptr @var

; X3-NOT: lw gp,
; X3-NOT: ld gp,
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+m,+zba -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBA

define signext i16 @sh1add(i64 %0, i16* %1) {
define signext i16 @sh1add(i64 %0, ptr %1) {
; RV32I-LABEL: sh1add:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -17,12 +17,12 @@ define signext i16 @sh1add(i64 %0, i16* %1) {
; RV32ZBA-NEXT: sh1add a0, a0, a2
; RV32ZBA-NEXT: lh a0, 0(a0)
; RV32ZBA-NEXT: ret
%3 = getelementptr inbounds i16, i16* %1, i64 %0
%4 = load i16, i16* %3
%3 = getelementptr inbounds i16, ptr %1, i64 %0
%4 = load i16, ptr %3
ret i16 %4
}

define i32 @sh2add(i64 %0, i32* %1) {
define i32 @sh2add(i64 %0, ptr %1) {
; RV32I-LABEL: sh2add:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 2
@@ -35,12 +35,12 @@ define i32 @sh2add(i64 %0, i32* %1) {
; RV32ZBA-NEXT: sh2add a0, a0, a2
; RV32ZBA-NEXT: lw a0, 0(a0)
; RV32ZBA-NEXT: ret
%3 = getelementptr inbounds i32, i32* %1, i64 %0
%4 = load i32, i32* %3
%3 = getelementptr inbounds i32, ptr %1, i64 %0
%4 = load i32, ptr %3
ret i32 %4
}

define i64 @sh3add(i64 %0, i64* %1) {
define i64 @sh3add(i64 %0, ptr %1) {
; RV32I-LABEL: sh3add:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 3
@@ -55,8 +55,8 @@ define i64 @sh3add(i64 %0, i64* %1) {
; RV32ZBA-NEXT: lw a0, 0(a1)
; RV32ZBA-NEXT: lw a1, 4(a1)
; RV32ZBA-NEXT: ret
%3 = getelementptr inbounds i64, i64* %1, i64 %0
%4 = load i64, i64* %3
%3 = getelementptr inbounds i64, ptr %1, i64 %0
%4 = load i64, ptr %3
ret i64 %4
}

6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rv64-large-stack.ll
@@ -24,9 +24,9 @@ define void @foo() nounwind {
; CHECK-NEXT: ret
entry:
%w = alloca [100000000 x { fp128, fp128 }], align 16
%arraydecay = getelementptr inbounds [100000000 x { fp128, fp128 }], [100000000 x { fp128, fp128 }]* %w, i64 0, i64 0
call void @baz({ fp128, fp128 }* nonnull %arraydecay)
%arraydecay = getelementptr inbounds [100000000 x { fp128, fp128 }], ptr %w, i64 0, i64 0
call void @baz(ptr nonnull %arraydecay)
ret void
}

declare void @baz({ fp128, fp128 }*)
declare void @baz(ptr)
42 changes: 21 additions & 21 deletions llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
@@ -26,13 +26,13 @@ define i64 @test2(i32 signext %a) nounwind {
ret i64 %3
}

define i64 @test3(i32* %a) nounwind {
define i64 @test3(ptr %a) nounwind {
; RV64I-LABEL: test3:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: ret
%1 = load i32, i32* %a
%1 = load i32, ptr %a
%2 = zext i32 %1 to i64
%3 = shl i64 %2, 32
%4 = ashr i64 %3, 28
@@ -81,7 +81,7 @@ define i64 @test6(i32 signext %a, i32 signext %b) nounwind {
; The ashr+add+shl is canonical IR from InstCombine for
; (sext (add (trunc X to i32), 1) to i32).
; That can be implemented as addiw; make sure we recover it.
define i64 @test7(i32* %0, i64 %1) {
define i64 @test7(ptr %0, i64 %1) {
; RV64I-LABEL: test7:
; RV64I: # %bb.0:
; RV64I-NEXT: addiw a0, a1, 1
@@ -95,7 +95,7 @@ define i64 @test7(i32* %0, i64 %1) {
; The ashr+add+shl is canonical IR from InstCombine for
; (sext (sub 1, (trunc X to i32)) to i64).
; That can be implemented as (li 1)+subw; make sure we recover it.
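; A minimal sketch of this variant (editor's illustration; hypothetical names,
; body elided by this hunk):
;   %s = mul i64 %x, -4294967296  ; == -(%x << 32)
;   %t = add i64 %s, 4294967296   ; 1 << 32
;   %u = ashr exact i64 %t, 32    ; == (sext (sub 1, (trunc %x to i32)) to i64)
; li materializes the constant 1 and subw does the 32-bit subtract plus the
; sign extension.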
define i64 @test8(i32* %0, i64 %1) {
define i64 @test8(ptr %0, i64 %1) {
; RV64I-LABEL: test8:
; RV64I: # %bb.0:
; RV64I-NEXT: li a0, 1
@@ -109,7 +109,7 @@ define i64 @test8(i32* %0, i64 %1) {

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
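; Sketch of the fold in question (editor's note; assumed shape, hypothetical
; names): a gep of i32 scales its index by 4, so
;   %i = ashr exact i64 %t, 32
;   %p = getelementptr inbounds i32, ptr %base, i64 %i   ; address += %i << 2
; lets the DAG combine (shl (ashr exact %t, 32), 2) into (ashr exact %t, 30),
; obscuring the sext-of-i32 pattern the test wants to recover.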
define signext i32 @test9(i32* %0, i64 %1) {
define signext i32 @test9(ptr %0, i64 %1) {
; RV64I-LABEL: test9:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 1
@@ -122,14 +122,14 @@ define signext i32 @test9(i32* %0, i64 %1) {
%3 = shl i64 %1, 32
%4 = add i64 %3, 17596481011712 ; 4097 << 32
%5 = ashr exact i64 %4, 32
%6 = getelementptr inbounds i32, i32* %0, i64 %5
%7 = load i32, i32* %6, align 4
%6 = getelementptr inbounds i32, ptr %0, i64 %5
%7 = load i32, ptr %6, align 4
ret i32 %7
}

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
define signext i32 @test10(i32* %0, i64 %1) {
define signext i32 @test10(ptr %0, i64 %1) {
; RV64I-LABEL: test10:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 30141
@@ -142,12 +142,12 @@ define signext i32 @test10(i32* %0, i64 %1) {
%3 = mul i64 %1, -4294967296
%4 = add i64 %3, 530242871224172544 ; 123456789 << 32
%5 = ashr exact i64 %4, 32
%6 = getelementptr inbounds i32, i32* %0, i64 %5
%7 = load i32, i32* %6, align 4
%6 = getelementptr inbounds i32, ptr %0, i64 %5
%7 = load i32, ptr %6, align 4
ret i32 %7
}

define i64 @test11(i32* %0, i64 %1) {
define i64 @test11(ptr %0, i64 %1) {
; RV64I-LABEL: test11:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a0, 524288
@@ -171,7 +171,7 @@ define i32 @test12(i32 signext %0) {
ret i32 %3
}

define i8 @test13(i8* %0, i64 %1) {
define i8 @test13(ptr %0, i64 %1) {
; RV64I-LABEL: test13:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 1
@@ -187,17 +187,17 @@ define i8 @test13(i8* %0, i64 %1) {
%3 = mul i64 %1, -4294967296
%4 = add i64 %3, 4294967296 ; 1 << 32
%5 = ashr exact i64 %4, 32
%6 = getelementptr inbounds i8, i8* %0, i64 %5
%7 = load i8, i8* %6, align 4
%6 = getelementptr inbounds i8, ptr %0, i64 %5
%7 = load i8, ptr %6, align 4
%8 = add i64 %3, 8589934592 ; 2 << 32
%9 = ashr exact i64 %8, 32
%10 = getelementptr inbounds i8, i8* %0, i64 %9
%11 = load i8, i8* %10, align 4
%10 = getelementptr inbounds i8, ptr %0, i64 %9
%11 = load i8, ptr %10, align 4
%12 = add i8 %7, %11
ret i8 %12
}

define signext i32 @test14(i8* %0, i32* %1, i64 %2) {
define signext i32 @test14(ptr %0, ptr %1, i64 %2) {
; RV64I-LABEL: test14:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 1
@@ -212,11 +212,11 @@ define signext i32 @test14(i8* %0, i32* %1, i64 %2) {
%4 = mul i64 %2, -4294967296
%5 = add i64 %4, 4294967296 ; 1 << 32
%6 = ashr exact i64 %5, 32
%7 = getelementptr inbounds i8, i8* %0, i64 %6
%8 = load i8, i8* %7, align 4
%7 = getelementptr inbounds i8, ptr %0, i64 %6
%8 = load i8, ptr %7, align 4
%9 = zext i8 %8 to i32
%10 = getelementptr inbounds i32, i32* %1, i64 %6
%11 = load i32, i32* %10, align 4
%10 = getelementptr inbounds i32, ptr %1, i64 %6
%11 = load i32, ptr %10, align 4
%12 = add i32 %9, %11
ret i32 %12
}
174 changes: 87 additions & 87 deletions llvm/test/CodeGen/RISCV/rv64zba.ll

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -122,7 +122,7 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
}

; Similar to rol_i32, but doesn't sign extend the result.
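; (Recall that llvm.fshl.i32(%a, %a, %b), with both data operands equal, is
; exactly a rotate left of %a by %b, which is what the rol/rolw instructions
; implement.)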
define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
; RV64I-LABEL: rol_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: sllw a3, a0, a1
@@ -138,7 +138,7 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64ZBB-ZBKB-NEXT: sw a0, 0(a2)
; RV64ZBB-ZBKB-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
store i32 %1, i32* %x
store i32 %1, ptr %x
ret void
}

@@ -200,7 +200,7 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
}

; Similar to ror_i32, but doesn't sign extend the result.
define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
; RV64I-LABEL: ror_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: srlw a3, a0, a1
@@ -216,7 +216,7 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64ZBB-ZBKB-NEXT: sw a0, 0(a2)
; RV64ZBB-ZBKB-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
store i32 %1, i32* %x
store i32 %1, ptr %x
ret void
}

@@ -275,7 +275,7 @@ define signext i32 @rori_i32_fshl(i32 signext %a) nounwind {
}

; Similar to rori_i32_fshl, but doesn't sign extend the result.
define void @rori_i32_fshl_nosext(i32 signext %a, i32* %x) nounwind {
define void @rori_i32_fshl_nosext(i32 signext %a, ptr %x) nounwind {
; RV64I-LABEL: rori_i32_fshl_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a2, a0, 1
@@ -290,7 +290,7 @@ define void @rori_i32_fshl_nosext(i32 signext %a, i32* %x) nounwind {
; RV64ZBB-ZBKB-NEXT: sw a0, 0(a1)
; RV64ZBB-ZBKB-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
store i32 %1, i32* %x
store i32 %1, ptr %x
ret void
}

@@ -311,7 +311,7 @@ define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
}

; Similar to rori_i32_fshr, but doesn't sign extend the result.
define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind {
define void @rori_i32_fshr_nosext(i32 signext %a, ptr %x) nounwind {
; RV64I-LABEL: rori_i32_fshr_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: slliw a2, a0, 1
@@ -326,7 +326,7 @@ define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind {
; RV64ZBB-ZBKB-NEXT: sw a0, 0(a1)
; RV64ZBB-ZBKB-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
store i32 %1, i32* %x
store i32 %1, ptr %x
ret void
}

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -570,7 +570,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
ret i32 %1
}

define signext i32 @ctpop_i32_load(i32* %p) nounwind {
define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-LABEL: ctpop_i32_load:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -605,7 +605,7 @@ define signext i32 @ctpop_i32_load(i32* %p) nounwind {
; RV64ZBB-NEXT: lwu a0, 0(a0)
; RV64ZBB-NEXT: cpopw a0, a0
; RV64ZBB-NEXT: ret
%a = load i32, i32* %p
%a = load i32, ptr %p
%1 = call i32 @llvm.ctpop.i32(i32 %a)
ret i32 %1
}
@@ -971,7 +971,7 @@ define signext i32 @bswap_i32(i32 signext %a) nounwind {
}

; Similar to bswap_i32 but the result is not sign extended.
define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind {
; RV64I-LABEL: bswap_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a2, a0, 8
@@ -995,7 +995,7 @@ define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
; RV64ZBB-NEXT: sw a0, 0(a1)
; RV64ZBB-NEXT: ret
%1 = tail call i32 @llvm.bswap.i32(i32 %a)
store i32 %1, i32* %x
store i32 %1, ptr %x
ret void
}

12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -46,7 +46,7 @@ define signext i32 @bclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
ret i32 %and1
}

define signext i32 @bclr_i32_load(i32* %p, i32 signext %b) nounwind {
define signext i32 @bclr_i32_load(ptr %p, i32 signext %b) nounwind {
; RV64I-LABEL: bclr_i32_load:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a0, 0(a0)
@@ -62,7 +62,7 @@ define signext i32 @bclr_i32_load(i32* %p, i32 signext %b) nounwind {
; RV64ZBS-NEXT: bclr a0, a0, a1
; RV64ZBS-NEXT: sext.w a0, a0
; RV64ZBS-NEXT: ret
%a = load i32, i32* %p
%a = load i32, ptr %p
%shl = shl i32 1, %b
%neg = xor i32 %shl, -1
%and1 = and i32 %neg, %a
@@ -146,7 +146,7 @@ define signext i32 @bset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
ret i32 %or
}

define signext i32 @bset_i32_load(i32* %p, i32 signext %b) nounwind {
define signext i32 @bset_i32_load(ptr %p, i32 signext %b) nounwind {
; RV64I-LABEL: bset_i32_load:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a0, 0(a0)
@@ -161,7 +161,7 @@ define signext i32 @bset_i32_load(i32* %p, i32 signext %b) nounwind {
; RV64ZBS-NEXT: bset a0, a0, a1
; RV64ZBS-NEXT: sext.w a0, a0
; RV64ZBS-NEXT: ret
%a = load i32, i32* %p
%a = load i32, ptr %p
%shl = shl i32 1, %b
%or = or i32 %shl, %a
ret i32 %or
@@ -273,7 +273,7 @@ define signext i32 @binv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
ret i32 %xor
}

define signext i32 @binv_i32_load(i32* %p, i32 signext %b) nounwind {
define signext i32 @binv_i32_load(ptr %p, i32 signext %b) nounwind {
; RV64I-LABEL: binv_i32_load:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a0, 0(a0)
@@ -288,7 +288,7 @@ define signext i32 @binv_i32_load(i32* %p, i32 signext %b) nounwind {
; RV64ZBS-NEXT: binv a0, a0, a1
; RV64ZBS-NEXT: sext.w a0, a0
; RV64ZBS-NEXT: ret
%a = load i32, i32* %p
%a = load i32, ptr %p
%shl = shl i32 1, %b
%xor = xor i32 %shl, %a
ret i32 %xor
16 changes: 7 additions & 9 deletions llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \
; RUN: | FileCheck %s -check-prefix=RV64IV

define <vscale x 1 x i64> @access_fixed_object(i64 *%val) {
define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
; RV64IV-LABEL: access_fixed_object:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -528
@@ -15,10 +15,9 @@ define <vscale x 1 x i64> @access_fixed_object(i64 *%val) {
; RV64IV-NEXT: ret
%local = alloca i64
%array = alloca [64 x i64]
%vptr = bitcast [64 x i64]* %array to <vscale x 1 x i64>*
%v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vptr
%len = load i64, i64* %local
store i64 %len, i64* %val
%v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %array
%len = load i64, ptr %local
store i64 %len, ptr %val
ret <vscale x 1 x i64> %v
}

@@ -28,7 +27,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
i64);

define <vscale x 1 x i64> @access_fixed_and_vector_objects(i64 *%val) {
define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-LABEL: access_fixed_and_vector_objects:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -528
@@ -52,10 +51,9 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(i64 *%val) {
%local = alloca i64
%vector = alloca <vscale x 1 x i64>
%array = alloca [64 x i64]
%vptr = bitcast [64 x i64]* %array to <vscale x 1 x i64>*
%v1 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vptr
%v1 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %array
%v2 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vector
%len = load i64, i64* %local
%len = load i64, ptr %local

%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
<vscale x 1 x i64> undef,
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll
@@ -268,7 +268,7 @@ define void @gpr_and_lmul1_and_2() nounwind {
%x1 = alloca i64
%v1 = alloca <vscale x 1 x i64>
%v2 = alloca <vscale x 2 x i64>
store volatile i64 3, i64* %x1
store volatile i64 3, ptr %x1
ret void
}

@@ -293,7 +293,7 @@ define void @gpr_and_lmul1_and_4() nounwind {
%x1 = alloca i64
%v1 = alloca <vscale x 1 x i64>
%v2 = alloca <vscale x 4 x i64>
store volatile i64 3, i64* %x1
store volatile i64 3, ptr %x1
ret void
}

10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll
@@ -2,17 +2,17 @@
; RUN: llc -mtriple=riscv32 -mattr=+d,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s

define void @combine_fp_zero_stores_crash(float* %ptr) {
define void @combine_fp_zero_stores_crash(ptr %ptr) {
; CHECK-LABEL: combine_fp_zero_stores_crash:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%addr1 = getelementptr float, float * %ptr, i64 1
%addr2 = getelementptr float, float * %ptr, i64 2
store float 0.000000e+00, float * %addr1, align 4
store float 0.000000e+00, float * %addr2, align 4
%addr1 = getelementptr float, ptr %ptr, i64 1
%addr2 = getelementptr float, ptr %ptr, i64 2
store float 0.000000e+00, ptr %addr1, align 4
store float 0.000000e+00, ptr %addr2, align 4
ret void
}
15 changes: 7 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -15,7 +15,7 @@
; visited while it wasn't "dead". At the point of visiting the constant, we
; crashed.

define void @constant_folding_crash(i8* %v54, <4 x <4 x i32>*> %lanes.a, <4 x <4 x i32>*> %lanes.b, <4 x i1> %sel) {
define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b, <4 x i1> %sel) {
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a0, 8(a0)
@@ -70,16 +70,15 @@ define void @constant_folding_crash(i8* %v54, <4 x <4 x i32>*> %lanes.a, <4 x <4
; RV64-NEXT: vse32.v v8, (a0), v0.t
; RV64-NEXT: ret
entry:
%sunkaddr = getelementptr i8, i8* %v54, i64 8
%v55 = bitcast i8* %sunkaddr to i64*
%v56 = load i64, i64* %v55, align 8
%sunkaddr = getelementptr i8, ptr %v54, i64 8
%v56 = load i64, ptr %sunkaddr, align 8
%trunc = and i64 %v56, 1
%cmp = icmp eq i64 %trunc, 0
%ptrs = select i1 %cmp, <4 x <4 x i32>*> %lanes.a, <4 x <4 x i32>*> %lanes.b
%v67 = extractelement <4 x <4 x i32>*> %ptrs, i64 0
%ptrs = select i1 %cmp, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b
%v67 = extractelement <4 x ptr> %ptrs, i64 0
%mask = shufflevector <4 x i1> %sel, <4 x i1> undef, <4 x i32> zeroinitializer
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> <i32 10, i32 10, i32 10, i32 10>, <4 x i32>* %v67, i32 16, <4 x i1> %mask)
call void @llvm.masked.store.v4i32.p0(<4 x i32> <i32 10, i32 10, i32 10, i32 10>, ptr %v67, i32 16, <4 x i1> %mask)
ret void
}

declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
133 changes: 63 additions & 70 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
@@ -2,128 +2,121 @@
; RUN: llc -mtriple riscv64 -mattr=+zve64x -riscv-v-vector-bits-min=128 < %s \
; RUN: | FileCheck %s

define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
define <8 x i8> @load_factor2(ptr %ptr) {
; CHECK-LABEL: load_factor2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
%1 = bitcast <16 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8> } %2, 1
ret <8 x i8> %4
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
ret <8 x i8> %3
}

define <8 x i8> @load_factor3(<24 x i8>* %ptr) {
define <8 x i8> @load_factor3(ptr %ptr) {
; CHECK-LABEL: load_factor3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <24 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
ret <8 x i8> %5
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
ret <8 x i8> %4
}

define <8 x i8> @load_factor4(<32 x i8>* %ptr) {
define <8 x i8> @load_factor4(ptr %ptr) {
; CHECK-LABEL: load_factor4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v5, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <32 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 3
ret <8 x i8> %6
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
ret <8 x i8> %5
}

define <8 x i8> @load_factor5(<40 x i8>* %ptr) {
define <8 x i8> @load_factor5(ptr %ptr) {
; CHECK-LABEL: load_factor5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v4, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <40 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 3
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 4
ret <8 x i8> %7
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 4
ret <8 x i8> %6
}

define <8 x i8> @load_factor6(<48 x i8>* %ptr) {
define <8 x i8> @load_factor6(ptr %ptr) {
; CHECK-LABEL: load_factor6:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v3, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <48 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 3
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 4
%8 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 5
ret <8 x i8> %8
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 4
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 5
ret <8 x i8> %7
}

define <8 x i8> @load_factor7(<56 x i8>* %ptr) {
define <8 x i8> @load_factor7(ptr %ptr) {
; CHECK-LABEL: load_factor7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v2, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <56 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 3
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 4
%8 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 5
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 6
ret <8 x i8> %9
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 4
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 5
%8 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 6
ret <8 x i8> %8
}

define <8 x i8> @load_factor8(<64 x i8>* %ptr) {
define <8 x i8> @load_factor8(ptr %ptr) {
; CHECK-LABEL: load_factor8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v1, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <64 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0i8.i64(i8* %1, i64 8)
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 0
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 1
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 2
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 3
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 4
%8 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 5
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 6
%10 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %2, 7
ret <8 x i8> %10
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
%5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
%6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 4
%7 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 5
%8 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 6
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 7
ret <8 x i8> %9
}
declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0i8.i64(i8*, i64)
declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr, i64)

Large diffs are not rendered by default.

48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1-RV64

define void @abs_v16i8(<16 x i8>* %x) {
define void @abs_v16i8(ptr %x) {
; CHECK-LABEL: abs_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
@@ -13,14 +13,14 @@ define void @abs_v16i8(<16 x i8>* %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
%a = load <16 x i8>, ptr %x
%b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
store <16 x i8> %b, <16 x i8>* %x
store <16 x i8> %b, ptr %x
ret void
}
declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)

define void @abs_v8i16(<8 x i16>* %x) {
define void @abs_v8i16(ptr %x) {
; CHECK-LABEL: abs_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
@@ -29,14 +29,14 @@ define void @abs_v8i16(<8 x i16>* %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
%a = load <8 x i16>, ptr %x
%b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
store <8 x i16> %b, <8 x i16>* %x
store <8 x i16> %b, ptr %x
ret void
}
declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)

define void @abs_v4i32(<4 x i32>* %x) {
define void @abs_v4i32(ptr %x) {
; CHECK-LABEL: abs_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -45,14 +45,14 @@ define void @abs_v4i32(<4 x i32>* %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%a = load <4 x i32>, ptr %x
%b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
store <4 x i32> %b, <4 x i32>* %x
store <4 x i32> %b, ptr %x
ret void
}
declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)

define void @abs_v2i64(<2 x i64>* %x) {
define void @abs_v2i64(ptr %x) {
; CHECK-LABEL: abs_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
@@ -61,14 +61,14 @@ define void @abs_v2i64(<2 x i64>* %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
%a = load <2 x i64>, ptr %x
%b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
store <2 x i64> %b, <2 x i64>* %x
store <2 x i64> %b, ptr %x
ret void
}
declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)

define void @abs_v32i8(<32 x i8>* %x) {
define void @abs_v32i8(ptr %x) {
; LMULMAX2-LABEL: abs_v32i8:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: li a1, 32
@@ -106,14 +106,14 @@ define void @abs_v32i8(<32 x i8>* %x) {
; LMULMAX1-RV64-NEXT: vse8.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse8.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <32 x i8>, <32 x i8>* %x
%a = load <32 x i8>, ptr %x
%b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
store <32 x i8> %b, <32 x i8>* %x
store <32 x i8> %b, ptr %x
ret void
}
declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)

define void @abs_v16i16(<16 x i16>* %x) {
define void @abs_v16i16(ptr %x) {
; LMULMAX2-LABEL: abs_v16i16:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
@@ -150,14 +150,14 @@ define void @abs_v16i16(<16 x i16>* %x) {
; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <16 x i16>, <16 x i16>* %x
%a = load <16 x i16>, ptr %x
%b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
store <16 x i16> %b, <16 x i16>* %x
store <16 x i16> %b, ptr %x
ret void
}
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)

define void @abs_v8i32(<8 x i32>* %x) {
define void @abs_v8i32(ptr %x) {
; LMULMAX2-LABEL: abs_v8i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
@@ -194,14 +194,14 @@ define void @abs_v8i32(<8 x i32>* %x) {
; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <8 x i32>, <8 x i32>* %x
%a = load <8 x i32>, ptr %x
%b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
store <8 x i32> %b, <8 x i32>* %x
store <8 x i32> %b, ptr %x
ret void
}
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)

define void @abs_v4i64(<4 x i64>* %x) {
define void @abs_v4i64(ptr %x) {
; LMULMAX2-LABEL: abs_v4i64:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
@@ -238,9 +238,9 @@ define void @abs_v4i64(<4 x i64>* %x) {
; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%a = load <4 x i64>, ptr %x
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
store <4 x i64> %b, <4 x i64>* %x
store <4 x i64> %b, ptr %x
ret void
}
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32,LMULMAX1-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64,LMULMAX1-RV64

define void @bitreverse_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
define void @bitreverse_v8i16(ptr %x, ptr %y) {
; RV32-LABEL: bitreverse_v8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
@@ -66,15 +66,15 @@ define void @bitreverse_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
%b = load <8 x i16>, <8 x i16>* %y
%a = load <8 x i16>, ptr %x
%b = load <8 x i16>, ptr %y
%c = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
store <8 x i16> %c, <8 x i16>* %x
store <8 x i16> %c, ptr %x
ret void
}
declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)

define void @bitreverse_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
define void @bitreverse_v4i32(ptr %x, ptr %y) {
; RV32-LABEL: bitreverse_v4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -152,15 +152,15 @@ define void @bitreverse_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%b = load <4 x i32>, <4 x i32>* %y
%a = load <4 x i32>, ptr %x
%b = load <4 x i32>, ptr %y
%c = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
store <4 x i32> %c, <4 x i32>* %x
store <4 x i32> %c, ptr %x
ret void
}
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)

define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
define void @bitreverse_v2i64(ptr %x, ptr %y) {
; RV32-LABEL: bitreverse_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
@@ -286,15 +286,15 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
%b = load <2 x i64>, <2 x i64>* %y
%a = load <2 x i64>, ptr %x
%b = load <2 x i64>, ptr %y
%c = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
store <2 x i64> %c, <2 x i64>* %x
store <2 x i64> %c, ptr %x
ret void
}
declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)

define void @bitreverse_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
define void @bitreverse_v16i16(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bitreverse_v16i16:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
@@ -460,15 +460,15 @@ define void @bitreverse_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <16 x i16>, <16 x i16>* %x
%b = load <16 x i16>, <16 x i16>* %y
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
store <16 x i16> %c, <16 x i16>* %x
store <16 x i16> %c, ptr %x
ret void
}
declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)

define void @bitreverse_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
define void @bitreverse_v8i32(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bitreverse_v8i32:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
@@ -678,15 +678,15 @@ define void @bitreverse_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <8 x i32>, <8 x i32>* %x
%b = load <8 x i32>, <8 x i32>* %y
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
store <8 x i32> %c, <8 x i32>* %x
store <8 x i32> %c, ptr %x
ret void
}
declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)

define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
define void @bitreverse_v4i64(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bitreverse_v4i64:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
@@ -1016,10 +1016,10 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v9, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = load <4 x i64>, <4 x i64>* %y
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
store <4 x i64> %c, <4 x i64>* %x
store <4 x i64> %c, ptr %x
ret void
}
declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1-RV64

define void @bswap_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
define void @bswap_v8i16(ptr %x, ptr %y) {
; CHECK-LABEL: bswap_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
@@ -14,15 +14,15 @@ define void @bswap_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
%b = load <8 x i16>, <8 x i16>* %y
%a = load <8 x i16>, ptr %x
%b = load <8 x i16>, ptr %y
%c = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %a)
store <8 x i16> %c, <8 x i16>* %x
store <8 x i16> %c, ptr %x
ret void
}
declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)

define void @bswap_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
define void @bswap_v4i32(ptr %x, ptr %y) {
; RV32-LABEL: bswap_v4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -58,15 +58,15 @@ define void @bswap_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%b = load <4 x i32>, <4 x i32>* %y
%a = load <4 x i32>, ptr %x
%b = load <4 x i32>, ptr %y
%c = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a)
store <4 x i32> %c, <4 x i32>* %x
store <4 x i32> %c, ptr %x
ret void
}
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)

define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
define void @bswap_v2i64(ptr %x, ptr %y) {
; RV32-LABEL: bswap_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
@@ -141,15 +141,15 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
%b = load <2 x i64>, <2 x i64>* %y
%a = load <2 x i64>, ptr %x
%b = load <2 x i64>, ptr %y
%c = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %a)
store <2 x i64> %c, <2 x i64>* %x
store <2 x i64> %c, ptr %x
ret void
}
declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)

define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
define void @bswap_v16i16(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bswap_v16i16:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
@@ -201,15 +201,15 @@ define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX1-RV64-NEXT: vse16.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse16.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <16 x i16>, <16 x i16>* %x
%b = load <16 x i16>, <16 x i16>* %y
%a = load <16 x i16>, ptr %x
%b = load <16 x i16>, ptr %y
%c = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %a)
store <16 x i16> %c, <16 x i16>* %x
store <16 x i16> %c, ptr %x
ret void
}
declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)

define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
define void @bswap_v8i32(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bswap_v8i32:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
@@ -305,15 +305,15 @@ define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX1-RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse32.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <8 x i32>, <8 x i32>* %x
%b = load <8 x i32>, <8 x i32>* %y
%a = load <8 x i32>, ptr %x
%b = load <8 x i32>, ptr %y
%c = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %a)
store <8 x i32> %c, <8 x i32>* %x
store <8 x i32> %c, ptr %x
ret void
}
declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)

define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
define void @bswap_v4i64(ptr %x, ptr %y) {
; LMULMAX2-RV32-LABEL: bswap_v4i64:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
@@ -511,10 +511,10 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = load <4 x i64>, <4 x i64>* %y
%a = load <4 x i64>, ptr %x
%b = load <4 x i64>, ptr %y
%c = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %a)
store <4 x i64> %c, <4 x i64>* %x
store <4 x i64> %c, ptr %x
ret void
}
declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -2,37 +2,37 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4

define fastcc <4 x i8> @ret_v4i8(<4 x i8>* %p) {
define fastcc <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: ret
%v = load <4 x i8>, <4 x i8>* %p
%v = load <4 x i8>, ptr %p
ret <4 x i8> %v
}

define fastcc <4 x i32> @ret_v4i32(<4 x i32>* %p) {
define fastcc <4 x i32> @ret_v4i32(ptr %p) {
; CHECK-LABEL: ret_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: ret
%v = load <4 x i32>, <4 x i32>* %p
%v = load <4 x i32>, ptr %p
ret <4 x i32> %v
}

define fastcc <8 x i32> @ret_v8i32(<8 x i32>* %p) {
define fastcc <8 x i32> @ret_v8i32(ptr %p) {
; CHECK-LABEL: ret_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: ret
%v = load <8 x i32>, <8 x i32>* %p
%v = load <8 x i32>, ptr %p
ret <8 x i32> %v
}

define fastcc <16 x i64> @ret_v16i64(<16 x i64>* %p) {
define fastcc <16 x i64> @ret_v16i64(ptr %p) {
; LMULMAX8-LABEL: ret_v16i64:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma
@@ -46,33 +46,33 @@ define fastcc <16 x i64> @ret_v16i64(<16 x i64>* %p) {
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle64.v v12, (a0)
; LMULMAX4-NEXT: ret
%v = load <16 x i64>, <16 x i64>* %p
%v = load <16 x i64>, ptr %p
ret <16 x i64> %v
}

define fastcc <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) {
define fastcc <8 x i1> @ret_mask_v8i1(ptr %p) {
; CHECK-LABEL: ret_mask_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: ret
%v = load <8 x i1>, <8 x i1>* %p
%v = load <8 x i1>, ptr %p
ret <8 x i1> %v
}

define fastcc <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
define fastcc <32 x i1> @ret_mask_v32i1(ptr %p) {
; CHECK-LABEL: ret_mask_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: ret
%v = load <32 x i1>, <32 x i1>* %p
%v = load <32 x i1>, ptr %p
ret <32 x i1> %v
}

; Return the vector via registers v8-v23
define fastcc <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
define fastcc <64 x i32> @ret_split_v64i32(ptr %x) {
; LMULMAX8-LABEL: ret_split_v64i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a1, 32
@@ -93,12 +93,12 @@ define fastcc <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
; LMULMAX4-NEXT: addi a0, a0, 192
; LMULMAX4-NEXT: vle32.v v20, (a0)
; LMULMAX4-NEXT: ret
%v = load <64 x i32>, <64 x i32>* %x
%v = load <64 x i32>, ptr %x
ret <64 x i32> %v
}

; Return the vector fully via the stack
define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
define fastcc <128 x i32> @ret_split_v128i32(ptr %x) {
; LMULMAX8-LABEL: ret_split_v128i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a2, a1, 128
@@ -153,7 +153,7 @@ define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX4-NEXT: ret
%v = load <128 x i32>, <128 x i32>* %x
%v = load <128 x i32>, ptr %x
ret <128 x i32> %v
}
