diff --git a/llvm/test/CodeGen/AArch64/128bit_load_store.ll b/llvm/test/CodeGen/AArch64/128bit_load_store.ll index 38d30dba4b8ce2..ee092bc4cb7d26 100644 --- a/llvm/test/CodeGen/AArch64/128bit_load_store.ll +++ b/llvm/test/CodeGen/AArch64/128bit_load_store.ll @@ -1,53 +1,49 @@ ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=neon | FileCheck %s -define void @test_store_f128(fp128* %ptr, fp128 %val) #0 { +define void @test_store_f128(ptr %ptr, fp128 %val) #0 { ; CHECK-LABEL: test_store_f128 ; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}] entry: - store fp128 %val, fp128* %ptr, align 16 + store fp128 %val, ptr %ptr, align 16 ret void } -define fp128 @test_load_f128(fp128* readonly %ptr) #2 { +define fp128 @test_load_f128(ptr readonly %ptr) #2 { ; CHECK-LABEL: test_load_f128 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}] entry: - %0 = load fp128, fp128* %ptr, align 16 + %0 = load fp128, ptr %ptr, align 16 ret fp128 %0 } -define void @test_vstrq_p128(i128* %ptr, i128 %val) #0 { +define void @test_vstrq_p128(ptr %ptr, i128 %val) #0 { ; CHECK-LABEL: test_vstrq_p128 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}] entry: - %0 = bitcast i128* %ptr to fp128* - %1 = bitcast i128 %val to fp128 - store fp128 %1, fp128* %0, align 16 + %0 = bitcast i128 %val to fp128 + store fp128 %0, ptr %ptr, align 16 ret void } -define i128 @test_vldrq_p128(i128* readonly %ptr) #2 { +define i128 @test_vldrq_p128(ptr readonly %ptr) #2 { ; CHECK-LABEL: test_vldrq_p128 ; CHECK: ldp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}] entry: - %0 = bitcast i128* %ptr to fp128* - %1 = load fp128, fp128* %0, align 16 - %2 = bitcast fp128 %1 to i128 - ret i128 %2 + %0 = load fp128, ptr %ptr, align 16 + %1 = bitcast fp128 %0 to i128 + ret i128 %1 } -define void @test_ld_st_p128(i128* nocapture %ptr) #0 { +define void @test_ld_st_p128(ptr nocapture %ptr) #0 { ; CHECK-LABEL: test_ld_st_p128 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}] ; CHECK-NEXT: str {{q[0-9]+}}, [{{x[0-9]+}}, #16] entry: - %0 = bitcast i128* %ptr to fp128* - %1 = load fp128, fp128* %0, align 16 - %add.ptr = getelementptr inbounds i128, i128* %ptr, i64 1 - %2 = bitcast i128* %add.ptr to fp128* - store fp128 %1, fp128* %2, align 16 + %0 = load fp128, ptr %ptr, align 16 + %add.ptr = getelementptr inbounds i128, ptr %ptr, i64 1 + store fp128 %0, ptr %add.ptr, align 16 ret void } diff --git a/llvm/test/CodeGen/AArch64/2s-complement-asm.ll b/llvm/test/CodeGen/AArch64/2s-complement-asm.ll index cf646d13602042..b58515c497c327 100644 --- a/llvm/test/CodeGen/AArch64/2s-complement-asm.ll +++ b/llvm/test/CodeGen/AArch64/2s-complement-asm.ll @@ -4,6 +4,6 @@ ; CHECK: 0000002a 59ed145d @other = global i32 42 @var = global i32 sub(i32 646102975, - i32 add (i32 trunc(i64 sub(i64 ptrtoint(i32* @var to i64), - i64 ptrtoint(i32* @other to i64)) to i32), + i32 add (i32 trunc(i64 sub(i64 ptrtoint(ptr @var to i64), + i64 ptrtoint(ptr @other to i64)) to i32), i32 3432360802)) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll b/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll index abc2cae35a9e2b..37c0ea45e8c406 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll @@ -7,7 +7,7 @@ declare void @may_throw() ; This test checks that the widened G_CONSTANT operand to the phi in "continue" bb ; is placed before the potentially throwing call in the entry block. 
-define i1 @test_lpad_phi_widen_into_pred() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i1 @test_lpad_phi_widen_into_pred() personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: name: test_lpad_phi_widen_into_pred ; CHECK: bb.1 (%ir-block.0): ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) @@ -42,15 +42,15 @@ define i1 @test_lpad_phi_widen_into_pred() personality i8* bitcast (i32 (...)* @ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]] ; CHECK-NEXT: $w0 = COPY [[AND]](s32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 - store i32 42, i32* @global_var + store i32 42, ptr @global_var invoke void @may_throw() to label %continue unwind label %lpad lpad: ; preds = %entry %p = phi i32 [ 11, %0 ] - %1 = landingpad { i8*, i32 } - catch i8* null - store i32 %p, i32* @global_var + %1 = landingpad { ptr, i32 } + catch ptr null + store i32 %p, ptr @global_var br label %continue continue: ; preds = %entry, %lpad @@ -59,7 +59,7 @@ continue: ; preds = %entry, %lpad } ; Same test but with extensions. -define i1 @test_lpad_phi_widen_into_pred_ext(i1 *%ptr) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i1 @test_lpad_phi_widen_into_pred_ext(ptr %ptr) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: name: test_lpad_phi_widen_into_pred_ext ; CHECK: bb.1 (%ir-block.0): ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) @@ -98,16 +98,16 @@ define i1 @test_lpad_phi_widen_into_pred_ext(i1 *%ptr) personality i8* bitcast ( ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]] ; CHECK-NEXT: $w0 = COPY [[AND]](s32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 - store i32 42, i32* @global_var - %v = load i1, i1* %ptr + store i32 42, ptr @global_var + %v = load i1, ptr %ptr invoke void @may_throw() to label %continue unwind label %lpad lpad: ; preds = %entry %p = phi i32 [ 11, %0 ] - %1 = landingpad { i8*, i32 } - catch i8* null - store i32 %p, i32* @global_var + %1 = landingpad { ptr, i32 } + catch ptr null + store i32 %p, ptr @global_var br label %continue continue: ; preds = %entry, %lpad diff --git a/llvm/test/CodeGen/AArch64/PBQP-chain.ll b/llvm/test/CodeGen/AArch64/PBQP-chain.ll index 3e5fa741c243ab..10b299f6afa799 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-chain.ll +++ b/llvm/test/CodeGen/AArch64/PBQP-chain.ll @@ -20,85 +20,85 @@ target triple = "aarch64" ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}} ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}} ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}} -define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapture %x, double* nocapture %y) { +define void @fir(ptr nocapture %rx, ptr nocapture %ry, ptr nocapture %c, ptr nocapture %x, ptr nocapture %y) { entry: - %0 = load double, double* %c, align 8 - %1 = load double, double* %x, align 8 + %0 = load double, ptr %c, align 8 + %1 = load double, ptr %x, align 8 %mul = fmul fast double %1, %0 - %2 = load double, double* %y, align 8 + %2 = load double, ptr %y, align 8 %mul7 = fmul fast double %2, %0 - %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1 - %3 = load double, double* %arrayidx.1, align 8 - %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1 - %4 = load double, double* %arrayidx2.1, align 8 + %arrayidx.1 = getelementptr inbounds double, ptr %c, i64 1 + %3 = load double, ptr %arrayidx.1, align 8 + %arrayidx2.1 = getelementptr inbounds 
double, ptr %x, i64 1 + %4 = load double, ptr %arrayidx2.1, align 8 %mul.1 = fmul fast double %4, %3 %add.1 = fadd fast double %mul.1, %mul - %arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1 - %5 = load double, double* %arrayidx6.1, align 8 + %arrayidx6.1 = getelementptr inbounds double, ptr %y, i64 1 + %5 = load double, ptr %arrayidx6.1, align 8 %mul7.1 = fmul fast double %5, %3 %add8.1 = fadd fast double %mul7.1, %mul7 - %arrayidx.2 = getelementptr inbounds double, double* %c, i64 2 - %6 = load double, double* %arrayidx.2, align 8 - %arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2 - %7 = load double, double* %arrayidx2.2, align 8 + %arrayidx.2 = getelementptr inbounds double, ptr %c, i64 2 + %6 = load double, ptr %arrayidx.2, align 8 + %arrayidx2.2 = getelementptr inbounds double, ptr %x, i64 2 + %7 = load double, ptr %arrayidx2.2, align 8 %mul.2 = fmul fast double %7, %6 %add.2 = fadd fast double %mul.2, %add.1 - %arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2 - %8 = load double, double* %arrayidx6.2, align 8 + %arrayidx6.2 = getelementptr inbounds double, ptr %y, i64 2 + %8 = load double, ptr %arrayidx6.2, align 8 %mul7.2 = fmul fast double %8, %6 %add8.2 = fadd fast double %mul7.2, %add8.1 - %arrayidx.3 = getelementptr inbounds double, double* %c, i64 3 - %9 = load double, double* %arrayidx.3, align 8 - %arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3 - %10 = load double, double* %arrayidx2.3, align 8 + %arrayidx.3 = getelementptr inbounds double, ptr %c, i64 3 + %9 = load double, ptr %arrayidx.3, align 8 + %arrayidx2.3 = getelementptr inbounds double, ptr %x, i64 3 + %10 = load double, ptr %arrayidx2.3, align 8 %mul.3 = fmul fast double %10, %9 %add.3 = fadd fast double %mul.3, %add.2 - %arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3 - %11 = load double, double* %arrayidx6.3, align 8 + %arrayidx6.3 = getelementptr inbounds double, ptr %y, i64 3 + %11 = load double, ptr %arrayidx6.3, align 8 %mul7.3 = fmul fast double %11, %9 %add8.3 = fadd fast double %mul7.3, %add8.2 - %arrayidx.4 = getelementptr inbounds double, double* %c, i64 4 - %12 = load double, double* %arrayidx.4, align 8 - %arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4 - %13 = load double, double* %arrayidx2.4, align 8 + %arrayidx.4 = getelementptr inbounds double, ptr %c, i64 4 + %12 = load double, ptr %arrayidx.4, align 8 + %arrayidx2.4 = getelementptr inbounds double, ptr %x, i64 4 + %13 = load double, ptr %arrayidx2.4, align 8 %mul.4 = fmul fast double %13, %12 %add.4 = fadd fast double %mul.4, %add.3 - %arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4 - %14 = load double, double* %arrayidx6.4, align 8 + %arrayidx6.4 = getelementptr inbounds double, ptr %y, i64 4 + %14 = load double, ptr %arrayidx6.4, align 8 %mul7.4 = fmul fast double %14, %12 %add8.4 = fadd fast double %mul7.4, %add8.3 - %arrayidx.5 = getelementptr inbounds double, double* %c, i64 5 - %15 = load double, double* %arrayidx.5, align 8 - %arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5 - %16 = load double, double* %arrayidx2.5, align 8 + %arrayidx.5 = getelementptr inbounds double, ptr %c, i64 5 + %15 = load double, ptr %arrayidx.5, align 8 + %arrayidx2.5 = getelementptr inbounds double, ptr %x, i64 5 + %16 = load double, ptr %arrayidx2.5, align 8 %mul.5 = fmul fast double %16, %15 %add.5 = fadd fast double %mul.5, %add.4 - %arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5 - %17 = load double, double* %arrayidx6.5, align 
8 + %arrayidx6.5 = getelementptr inbounds double, ptr %y, i64 5 + %17 = load double, ptr %arrayidx6.5, align 8 %mul7.5 = fmul fast double %17, %15 %add8.5 = fadd fast double %mul7.5, %add8.4 - %arrayidx.6 = getelementptr inbounds double, double* %c, i64 6 - %18 = load double, double* %arrayidx.6, align 8 - %arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6 - %19 = load double, double* %arrayidx2.6, align 8 + %arrayidx.6 = getelementptr inbounds double, ptr %c, i64 6 + %18 = load double, ptr %arrayidx.6, align 8 + %arrayidx2.6 = getelementptr inbounds double, ptr %x, i64 6 + %19 = load double, ptr %arrayidx2.6, align 8 %mul.6 = fmul fast double %19, %18 %add.6 = fadd fast double %mul.6, %add.5 - %arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6 - %20 = load double, double* %arrayidx6.6, align 8 + %arrayidx6.6 = getelementptr inbounds double, ptr %y, i64 6 + %20 = load double, ptr %arrayidx6.6, align 8 %mul7.6 = fmul fast double %20, %18 %add8.6 = fadd fast double %mul7.6, %add8.5 - %arrayidx.7 = getelementptr inbounds double, double* %c, i64 7 - %21 = load double, double* %arrayidx.7, align 8 - %arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7 - %22 = load double, double* %arrayidx2.7, align 8 + %arrayidx.7 = getelementptr inbounds double, ptr %c, i64 7 + %21 = load double, ptr %arrayidx.7, align 8 + %arrayidx2.7 = getelementptr inbounds double, ptr %x, i64 7 + %22 = load double, ptr %arrayidx2.7, align 8 %mul.7 = fmul fast double %22, %21 %add.7 = fadd fast double %mul.7, %add.6 - %arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7 - %23 = load double, double* %arrayidx6.7, align 8 + %arrayidx6.7 = getelementptr inbounds double, ptr %y, i64 7 + %23 = load double, ptr %arrayidx6.7, align 8 %mul7.7 = fmul fast double %23, %21 %add8.7 = fadd fast double %mul7.7, %add8.6 - store double %add.7, double* %rx, align 8 - store double %add8.7, double* %ry, align 8 + store double %add.7, ptr %rx, align 8 + store double %add8.7, ptr %ry, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll index bd50b2d84b7427..6c9e250af0886b 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll +++ b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll @@ -1,13 +1,13 @@ ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a57 -mattr=+neon -fp-contract=fast -regalloc=pbqp -pbqp-coalescing | FileCheck %s ; CHECK-LABEL: test: -define i32 @test(i32 %acc, i32* nocapture readonly %c) { +define i32 @test(i32 %acc, ptr nocapture readonly %c) { entry: - %0 = load i32, i32* %c, align 4 + %0 = load i32, ptr %c, align 4 ; CHECK-NOT: mov w{{[0-9]*}}, w0 %add = add nsw i32 %0, %acc - %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 1 - %1 = load i32, i32* %arrayidx1, align 4 + %arrayidx1 = getelementptr inbounds i32, ptr %c, i64 1 + %1 = load i32, ptr %arrayidx1, align 4 %add2 = add nsw i32 %add, %1 ret i32 %add2 } diff --git a/llvm/test/CodeGen/AArch64/Redundantstore.ll b/llvm/test/CodeGen/AArch64/Redundantstore.ll index b7822a882b4aba..6fec5573fdcb12 100644 --- a/llvm/test/CodeGen/AArch64/Redundantstore.ll +++ b/llvm/test/CodeGen/AArch64/Redundantstore.ll @@ -1,25 +1,23 @@ ; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" -@end_of_array = common global i8* null, align 8 +@end_of_array = common global ptr null, align 8 ; CHECK-LABEL: @test ; CHECK: stur ; CHECK-NOT: stur -define i8* 
@test(i32 %size) { +define ptr @test(i32 %size) { entry: - %0 = load i8*, i8** @end_of_array, align 8 + %0 = load ptr, ptr @end_of_array, align 8 %conv = sext i32 %size to i64 %and = and i64 %conv, -8 %conv2 = trunc i64 %and to i32 %add.ptr.sum = add nsw i64 %and, -4 - %add.ptr3 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum - %size4 = bitcast i8* %add.ptr3 to i32* - store i32 %conv2, i32* %size4, align 4 + %add.ptr3 = getelementptr inbounds i8, ptr %0, i64 %add.ptr.sum + store i32 %conv2, ptr %add.ptr3, align 4 %add.ptr.sum9 = add nsw i64 %and, -4 - %add.ptr5 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum9 - %size6 = bitcast i8* %add.ptr5 to i32* - store i32 %conv2, i32* %size6, align 4 - ret i8* %0 + %add.ptr5 = getelementptr inbounds i8, ptr %0, i64 %add.ptr.sum9 + store i32 %conv2, ptr %add.ptr5, align 4 + ret ptr %0 } diff --git a/llvm/test/CodeGen/AArch64/a57-csel.ll b/llvm/test/CodeGen/AArch64/a57-csel.ll index 3c99a90fe28a03..b8df1d9eaa9359 100644 --- a/llvm/test/CodeGen/AArch64/a57-csel.ll +++ b/llvm/test/CodeGen/AArch64/a57-csel.ll @@ -2,9 +2,9 @@ ; Check that the select isn't expanded into a branch sequence ; when the icmp's first operand %x0 is from load. -define i64 @f(i64 %a, i64 %b, i64* %c, i64 %d, i64 %e) { +define i64 @f(i64 %a, i64 %b, ptr %c, i64 %d, i64 %e) { ; CHECK: csel - %x0 = load i64, i64* %c + %x0 = load i64, ptr %c %x1 = icmp eq i64 %x0, 0 %x2 = select i1 %x1, i64 %a, i64 %b %x3 = add i64 %x2, %d diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll index 0b48bb62851e8f..0c1776e61a4d41 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll @@ -8,11 +8,11 @@ entry: br label %for.body, !dbg !39 for.body: ; preds = %for.body, %entry - %arrayidx5 = getelementptr inbounds i32, i32* null, i64 1, !dbg !43 - %0 = load i32, i32* null, align 4, !dbg !45, !tbaa !46 + %arrayidx5 = getelementptr inbounds i32, ptr null, i64 1, !dbg !43 + %0 = load i32, ptr null, align 4, !dbg !45, !tbaa !46 %s1 = sub nsw i32 0, %0, !dbg !50 %n1 = sext i32 %s1 to i64, !dbg !50 - %arrayidx21 = getelementptr inbounds i32, i32* null, i64 3, !dbg !51 + %arrayidx21 = getelementptr inbounds i32, ptr null, i64 3, !dbg !51 %add53 = add nsw i64 %n1, 0, !dbg !52 %add55 = add nsw i64 %n1, 0, !dbg !53 %mul63 = mul nsw i64 %add53, -20995, !dbg !54 @@ -24,13 +24,13 @@ for.body: ; preds = %for.body, %entry %add82 = add i64 %add81, 0, !dbg !58 %shr83351 = lshr i64 %add82, 11, !dbg !58 %conv84 = trunc i64 %shr83351 to i32, !dbg !58 - store i32 %conv84, i32* %arrayidx21, align 4, !dbg !58, !tbaa !46 + store i32 %conv84, ptr %arrayidx21, align 4, !dbg !58, !tbaa !46 %add86 = add i64 0, 1024, !dbg !59 %add87 = add i64 %add86, 0, !dbg !59 %add88 = add i64 %add87, %add67, !dbg !59 %shr89352 = lshr i64 %add88, 11, !dbg !59 %n2 = trunc i64 %shr89352 to i32, !dbg !59 - store i32 %n2, i32* %arrayidx5, align 4, !dbg !59, !tbaa !46 + store i32 %n2, ptr %arrayidx5, align 4, !dbg !59, !tbaa !46 br label %for.body, !dbg !39 } diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll index b2ee517f886813..9e27e9add6f02d 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll @@ -9,8 +9,8 @@ define void @foo() { entry: ;CHECK-LABEL: foo: ;CHECK: 
__floatsisf - %0 = load i32, i32* @x, align 4 + %0 = load i32, ptr @x, align 4 %conv = sitofp i32 %0 to float - store float %conv, float* bitcast (i32* @t to float*), align 4 + store float %conv, ptr @t, align 4 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll index 043ce0933a9b86..136093bb960c0d 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll @@ -3,38 +3,36 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" -declare void @extern(i8*) +declare void @extern(ptr) ; Function Attrs: argmemonly nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0 +declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #0 ; Function Attrs: nounwind -define void @func(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 { +define void @func(ptr noalias %arg, ptr noalias %arg1, ptr noalias %arg2, ptr noalias %arg3) #1 { bb: - %tmp = getelementptr inbounds i8, i8* %arg2, i64 88 - tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false) - store i8 0, i8* %arg3 - store i8 2, i8* %arg2 - store float 0.000000e+00, float* %arg - %tmp4 = bitcast i8* %tmp to <4 x float>* - store volatile <4 x float> zeroinitializer, <4 x float>* %tmp4 - store i32 5, i32* %arg1 - tail call void @extern(i8* %tmp) + %tmp = getelementptr inbounds i8, ptr %arg2, i64 88 + tail call void @llvm.memset.p0.i64(ptr align 8 noalias %arg2, i8 0, i64 40, i1 false) + store i8 0, ptr %arg3 + store i8 2, ptr %arg2 + store float 0.000000e+00, ptr %arg + store volatile <4 x float> zeroinitializer, ptr %tmp + store i32 5, ptr %arg1 + tail call void @extern(ptr %tmp) ret void } ; Function Attrs: nounwind -define void @func2(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 { +define void @func2(ptr noalias %arg, ptr noalias %arg1, ptr noalias %arg2, ptr noalias %arg3) #1 { bb: - %tmp = getelementptr inbounds i8, i8* %arg2, i64 88 - tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false) - store i8 0, i8* %arg3 - store i8 2, i8* %arg2 - store float 0.000000e+00, float* %arg - %tmp4 = bitcast i8* %tmp to <4 x float>* - store <4 x float> zeroinitializer, <4 x float>* %tmp4 - store i32 5, i32* %arg1 - tail call void @extern(i8* %tmp) + %tmp = getelementptr inbounds i8, ptr %arg2, i64 88 + tail call void @llvm.memset.p0.i64(ptr align 8 noalias %arg2, i8 0, i64 40, i1 false) + store i8 0, ptr %arg3 + store i8 2, ptr %arg2 + store float 0.000000e+00, ptr %arg + store <4 x float> zeroinitializer, ptr %tmp + store i32 5, ptr %arg1 + tail call void @extern(ptr %tmp) ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll index 4a126113d93711..e90fa3044aa223 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll @@ -33,17 +33,17 @@ target triple = "aarch64" ; CHECK: fmadd [[x]] ; CHECK: str [[x]] -define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 { +define void @f1(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load double, double* %p, align 8 - %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double, double* %arrayidx1, 
align 8 - %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double, double* %arrayidx2, align 8 - %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double, double* %arrayidx3, align 8 - %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double, double* %arrayidx4, align 8 + %0 = load double, ptr %p, align 8 + %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1 + %1 = load double, ptr %arrayidx1, align 8 + %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2 + %2 = load double, ptr %arrayidx2, align 8 + %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3 + %3 = load double, ptr %arrayidx3, align 8 + %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4 + %4 = load double, ptr %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -52,20 +52,20 @@ entry: %sub = fsub fast double %add6, %mul7 %mul8 = fmul fast double %2, %3 %add9 = fadd fast double %mul8, %sub - store double %add9, double* %q, align 8 - %arrayidx11 = getelementptr inbounds double, double* %p, i64 5 - %5 = load double, double* %arrayidx11, align 8 - %arrayidx12 = getelementptr inbounds double, double* %p, i64 6 - %6 = load double, double* %arrayidx12, align 8 - %arrayidx13 = getelementptr inbounds double, double* %p, i64 7 - %7 = load double, double* %arrayidx13, align 8 + store double %add9, ptr %q, align 8 + %arrayidx11 = getelementptr inbounds double, ptr %p, i64 5 + %5 = load double, ptr %arrayidx11, align 8 + %arrayidx12 = getelementptr inbounds double, ptr %p, i64 6 + %6 = load double, ptr %arrayidx12, align 8 + %arrayidx13 = getelementptr inbounds double, ptr %p, i64 7 + %7 = load double, ptr %arrayidx13, align 8 %mul15 = fmul fast double %6, %7 %mul16 = fmul fast double %0, %5 %add17 = fadd fast double %mul16, %mul15 %mul18 = fmul fast double %5, %6 %add19 = fadd fast double %mul18, %add17 - %arrayidx20 = getelementptr inbounds double, double* %q, i64 1 - store double %add19, double* %arrayidx20, align 8 + %arrayidx20 = getelementptr inbounds double, ptr %q, i64 1 + store double %add19, ptr %arrayidx20, align 8 ret void } @@ -85,23 +85,23 @@ entry: ; CHECK-A53-DAG: str [[x]] ; CHECK-A53-DAG: str [[y]] -define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 { +define void @f2(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load double, double* %p, align 8 - %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double, double* %arrayidx1, align 8 - %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double, double* %arrayidx2, align 8 - %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double, double* %arrayidx3, align 8 - %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double, double* %arrayidx4, align 8 - %arrayidx5 = getelementptr inbounds double, double* %p, i64 5 - %5 = load double, double* %arrayidx5, align 8 - %arrayidx6 = getelementptr inbounds double, double* %p, i64 6 - %6 = load double, double* %arrayidx6, align 8 - %arrayidx7 = getelementptr inbounds double, double* %p, i64 7 - %7 = load double, double* %arrayidx7, align 8 + %0 = load double, ptr %p, align 8 + %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1 + %1 = load double, ptr %arrayidx1, align 8 + %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2 + %2 = load double, ptr %arrayidx2, align 8 + %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3 + %3 = load 
double, ptr %arrayidx3, align 8 + %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4 + %4 = load double, ptr %arrayidx4, align 8 + %arrayidx5 = getelementptr inbounds double, ptr %p, i64 5 + %5 = load double, ptr %arrayidx5, align 8 + %arrayidx6 = getelementptr inbounds double, ptr %p, i64 6 + %6 = load double, ptr %arrayidx6, align 8 + %arrayidx7 = getelementptr inbounds double, ptr %p, i64 7 + %7 = load double, ptr %arrayidx7, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %7 %mul8 = fmul fast double %5, %6 @@ -115,9 +115,9 @@ entry: %add15 = fadd fast double %mul14, %add12 %mul16 = fmul fast double %2, %3 %add17 = fadd fast double %mul16, %sub - store double %add17, double* %q, align 8 - %arrayidx19 = getelementptr inbounds double, double* %q, i64 1 - store double %add15, double* %arrayidx19, align 8 + store double %add17, ptr %q, align 8 + %arrayidx19 = getelementptr inbounds double, ptr %q, i64 1 + store double %add15, ptr %arrayidx19, align 8 ret void } @@ -131,17 +131,17 @@ entry: ; CHECK: fmadd [[y:d[0-9]+]], {{.*}}, [[x]] ; CHECK: str [[y]] -define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 { +define void @f3(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load double, double* %p, align 8 - %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double, double* %arrayidx1, align 8 - %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double, double* %arrayidx2, align 8 - %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double, double* %arrayidx3, align 8 - %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double, double* %arrayidx4, align 8 + %0 = load double, ptr %p, align 8 + %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1 + %1 = load double, ptr %arrayidx1, align 8 + %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2 + %2 = load double, ptr %arrayidx2, align 8 + %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3 + %3 = load double, ptr %arrayidx3, align 8 + %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4 + %4 = load double, ptr %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -154,11 +154,11 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - tail call void bitcast (void (...)* @g to void ()*)() #2 + tail call void @g() #2 br label %if.end if.end: ; preds = %if.then, %entry - store double %add9, double* %q, align 8 + store double %add9, ptr %q, align 8 ret void } @@ -180,23 +180,23 @@ declare void @g(...) 
#1 ; CHECK-A53-DAG: str [[x]] ; CHECK-A53-DAG: str [[y]] -define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 { +define void @f4(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load float, float* %p, align 4 - %arrayidx1 = getelementptr inbounds float, float* %p, i64 1 - %1 = load float, float* %arrayidx1, align 4 - %arrayidx2 = getelementptr inbounds float, float* %p, i64 2 - %2 = load float, float* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds float, float* %p, i64 3 - %3 = load float, float* %arrayidx3, align 4 - %arrayidx4 = getelementptr inbounds float, float* %p, i64 4 - %4 = load float, float* %arrayidx4, align 4 - %arrayidx5 = getelementptr inbounds float, float* %p, i64 5 - %5 = load float, float* %arrayidx5, align 4 - %arrayidx6 = getelementptr inbounds float, float* %p, i64 6 - %6 = load float, float* %arrayidx6, align 4 - %arrayidx7 = getelementptr inbounds float, float* %p, i64 7 - %7 = load float, float* %arrayidx7, align 4 + %0 = load float, ptr %p, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %p, i64 1 + %1 = load float, ptr %arrayidx1, align 4 + %arrayidx2 = getelementptr inbounds float, ptr %p, i64 2 + %2 = load float, ptr %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds float, ptr %p, i64 3 + %3 = load float, ptr %arrayidx3, align 4 + %arrayidx4 = getelementptr inbounds float, ptr %p, i64 4 + %4 = load float, ptr %arrayidx4, align 4 + %arrayidx5 = getelementptr inbounds float, ptr %p, i64 5 + %5 = load float, ptr %arrayidx5, align 4 + %arrayidx6 = getelementptr inbounds float, ptr %p, i64 6 + %6 = load float, ptr %arrayidx6, align 4 + %arrayidx7 = getelementptr inbounds float, ptr %p, i64 7 + %7 = load float, ptr %arrayidx7, align 4 %mul = fmul fast float %0, %1 %add = fadd fast float %mul, %7 %mul8 = fmul fast float %5, %6 @@ -210,9 +210,9 @@ entry: %add15 = fadd fast float %mul14, %add12 %mul16 = fmul fast float %2, %3 %add17 = fadd fast float %mul16, %sub - store float %add17, float* %q, align 4 - %arrayidx19 = getelementptr inbounds float, float* %q, i64 1 - store float %add15, float* %arrayidx19, align 4 + store float %add17, ptr %q, align 4 + %arrayidx19 = getelementptr inbounds float, ptr %q, i64 1 + store float %add15, ptr %arrayidx19, align 4 ret void } @@ -226,17 +226,17 @@ entry: ; CHECK: fmadd [[y:s[0-9]+]], {{.*}}, [[x]] ; CHECK: str [[y]] -define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 { +define void @f5(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load float, float* %p, align 4 - %arrayidx1 = getelementptr inbounds float, float* %p, i64 1 - %1 = load float, float* %arrayidx1, align 4 - %arrayidx2 = getelementptr inbounds float, float* %p, i64 2 - %2 = load float, float* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds float, float* %p, i64 3 - %3 = load float, float* %arrayidx3, align 4 - %arrayidx4 = getelementptr inbounds float, float* %p, i64 4 - %4 = load float, float* %arrayidx4, align 4 + %0 = load float, ptr %p, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %p, i64 1 + %1 = load float, ptr %arrayidx1, align 4 + %arrayidx2 = getelementptr inbounds float, ptr %p, i64 2 + %2 = load float, ptr %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds float, ptr %p, i64 3 + %3 = load float, ptr %arrayidx3, align 4 + %arrayidx4 = getelementptr inbounds float, ptr %p, i64 4 + %4 = load float, ptr %arrayidx4, align 4 %mul = fmul fast float %0, %1 %add = fadd fast float %mul, %4 %mul5 = fmul fast float %1, %2 @@ -249,11 +249,11 @@ 
entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - tail call void bitcast (void (...)* @g to void ()*)() #2 + tail call void @g() #2 br label %if.end if.end: ; preds = %if.then, %entry - store float %add9, float* %q, align 4 + store float %add9, ptr %q, align 4 ret void } @@ -268,17 +268,17 @@ if.end: ; preds = %if.then, %entry ; CHECK: bl hh ; CHECK: str d0 -define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 { +define void @f6(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load double, double* %p, align 8 - %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double, double* %arrayidx1, align 8 - %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double, double* %arrayidx2, align 8 - %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double, double* %arrayidx3, align 8 - %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double, double* %arrayidx4, align 8 + %0 = load double, ptr %p, align 8 + %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1 + %1 = load double, ptr %arrayidx1, align 8 + %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2 + %2 = load double, ptr %arrayidx2, align 8 + %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3 + %3 = load double, ptr %arrayidx3, align 8 + %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4 + %4 = load double, ptr %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -288,7 +288,7 @@ entry: %mul8 = fmul fast double %2, %3 %add9 = fadd fast double %mul8, %sub %call = tail call double @hh(double %add9) #2 - store double %call, double* %q, align 8 + store double %call, ptr %q, align 8 ret void } @@ -303,17 +303,17 @@ declare double @hh(double) #1 ; CHECK: fmadd [[x:d[0-9]+]] ; CHECK: fadd d1, [[x]], [[x]] -define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 { +define void @f7(ptr nocapture readonly %p, ptr nocapture %q) #0 { entry: - %0 = load double, double* %p, align 8 - %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double, double* %arrayidx1, align 8 - %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double, double* %arrayidx2, align 8 - %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double, double* %arrayidx3, align 8 - %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double, double* %arrayidx4, align 8 + %0 = load double, ptr %p, align 8 + %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1 + %1 = load double, ptr %arrayidx1, align 8 + %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2 + %2 = load double, ptr %arrayidx2, align 8 + %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3 + %3 = load double, ptr %arrayidx3, align 8 + %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4 + %4 = load double, ptr %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll index 0c6be21f890737..5041cfbbaa8634 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll @@ -11,14 +11,14 @@ invoke.cont145: br i1 %or.cond, label %if.then274, label %invoke.cont145 
if.then274: - %0 = load i32, i32* null, align 4 + %0 = load i32, ptr null, align 4 br i1 undef, label %invoke.cont291, label %if.else313 invoke.cont291: %idxprom.i.i.i605 = sext i32 %0 to i64 - %arrayidx.i.i.i607 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i605 + %arrayidx.i.i.i607 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i605 %idxprom.i.i.i596 = sext i32 %0 to i64 - %arrayidx.i.i.i598 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i596 + %arrayidx.i.i.i598 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i596 br label %if.end356 if.else313: @@ -30,7 +30,7 @@ invoke.cont317: invoke.cont326: %idxprom.i.i.i587 = sext i32 %0 to i64 - %arrayidx.i.i.i589 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i587 + %arrayidx.i.i.i589 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i587 %sub329 = fsub fast double undef, undef br label %invoke.cont334 @@ -40,12 +40,12 @@ invoke.cont334: invoke.cont342: %idxprom.i.i.i578 = sext i32 %0 to i64 - %arrayidx.i.i.i580 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i578 + %arrayidx.i.i.i580 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i578 br label %if.end356 invoke.cont353: %idxprom.i.i.i572 = sext i32 %0 to i64 - %arrayidx.i.i.i574 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i572 + %arrayidx.i.i.i574 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i572 br label %if.end356 if.end356: diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll index 07e0ba654d21c0..d8280dadc550ea 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll @@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 target triple = "arm64-apple-macosx10.9" ; Check that sexts get promoted above adds. 
-define void @foo(i32* nocapture %a, i32 %i) { +define void @foo(ptr nocapture %a, i32 %i) { entry: ; CHECK-LABEL: _foo: ; CHECK: add @@ -14,15 +14,15 @@ entry: ; CHECK-NEXT: ret %add = add nsw i32 %i, 1 %idxprom = sext i32 %add to i64 - %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom - %0 = load i32, i32* %arrayidx, align 4 + %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom + %0 = load i32, ptr %arrayidx, align 4 %add1 = add nsw i32 %i, 2 %idxprom2 = sext i32 %add1 to i64 - %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %idxprom2 - %1 = load i32, i32* %arrayidx3, align 4 + %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %idxprom2 + %1 = load i32, ptr %arrayidx3, align 4 %add4 = add nsw i32 %1, %0 %idxprom5 = sext i32 %i to i64 - %arrayidx6 = getelementptr inbounds i32, i32* %a, i64 %idxprom5 - store i32 %add4, i32* %arrayidx6, align 4 + %arrayidx6 = getelementptr inbounds i32, ptr %a, i64 %idxprom5 + store i32 %add4, ptr %arrayidx6, align 4 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll index 36b418b0cee1ed..2b71126ee175bd 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll @@ -9,57 +9,57 @@ declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) -define i8 @add_B(<16 x i8>* %arr) { +define i8 @add_B(ptr %arr) { ; CHECK-LABEL: add_B: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: addv b0, v0.16b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %bin.rdx = load <16 x i8>, <16 x i8>* %arr + %bin.rdx = load <16 x i8>, ptr %arr %r = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %bin.rdx) ret i8 %r } -define i16 @add_H(<8 x i16>* %arr) { +define i16 @add_H(ptr %arr) { ; CHECK-LABEL: add_H: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: addv h0, v0.8h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %bin.rdx = load <8 x i16>, <8 x i16>* %arr + %bin.rdx = load <8 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %bin.rdx) ret i16 %r } -define i32 @add_S( <4 x i32>* %arr) { +define i32 @add_S( ptr %arr) { ; CHECK-LABEL: add_S: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %bin.rdx = load <4 x i32>, <4 x i32>* %arr + %bin.rdx = load <4 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx) ret i32 %r } -define i64 @add_D(<2 x i64>* %arr) { +define i64 @add_D(ptr %arr) { ; CHECK-LABEL: add_D: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: addp d0, v0.2d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret - %bin.rdx = load <2 x i64>, <2 x i64>* %arr + %bin.rdx = load <2 x i64>, ptr %arr %r = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %bin.rdx) ret i64 %r } declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) -define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) { +define i32 @oversized_ADDV_256(ptr noalias nocapture readonly %arg1, ptr noalias nocapture readonly %arg2) { ; CHECK-LABEL: oversized_ADDV_256: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr d0, [x0] @@ -69,23 +69,21 @@ define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret entry: - %0 = bitcast i8* %arg1 to <8 x i8>* - %1 = load <8 x i8>, <8 x i8>* %0, align 1 - %2 = zext <8 x i8> %1 to <8 x i32> - %3 = bitcast i8* 
%arg2 to <8 x i8>* - %4 = load <8 x i8>, <8 x i8>* %3, align 1 - %5 = zext <8 x i8> %4 to <8 x i32> - %6 = sub nsw <8 x i32> %2, %5 - %7 = icmp slt <8 x i32> %6, zeroinitializer - %8 = sub nsw <8 x i32> zeroinitializer, %6 - %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6 - %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %9) + %0 = load <8 x i8>, ptr %arg1, align 1 + %1 = zext <8 x i8> %0 to <8 x i32> + %2 = load <8 x i8>, ptr %arg2, align 1 + %3 = zext <8 x i8> %2 to <8 x i32> + %4 = sub nsw <8 x i32> %1, %3 + %5 = icmp slt <8 x i32> %4, zeroinitializer + %6 = sub nsw <8 x i32> zeroinitializer, %4 + %7 = select <8 x i1> %5, <8 x i32> %6, <8 x i32> %4 + %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %7) ret i32 %r } declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) -define i32 @oversized_ADDV_512(<16 x i32>* %arr) { +define i32 @oversized_ADDV_512(ptr %arr) { ; CHECK-LABEL: oversized_ADDV_512: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0, #32] @@ -96,7 +94,7 @@ define i32 @oversized_ADDV_512(<16 x i32>* %arr) { ; CHECK-NEXT: addv s0, v0.4s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret - %bin.rdx = load <16 x i32>, <16 x i32>* %arr + %bin.rdx = load <16 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx) ret i32 %r } diff --git a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll index 6bb4ab74a85c05..dd562a4b2177b5 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll @@ -13,9 +13,9 @@ define dso_local void @movi_modimm_t1() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -29,9 +29,9 @@ define dso_local void @movi_modimm_t2() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -45,9 +45,9 @@ define dso_local void @movi_modimm_t3() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -61,9 +61,9 @@ define dso_local void @movi_modimm_t4() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -77,9 +77,9 @@ define dso_local void @movi_modimm_t5() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -93,9 +93,9 @@ define dso_local void @movi_modimm_t6() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 
+ %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -109,9 +109,9 @@ define dso_local void @movi_modimm_t7() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -125,9 +125,9 @@ define dso_local void @movi_modimm_t8() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -141,9 +141,9 @@ define dso_local void @movi_modimm_t9() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -157,9 +157,9 @@ define dso_local void @movi_modimm_t10() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -173,9 +173,9 @@ define dso_local void @fmov_modimm_t11() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -189,9 +189,9 @@ define dso_local void @fmov_modimm_t12() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -205,9 +205,9 @@ define dso_local void @mvni_modimm_t1() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -221,9 +221,9 @@ define dso_local void @mvni_modimm_t2() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -237,9 +237,9 @@ define dso_local void @mvni_modimm_t3() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -253,9 +253,9 @@ define dso_local void @mvni_modimm_t4() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h 
; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -269,9 +269,9 @@ define dso_local void @mvni_modimm_t5() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -285,9 +285,9 @@ define dso_local void @mvni_modimm_t6() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -301,9 +301,9 @@ define dso_local void @mvni_modimm_t7() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -317,9 +317,9 @@ define dso_local void @mvni_modimm_t8() nounwind { ; CHECK-NEXT: add v0.8h, v0.8h, v1.8h ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = add <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -332,9 +332,9 @@ define dso_local void @bic_modimm_t1() nounwind { ; CHECK-NEXT: bic v0.4s, #1 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -347,9 +347,9 @@ define dso_local void @bic_modimm_t2() nounwind { ; CHECK-NEXT: bic v0.4s, #1, lsl #8 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -362,9 +362,9 @@ define dso_local void @bic_modimm_t3() nounwind { ; CHECK-NEXT: bic v0.4s, #1, lsl #16 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -377,9 +377,9 @@ define dso_local void @bic_modimm_t4() nounwind { ; CHECK-NEXT: bic v0.4s, #1, lsl #24 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -392,9 +392,9 @@ define dso_local void @bic_modimm_t5() nounwind { ; CHECK-NEXT: bic v0.8h, #1 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -407,9 +407,9 @@ define dso_local void 
@bic_modimm_t6() nounwind { ; CHECK-NEXT: bic v0.8h, #1, lsl #8 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = and <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -422,9 +422,9 @@ define dso_local void @orr_modimm_t1() nounwind { ; CHECK-NEXT: orr v0.4s, #1 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -437,9 +437,9 @@ define dso_local void @orr_modimm_t2() nounwind { ; CHECK-NEXT: orr v0.4s, #1, lsl #8 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -452,9 +452,9 @@ define dso_local void @orr_modimm_t3() nounwind { ; CHECK-NEXT: orr v0.4s, #1, lsl #16 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -467,9 +467,9 @@ define dso_local void @orr_modimm_t4() nounwind { ; CHECK-NEXT: orr v0.4s, #1, lsl #24 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -482,9 +482,9 @@ define dso_local void @orr_modimm_t5() nounwind { ; CHECK-NEXT: orr v0.8h, #1 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -497,9 +497,9 @@ define dso_local void @orr_modimm_t6() nounwind { ; CHECK-NEXT: orr v0.8h, #1, lsl #8 ; CHECK-NEXT: st1 { v0.8h }, [x8] ; CHECK-NEXT: ret - %in = load <8 x i16>, <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, ptr @vec_v8i16 %rv = or <8 x i16> %in, - store <8 x i16> %rv, <8 x i16>* @vec_v8i16 + store <8 x i16> %rv, ptr @vec_v8i16 ret void } @@ -1031,7 +1031,7 @@ define dso_local void @modimm_t12_call() { ret void } -define <2 x double> @test_v1f64(<1 x double> %0, <2 x double>* %1) { +define <2 x double> @test_v1f64(<1 x double> %0, ptr %1) { ; CHECK-LABEL: test_v1f64: ; CHECK: // %bb.0: ; CHECK-NEXT: mvni v1.2s, #31, msl #16 diff --git a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll index abbf523e1661c4..903da96c3b960c 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll @@ -7,69 +7,67 @@ %struct.bfloat16x4x4_t = type { [4 x <4 x bfloat>] } %struct.bfloat16x8x4_t = type { [4 x <8 x bfloat>] } -define <4 x bfloat> @test_vld1_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind { +define <4 x bfloat> @test_vld1_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <4 x bfloat>* - %1 = load 
<4 x bfloat>, <4 x bfloat>* %0, align 2 - ret <4 x bfloat> %1 + %0 = load <4 x bfloat>, ptr %ptr, align 2 + ret <4 x bfloat> %0 } -define <8 x bfloat> @test_vld1q_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind { +define <8 x bfloat> @test_vld1q_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <8 x bfloat>* - %1 = load <8 x bfloat>, <8 x bfloat>* %0, align 2 - ret <8 x bfloat> %1 + %0 = load <8 x bfloat>, ptr %ptr, align 2 + ret <8 x bfloat> %0 } -define <4 x bfloat> @test_vld1_lane_bf16(bfloat* nocapture readonly %ptr, <4 x bfloat> %src) local_unnamed_addr nounwind { +define <4 x bfloat> @test_vld1_lane_bf16(ptr nocapture readonly %ptr, <4 x bfloat> %src) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld1 { v0.h }[0], [x0] ; CHECK: ret entry: - %0 = load bfloat, bfloat* %ptr, align 2 + %0 = load bfloat, ptr %ptr, align 2 %vld1_lane = insertelement <4 x bfloat> %src, bfloat %0, i32 0 ret <4 x bfloat> %vld1_lane } -define <8 x bfloat> @test_vld1q_lane_bf16(bfloat* nocapture readonly %ptr, <8 x bfloat> %src) local_unnamed_addr nounwind { +define <8 x bfloat> @test_vld1q_lane_bf16(ptr nocapture readonly %ptr, <8 x bfloat> %src) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.h }[7], [x0] ; CHECK-NEXT: ret entry: - %0 = load bfloat, bfloat* %ptr, align 2 + %0 = load bfloat, ptr %ptr, align 2 %vld1_lane = insertelement <8 x bfloat> %src, bfloat %0, i32 7 ret <8 x bfloat> %vld1_lane } -define <4 x bfloat> @test_vld1_dup_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind { +define <4 x bfloat> @test_vld1_dup_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1r { v0.4h }, [x0] ; CHECK-NEXT: ret entry: - %0 = load bfloat, bfloat* %ptr, align 2 + %0 = load bfloat, ptr %ptr, align 2 %1 = insertelement <4 x bfloat> undef, bfloat %0, i32 0 %lane = shufflevector <4 x bfloat> %1, <4 x bfloat> undef, <4 x i32> zeroinitializer ret <4 x bfloat> %lane } -define %struct.bfloat16x4x2_t @test_vld1_bf16_x2(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x2_t @test_vld1_bf16_x2(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_bf16_x2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.4h, v1.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld1xN, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld1xN.fca.0.extract, 0, 0 @@ -77,15 +75,15 @@ entry: ret %struct.bfloat16x4x2_t %.fca.0.1.insert } -declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x2_t @test_vld1q_bf16_x2(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x2_t @test_vld1q_bf16_x2(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_bf16_x2: ; CHECK: // 
%bb.0: // %entry ; CHECK-NEXT: ld1 { v0.8h, v1.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld1xN, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld1xN.fca.0.extract, 0, 0 @@ -94,15 +92,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x3_t @test_vld1_bf16_x3(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x3_t @test_vld1_bf16_x3(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_bf16_x3: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.4h, v1.4h, v2.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 1 %vld1xN.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 2 @@ -113,15 +111,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x3_t @test_vld1q_bf16_x3(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x3_t @test_vld1q_bf16_x3(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_bf16_x3: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.8h, v1.8h, v2.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 1 %vld1xN.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 2 @@ -132,15 +130,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x4_t @test_vld1_bf16_x4(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x4_t @test_vld1_bf16_x4(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1_bf16_x4: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <4 x 
bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 1 %vld1xN.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 2 @@ -153,15 +151,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x4_t @test_vld1q_bf16_x4(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x4_t @test_vld1q_bf16_x4(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_bf16_x4: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0bf16(bfloat* %ptr) + %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0(ptr %ptr) %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 0 %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 1 %vld1xN.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 2 @@ -174,28 +172,27 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0(ptr) nounwind -define <8 x bfloat> @test_vld1q_dup_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind { +define <8 x bfloat> @test_vld1q_dup_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld1q_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld1r { v0.8h }, [x0] ; CHECK-NEXT: ret entry: - %0 = load bfloat, bfloat* %ptr, align 2 + %0 = load bfloat, ptr %ptr, align 2 %1 = insertelement <8 x bfloat> undef, bfloat %0, i32 0 %lane = shufflevector <8 x bfloat> %1, <8 x bfloat> undef, <8 x i32> zeroinitializer ret <8 x bfloat> %lane } -define %struct.bfloat16x4x2_t @test_vld2_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x2_t @test_vld2_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld2 { v0.4h, v1.4h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <4 x bfloat>* - %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0v4bf16(<4 x bfloat>* %0) + %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0(ptr %ptr) %vld2.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 0 %vld2.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2.fca.0.extract, 0, 0 @@ -204,16 +201,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind +declare { <4 x bfloat>, 
<4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x2_t @test_vld2q_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x2_t @test_vld2q_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld2 { v0.8h, v1.8h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <8 x bfloat>* - %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0v8bf16(<8 x bfloat>* %0) + %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0(ptr %ptr) %vld2.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 0 %vld2.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2.fca.0.extract, 0, 0 @@ -222,8 +218,8 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind -define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(bfloat* %ptr, [2 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0(ptr) nounwind +define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(ptr %ptr, [2 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld2 { v0.h, v1.h }[1], [x0] @@ -231,8 +227,7 @@ define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(bfloat* %ptr, [2 x <4 x bfloa entry: %src.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %src.coerce, 0 %src.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %src.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - %vld2_lane = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, i64 1, i8* %0) + %vld2_lane = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, i64 1, ptr %ptr) %vld2_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2_lane, 0 %vld2_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2_lane, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2_lane.fca.0.extract, 0, 0 @@ -241,9 +236,9 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind +declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(bfloat* %ptr, [2 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(ptr %ptr, [2 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld2 { v0.h, v1.h }[7], [x0] @@ -251,8 +246,7 @@ define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(bfloat* %ptr, [2 x <8 x bflo entry: %src.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %src.coerce, 0 %src.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %src.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - %vld2_lane = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x 
bfloat> %src.coerce.fca.1.extract, i64 7, i8* %0) + %vld2_lane = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, i64 7, ptr %ptr) %vld2_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2_lane, 0 %vld2_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2_lane, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2_lane.fca.0.extract, 0, 0 @@ -261,16 +255,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind +declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x4x3_t @test_vld3_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x3_t @test_vld3_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld3 { v0.4h, v1.4h, v2.4h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <4 x bfloat>* - %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0v4bf16(<4 x bfloat>* %0) + %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0(ptr %ptr) %vld3.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 0 %vld3.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 1 %vld3.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 2 @@ -281,16 +274,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x3_t @test_vld3q_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x3_t @test_vld3q_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld3 { v0.8h, v1.8h, v2.8h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <8 x bfloat>* - %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0v8bf16(<8 x bfloat>* %0) + %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0(ptr %ptr) %vld3.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 0 %vld3.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 1 %vld3.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 2 @@ -301,9 +293,9 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x3_t @test_vld3_lane_bf16(bfloat* %ptr, [3 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +define %struct.bfloat16x4x3_t @test_vld3_lane_bf16(ptr %ptr, [3 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld3 { v0.h, v1.h, v2.h }[1], [x0] @@ -312,8 +304,7 @@ entry: %src.coerce.fca.0.extract 
= extractvalue [3 x <4 x bfloat>] %src.coerce, 0 %src.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %src.coerce, 1 %src.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %src.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - %vld3_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, i64 1, i8* %0) + %vld3_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, i64 1, ptr %ptr) %vld3_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 0 %vld3_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 1 %vld3_lane.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 2 @@ -324,9 +315,9 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(bfloat* %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(ptr %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECKT: ld3 { v0.h, v1.h, v2.h }[7], [x0] @@ -335,8 +326,7 @@ entry: %src.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 0 %src.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 1 %src.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - %vld3_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, i64 7, i8* %0) + %vld3_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, i64 7, ptr %ptr) %vld3_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 0 %vld3_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 1 %vld3_lane.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 2 @@ -347,16 +337,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x4x4_t @test_vld4_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x4_t @test_vld4_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast 
bfloat* %ptr to <4 x bfloat>* - %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0v4bf16(<4 x bfloat>* %0) + %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0(ptr %ptr) %vld4.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 0 %vld4.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 1 %vld4.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 2 @@ -369,16 +358,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x4_t @test_vld4q_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x4_t @test_vld4q_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <8 x bfloat>* - %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0v8bf16(<8 x bfloat>* %0) + %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0(ptr %ptr) %vld4.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 0 %vld4.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 1 %vld4.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 2 @@ -391,9 +379,9 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x4_t @test_vld4_lane_bf16(bfloat* %ptr, [4 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +define %struct.bfloat16x4x4_t @test_vld4_lane_bf16(ptr %ptr, [4 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[1], [x0] @@ -403,8 +391,7 @@ entry: %src.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 1 %src.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 2 %src.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - %vld4_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, <4 x bfloat> %src.coerce.fca.3.extract, i64 1, i8* %0) + %vld4_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, <4 x bfloat> %src.coerce.fca.3.extract, i64 1, ptr %ptr) %vld4_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 0 
%vld4_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 1 %vld4_lane.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 2 @@ -417,9 +404,9 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x8x4_t @test_vld4q_lane_bf16(bfloat* %ptr, [4 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { +define %struct.bfloat16x8x4_t @test_vld4q_lane_bf16(ptr %ptr, [4 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0] @@ -429,8 +416,7 @@ entry: %src.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 1 %src.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 2 %src.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - %vld4_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, <8 x bfloat> %src.coerce.fca.3.extract, i64 7, i8* %0) + %vld4_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, <8 x bfloat> %src.coerce.fca.3.extract, i64 7, ptr %ptr) %vld4_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 0 %vld4_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 1 %vld4_lane.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 2 @@ -443,15 +429,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind -define %struct.bfloat16x4x2_t @test_vld2_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x2_t @test_vld2_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld2r { v0.4h, v1.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0bf16(bfloat* %ptr) + %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0(ptr %ptr) %vld2.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 0 %vld2.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2.fca.0.extract, 0, 0 @@ -460,15 +446,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x 
bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x2_t @test_vld2q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x2_t @test_vld2q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld2q_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld2r { v0.8h, v1.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0bf16(bfloat* %ptr) + %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0(ptr %ptr) %vld2.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 0 %vld2.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 1 %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2.fca.0.extract, 0, 0 @@ -477,15 +463,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x3_t @test_vld3_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x3_t @test_vld3_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld3r { v0.4h, v1.4h, v2.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0bf16(bfloat* %ptr) + %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0(ptr %ptr) %vld3.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 0 %vld3.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 1 %vld3.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 2 @@ -496,15 +482,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x3_t @test_vld3q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x3_t @test_vld3q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld3q_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld3r { v0.8h, v1.8h, v2.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0bf16(bfloat* %ptr) + %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0(ptr %ptr) %vld3.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 0 %vld3.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 1 %vld3.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 2 @@ -515,15 +501,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0(ptr) nounwind -define %struct.bfloat16x4x4_t @test_vld4_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x4x4_t 
@test_vld4_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld4r { v0.4h, v1.4h, v2.4h, v3.4h }, [x0] ; CHECK-NEXT: ret entry: - %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0bf16(bfloat* %ptr) + %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0(ptr %ptr) %vld4.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 0 %vld4.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 1 %vld4.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 2 @@ -536,15 +522,15 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0bf16(bfloat*) nounwind +declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0(ptr) nounwind -define %struct.bfloat16x8x4_t @test_vld4q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind { +define %struct.bfloat16x8x4_t @test_vld4q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vld4q_dup_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ld4r { v0.8h, v1.8h, v2.8h, v3.8h }, [x0] ; CHECK-NEXT: ret entry: - %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0bf16(bfloat* %ptr) + %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0(ptr %ptr) %vld4.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 0 %vld4.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 1 %vld4.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 2 @@ -557,53 +543,51 @@ entry: } ; Function Attrs: argmemonly nounwind readonly -declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0bf16(bfloat*) nounwind +declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0(ptr) nounwind -define void @test_vst1_bf16(bfloat* nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind { +define void @test_vst1_bf16(ptr nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str d0, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <4 x bfloat>* - store <4 x bfloat> %val, <4 x bfloat>* %0, align 8 + store <4 x bfloat> %val, ptr %ptr, align 8 ret void } -define void @test_vst1q_bf16(bfloat* nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind { +define void @test_vst1q_bf16(ptr nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str q0, [x0] ; CHECK-NEXT: ret entry: - %0 = bitcast bfloat* %ptr to <8 x bfloat>* - store <8 x bfloat> %val, <8 x bfloat>* %0, align 16 + store <8 x bfloat> %val, ptr %ptr, align 16 ret void } -define void @test_vst1_lane_bf16(bfloat* nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind { +define void @test_vst1_lane_bf16(ptr nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.h }[1], [x0] ; CHECK: 
ret entry: %0 = extractelement <4 x bfloat> %val, i32 1 - store bfloat %0, bfloat* %ptr, align 2 + store bfloat %0, ptr %ptr, align 2 ret void } -define void @test_vst1q_lane_bf16(bfloat* nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind { +define void @test_vst1q_lane_bf16(ptr nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: st1 { v0.h }[7], [x0] ; CHECK-NEXT: ret entry: %0 = extractelement <8 x bfloat> %val, i32 7 - store bfloat %0, bfloat* %ptr, align 2 + store bfloat %0, ptr %ptr, align 2 ret void } -define void @test_vst1_bf16_x2(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1_bf16_x2(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1_bf16_x2: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.4h, v1.4h }, [x0] @@ -611,14 +595,14 @@ define void @test_vst1_bf16_x2(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.c entry: %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1 - tail call void @llvm.aarch64.neon.st1x2.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x2.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x2.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x2.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind -define void @test_vst1q_bf16_x2(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1q_bf16_x2(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1q_bf16_x2: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.8h, v1.8h }, [x0] @@ -626,14 +610,14 @@ define void @test_vst1q_bf16_x2(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val. 
entry: %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1 - tail call void @llvm.aarch64.neon.st1x2.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x2.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x2.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x2.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind -define void @test_vst1_bf16_x3(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1_bf16_x3(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1_bf16_x3: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.4h, v1.4h, v2.4h }, [x0] @@ -642,14 +626,14 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2 - tail call void @llvm.aarch64.neon.st1x3.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x3.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x3.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x3.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind -define void @test_vst1q_bf16_x3(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1q_bf16_x3(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1q_bf16_x3: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.8h, v1.8h, v2.8h }, [x0] @@ -658,15 +642,15 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2 - tail call void @llvm.aarch64.neon.st1x3.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x3.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x3.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x3.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst1_bf16_x4(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1_bf16_x4(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1_bf16_x4: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { 
v0.4h, v1.4h, v2.4h, v3.4h }, [x0] @@ -676,14 +660,14 @@ entry: %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3 - tail call void @llvm.aarch64.neon.st1x4.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x4.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x4.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x4.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind -define void @test_vst1q_bf16_x4(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst1q_bf16_x4(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst1q_bf16_x4: ; CHECK: // %bb.0: // %entry ; CHECK: st1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0] @@ -693,14 +677,14 @@ entry: %val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3 - tail call void @llvm.aarch64.neon.st1x4.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, bfloat* %ptr) + tail call void @llvm.aarch64.neon.st1x4.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st1x4.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind +declare void @llvm.aarch64.neon.st1x4.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind -define void @test_vst2_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst2_bf16(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst2_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st2 { v0.4h, v1.4h }, [x0] @@ -708,15 +692,14 @@ define void @test_vst2_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coer entry: %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st2.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i8* %0) + tail call void @llvm.aarch64.neon.st2.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st2.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st2.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, ptr 
nocapture) nounwind -define void @test_vst2q_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst2q_bf16(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst2q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st2 { v0.8h, v1.8h }, [x0] @@ -724,15 +707,14 @@ define void @test_vst2q_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coe entry: %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st2.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i8* %0) + tail call void @llvm.aarch64.neon.st2.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st2.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st2.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind -define void @test_vst2_lane_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst2_lane_bf16(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst2_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st2 { v0.h, v1.h }[1], [x0] @@ -740,16 +722,15 @@ define void @test_vst2_lane_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val entry: %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st2lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i64 1, i8* %0) + tail call void @llvm.aarch64.neon.st2lane.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i64 1, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st2lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st2lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst2q_lane_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst2q_lane_bf16(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst2q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st2 { v0.h, v1.h }[7], [x0] @@ -757,16 +738,15 @@ define void @test_vst2q_lane_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %va entry: %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st2lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i64 7, i8* %0) + tail call void @llvm.aarch64.neon.st2lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i64 7, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st2lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st2lane.v8bf16.p0(<8 x 
bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst3_bf16(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst3_bf16(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst3_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st3 { v0.4h, v1.4h, v2.4h }, [x0] @@ -775,16 +755,15 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st3.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i8* %0) + tail call void @llvm.aarch64.neon.st3.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st3.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st3.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst3q_bf16(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst3q_bf16(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst3q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st3 { v0.8h, v1.8h, v2.8h }, [x0] @@ -793,16 +772,15 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st3.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i8* %0) + tail call void @llvm.aarch64.neon.st3.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st3.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st3.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst3_lane_bf16(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst3_lane_bf16(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst3_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st3 { v0.h, v1.h, v2.h }[1], [x0] @@ -811,16 +789,15 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st3lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i64 1, i8* %0) + tail call void @llvm.aarch64.neon.st3lane.v4bf16.p0(<4 x bfloat> 
%val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i64 1, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st3lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st3lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst3q_lane_bf16(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst3q_lane_bf16(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst3q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st3 { v0.h, v1.h, v2.h }[7], [x0] @@ -829,16 +806,15 @@ entry: %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0 %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st3lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i64 7, i8* %0) + tail call void @llvm.aarch64.neon.st3lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i64 7, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st3lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st3lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst4_bf16(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst4_bf16(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst4_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0] @@ -848,16 +824,15 @@ entry: %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st4.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i8* %0) + tail call void @llvm.aarch64.neon.st4.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st4.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st4.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst4q_bf16(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst4q_bf16(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst4q_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0] @@ -867,16 +842,15 @@ entry: 
%val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st4.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, i8* %0) + tail call void @llvm.aarch64.neon.st4.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st4.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st4.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst4_lane_bf16(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst4_lane_bf16(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst4_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[1], [x0] @@ -886,16 +860,15 @@ entry: %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st4lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i64 1, i8* %0) + tail call void @llvm.aarch64.neon.st4lane.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i64 1, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st4lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st4lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind ; Function Attrs: nounwind -define void @test_vst4q_lane_bf16(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { +define void @test_vst4q_lane_bf16(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind { ; CHECK-LABEL: test_vst4q_lane_bf16: ; CHECK: // %bb.0: // %entry ; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[7], [x0] @@ -905,12 +878,11 @@ entry: %val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1 %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2 %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3 - %0 = bitcast bfloat* %ptr to i8* - tail call void @llvm.aarch64.neon.st4lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, i64 7, i8* %0) + tail call void @llvm.aarch64.neon.st4lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> 
%val.coerce.fca.3.extract, i64 7, ptr %ptr) ret void } ; Function Attrs: argmemonly nounwind -declare void @llvm.aarch64.neon.st4lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind +declare void @llvm.aarch64.neon.st4lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind diff --git a/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll b/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll index 9c7fbdd0615657..f5494c3cee0e91 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll @@ -8,12 +8,12 @@ ; ; SelectionDAG has 16 nodes: ; t0: ch = EntryToken -; t3: i64 = add nuw GlobalAddress:i64<%str0* @g0> 0, Constant:i64<8> +; t3: i64 = add nuw GlobalAddress:i64 0, Constant:i64<8> ; t6: ch = store<(store (s64) into %ir.sp1, align 1, !tbaa !1)> t0, Constant:i64<0>, t3, undef:i64 -; t7: i64,ch = load<(load (s64) from `%str1** undef`, align 1)> t6, undef:i64, undef:i64 +; t7: i64,ch = load<(load (s64) from `ptr undef`, align 1)> t6, undef:i64, undef:i64 ; t8: i64 = add nuw t7, Constant:i64<8> ; t9: i64,ch = load<(load (s64) from %ir.lp0, align 1)> t6, t8, undef:i64 -; t21: ch = store<(store (s64) into %ir.sp01, align 1)> t19:1, Constant:i64<0>, GlobalAddress:i64<%str0* @g0> 0, undef:i64 +; t21: ch = store<(store (s64) into %ir.sp01, align 1)> t19:1, Constant:i64<0>, GlobalAddress:i64 0, undef:i64 ; t24: ch = TokenFactor t7:1, t9:1, t21 ; t14: ch,glue = CopyToReg t24, Register:i64 $x0, t19 ; t19: i64,ch = load<(load (s64) from %ir.lp12, align 1, !tbaa !7)> t0, t9, undef:i64 @@ -33,7 +33,7 @@ ; performed. %str0 = type { i64, i64 } -%str1 = type { i64, %str1* } +%str1 = type { i64, ptr } @g0 = external global %str0, align 1 @@ -49,15 +49,13 @@ define i64 @foo() { ; CHECK-NEXT: str xzr, [x8] ; CHECK-NEXT: ret entry: - %sp0 = getelementptr inbounds %str0, %str0* @g0, i32 0, i32 0 - %sp1 = getelementptr inbounds %str0, %str0* @g0, i32 0, i32 1 - store i64 0, i64* %sp1, align 1, !tbaa !1 - %l0 = load %str1*, %str1** undef, align 1 - %lp0 = getelementptr inbounds %str1, %str1* %l0, i32 0, i32 1 - %l1 = load %str1*, %str1** %lp0, align 1 - %lp1 = getelementptr inbounds %str1, %str1* %l1, i32 0, i32 0 - %l2 = load i64, i64* %lp1, align 1, !tbaa !7 - store i64 0, i64* %sp0, align 1 + %sp1 = getelementptr inbounds %str0, ptr @g0, i32 0, i32 1 + store i64 0, ptr %sp1, align 1, !tbaa !1 + %l0 = load ptr, ptr undef, align 1 + %lp0 = getelementptr inbounds %str1, ptr %l0, i32 0, i32 1 + %l1 = load ptr, ptr %lp0, align 1 + %l2 = load i64, ptr %l1, align 1, !tbaa !7 + store i64 0, ptr @g0, align 1 ret i64 %l2 } diff --git a/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll b/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll index 3fe7e65bf2454a..92f29dac13fa41 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll @@ -6,7 +6,7 @@ target triple = "aarch64--linux-gnu" %struct.match_state = type { i64, i64 } ; %add is also promoted by forking an extra sext. 
-define void @promoteTwoOne(i32 %i, i32 %j, i64* %P1, i64* %P2 ) { +define void @promoteTwoOne(i32 %i, i32 %j, ptr %P1, ptr %P2 ) { ; CHECK-LABEL: @promoteTwoOne ; CHECK-LABEL: entry: ; CHECK: %[[SEXT1:.*]] = sext i32 %i to i64 @@ -15,16 +15,16 @@ define void @promoteTwoOne(i32 %i, i32 %j, i64* %P1, i64* %P2 ) { entry: %add = add nsw i32 %i, %j %s = sext i32 %add to i64 - %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s - store i64 %s, i64* %addr1 + %addr1 = getelementptr inbounds i64, ptr %P1, i64 %s + store i64 %s, ptr %addr1 %s2 = sext i32 %i to i64 - %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2 - store i64 %s2, i64* %addr2 + %addr2 = getelementptr inbounds i64, ptr %P2, i64 %s2 + store i64 %s2, ptr %addr2 ret void } ; Both %add1 and %add2 are promoted by forking extra sexts. -define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, i64* %P1, i64* %P2) { +define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, ptr %P1, ptr %P2) { ; CHECK-LABEL: @promoteTwoTwo ; CHECK-LABEL:entry: ; CHECK: %[[SEXT1:.*]] = sext i32 %j to i64 @@ -35,16 +35,16 @@ define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, i64* %P1, i64* %P2) { entry: %add1 = add nsw i32 %j, %i %s = sext i32 %add1 to i64 - %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s - store i64 %s, i64* %addr1 + %addr1 = getelementptr inbounds i64, ptr %P1, i64 %s + store i64 %s, ptr %addr1 %add2 = add nsw i32 %j, %k %s2 = sext i32 %add2 to i64 - %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2 - store i64 %s2, i64* %addr2 + %addr2 = getelementptr inbounds i64, ptr %P2, i64 %s2 + store i64 %s2, ptr %addr2 ret void } -define i64 @promoteGEPSunk(i1 %cond, i64* %base, i32 %i) { +define i64 @promoteGEPSunk(i1 %cond, ptr %base, i32 %i) { ; CHECK-LABEL: @promoteGEPSunk ; CHECK-LABEL: entry: ; CHECK: %[[SEXT:.*]] = sext i32 %i to i64 @@ -53,14 +53,14 @@ define i64 @promoteGEPSunk(i1 %cond, i64* %base, i32 %i) { entry: %add = add nsw i32 %i, 1 %s = sext i32 %add to i64 - %addr = getelementptr inbounds i64, i64* %base, i64 %s + %addr = getelementptr inbounds i64, ptr %base, i64 %s %add2 = add nsw i32 %i, 2 %s2 = sext i32 %add2 to i64 - %addr2 = getelementptr inbounds i64, i64* %base, i64 %s2 + %addr2 = getelementptr inbounds i64, ptr %base, i64 %s2 br i1 %cond, label %if.then, label %if.then2 if.then: - %v = load i64, i64* %addr - %v2 = load i64, i64* %addr2 + %v = load i64, ptr %addr + %v2 = load i64, ptr %addr2 %r = add i64 %v, %v2 ret i64 %r if.then2: diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll index e0d7759c5a66b2..ea21af56dcc767 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll @@ -191,7 +191,7 @@ entry: ret <4 x i32> %out } -define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) { +define void @typei1_orig(i64 %a, ptr %p, ptr %q) { ; CHECK-LABEL: typei1_orig: ; CHECK: // %bb.0: ; CHECK-NEXT: cmp x0, #0 @@ -207,7 +207,7 @@ define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) { ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %tmp = xor <16 x i1> zeroinitializer, - %tmp6 = load <8 x i16>, <8 x i16>* %q, align 2 + %tmp6 = load <8 x i16>, ptr %q, align 2 %tmp7 = sub <8 x i16> zeroinitializer, %tmp6 %tmp8 = shufflevector <8 x i16> %tmp7, <8 x i16> undef, <16 x i32> %tmp9 = icmp slt i64 0, %a @@ -218,8 +218,7 @@ define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) { %tmp14 = icmp ne <16 x i16> %tmp13, zeroinitializer %tmp15 = and <16 x i1> %tmp14, %tmp %tmp16 = sext <16 x i1> %tmp15 to <16 x i8> - %tmp17 = bitcast i8* 
%p to <16 x i8>* - store <16 x i8> %tmp16, <16 x i8>* %tmp17, align 1 + store <16 x i8> %tmp16, ptr %p, align 1 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll index 106074ea9addee..57f7a66cbab695 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll @@ -89,7 +89,7 @@ entry: %l1 = alloca i32, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 4 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 4 %add1 = or i32 %add, %l1.0.l1.0. %call = tail call i32 @g() %add2 = add nsw i32 %add1, %call @@ -149,7 +149,7 @@ entry: %l1 = alloca i32, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 4 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 4 %add1 = add nsw i32 %add, %l1.0.l1.0. ret i32 %add1 } @@ -171,7 +171,7 @@ entry: %l1 = alloca i32, align 128 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 128 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 128 %add1 = or i32 %add, %l1.0.l1.0. %call = tail call i32 @g() %add2 = add nsw i32 %add1, %call @@ -244,7 +244,7 @@ entry: %l1 = alloca i32, align 128 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 128 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 128 %add1 = add nsw i32 %add, %l1.0.l1.0. ret i32 %add1 } @@ -275,11 +275,11 @@ entry: %vla = alloca i32, i64 %0, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 4 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 4 %add1 = or i32 %add, %l1.0.l1.0. %call = tail call i32 @g() %add2 = add nsw i32 %add1, %call - %1 = load volatile i32, i32* %vla, align 4, !tbaa !1 + %1 = load volatile i32, ptr %vla, align 4, !tbaa !1 %add3 = add nsw i32 %add2, %1 ret i32 %add3 } @@ -332,9 +332,9 @@ entry: %vla = alloca i32, i64 %0, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 4 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 4 %add1 = add nsw i32 %add, %l1.0.l1.0. - %1 = load volatile i32, i32* %vla, align 4, !tbaa !1 + %1 = load volatile i32, ptr %vla, align 4, !tbaa !1 %add2 = add nsw i32 %add1, %1 ret i32 %add2 } @@ -375,11 +375,11 @@ entry: %vla = alloca i32, i64 %0, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 128 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 128 %add1 = or i32 %add, %l1.0.l1.0. %call = tail call i32 @g() %add2 = add nsw i32 %add1, %call - %1 = load volatile i32, i32* %vla, align 4, !tbaa !1 + %1 = load volatile i32, ptr %vla, align 4, !tbaa !1 %add3 = add nsw i32 %add2, %1 ret i32 %add3 } @@ -486,9 +486,9 @@ entry: %vla = alloca i32, i64 %0, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 128 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 128 %add1 = add nsw i32 %add, %l1.0.l1.0. 
- %1 = load volatile i32, i32* %vla, align 4, !tbaa !1 + %1 = load volatile i32, ptr %vla, align 4, !tbaa !1 %add2 = add nsw i32 %add1, %1 ret i32 %add2 } @@ -570,9 +570,9 @@ entry: %vla = alloca i32, i64 %0, align 4 %conv = fptosi double %d10 to i32 %add = add nsw i32 %conv, %i10 - %l1.0.l1.0. = load volatile i32, i32* %l1, align 32768 + %l1.0.l1.0. = load volatile i32, ptr %l1, align 32768 %add1 = add nsw i32 %add, %l1.0.l1.0. - %1 = load volatile i32, i32* %vla, align 4, !tbaa !1 + %1 = load volatile i32, ptr %vla, align 4, !tbaa !1 %add2 = add nsw i32 %add1, %1 ret i32 %add2 } diff --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll index 00dcd86ad9fc8c..296435adc8de52 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -20,9 +20,9 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-gnu" -define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { +define i64 @f_load_madd_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i64, i64* %c, align 8 + %0 = load i64, ptr %c, align 8 %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a ret i64 %add @@ -39,9 +39,9 @@ entry: ; CHECK-BASIC-PASS-DISABLED-NEXT: madd -define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { +define i32 @f_load_madd_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i32, i32* %c, align 4 + %0 = load i32, ptr %c, align 4 %mul = mul nsw i32 %0, %b %add = add nsw i32 %mul, %a ret i32 %add @@ -54,9 +54,9 @@ entry: ; CHECK-NOWORKAROUND-NEXT: madd -define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { +define i64 @f_load_msub_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i64, i64* %c, align 8 + %0 = load i64, ptr %c, align 8 %mul = mul nsw i64 %0, %b %sub = sub nsw i64 %a, %mul ret i64 %sub @@ -70,9 +70,9 @@ entry: ; CHECK-NOWORKAROUND-NEXT: msub -define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { +define i32 @f_load_msub_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i32, i32* %c, align 4 + %0 = load i32, ptr %c, align 4 %mul = mul nsw i32 %0, %b %sub = sub nsw i32 %a, %mul ret i32 %sub @@ -85,9 +85,9 @@ entry: ; CHECK-NOWORKAROUND-NEXT: msub -define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { +define i64 @f_load_mul_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i64, i64* %c, align 8 + %0 = load i64, ptr %c, align 8 %mul = mul nsw i64 %0, %b ret i64 %mul } @@ -99,9 +99,9 @@ entry: ; CHECK-NOWORKAROUND-NEXT: mul -define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { +define i32 @f_load_mul_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i32, i32* %c, align 4 + %0 = load i32, ptr %c, align 4 %mul = mul nsw i32 %0, %b ret i32 %mul } @@ -113,9 +113,9 @@ entry: ; CHECK-NOWORKAROUND-NEXT: mul -define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { +define i64 @f_load_mneg_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i64, i64* %c, align 8 + %0 = load i64, ptr %c, align 8 %mul = sub i64 0, %b %sub = mul i64 %0, %mul ret i64 %sub @@ -131,9 +131,9 @@ entry: ; FIXME-CHECK-NOWORKAROUND-NEXT: mneg -define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { +define i32 @f_load_mneg_32(i32 %a, i32 %b, ptr nocapture 
readonly %c) #0 { entry: - %0 = load i32, i32* %c, align 4 + %0 = load i32, ptr %c, align 4 %mul = sub i32 0, %b %sub = mul i32 %0, %mul ret i32 %sub @@ -148,13 +148,13 @@ entry: ; FIXME-CHECK-NOWORKAROUND-NEXT: mneg -define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = sext i32 %b to i64 %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv %add = add nsw i64 %mul, %a - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = sext i32 %0 to i64 %add3 = add nsw i64 %add, %conv2 ret i64 %add3 @@ -168,13 +168,13 @@ entry: ; CHECK-NOWORKAROUND-NEXT: smaddl -define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = sext i32 %b to i64 %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv %sub = sub i64 %a, %mul - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = sext i32 %0 to i64 %add = add nsw i64 %sub, %conv2 ret i64 %add @@ -188,12 +188,12 @@ entry: ; CHECK-NOWORKAROUND-NEXT: smsubl -define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = sext i32 %b to i64 %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = sext i32 %0 to i64 %div = sdiv i64 %mul, %conv2 ret i64 %div @@ -206,13 +206,13 @@ entry: ; CHECK-NOWORKAROUND-NEXT: smull -define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = sext i32 %b to i64 %conv1 = sext i32 %c to i64 %mul = sub nsw i64 0, %conv %sub = mul i64 %conv1, %mul - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = sext i32 %0 to i64 %div = sdiv i64 %sub, %conv2 ret i64 %div @@ -223,13 +223,13 @@ entry: ; smnegl instructions -define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = zext i32 %b to i64 %conv1 = zext i32 %c to i64 %mul = mul i64 %conv1, %conv %add = add i64 %mul, %a - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = zext i32 %0 to i64 %add3 = add i64 %add, %conv2 ret i64 %add3 @@ -243,13 +243,13 @@ entry: ; CHECK-NOWORKAROUND-NEXT: umaddl -define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = zext i32 %b to i64 %conv1 = zext i32 %c to i64 %mul = mul i64 %conv1, %conv %sub = sub i64 %a, %mul - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = zext i32 %0 to i64 %add = add i64 %sub, %conv2 ret i64 %add @@ -263,12 +263,12 @@ entry: ; CHECK-NOWORKAROUND-NEXT: umsubl -define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = zext i32 %b to i64 %conv1 = zext i32 %c to i64 %mul = mul i64 %conv1, %conv - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = zext i32 %0 to i64 %div = udiv i64 %mul, %conv2 ret i64 %div @@ -281,13 +281,13 @@ entry: ; 
CHECK-NOWORKAROUND-NEXT: umull -define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 { +define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 { entry: %conv = zext i32 %b to i64 %conv1 = zext i32 %c to i64 %mul = sub nsw i64 0, %conv %sub = mul i64 %conv1, %mul - %0 = load i32, i32* %d, align 4 + %0 = load i32, ptr %d, align 4 %conv2 = zext i32 %0 to i64 %div = udiv i64 %sub, %conv2 ret i64 %div @@ -298,10 +298,10 @@ entry: ; umnegl instructions -define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_store_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - store i64 %a, i64* %e, align 8 + %0 = load i64, ptr %cp, align 8 + store i64 %a, ptr %e, align 8 %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a ret i64 %add @@ -315,10 +315,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: madd -define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_store_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - store i32 %a, i32* %e, align 4 + %0 = load i32, ptr %cp, align 4 + store i32 %a, ptr %e, align 4 %mul = mul nsw i32 %0, %b %add = add nsw i32 %mul, %a ret i32 %add @@ -331,10 +331,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: madd -define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_store_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - store i64 %a, i64* %e, align 8 + %0 = load i64, ptr %cp, align 8 + store i64 %a, ptr %e, align 8 %mul = mul nsw i64 %0, %b %sub = sub nsw i64 %a, %mul ret i64 %sub @@ -348,10 +348,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: msub -define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_store_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - store i32 %a, i32* %e, align 4 + %0 = load i32, ptr %cp, align 4 + store i32 %a, ptr %e, align 4 %mul = mul nsw i32 %0, %b %sub = sub nsw i32 %a, %mul ret i32 %sub @@ -364,10 +364,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: msub -define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_store_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - store i64 %a, i64* %e, align 8 + %0 = load i64, ptr %cp, align 8 + store i64 %a, ptr %e, align 8 %mul = mul nsw i64 %0, %b ret i64 %mul } @@ -379,10 +379,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: mul -define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_store_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - store i32 %a, i32* %e, align 4 + %0 = load i32, ptr %cp, align 4 + store i32 %a, ptr %e, align 4 %mul = mul nsw i32 %0, %b ret i32 %mul } @@ -394,11 +394,10 @@ entry: ; CHECK-NOWORKAROUND-NEXT: mul -define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_prefetch_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - %1 = bitcast i64* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 0, i32 
0, i32 1) + %0 = load i64, ptr %cp, align 8 + tail call void @llvm.prefetch(ptr %e, i32 0, i32 0, i32 1) %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a ret i64 %add @@ -411,13 +410,12 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: madd -declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #2 +declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) #2 -define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_prefetch_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - %1 = bitcast i32* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1) + %0 = load i32, ptr %cp, align 4 + tail call void @llvm.prefetch(ptr %e, i32 1, i32 0, i32 1) %mul = mul nsw i32 %0, %b %add = add nsw i32 %mul, %a ret i32 %add @@ -429,11 +427,10 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: madd -define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_prefetch_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - %1 = bitcast i64* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1) + %0 = load i64, ptr %cp, align 8 + tail call void @llvm.prefetch(ptr %e, i32 0, i32 1, i32 1) %mul = mul nsw i64 %0, %b %sub = sub nsw i64 %a, %mul ret i64 %sub @@ -446,11 +443,10 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: msub -define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_prefetch_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - %1 = bitcast i32* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1) + %0 = load i32, ptr %cp, align 4 + tail call void @llvm.prefetch(ptr %e, i32 1, i32 1, i32 1) %mul = mul nsw i32 %0, %b %sub = sub nsw i32 %a, %mul ret i32 %sub @@ -462,11 +458,10 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: msub -define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { +define i64 @f_prefetch_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i64, i64* %cp, align 8 - %1 = bitcast i64* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1) + %0 = load i64, ptr %cp, align 8 + tail call void @llvm.prefetch(ptr %e, i32 0, i32 3, i32 1) %mul = mul nsw i64 %0, %b ret i64 %mul } @@ -477,11 +472,10 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: mul -define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { +define i32 @f_prefetch_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 { entry: - %0 = load i32, i32* %cp, align 4 - %1 = bitcast i32* %e to i8* - tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1) + %0 = load i32, ptr %cp, align 4 + tail call void @llvm.prefetch(ptr %e, i32 1, i32 3, i32 1) %mul = mul nsw i32 %0, %b ret i32 %mul } @@ -492,15 +486,15 @@ entry: ; CHECK-NOWORKAROUND: prfm ; CHECK-NOWORKAROUND-NEXT: mul -define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { +define i64 @fall_through(i64 %a, i64 %b, ptr nocapture readonly %c) #0 { entry: - %0 = load i64, i64* %c, align 8 + %0 = load i64, ptr %c, align 8 br label %block1 block1: %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a - %tmp = 
ptrtoint i8* blockaddress(@fall_through, %block1) to i64 + %tmp = ptrtoint ptr blockaddress(@fall_through, %block1) to i64 %ret = add nsw i64 %tmp, %add ret i64 %ret } @@ -517,7 +511,7 @@ block1: ; CHECK-NOWORKAROUND-NEXT: madd ; No checks for this, just check it doesn't crash -define i32 @crash_check(i8** nocapture readnone %data) #0 { +define i32 @crash_check(ptr nocapture readnone %data) #0 { entry: br label %while.cond diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll index 25ea3933c00642..eaa89081199ed6 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll @@ -7,7 +7,7 @@ %struct.c = type [256 x i64] declare void @foo() -define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind { +define i16 @halfword(ptr %ctx, i32 %xor72) nounwind { ; CHECK0-LABEL: halfword: ; CHECK0: // %bb.0: ; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill @@ -41,14 +41,14 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind { %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 - %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83 - %result = load i16, i16* %arrayidx86, align 2 + %arrayidx86 = getelementptr inbounds %struct.a, ptr %ctx, i64 0, i64 %idxprom83 + %result = load i16, ptr %arrayidx86, align 2 call void @foo() - store i16 %result, i16* %arrayidx86, align 2 + store i16 %result, ptr %arrayidx86, align 2 ret i16 %result } -define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind { +define i32 @word(ptr %ctx, i32 %xor72) nounwind { ; CHECK0-LABEL: word: ; CHECK0: // %bb.0: ; CHECK0-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill @@ -82,14 +82,14 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind { %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 - %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83 - %result = load i32, i32* %arrayidx86, align 4 + %arrayidx86 = getelementptr inbounds %struct.b, ptr %ctx, i64 0, i64 %idxprom83 + %result = load i32, ptr %arrayidx86, align 4 call void @foo() - store i32 %result, i32* %arrayidx86, align 4 + store i32 %result, ptr %arrayidx86, align 4 ret i32 %result } -define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind { +define i64 @doubleword(ptr %ctx, i32 %xor72) nounwind { ; CHECK0-LABEL: doubleword: ; CHECK0: // %bb.0: ; CHECK0-NEXT: stp x30, x21, [sp, #-32]! 
// 16-byte Folded Spill @@ -123,10 +123,10 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind { %shr81 = lshr i32 %xor72, 9 %conv82 = zext i32 %shr81 to i64 %idxprom83 = and i64 %conv82, 255 - %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83 - %result = load i64, i64* %arrayidx86, align 8 + %arrayidx86 = getelementptr inbounds %struct.c, ptr %ctx, i64 0, i64 %idxprom83 + %result = load i64, ptr %arrayidx86, align 8 call void @foo() - store i64 %result, i64* %arrayidx86, align 8 + store i64 %result, ptr %arrayidx86, align 8 ret i64 %result } @@ -162,7 +162,7 @@ endbb: ret i64 %mul2 } -define i64 @gep3(i64 *%p, i64 %b) { +define i64 @gep3(ptr %p, i64 %b) { ; CHECK0-LABEL: gep3: ; CHECK0: // %bb.0: ; CHECK0-NEXT: lsl x9, x1, #3 @@ -177,22 +177,22 @@ define i64 @gep3(i64 *%p, i64 %b) { ; CHECK3-NEXT: ldr x0, [x0, x1, lsl #3] ; CHECK3-NEXT: str x1, [x8, x1, lsl #3] ; CHECK3-NEXT: ret - %g = getelementptr inbounds i64, i64* %p, i64 %b - %l = load i64, i64* %g - store i64 %b, i64* %g + %g = getelementptr inbounds i64, ptr %p, i64 %b + %l = load i64, ptr %g + store i64 %b, ptr %g ret i64 %l } -define i128 @gep4(i128 *%p, i128 %a, i64 %b) { +define i128 @gep4(ptr %p, i128 %a, i64 %b) { ; CHECK-LABEL: gep4: ; CHECK: // %bb.0: ; CHECK-NEXT: add x8, x0, x4, lsl #4 ; CHECK-NEXT: ldp x0, x1, [x8] ; CHECK-NEXT: stp x2, x3, [x8] ; CHECK-NEXT: ret - %g = getelementptr inbounds i128, i128* %p, i64 %b - %l = load i128, i128* %g - store i128 %a, i128* %g + %g = getelementptr inbounds i128, ptr %p, i64 %b + %l = load i128, ptr %g + store i128 %a, ptr %g ret i128 %l } diff --git a/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll b/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll index 0337f04e579633..dd1bf14f5a5691 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll @@ -6,14 +6,14 @@ define <8 x i16> @c(i32 %e) { entry: - %0 = load <4 x i16>, <4 x i16>* @d, align 8 + %0 = load <4 x i16>, ptr @d, align 8 %vminv = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %0) %1 = trunc i32 %vminv to i16 %vecinit3 = insertelement <4 x i16> , i16 %1, i32 1 %call = tail call <8 x i16> @c(i32 0) #3 %vgetq_lane = extractelement <8 x i16> %call, i32 0 %vset_lane = insertelement <4 x i16> %vecinit3, i16 %vgetq_lane, i32 0 - %call4 = tail call i32 bitcast (i32 (...)* @k to i32 (<4 x i16>)*)(<4 x i16> %vset_lane) #3 + %call4 = tail call i32 @k(<4 x i16> %vset_lane) #3 ret <8 x i16> undef } diff --git a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll index b73be5c1c39ee3..bc95cfd7d28d97 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefix CHECK-LE ; RUN: llc -mtriple=aarch64_be-unknown-linux-gnu < %s | FileCheck %s --check-prefix CHECK-BE -define <2 x i16> @test0(i16* %i16_ptr, i64 %inc) { +define <2 x i16> @test0(ptr %i16_ptr, i64 %inc) { ; CHECK-LE-LABEL: test0: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.h }[0], [x0] @@ -14,12 +14,12 @@ define <2 x i16> @test0(i16* %i16_ptr, i64 %inc) { ; CHECK-BE-NEXT: ld1 { v0.h }[0], [x0] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %i_0 = load i16, i16* %i16_ptr + %i_0 = load i16, ptr %i16_ptr %v0 = insertelement <2 x i16> undef, i16 %i_0, i32 0 ret <2 x i16> %v0 } -define <2 x i16> @test1(<2 x i16>* 
%v2i16_ptr) { +define <2 x i16> @test1(ptr %v2i16_ptr) { ; CHECK-LE-LABEL: test1: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.h }[0], [x0] @@ -35,11 +35,11 @@ define <2 x i16> @test1(<2 x i16>* %v2i16_ptr) { ; CHECK-BE-NEXT: ld1 { v0.h }[2], [x8] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %v2i16 = load <2 x i16>, <2 x i16>* %v2i16_ptr + %v2i16 = load <2 x i16>, ptr %v2i16_ptr ret <2 x i16> %v2i16 } -define <2 x i16> @test2(i16* %i16_ptr, i64 %inc) { +define <2 x i16> @test2(ptr %i16_ptr, i64 %inc) { ; CHECK-LE-LABEL: test2: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.h }[0], [x0] @@ -55,15 +55,15 @@ define <2 x i16> @test2(i16* %i16_ptr, i64 %inc) { ; CHECK-BE-NEXT: ld1 { v0.h }[2], [x8] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %i_0 = load i16, i16* %i16_ptr - %i16_ptr_inc = getelementptr i16, i16* %i16_ptr, i64 %inc - %i_1 = load i16, i16* %i16_ptr_inc + %i_0 = load i16, ptr %i16_ptr + %i16_ptr_inc = getelementptr i16, ptr %i16_ptr, i64 %inc + %i_1 = load i16, ptr %i16_ptr_inc %v0 = insertelement <2 x i16> undef, i16 %i_0, i32 0 %v1 = insertelement <2 x i16> %v0, i16 %i_1, i32 1 ret <2 x i16> %v1 } -define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) { +define <2 x i8> @test3(ptr %v2i8_ptr) { ; CHECK-LE-LABEL: test3: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.b }[0], [x0] @@ -79,11 +79,11 @@ define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) { ; CHECK-BE-NEXT: ld1 { v0.b }[4], [x8] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %v2i8 = load <2 x i8>, <2 x i8>* %v2i8_ptr + %v2i8 = load <2 x i8>, ptr %v2i8_ptr ret <2 x i8> %v2i8 } -define <4 x i8> @test4(<4 x i8>* %v4i8_ptr) { +define <4 x i8> @test4(ptr %v4i8_ptr) { ; CHECK-LE-LABEL: test4: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -98,11 +98,11 @@ define <4 x i8> @test4(<4 x i8>* %v4i8_ptr) { ; CHECK-BE-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h ; CHECK-BE-NEXT: ret - %v4i8 = load <4 x i8>, <4 x i8>* %v4i8_ptr + %v4i8 = load <4 x i8>, ptr %v4i8_ptr ret <4 x i8> %v4i8 } -define <2 x i32> @fsext_v2i32(<2 x i8>* %a) { +define <2 x i32> @fsext_v2i32(ptr %a) { ; CHECK-LE-LABEL: fsext_v2i32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldrsb w8, [x0] @@ -120,12 +120,12 @@ define <2 x i32> @fsext_v2i32(<2 x i8>* %a) { ; CHECK-BE-NEXT: mov v0.s[1], w8 ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %x = load <2 x i8>, <2 x i8>* %a + %x = load <2 x i8>, ptr %a %y = sext <2 x i8> %x to <2 x i32> ret <2 x i32> %y } -define <3 x i32> @fsext_v3i32(<3 x i8>* %a) { +define <3 x i32> @fsext_v3i32(ptr %a) { ; CHECK-LE-LABEL: fsext_v3i32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -147,12 +147,12 @@ define <3 x i32> @fsext_v3i32(<3 x i8>* %a) { ; CHECK-BE-NEXT: rev64 v0.4s, v0.4s ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <3 x i8>, <3 x i8>* %a + %x = load <3 x i8>, ptr %a %y = sext <3 x i8> %x to <3 x i32> ret <3 x i32> %y } -define <4 x i32> @fsext_v4i32(<4 x i8>* %a) { +define <4 x i32> @fsext_v4i32(ptr %a) { ; CHECK-LE-LABEL: fsext_v4i32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -169,12 +169,12 @@ define <4 x i32> @fsext_v4i32(<4 x i8>* %a) { ; CHECK-BE-NEXT: rev64 v0.4s, v0.4s ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a + %x = load <4 x i8>, ptr %a %y = sext <4 x i8> %x to <4 x i32> ret <4 x i32> %y } -define <8 x i32> @fsext_v8i32(<8 x i8>* %a) { +define <8 x i32> @fsext_v8i32(ptr %a) { ; CHECK-LE-LABEL: fsext_v8i32: ; CHECK-LE: // %bb.0: ; 
CHECK-LE-NEXT: ldr d0, [x0] @@ -194,12 +194,12 @@ define <8 x i32> @fsext_v8i32(<8 x i8>* %a) { ; CHECK-BE-NEXT: ext v1.16b, v1.16b, v1.16b, #8 ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <8 x i8>, <8 x i8>* %a + %x = load <8 x i8>, ptr %a %y = sext <8 x i8> %x to <8 x i32> ret <8 x i32> %y } -define <4 x i32> @fzext_v4i32(<4 x i8>* %a) { +define <4 x i32> @fzext_v4i32(ptr %a) { ; CHECK-LE-LABEL: fzext_v4i32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -216,7 +216,7 @@ define <4 x i32> @fzext_v4i32(<4 x i8>* %a) { ; CHECK-BE-NEXT: rev64 v0.4s, v0.4s ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a + %x = load <4 x i8>, ptr %a %y = zext <4 x i8> %x to <4 x i32> ret <4 x i32> %y } @@ -224,7 +224,7 @@ define <4 x i32> @fzext_v4i32(<4 x i8>* %a) { ; TODO: This codegen could just be: ; ldrb w0, [x0] ; -define i32 @loadExti32(<4 x i8>* %ref) { +define i32 @loadExti32(ptr %ref) { ; CHECK-LE-LABEL: loadExti32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -241,13 +241,13 @@ define i32 @loadExti32(<4 x i8>* %ref) { ; CHECK-BE-NEXT: umov w8, v0.h[0] ; CHECK-BE-NEXT: and w0, w8, #0xff ; CHECK-BE-NEXT: ret - %a = load <4 x i8>, <4 x i8>* %ref + %a = load <4 x i8>, ptr %ref %vecext = extractelement <4 x i8> %a, i32 0 %conv = zext i8 %vecext to i32 ret i32 %conv } -define <2 x i16> @fsext_v2i16(<2 x i8>* %a) { +define <2 x i16> @fsext_v2i16(ptr %a) { ; CHECK-LE-LABEL: fsext_v2i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldrsb w8, [x0] @@ -265,12 +265,12 @@ define <2 x i16> @fsext_v2i16(<2 x i8>* %a) { ; CHECK-BE-NEXT: mov v0.s[1], w8 ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret - %x = load <2 x i8>, <2 x i8>* %a + %x = load <2 x i8>, ptr %a %y = sext <2 x i8> %x to <2 x i16> ret <2 x i16> %y } -define <3 x i16> @fsext_v3i16(<3 x i8>* %a) { +define <3 x i16> @fsext_v3i16(ptr %a) { ; CHECK-LE-LABEL: fsext_v3i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -289,12 +289,12 @@ define <3 x i16> @fsext_v3i16(<3 x i8>* %a) { ; CHECK-BE-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h ; CHECK-BE-NEXT: ret - %x = load <3 x i8>, <3 x i8>* %a + %x = load <3 x i8>, ptr %a %y = sext <3 x i8> %x to <3 x i16> ret <3 x i16> %y } -define <4 x i16> @fsext_v4i16(<4 x i8>* %a) { +define <4 x i16> @fsext_v4i16(ptr %a) { ; CHECK-LE-LABEL: fsext_v4i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -309,12 +309,12 @@ define <4 x i16> @fsext_v4i16(<4 x i8>* %a) { ; CHECK-BE-NEXT: sshll v0.8h, v0.8b, #0 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a + %x = load <4 x i8>, ptr %a %y = sext <4 x i8> %x to <4 x i16> ret <4 x i16> %y } -define <8 x i16> @fsext_v8i16(<8 x i8>* %a) { +define <8 x i16> @fsext_v8i16(ptr %a) { ; CHECK-LE-LABEL: fsext_v8i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr d0, [x0] @@ -328,12 +328,12 @@ define <8 x i16> @fsext_v8i16(<8 x i8>* %a) { ; CHECK-BE-NEXT: rev64 v0.8h, v0.8h ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <8 x i8>, <8 x i8>* %a + %x = load <8 x i8>, ptr %a %y = sext <8 x i8> %x to <8 x i16> ret <8 x i16> %y } -define <16 x i16> @fsext_v16i16(<16 x i8>* %a) { +define <16 x i16> @fsext_v16i16(ptr %a) { ; CHECK-LE-LABEL: fsext_v16i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr q0, [x0] @@ -351,12 +351,12 @@ define <16 x i16> @fsext_v16i16(<16 x i8>* %a) { ; CHECK-BE-NEXT: ext v1.16b, v1.16b, v1.16b, #8 ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; 
CHECK-BE-NEXT: ret - %x = load <16 x i8>, <16 x i8>* %a + %x = load <16 x i8>, ptr %a %y = sext <16 x i8> %x to <16 x i16> ret <16 x i16> %y } -define <4 x i16> @fzext_v4i16(<4 x i8>* %a) { +define <4 x i16> @fzext_v4i16(ptr %a) { ; CHECK-LE-LABEL: fzext_v4i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -371,12 +371,12 @@ define <4 x i16> @fzext_v4i16(<4 x i8>* %a) { ; CHECK-BE-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a + %x = load <4 x i8>, ptr %a %y = zext <4 x i8> %x to <4 x i16> ret <4 x i16> %y } -define <4 x i16> @anyext_v4i16(<4 x i8> *%a, <4 x i8> *%b) { +define <4 x i16> @anyext_v4i16(ptr %a, ptr %b) { ; CHECK-LE-LABEL: anyext_v4i16: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -401,14 +401,14 @@ define <4 x i16> @anyext_v4i16(<4 x i8> *%a, <4 x i8> *%b) { ; CHECK-BE-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a, align 4 - %y = load <4 x i8>, <4 x i8>* %b, align 4 + %x = load <4 x i8>, ptr %a, align 4 + %y = load <4 x i8>, ptr %b, align 4 %z = add <4 x i8> %x, %y %s = sext <4 x i8> %z to <4 x i16> ret <4 x i16> %s } -define <4 x i32> @anyext_v4i32(<4 x i8> *%a, <4 x i8> *%b) { +define <4 x i32> @anyext_v4i32(ptr %a, ptr %b) { ; CHECK-LE-LABEL: anyext_v4i32: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ldr s0, [x0] @@ -436,8 +436,8 @@ define <4 x i32> @anyext_v4i32(<4 x i8> *%a, <4 x i8> *%b) { ; CHECK-BE-NEXT: rev64 v0.4s, v0.4s ; CHECK-BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-BE-NEXT: ret - %x = load <4 x i8>, <4 x i8>* %a, align 4 - %y = load <4 x i8>, <4 x i8>* %b, align 4 + %x = load <4 x i8>, ptr %a, align 4 + %y = load <4 x i8>, ptr %b, align 4 %z = add <4 x i8> %x, %y %s = sext <4 x i8> %z to <4 x i32> ret <4 x i32> %s diff --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll index 4ff80296c0d99d..f5d7d330b45c44 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll @@ -21,208 +21,208 @@ declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) ; CHECK-LABEL: smax_B ; CHECK: smaxv {{b[0-9]+}}, {{v[0-9]+}}.16b -define i8 @smax_B(<16 x i8>* nocapture readonly %arr) { - %arr.load = load <16 x i8>, <16 x i8>* %arr +define i8 @smax_B(ptr nocapture readonly %arr) { + %arr.load = load <16 x i8>, ptr %arr %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %arr.load) ret i8 %r } ; CHECK-LABEL: smax_H ; CHECK: smaxv {{h[0-9]+}}, {{v[0-9]+}}.8h -define i16 @smax_H(<8 x i16>* nocapture readonly %arr) { - %arr.load = load <8 x i16>, <8 x i16>* %arr +define i16 @smax_H(ptr nocapture readonly %arr) { + %arr.load = load <8 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %arr.load) ret i16 %r } ; CHECK-LABEL: smax_S ; CHECK: smaxv {{s[0-9]+}}, {{v[0-9]+}}.4s -define i32 @smax_S(<4 x i32> * nocapture readonly %arr) { - %arr.load = load <4 x i32>, <4 x i32>* %arr +define i32 @smax_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %arr.load) ret i32 %r } ; CHECK-LABEL: umax_B ; CHECK: umaxv {{b[0-9]+}}, {{v[0-9]+}}.16b -define i8 @umax_B(<16 x i8>* nocapture readonly %arr) { - %arr.load = load <16 x i8>, <16 x i8>* %arr +define i8 @umax_B(ptr nocapture readonly %arr) { + %arr.load = load <16 x i8>, ptr %arr %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %arr.load) ret i8 %r } ; CHECK-LABEL: umax_H ; CHECK: umaxv 
{{h[0-9]+}}, {{v[0-9]+}}.8h -define i16 @umax_H(<8 x i16>* nocapture readonly %arr) { - %arr.load = load <8 x i16>, <8 x i16>* %arr +define i16 @umax_H(ptr nocapture readonly %arr) { + %arr.load = load <8 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %arr.load) ret i16 %r } ; CHECK-LABEL: umax_S ; CHECK: umaxv {{s[0-9]+}}, {{v[0-9]+}}.4s -define i32 @umax_S(<4 x i32>* nocapture readonly %arr) { - %arr.load = load <4 x i32>, <4 x i32>* %arr +define i32 @umax_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %arr.load) ret i32 %r } ; CHECK-LABEL: smin_B ; CHECK: sminv {{b[0-9]+}}, {{v[0-9]+}}.16b -define i8 @smin_B(<16 x i8>* nocapture readonly %arr) { - %arr.load = load <16 x i8>, <16 x i8>* %arr +define i8 @smin_B(ptr nocapture readonly %arr) { + %arr.load = load <16 x i8>, ptr %arr %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %arr.load) ret i8 %r } ; CHECK-LABEL: smin_H ; CHECK: sminv {{h[0-9]+}}, {{v[0-9]+}}.8h -define i16 @smin_H(<8 x i16>* nocapture readonly %arr) { - %arr.load = load <8 x i16>, <8 x i16>* %arr +define i16 @smin_H(ptr nocapture readonly %arr) { + %arr.load = load <8 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %arr.load) ret i16 %r } ; CHECK-LABEL: smin_S ; CHECK: sminv {{s[0-9]+}}, {{v[0-9]+}}.4s -define i32 @smin_S(<4 x i32>* nocapture readonly %arr) { - %arr.load = load <4 x i32>, <4 x i32>* %arr +define i32 @smin_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %arr.load) ret i32 %r } ; CHECK-LABEL: umin_B ; CHECK: uminv {{b[0-9]+}}, {{v[0-9]+}}.16b -define i8 @umin_B(<16 x i8>* nocapture readonly %arr) { - %arr.load = load <16 x i8>, <16 x i8>* %arr +define i8 @umin_B(ptr nocapture readonly %arr) { + %arr.load = load <16 x i8>, ptr %arr %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %arr.load) ret i8 %r } ; CHECK-LABEL: umin_H ; CHECK: uminv {{h[0-9]+}}, {{v[0-9]+}}.8h -define i16 @umin_H(<8 x i16>* nocapture readonly %arr) { - %arr.load = load <8 x i16>, <8 x i16>* %arr +define i16 @umin_H(ptr nocapture readonly %arr) { + %arr.load = load <8 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %arr.load) ret i16 %r } ; CHECK-LABEL: umin_S ; CHECK: uminv {{s[0-9]+}}, {{v[0-9]+}}.4s -define i32 @umin_S(<4 x i32>* nocapture readonly %arr) { - %arr.load = load <4 x i32>, <4 x i32>* %arr +define i32 @umin_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %arr.load) ret i32 %r } ; CHECK-LABEL: fmaxnm_S ; CHECK: fmaxnmv -define float @fmaxnm_S(<4 x float>* nocapture readonly %arr) { - %arr.load = load <4 x float>, <4 x float>* %arr +define float @fmaxnm_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x float>, ptr %arr %r = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %arr.load) ret float %r } ; CHECK-LABEL: fminnm_S ; CHECK: fminnmv -define float @fminnm_S(<4 x float>* nocapture readonly %arr) { - %arr.load = load <4 x float>, <4 x float>* %arr +define float @fminnm_S(ptr nocapture readonly %arr) { + %arr.load = load <4 x float>, ptr %arr %r = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %arr.load) ret float %r } declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>) -define i16 @oversized_umax_256(<16 x i16>* nocapture readonly %arr) { +define i16 @oversized_umax_256(ptr nocapture readonly %arr) { ; CHECK-LABEL: 
oversized_umax_256 ; CHECK: umax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h ; CHECK: umaxv {{h[0-9]+}}, [[V0]] - %arr.load = load <16 x i16>, <16 x i16>* %arr + %arr.load = load <16 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %arr.load) ret i16 %r } declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>) -define i32 @oversized_umax_512(<16 x i32>* nocapture readonly %arr) { +define i32 @oversized_umax_512(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_umax_512 ; CHECK: umax v ; CHECK-NEXT: umax v ; CHECK-NEXT: umax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s ; CHECK-NEXT: umaxv {{s[0-9]+}}, [[V0]] - %arr.load = load <16 x i32>, <16 x i32>* %arr + %arr.load = load <16 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %arr.load) ret i32 %r } declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>) -define i16 @oversized_umin_256(<16 x i16>* nocapture readonly %arr) { +define i16 @oversized_umin_256(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_umin_256 ; CHECK: umin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h ; CHECK: uminv {{h[0-9]+}}, [[V0]] - %arr.load = load <16 x i16>, <16 x i16>* %arr + %arr.load = load <16 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %arr.load) ret i16 %r } declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>) -define i32 @oversized_umin_512(<16 x i32>* nocapture readonly %arr) { +define i32 @oversized_umin_512(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_umin_512 ; CHECK: umin v ; CHECK-NEXT: umin v ; CHECK-NEXT: umin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s ; CHECK-NEXT: uminv {{s[0-9]+}}, [[V0]] - %arr.load = load <16 x i32>, <16 x i32>* %arr + %arr.load = load <16 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %arr.load) ret i32 %r } declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>) -define i16 @oversized_smax_256(<16 x i16>* nocapture readonly %arr) { +define i16 @oversized_smax_256(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_smax_256 ; CHECK: smax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h ; CHECK: smaxv {{h[0-9]+}}, [[V0]] - %arr.load = load <16 x i16>, <16 x i16>* %arr + %arr.load = load <16 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %arr.load) ret i16 %r } declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>) -define i32 @oversized_smax_512(<16 x i32>* nocapture readonly %arr) { +define i32 @oversized_smax_512(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_smax_512 ; CHECK: smax v ; CHECK-NEXT: smax v ; CHECK-NEXT: smax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s ; CHECK-NEXT: smaxv {{s[0-9]+}}, [[V0]] - %arr.load = load <16 x i32>, <16 x i32>* %arr + %arr.load = load <16 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %arr.load) ret i32 %r } declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>) -define i16 @oversized_smin_256(<16 x i16>* nocapture readonly %arr) { +define i16 @oversized_smin_256(ptr nocapture readonly %arr) { ; CHECK-LABEL: oversized_smin_256 ; CHECK: smin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h ; CHECK: sminv {{h[0-9]+}}, [[V0]] - %arr.load = load <16 x i16>, <16 x i16>* %arr + %arr.load = load <16 x i16>, ptr %arr %r = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %arr.load) ret i16 %r } declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>) -define i32 @oversized_smin_512(<16 x i32>* nocapture readonly %arr) { +define i32 @oversized_smin_512(ptr nocapture 
readonly %arr) { ; CHECK-LABEL: oversized_smin_512 ; CHECK: smin v ; CHECK-NEXT: smin v ; CHECK-NEXT: smin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s ; CHECK-NEXT: sminv {{s[0-9]+}}, [[V0]] - %arr.load = load <16 x i32>, <16 x i32>* %arr + %arr.load = load <16 x i32>, ptr %arr %r = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %arr.load) ret i32 %r } diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll index 1b1ae0853d5f35..cea255b2252120 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll @@ -2,9 +2,9 @@ ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops | FileCheck %s --check-prefix=CHECK-MOPS -declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) +declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) -declare void @fn(i8*, i8*) +declare void @fn(ptr, ptr) define void @consecutive() { ; CHECK-MOPS-LABEL: consecutive: @@ -42,28 +42,24 @@ define void @consecutive() { entry: %buf_from = alloca [1000 x i8], align 16 %buf_to = alloca [1000 x i8], align 1 - %0 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 0 - %1 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_to, i64 0, i64 0 - call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(1000) %1, i8 0, i64 1000, i1 false) - %2 = bitcast [1000 x i8]* %buf_from to <16 x i8>* - store <16 x i8> , <16 x i8>* %2, align 16 - %arrayidx.16 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 16 - %3 = bitcast i8* %arrayidx.16 to <8 x i8>* - store <8 x i8> , <8 x i8>* %3, align 16 - %arrayidx.24 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 24 - store i8 24, i8* %arrayidx.24, align 8 - %arrayidx.25 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 25 - store i8 25, i8* %arrayidx.25, align 1 - %arrayidx.26 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 26 - store i8 26, i8* %arrayidx.26, align 2 - %arrayidx.27 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 27 - store i8 27, i8* %arrayidx.27, align 1 - %arrayidx.28 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 28 - store i8 28, i8* %arrayidx.28, align 4 - %arrayidx.29 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 29 - store i8 29, i8* %arrayidx.29, align 1 - %arrayidx.30 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 30 - store i8 30, i8* %arrayidx.30, align 2 - call void @fn(i8* nonnull %0, i8* nonnull %1) + call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(1000) %buf_to, i8 0, i64 1000, i1 false) + store <16 x i8> , ptr %buf_from, align 16 + %arrayidx.16 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 16 + store <8 x i8> , ptr %arrayidx.16, align 16 + %arrayidx.24 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 24 + store i8 24, ptr %arrayidx.24, align 8 + %arrayidx.25 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 25 + store i8 25, ptr %arrayidx.25, align 1 + %arrayidx.26 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 26 + store i8 26, ptr %arrayidx.26, align 2 + %arrayidx.27 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 27 + store i8 27, ptr %arrayidx.27, align 1 + %arrayidx.28 = getelementptr inbounds [1000 x i8], ptr 
%buf_from, i64 0, i64 28 + store i8 28, ptr %arrayidx.28, align 4 + %arrayidx.29 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 29 + store i8 29, ptr %arrayidx.29, align 1 + %arrayidx.30 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 30 + store i8 30, ptr %arrayidx.30, align 2 + call void @fn(ptr nonnull %buf_from, ptr nonnull %buf_to) ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll index 5b71648ac1a92b..1fe1308d8b3517 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll @@ -4,9 +4,9 @@ ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -global-isel=1 -global-isel-abort=1 -mattr=+mops,+mte | FileCheck %s --check-prefix=GISel ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops,+mte | FileCheck %s --check-prefix=SDAG -declare i8* @llvm.aarch64.mops.memset.tag(i8*, i8, i64) +declare ptr @llvm.aarch64.mops.memset.tag(ptr, i8, i64) -define i8* @memset_tagged_0_zeroval(i8* %dst, i64 %size) { +define ptr @memset_tagged_0_zeroval(ptr %dst, i64 %size) { ; GISel-O0-LABEL: memset_tagged_0_zeroval: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: mov x8, xzr @@ -31,11 +31,11 @@ define i8* @memset_tagged_0_zeroval(i8* %dst, i64 %size) { ; SDAG-NEXT: setge [x0]!, x8!, xzr ; SDAG-NEXT: ret entry: - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 0) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 0) + ret ptr %r } -define i8* @memset_tagged_1_zeroval(i8* %dst, i64 %size) { +define ptr @memset_tagged_1_zeroval(ptr %dst, i64 %size) { ; GISel-O0-LABEL: memset_tagged_1_zeroval: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: mov x9, xzr @@ -62,11 +62,11 @@ define i8* @memset_tagged_1_zeroval(i8* %dst, i64 %size) { ; SDAG-NEXT: setge [x0]!, x8!, xzr ; SDAG-NEXT: ret entry: - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 1) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 1) + ret ptr %r } -define i8* @memset_tagged_10_zeroval(i8* %dst, i64 %size) { +define ptr @memset_tagged_10_zeroval(ptr %dst, i64 %size) { ; GISel-O0-LABEL: memset_tagged_10_zeroval: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: mov x9, xzr @@ -93,11 +93,11 @@ define i8* @memset_tagged_10_zeroval(i8* %dst, i64 %size) { ; SDAG-NEXT: setge [x0]!, x8!, xzr ; SDAG-NEXT: ret entry: - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 10) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 10) + ret ptr %r } -define i8* @memset_tagged_10000_zeroval(i8* %dst, i64 %size) { +define ptr @memset_tagged_10000_zeroval(ptr %dst, i64 %size) { ; GISel-O0-LABEL: memset_tagged_10000_zeroval: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: mov x9, xzr @@ -124,11 +124,11 @@ define i8* @memset_tagged_10000_zeroval(i8* %dst, i64 %size) { ; SDAG-NEXT: setge [x0]!, x8!, xzr ; SDAG-NEXT: ret entry: - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 10000) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 10000) + ret ptr %r } -define i8* @memset_tagged_size_zeroval(i8* %dst, i64 %size) { +define ptr @memset_tagged_size_zeroval(ptr %dst, i64 %size) { ; GISel-O0-LABEL: memset_tagged_size_zeroval: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: mov x8, xzr @@ -151,11 +151,11 @@ define i8* @memset_tagged_size_zeroval(i8* %dst, i64 %size) { ; SDAG-NEXT: setge [x0]!, x1!, xzr ; 
SDAG-NEXT: ret entry: - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 %size) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 %size) + ret ptr %r } -define i8* @memset_tagged_0(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_0(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_0: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x9 @@ -185,11 +185,11 @@ define i8* @memset_tagged_0(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 0) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 0) + ret ptr %r } -define i8* @memset_tagged_1(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_1(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_1: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x9 @@ -220,11 +220,11 @@ define i8* @memset_tagged_1(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 1) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 1) + ret ptr %r } -define i8* @memset_tagged_10(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_10(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_10: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x9 @@ -255,11 +255,11 @@ define i8* @memset_tagged_10(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 10) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 10) + ret ptr %r } -define i8* @memset_tagged_10000(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_10000(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_10000: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x9 @@ -290,11 +290,11 @@ define i8* @memset_tagged_10000(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 10000) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 10000) + ret ptr %r } -define i8* @memset_tagged_size(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_size(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_size: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x8 @@ -321,11 +321,11 @@ define i8* @memset_tagged_size(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 %size) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 %size) + ret ptr %r } -define i8* @memset_tagged_size_aligned(i8* %dst, i64 %size, i32 %value) { +define ptr @memset_tagged_size_aligned(ptr %dst, i64 %size, i32 %value) { ; GISel-O0-LABEL: memset_tagged_size_aligned: ; GISel-O0: // %bb.0: // %entry ; GISel-O0-NEXT: // implicit-def: $x8 @@ -352,6 +352,6 @@ define i8* @memset_tagged_size_aligned(i8* %dst, i64 %size, i32 %value) { ; SDAG-NEXT: ret entry: %value_trunc = trunc i32 
%value to i8 - %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* align 16 %dst, i8 %value_trunc, i64 %size) - ret i8* %r + %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr align 16 %dst, i8 %value_trunc, i64 %size) + ret ptr %r } diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll index 5a880d3e59aebb..e342f37ebe4791 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll @@ -7,15 +7,15 @@ ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 | FileCheck %s --check-prefix=SDAG-WITHOUT-MOPS-O2 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops | FileCheck %s --check-prefix=SDAG-MOPS-O2 -declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) +declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) +declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) -declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) +declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) -declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) +declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) -define void @memset_0_zeroval(i8* %dst) { +define void @memset_0_zeroval(ptr %dst) { ; GISel-WITHOUT-MOPS-LABEL: memset_0_zeroval: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -32,11 +32,11 @@ define void @memset_0_zeroval(i8* %dst) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 false) ret void } -define void @memset_0_zeroval_volatile(i8* %dst) { +define void @memset_0_zeroval_volatile(ptr %dst) { ; GISel-WITHOUT-MOPS-LABEL: memset_0_zeroval_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -53,11 +53,11 @@ define void @memset_0_zeroval_volatile(i8* %dst) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 true) ret void } -define void @memset_10_zeroval(i8* %dst) { +define void @memset_10_zeroval(ptr %dst) { ; GISel-WITHOUT-MOPS-LABEL: memset_10_zeroval: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str xzr, [x0] @@ -82,11 +82,11 @@ define void @memset_10_zeroval(i8* %dst) { ; SDAG-MOPS-O2-NEXT: str xzr, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10, i1 false) ret void } -define void @memset_10_zeroval_volatile(i8* %dst) { +define void @memset_10_zeroval_volatile(ptr %dst) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_zeroval_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill @@ -140,11 +140,11 @@ define void @memset_10_zeroval_volatile(i8* %dst) { ; SDAG-MOPS-O2-NEXT: str xzr, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10, i1 true) ret void } -define void @memset_10000_zeroval(i8* %dst) { +define void @memset_10000_zeroval(ptr %dst) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_zeroval: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -205,11 +205,11 @@ define void @memset_10000_zeroval(i8* %dst) { ; SDAG-MOPS-O2-NEXT: sete [x0]!, x8!, xzr ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10000, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10000, i1 false) ret void } -define void @memset_10000_zeroval_volatile(i8* %dst) { +define void @memset_10000_zeroval_volatile(ptr %dst) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_zeroval_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -270,11 +270,11 @@ define void @memset_10000_zeroval_volatile(i8* %dst) { ; SDAG-MOPS-O2-NEXT: sete [x0]!, x8!, xzr ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10000, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10000, i1 true) ret void } -define void @memset_size_zeroval(i8* %dst, i64 %size) { +define void @memset_size_zeroval(ptr %dst, i64 %size) { ; GISel-WITHOUT-MOPS-LABEL: memset_size_zeroval: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -319,11 +319,11 @@ define void @memset_size_zeroval(i8* %dst, i64 %size) { ; SDAG-MOPS-O2-NEXT: sete [x0]!, x1!, xzr ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %size, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %size, i1 false) ret void } -define void @memset_size_zeroval_volatile(i8* %dst, i64 %size) { +define void @memset_size_zeroval_volatile(ptr %dst, i64 %size) { ; GISel-WITHOUT-MOPS-LABEL: memset_size_zeroval_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill @@ -368,12 +368,12 @@ define void @memset_size_zeroval_volatile(i8* %dst, i64 %size) { ; SDAG-MOPS-O2-NEXT: sete [x0]!, x1!, xzr ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %size, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %size, i1 true) ret void } -define void @memset_0(i8* %dst, i32 %value) { +define void @memset_0(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memset_0: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -391,11 +391,11 @@ define void @memset_0(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 false) ret void } -define void @memset_0_volatile(i8* %dst, i32 %value) { +define void @memset_0_volatile(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memset_0_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -413,11 +413,11 @@ define void @memset_0_volatile(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 true) ret void } -define void @memset_10(i8* %dst, i32 %value) { +define void @memset_10(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: // implicit-def: $x8 @@ -483,11 +483,11 @@ define void @memset_10(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10, i1 false) ret void } -define void @memset_10_volatile(i8* %dst, i32 %value) { +define void @memset_10_volatile(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -550,11 +550,11 @@ define void @memset_10_volatile(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10, i1 true) ret void } -define void @memset_10000(i8* %dst, i32 %value) { +define void @memset_10000(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -616,11 +616,11 @@ define void @memset_10000(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10000, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10000, i1 false) ret void } -define void @memset_10000_volatile(i8* %dst, i32 %value) { +define void @memset_10000_volatile(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill @@ -682,11 +682,11 @@ define void @memset_10000_volatile(i8* %dst, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10000, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10000, i1 true) ret void } -define void @memset_size(i8* %dst, i64 %size, i32 %value) { +define void @memset_size(ptr %dst, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: sub sp, sp, #32 @@ -751,11 +751,11 @@ define void @memset_size(i8* %dst, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 %size, i1 false) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 %size, i1 false) ret void } -define void @memset_size_volatile(i8* %dst, i64 %size, i32 %value) { +define void @memset_size_volatile(ptr %dst, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: sub sp, sp, #32 @@ -820,12 +820,12 @@ define void @memset_size_volatile(i8* %dst, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: ret entry: %value_trunc = trunc i32 %value to i8 - call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 %size, i1 true) + call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 %size, i1 true) ret void } -define void @memcpy_0(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_0(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_0: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -842,11 +842,11 @@ define void @memcpy_0(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false) ret void } -define void @memcpy_0_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_0_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_0_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -863,11 +863,11 @@ define void @memcpy_0_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true) ret void } -define void @memcpy_10(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_10(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_10: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ldr x8, [x1] @@ -900,11 +900,11 @@ define void @memcpy_10(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x9, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false) ret void } -define void @memcpy_10_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_10_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_10_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry 
; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -959,11 +959,11 @@ define void @memcpy_10_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x8, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true) ret void } -define void @memcpy_1000(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_1000(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_1000: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1020,11 +1020,11 @@ define void @memcpy_1000(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x8! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 false) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 false) ret void } -define void @memcpy_1000_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_1000_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_1000_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1081,11 +1081,11 @@ define void @memcpy_1000_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x8! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 true) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 true) ret void } -define void @memcpy_n(i8* %dst, i8* %src, i64 %size, i32 %value) { +define void @memcpy_n(ptr %dst, ptr %src, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_n: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1118,11 +1118,11 @@ define void @memcpy_n(i8* %dst, i8* %src, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x2! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 false) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 false) ret void } -define void @memcpy_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) { +define void @memcpy_n_volatile(ptr %dst, ptr %src, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_n_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1155,12 +1155,12 @@ define void @memcpy_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x2! 
; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 true) + call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 true) ret void } -define void @memcpy_inline_0(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_0(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_0: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -1177,11 +1177,11 @@ define void @memcpy_inline_0(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false) ret void } -define void @memcpy_inline_0_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_0_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_0_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -1198,11 +1198,11 @@ define void @memcpy_inline_0_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true) ret void } -define void @memcpy_inline_10(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_10(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_10: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ldr x8, [x1] @@ -1235,11 +1235,11 @@ define void @memcpy_inline_10(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x9, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false) ret void } -define void @memcpy_inline_10_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_10_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_10_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ldr x8, [x1] @@ -1272,11 +1272,11 @@ define void @memcpy_inline_10_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x8, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true) ret void } -define void @memcpy_inline_300(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_300(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_inline_300: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: ldr q0, [x1] @@ -1489,11 +1489,11 @@ define void @memcpy_inline_300(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x8! 
; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 300, i1 false) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 300, i1 false) ret void } -define void @memcpy_inline_300_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memcpy_inline_300_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_300_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ldr q0, [x1] @@ -1634,11 +1634,11 @@ define void @memcpy_inline_300_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpyfe [x0]!, [x1]!, x8! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 300, i1 true) + call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 300, i1 true) ret void } -define void @memmove_0(i8* %dst, i8* %src, i32 %value) { +define void @memmove_0(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memmove_0: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -1655,11 +1655,11 @@ define void @memmove_0(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false) ret void } -define void @memmove_0_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memmove_0_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memmove_0_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: ret @@ -1676,11 +1676,11 @@ define void @memmove_0_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2: // %bb.0: // %entry ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true) ret void } -define void @memmove_10(i8* %dst, i8* %src, i32 %value) { +define void @memmove_10(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_10: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: ldr x9, [x1] @@ -1729,11 +1729,11 @@ define void @memmove_10(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x9, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false) ret void } -define void @memmove_10_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memmove_10_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_10_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill @@ -1788,11 +1788,11 @@ define void @memmove_10_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: str x8, [x0] ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true) ret void } -define void @memmove_1000(i8* %dst, i8* %src, i32 %value) { +define void @memmove_1000(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_1000: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1849,11 +1849,11 @@ define void @memmove_1000(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpye [x0]!, [x1]!, x8! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 false) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 false) ret void } -define void @memmove_1000_volatile(i8* %dst, i8* %src, i32 %value) { +define void @memmove_1000_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_1000_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1910,11 +1910,11 @@ define void @memmove_1000_volatile(i8* %dst, i8* %src, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpye [x0]!, [x1]!, x8! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 true) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 true) ret void } -define void @memmove_n(i8* %dst, i8* %src, i64 %size, i32 %value) { +define void @memmove_n(ptr %dst, ptr %src, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memmove_n: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1947,11 +1947,11 @@ define void @memmove_n(i8* %dst, i8* %src, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpye [x0]!, [x1]!, x2! ; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 false) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 false) ret void } -define void @memmove_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) { +define void @memmove_n_volatile(ptr %dst, ptr %src, i64 %size, i32 %value) { ; GISel-WITHOUT-MOPS-LABEL: memmove_n_volatile: ; GISel-WITHOUT-MOPS: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill @@ -1984,6 +1984,6 @@ define void @memmove_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) { ; SDAG-MOPS-O2-NEXT: cpye [x0]!, [x1]!, x2! 
; SDAG-MOPS-O2-NEXT: ret entry: - call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 true) + call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 true) ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll index 6172cb0df142c7..71db09f7706244 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll @@ -77,7 +77,7 @@ entry: ret i64 %mul } -define i64 @smull_ldrsb_b(i8* %x0, i8 %x1) { +define i64 @smull_ldrsb_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsb_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -86,14 +86,14 @@ define i64 @smull_ldrsb_b(i8* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsb_b_commuted(i8* %x0, i8 %x1) { +define i64 @smull_ldrsb_b_commuted(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsb_b_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -102,14 +102,14 @@ define i64 @smull_ldrsb_b_commuted(i8* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i8 %x1 to i64 %mul = mul i64 %sext4, %sext ret i64 %mul } -define i64 @smull_ldrsb_h(i8* %x0, i16 %x1) { +define i64 @smull_ldrsb_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: smull_ldrsb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -118,28 +118,28 @@ define i64 @smull_ldrsb_h(i8* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsb_w(i8* %x0, i32 %x1) { +define i64 @smull_ldrsb_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: smull_ldrsb_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] ; CHECK-NEXT: smull x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsh_b(i16* %x0, i8 %x1) { +define i64 @smull_ldrsh_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsh_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] @@ -148,14 +148,14 @@ define i64 @smull_ldrsh_b(i16* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsh_h(i16* %x0, i16 %x1) { +define i64 @smull_ldrsh_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: smull_ldrsh_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] @@ -164,14 +164,14 @@ define i64 @smull_ldrsh_h(i16* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsh_h_commuted(i16* %x0, i16 %x1) { +define i64 @smull_ldrsh_h_commuted(ptr %x0, i16 %x1) { ; CHECK-LABEL: smull_ldrsh_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] @@ -180,28 +180,28 @@ define i64 
@smull_ldrsh_h_commuted(i16* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext4, %sext ret i64 %mul } -define i64 @smull_ldrsh_w(i16* %x0, i32 %x1) { +define i64 @smull_ldrsh_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: smull_ldrsh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smull x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsw_b(i32* %x0, i8 %x1) { +define i64 @smull_ldrsw_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsw_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -210,14 +210,14 @@ define i64 @smull_ldrsw_b(i32* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext4 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsw_h(i32* %x0, i16 %x1) { +define i64 @smull_ldrsw_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: smull_ldrsw_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -226,35 +226,35 @@ define i64 @smull_ldrsw_h(i32* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsw_w(i32* %x0, i32 %x1) { +define i64 @smull_ldrsw_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: smull_ldrsw_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] ; CHECK-NEXT: smull x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 ret i64 %mul } -define i64 @smull_ldrsw_w_commuted(i32* %x0, i32 %x1) { +define i64 @smull_ldrsw_w_commuted(ptr %x0, i32 %x1) { ; CHECK-LABEL: smull_ldrsw_w_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] ; CHECK-NEXT: smull x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -277,7 +277,7 @@ entry: ret i64 %mul } -define i64 @smull_ldrsw_shift(i32* %x0, i64 %x1) { +define i64 @smull_ldrsw_shift(ptr %x0, i64 %x1) { ; CHECK-LABEL: smull_ldrsw_shift: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -285,7 +285,7 @@ define i64 @smull_ldrsw_shift(i32* %x0, i64 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %shl = shl i64 %x1, 32 %shr = ashr exact i64 %shl, 32 @@ -293,7 +293,7 @@ entry: ret i64 %mul } -define i64 @smull_ldrsh_zextw(i16* %x0, i32 %x1) { +define i64 @smull_ldrsh_zextw(ptr %x0, i32 %x1) { ; CHECK-LABEL: smull_ldrsh_zextw: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] @@ -301,14 +301,14 @@ define i64 @smull_ldrsh_zextw(i16* %x0, i32 %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %zext = zext i32 %x1 to i64 %mul = mul i64 %sext, %zext ret i64 %mul } -define i64 @smull_ldrsw_zexth(i32* %x0, i16 %x1) { +define i64 @smull_ldrsw_zexth(ptr %x0, i16 %x1) 
{ ; CHECK-LABEL: smull_ldrsw_zexth: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -317,14 +317,14 @@ define i64 @smull_ldrsw_zexth(i32* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i16 %x1 to i64 %mul = mul i64 %sext, %zext ret i64 %mul } -define i64 @smull_ldrsw_zextb(i32* %x0, i8 %x1) { +define i64 @smull_ldrsw_zextb(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsw_zextb: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -333,14 +333,14 @@ define i64 @smull_ldrsw_zextb(i32* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i8 %x1 to i64 %mul = mul i64 %sext, %zext ret i64 %mul } -define i64 @smull_ldrsw_zextb_commuted(i32* %x0, i8 %x1) { +define i64 @smull_ldrsw_zextb_commuted(ptr %x0, i8 %x1) { ; CHECK-LABEL: smull_ldrsw_zextb_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -349,14 +349,14 @@ define i64 @smull_ldrsw_zextb_commuted(i32* %x0, i8 %x1) { ; CHECK-NEXT: smull x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i8 %x1 to i64 %mul = mul i64 %zext, %sext ret i64 %mul } -define i64 @smaddl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) { +define i64 @smaddl_ldrsb_h(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -365,7 +365,7 @@ define i64 @smaddl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -373,7 +373,7 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { +define i64 @smaddl_ldrsb_h_commuted(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -382,7 +382,7 @@ define i64 @smaddl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w9, w8, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -390,14 +390,14 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsh_w(i16* %x0, i32 %x1, i64 %x2) { +define i64 @smaddl_ldrsh_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smaddl x0, w8, w1, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -405,14 +405,14 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsh_w_commuted(i16* %x0, i32 %x1, i64 %x2) { +define i64 @smaddl_ldrsh_w_commuted(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsh_w_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smaddl x0, w8, w1, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -420,7 +420,7 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smaddl_ldrsw_b(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsw_b: 
; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -428,7 +428,7 @@ define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext2 @@ -436,7 +436,7 @@ define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { ret i64 %add } -define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smaddl_ldrsw_b_commuted(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsw_b_commuted: ; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -444,7 +444,7 @@ define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smaddl x0, w9, w8, x2 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext2, %sext @@ -452,7 +452,7 @@ define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) { ret i64 %add } -define i64 @smaddl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) { +define i64 @smaddl_ldrsw_ldrsw(ptr %x0, ptr %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsw_ldrsw: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -460,8 +460,8 @@ define i64 @smaddl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 - %ext64_2 = load i32, i32* %x1 + %ext64 = load i32, ptr %x0 + %ext64_2 = load i32, ptr %x1 %sext = sext i32 %ext64 to i64 %sext2 = sext i32 %ext64_2 to i64 %mul = mul i64 %sext, %sext2 @@ -486,7 +486,7 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) { +define i64 @smaddl_ldrsw_shift(ptr %x0, i64 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsw_shift: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -494,7 +494,7 @@ define i64 @smaddl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %shl = shl i64 %x1, 32 %shr = ashr exact i64 %shl, 32 @@ -503,7 +503,7 @@ entry: ret i64 %add } -define i64 @smaddl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smaddl_ldrsw_zextb(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smaddl_ldrsw_zextb: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -512,7 +512,7 @@ define i64 @smaddl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i8 %x1 to i64 %mul = mul i64 %sext, %zext @@ -520,7 +520,7 @@ entry: ret i64 %add } -define i64 @smnegl_ldrsb_h(i8* %x0, i16 %x1) { +define i64 @smnegl_ldrsb_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: smnegl_ldrsb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -529,7 +529,7 @@ define i64 @smnegl_ldrsb_h(i8* %x0, i16 %x1) { ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -537,7 +537,7 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsb_h_commuted(i8* %x0, i16 %x1) { +define i64 @smnegl_ldrsb_h_commuted(ptr %x0, i16 %x1) { ; CHECK-LABEL: smnegl_ldrsb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -546,7 +546,7 @@ define i64 @smnegl_ldrsb_h_commuted(i8* %x0, i16 
%x1) { ; CHECK-NEXT: smnegl x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -554,14 +554,14 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsh_w(i16* %x0, i32 %x1) { +define i64 @smnegl_ldrsh_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: smnegl_ldrsh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smnegl x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -569,14 +569,14 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsh_w_commuted(i16* %x0, i32 %x1) { +define i64 @smnegl_ldrsh_w_commuted(ptr %x0, i32 %x1) { ; CHECK-LABEL: smnegl_ldrsh_w_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smnegl x0, w8, w1 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -584,7 +584,7 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) { +define i64 @smnegl_ldrsw_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: smnegl_ldrsw_b: ; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -592,7 +592,7 @@ define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext2 @@ -600,7 +600,7 @@ define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) { ret i64 %sub } -define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) { +define i64 @smnegl_ldrsw_b_commuted(ptr %x0, i8 %x1) { ; CHECK-LABEL: smnegl_ldrsw_b_commuted: ; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -608,7 +608,7 @@ define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smnegl x0, w9, w8 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext2, %sext @@ -616,7 +616,7 @@ define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) { ret i64 %sub } -define i64 @smnegl_ldrsw_ldrsw(i32* %x0, i32* %x1) { +define i64 @smnegl_ldrsw_ldrsw(ptr %x0, ptr %x1) { ; CHECK-LABEL: smnegl_ldrsw_ldrsw: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -624,8 +624,8 @@ define i64 @smnegl_ldrsw_ldrsw(i32* %x0, i32* %x1) { ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 - %ext64_2 = load i32, i32* %x1 + %ext64 = load i32, ptr %x0 + %ext64_2 = load i32, ptr %x1 %sext = sext i32 %ext64 to i64 %sext2 = sext i32 %ext64_2 to i64 %mul = mul i64 %sext, %sext2 @@ -650,7 +650,7 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsw_shift(i32* %x0, i64 %x1) { +define i64 @smnegl_ldrsw_shift(ptr %x0, i64 %x1) { ; CHECK-LABEL: smnegl_ldrsw_shift: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -658,7 +658,7 @@ define i64 @smnegl_ldrsw_shift(i32* %x0, i64 %x1) { ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %shl = shl i64 %x1, 32 %shr = ashr exact i64 %shl, 32 @@ -667,7 +667,7 @@ entry: ret i64 %sub } -define i64 @smnegl_ldrsw_zextb(i32* %x0, i8 %x1) { +define i64 @smnegl_ldrsw_zextb(ptr %x0, i8 %x1) { ; CHECK-LABEL: smnegl_ldrsw_zextb: ; CHECK: // %bb.0: // %entry ; 
CHECK-NEXT: ldrsw x8, [x0] @@ -676,7 +676,7 @@ define i64 @smnegl_ldrsw_zextb(i32* %x0, i8 %x1) { ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i8 %x1 to i64 %mul = mul i64 %sext, %zext @@ -684,7 +684,7 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) { +define i64 @smsubl_ldrsb_h(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -693,7 +693,7 @@ define i64 @smsubl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -701,7 +701,7 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { +define i64 @smsubl_ldrsb_h_commuted(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsb x8, [x0] @@ -710,7 +710,7 @@ define i64 @smsubl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w9, w8, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %sext = sext i8 %ext64 to i64 %sext4 = sext i16 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -718,14 +718,14 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsh_w(i16* %x0, i32 %x1, i64 %x2) { +define i64 @smsubl_ldrsh_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smsubl x0, w8, w1, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext, %sext4 @@ -733,14 +733,14 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsh_w_commuted(i16* %x0, i32 %x1, i64 %x2) { +define i64 @smsubl_ldrsh_w_commuted(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsh_w_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsh x8, [x0] ; CHECK-NEXT: smsubl x0, w8, w1, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %sext = sext i16 %ext64 to i64 %sext4 = sext i32 %x1 to i64 %mul = mul i64 %sext4, %sext @@ -748,7 +748,7 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smsubl_ldrsw_b(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsw_b: ; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -756,7 +756,7 @@ define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext, %sext2 @@ -764,7 +764,7 @@ define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) { ret i64 %sub } -define i64 @smsubl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smsubl_ldrsw_b_commuted(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsw_b_commuted: ; CHECK: // %bb.0: ; CHECK-NEXT: ldrsw x8, [x0] @@ -772,7 +772,7 @@ define i64 @smsubl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: sxtb x9, w1 ; CHECK-NEXT: smsubl x0, w9, w8, x2 ; CHECK-NEXT: ret - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %sext2 = sext i8 %x1 to i64 %mul = mul i64 %sext2, %sext @@ -780,7 +780,7 @@ define i64 @smsubl_ldrsw_b_commuted(i32* %x0, 
i8 %x1, i64 %x2) { ret i64 %sub } -define i64 @smsubl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) { +define i64 @smsubl_ldrsw_ldrsw(ptr %x0, ptr %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsw_ldrsw: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -788,8 +788,8 @@ define i64 @smsubl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 - %ext64_2 = load i32, i32* %x1 + %ext64 = load i32, ptr %x0 + %ext64_2 = load i32, ptr %x1 %sext = sext i32 %ext64 to i64 %sext2 = sext i32 %ext64_2 to i64 %mul = mul i64 %sext, %sext2 @@ -814,7 +814,7 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) { +define i64 @smsubl_ldrsw_shift(ptr %x0, i64 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsw_shift: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -822,7 +822,7 @@ define i64 @smsubl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %shl = shl i64 %x1, 32 %shr = ashr exact i64 %shl, 32 @@ -831,7 +831,7 @@ entry: ret i64 %sub } -define i64 @smsubl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) { +define i64 @smsubl_ldrsw_zextb(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: smsubl_ldrsw_zextb: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrsw x8, [x0] @@ -840,7 +840,7 @@ define i64 @smsubl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %sext = sext i32 %ext64 to i64 %zext = zext i8 %x1 to i64 %mul = mul i64 %sext, %zext @@ -905,7 +905,7 @@ entry: ret i64 %tmp3 } -define i64 @umull_ldrb_h(i8* %x0, i16 %x1) { +define i64 @umull_ldrb_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: umull_ldrb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -914,14 +914,14 @@ define i64 @umull_ldrb_h(i8* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext, %zext4 ret i64 %mul } -define i64 @umull_ldrb_h_commuted(i8* %x0, i16 %x1) { +define i64 @umull_ldrb_h_commuted(ptr %x0, i16 %x1) { ; CHECK-LABEL: umull_ldrb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -930,14 +930,14 @@ define i64 @umull_ldrb_h_commuted(i8* %x0, i16 %x1) { ; CHECK-NEXT: smull x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext4, %zext ret i64 %mul } -define i64 @umull_ldrh_w(i16* %x0, i32 %x1) { +define i64 @umull_ldrh_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: umull_ldrh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrh w8, [x0] @@ -945,14 +945,14 @@ define i64 @umull_ldrh_w(i16* %x0, i32 %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %zext = zext i16 %ext64 to i64 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %zext, %zext4 ret i64 %mul } -define i64 @umull_ldr_b(i32* %x0, i8 %x1) { +define i64 @umull_ldr_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: umull_ldr_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -961,14 +961,14 @@ define i64 @umull_ldr_b(i32* %x0, i8 %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %zext = zext i32 %ext64 to i64 %zext4 = 
zext i8 %x1 to i64 %mul = mul i64 %zext, %zext4 ret i64 %mul } -define i64 @umull_ldr2_w(i64* %x0, i32 %x1) { +define i64 @umull_ldr2_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: umull_ldr2_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -976,14 +976,14 @@ define i64 @umull_ldr2_w(i64* %x0, i32 %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 ret i64 %mul } -define i64 @umull_ldr2_ldr2(i64* %x0, i64* %x1) { +define i64 @umull_ldr2_ldr2(ptr %x0, ptr %x1) { ; CHECK-LABEL: umull_ldr2_ldr2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -991,15 +991,15 @@ define i64 @umull_ldr2_ldr2(i64* %x0, i64* %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 - %ext64_2 = load i64, i64* %x1 + %ext64_2 = load i64, ptr %x1 %and2 = and i64 %ext64_2, 4294967295 %mul = mul i64 %and, %and2 ret i64 %mul } -define i64 @umull_ldr2_d(i64* %x0, i64 %x1) { +define i64 @umull_ldr2_d(ptr %x0, i64 %x1) { ; CHECK-LABEL: umull_ldr2_d: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1007,14 +1007,14 @@ define i64 @umull_ldr2_d(i64* %x0, i64 %x1) { ; CHECK-NEXT: mul x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %and2 = and i64 %x1, 4294967295 %mul = mul i64 %and, %and2 ret i64 %mul } -define i64 @umaddl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) { +define i64 @umaddl_ldrb_h(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldrb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1023,7 +1023,7 @@ define i64 @umaddl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1031,7 +1031,7 @@ entry: ret i64 %add } -define i64 @umaddl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { +define i64 @umaddl_ldrb_h_commuted(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldrb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1040,7 +1040,7 @@ define i64 @umaddl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smaddl x0, w9, w8, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext4, %zext @@ -1048,7 +1048,7 @@ entry: ret i64 %add } -define i64 @umaddl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) { +define i64 @umaddl_ldrh_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldrh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrh w8, [x0] @@ -1056,7 +1056,7 @@ define i64 @umaddl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) { ; CHECK-NEXT: madd x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %zext = zext i16 %ext64 to i64 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1064,7 +1064,7 @@ entry: ret i64 %add } -define i64 @umaddl_ldr_b(i32* %x0, i8 %x1, i64 %x2) { +define i64 @umaddl_ldr_b(ptr %x0, i8 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldr_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1073,7 +1073,7 @@ define i64 @umaddl_ldr_b(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: madd x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %zext 
= zext i32 %ext64 to i64 %zext4 = zext i8 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1081,7 +1081,7 @@ entry: ret i64 %add } -define i64 @umaddl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) { +define i64 @umaddl_ldr2_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldr2_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1089,7 +1089,7 @@ define i64 @umaddl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) { ; CHECK-NEXT: madd x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 @@ -1097,7 +1097,7 @@ entry: ret i64 %add } -define i64 @umaddl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) { +define i64 @umaddl_ldr2_ldr2(ptr %x0, ptr %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldr2_ldr2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1105,16 +1105,16 @@ define i64 @umaddl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) { ; CHECK-NEXT: madd x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 - %ext64_2 = load i64, i64* %x1 + %ext64_2 = load i64, ptr %x1 %and2 = and i64 %ext64_2, 4294967295 %mul = mul i64 %and, %and2 %add = add i64 %mul, %x2 ret i64 %add } -define i64 @umaddl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) { +define i64 @umaddl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) { ; CHECK-LABEL: umaddl_ldr2_d: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1122,7 +1122,7 @@ define i64 @umaddl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) { ; CHECK-NEXT: madd x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %and2 = and i64 %x1, 4294967295 %mul = mul i64 %and, %and2 @@ -1130,7 +1130,7 @@ entry: ret i64 %add } -define i64 @umnegl_ldrb_h(i8* %x0, i16 %x1) { +define i64 @umnegl_ldrb_h(ptr %x0, i16 %x1) { ; CHECK-LABEL: umnegl_ldrb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1139,7 +1139,7 @@ define i64 @umnegl_ldrb_h(i8* %x0, i16 %x1) { ; CHECK-NEXT: smnegl x0, w8, w9 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1147,7 +1147,7 @@ entry: ret i64 %sub } -define i64 @umnegl_ldrb_h_commuted(i8* %x0, i16 %x1) { +define i64 @umnegl_ldrb_h_commuted(ptr %x0, i16 %x1) { ; CHECK-LABEL: umnegl_ldrb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1156,7 +1156,7 @@ define i64 @umnegl_ldrb_h_commuted(i8* %x0, i16 %x1) { ; CHECK-NEXT: smnegl x0, w9, w8 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext4, %zext @@ -1164,7 +1164,7 @@ entry: ret i64 %sub } -define i64 @umnegl_ldrh_w(i16* %x0, i32 %x1) { +define i64 @umnegl_ldrh_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: umnegl_ldrh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrh w8, [x0] @@ -1172,7 +1172,7 @@ define i64 @umnegl_ldrh_w(i16* %x0, i32 %x1) { ; CHECK-NEXT: mneg x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %zext = zext i16 %ext64 to i64 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1180,7 +1180,7 @@ entry: ret i64 %sub } -define i64 @umnegl_ldr_b(i32* %x0, i8 %x1) { +define i64 @umnegl_ldr_b(ptr %x0, i8 %x1) { ; CHECK-LABEL: umnegl_ldr_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1189,7 +1189,7 @@ define i64 @umnegl_ldr_b(i32* %x0, 
i8 %x1) { ; CHECK-NEXT: mneg x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %zext = zext i32 %ext64 to i64 %zext4 = zext i8 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1197,7 +1197,7 @@ entry: ret i64 %sub } -define i64 @umnegl_ldr2_w(i64* %x0, i32 %x1) { +define i64 @umnegl_ldr2_w(ptr %x0, i32 %x1) { ; CHECK-LABEL: umnegl_ldr2_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1205,7 +1205,7 @@ define i64 @umnegl_ldr2_w(i64* %x0, i32 %x1) { ; CHECK-NEXT: mneg x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 @@ -1213,7 +1213,7 @@ entry: ret i64 %sub } -define i64 @umnegl_ldr2_ldr2(i64* %x0, i64* %x1) { +define i64 @umnegl_ldr2_ldr2(ptr %x0, ptr %x1) { ; CHECK-LABEL: umnegl_ldr2_ldr2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1221,16 +1221,16 @@ define i64 @umnegl_ldr2_ldr2(i64* %x0, i64* %x1) { ; CHECK-NEXT: mneg x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 - %ext64_2 = load i64, i64* %x1 + %ext64_2 = load i64, ptr %x1 %and2 = and i64 %ext64_2, 4294967295 %mul = mul i64 %and, %and2 %sub = sub i64 0, %mul ret i64 %sub } -define i64 @umnegl_ldr2_d(i64* %x0, i64 %x1) { +define i64 @umnegl_ldr2_d(ptr %x0, i64 %x1) { ; CHECK-LABEL: umnegl_ldr2_d: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1238,7 +1238,7 @@ define i64 @umnegl_ldr2_d(i64* %x0, i64 %x1) { ; CHECK-NEXT: mneg x0, x8, x9 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %and2 = and i64 %x1, 4294967295 %mul = mul i64 %and, %and2 @@ -1246,7 +1246,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) { +define i64 @umsubl_ldrb_h(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldrb_h: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1255,7 +1255,7 @@ define i64 @umsubl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w8, w9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1263,7 +1263,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { +define i64 @umsubl_ldrb_h_commuted(ptr %x0, i16 %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldrb_h_commuted: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -1272,7 +1272,7 @@ define i64 @umsubl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) { ; CHECK-NEXT: smsubl x0, w9, w8, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i8, i8* %x0 + %ext64 = load i8, ptr %x0 %zext = zext i8 %ext64 to i64 %zext4 = zext i16 %x1 to i64 %mul = mul i64 %zext4, %zext @@ -1280,7 +1280,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) { +define i64 @umsubl_ldrh_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldrh_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrh w8, [x0] @@ -1288,7 +1288,7 @@ define i64 @umsubl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) { ; CHECK-NEXT: msub x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i16, i16* %x0 + %ext64 = load i16, ptr %x0 %zext = zext i16 %ext64 to i64 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1296,7 +1296,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldr_b(i32* %x0, i8 %x1, i64 %x2) { +define i64 @umsubl_ldr_b(ptr %x0, i8 %x1, i64 
%x2) { ; CHECK-LABEL: umsubl_ldr_b: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1305,7 +1305,7 @@ define i64 @umsubl_ldr_b(i32* %x0, i8 %x1, i64 %x2) { ; CHECK-NEXT: msub x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i32, i32* %x0 + %ext64 = load i32, ptr %x0 %zext = zext i32 %ext64 to i64 %zext4 = zext i8 %x1 to i64 %mul = mul i64 %zext, %zext4 @@ -1313,7 +1313,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) { +define i64 @umsubl_ldr2_w(ptr %x0, i32 %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldr2_w: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1321,7 +1321,7 @@ define i64 @umsubl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) { ; CHECK-NEXT: msub x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 @@ -1329,7 +1329,7 @@ entry: ret i64 %sub } -define i64 @umsubl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) { +define i64 @umsubl_ldr2_ldr2(ptr %x0, ptr %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldr2_ldr2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1337,16 +1337,16 @@ define i64 @umsubl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) { ; CHECK-NEXT: msub x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 - %ext64_2 = load i64, i64* %x1 + %ext64_2 = load i64, ptr %x1 %and2 = and i64 %ext64_2, 4294967295 %mul = mul i64 %and, %and2 %sub = sub i64 %x2, %mul ret i64 %sub } -define i64 @umsubl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) { +define i64 @umsubl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) { ; CHECK-LABEL: umsubl_ldr2_d: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x0] @@ -1354,7 +1354,7 @@ define i64 @umsubl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) { ; CHECK-NEXT: msub x0, x8, x9, x2 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 4294967295 %and2 = and i64 %x1, 4294967295 %mul = mul i64 %and, %and2 @@ -1362,7 +1362,7 @@ entry: ret i64 %sub } -define i64 @umull_ldr2_w_cc1(i64* %x0, i32 %x1) { +define i64 @umull_ldr2_w_cc1(ptr %x0, i32 %x1) { ; CHECK-LABEL: umull_ldr2_w_cc1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr x9, [x0] @@ -1371,14 +1371,14 @@ define i64 @umull_ldr2_w_cc1(i64* %x0, i32 %x1) { ; CHECK-NEXT: mul x0, x9, x8 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 2147483647 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 ret i64 %mul } -define i64 @umull_ldr2_w_cc2(i64* %x0, i32 %x1) { +define i64 @umull_ldr2_w_cc2(ptr %x0, i32 %x1) { ; CHECK-LABEL: umull_ldr2_w_cc2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr x9, [x0] @@ -1387,7 +1387,7 @@ define i64 @umull_ldr2_w_cc2(i64* %x0, i32 %x1) { ; CHECK-NEXT: mul x0, x9, x8 ; CHECK-NEXT: ret entry: - %ext64 = load i64, i64* %x0 + %ext64 = load i64, ptr %x0 %and = and i64 %ext64, 8589934591 %zext4 = zext i32 %x1 to i64 %mul = mul i64 %and, %zext4 diff --git a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll index e01ef7b9eb3480..7bf444344f2860 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll @@ -4,7 +4,7 @@ target triple = "aarch64-unknown-linux-gnu" -define dso_local void @memset_unroll2(double* nocapture %array, i64 %size) { +define dso_local void @memset_unroll2(ptr nocapture %array, i64 %size) { ; DEFAULT-LABEL: memset_unroll2: ; 
DEFAULT: // %bb.0: // %entry ; DEFAULT-NEXT: fmov v0.2d, #2.00000000 @@ -52,33 +52,25 @@ entry: vector.body: ; preds = %vector.body, %entry %index = phi i64 [ 0, %entry ], [ %index16, %vector.body ] %niter = phi i64 [ %size, %entry ], [ %niter.nsub.3, %vector.body ] - %array0 = getelementptr inbounds double, double* %array, i64 %index - %array0.cast = bitcast double* %array0 to <2 x double>* - store <2 x double> , <2 x double>* %array0.cast, align 8 - %array2 = getelementptr inbounds double, double* %array0, i64 2 - %array2.cast = bitcast double* %array2 to <2 x double>* - store <2 x double> , <2 x double>* %array2.cast, align 8 + %array0 = getelementptr inbounds double, ptr %array, i64 %index + store <2 x double> , ptr %array0, align 8 + %array2 = getelementptr inbounds double, ptr %array0, i64 2 + store <2 x double> , ptr %array2, align 8 %index4 = or i64 %index, 4 - %array4 = getelementptr inbounds double, double* %array, i64 %index4 - %array4.cast = bitcast double* %array4 to <2 x double>* - store <2 x double> , <2 x double>* %array4.cast, align 8 - %array6 = getelementptr inbounds double, double* %array4, i64 2 - %array6.cast = bitcast double* %array6 to <2 x double>* - store <2 x double> , <2 x double>* %array6.cast, align 8 + %array4 = getelementptr inbounds double, ptr %array, i64 %index4 + store <2 x double> , ptr %array4, align 8 + %array6 = getelementptr inbounds double, ptr %array4, i64 2 + store <2 x double> , ptr %array6, align 8 %index8 = or i64 %index, 8 - %array8 = getelementptr inbounds double, double* %array, i64 %index8 - %array8.cast = bitcast double* %array8 to <2 x double>* - store <2 x double> , <2 x double>* %array8.cast, align 8 - %array10 = getelementptr inbounds double, double* %array8, i64 2 - %array10.cast = bitcast double* %array10 to <2 x double>* - store <2 x double> , <2 x double>* %array10.cast, align 8 + %array8 = getelementptr inbounds double, ptr %array, i64 %index8 + store <2 x double> , ptr %array8, align 8 + %array10 = getelementptr inbounds double, ptr %array8, i64 2 + store <2 x double> , ptr %array10, align 8 %index12 = or i64 %index, 12 - %array12 = getelementptr inbounds double, double* %array, i64 %index12 - %array12.cast = bitcast double* %array12 to <2 x double>* - store <2 x double> , <2 x double>* %array12.cast, align 8 - %array14 = getelementptr inbounds double, double* %array12, i64 2 - %array14.cast = bitcast double* %array14 to <2 x double>* - store <2 x double> , <2 x double>* %array14.cast, align 8 + %array12 = getelementptr inbounds double, ptr %array, i64 %index12 + store <2 x double> , ptr %array12, align 8 + %array14 = getelementptr inbounds double, ptr %array12, i64 2 + store <2 x double> , ptr %array14, align 8 %index16 = add i64 %index, 16 %niter.nsub.3 = add i64 %niter, -4 %niter.ncmp.3 = icmp eq i64 %niter.nsub.3, 0 diff --git a/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll b/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll index 12a4939e9e5200..05ece948b48286 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll @@ -7,7 +7,7 @@ ; therefore this instruction can be safely used for any pre Armv8.3-A architectures. ; On Armv8.3-A and onwards XPACI is available so use that instead. -define i8* @ra0() nounwind readnone { +define ptr @ra0() nounwind readnone { entry: ; CHECK-LABEL: ra0: ; CHECK-NEXT: str x30, [sp, #-16]! 
@@ -20,11 +20,11 @@ entry: ; CHECKV83-NEXT: mov x0, x30 ; CHECKV83-NEXT: ldr x30, [sp], #16 ; CHECKV83-NEXT: ret - %0 = tail call i8* @llvm.returnaddress(i32 0) - ret i8* %0 + %0 = tail call ptr @llvm.returnaddress(i32 0) + ret ptr %0 } -define i8* @ra1() nounwind readnone #0 { +define ptr @ra1() nounwind readnone #0 { entry: ; CHECK-LABEL: ra1: ; CHECK: hint #25 @@ -40,10 +40,10 @@ entry: ; CHECKV83-NEXT: mov x0, x30 ; CHECKV83-NEXT: ldr x30, [sp], #16 ; CHECKV83-NEXT: retaa - %0 = tail call i8* @llvm.returnaddress(i32 0) - ret i8* %0 + %0 = tail call ptr @llvm.returnaddress(i32 0) + ret ptr %0 } attributes #0 = { "sign-return-address"="all" } -declare i8* @llvm.returnaddress(i32) nounwind readnone +declare ptr @llvm.returnaddress(i32) nounwind readnone diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll index 9ebbe18dc1ddb6..50a0f614457360 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll @@ -1,52 +1,52 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s -o -| FileCheck %s -define <8 x i16> @smull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { +define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: smull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i8>, <8 x i8>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B %tmp3 = sext <8 x i8> %tmp1 to <8 x i16> %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = mul <8 x i16> %tmp3, %tmp4 ret <8 x i16> %tmp5 } -define <4 x i32> @smull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { +define <4 x i32> @smull_v4i16_v4i32(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i16>, <4 x i16>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp1 = load <4 x i16>, ptr %A + %tmp2 = load <4 x i16>, ptr %B %tmp3 = sext <4 x i16> %tmp1 to <4 x i32> %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = mul <4 x i32> %tmp3, %tmp4 ret <4 x i32> %tmp5 } -define <2 x i64> @smull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { +define <2 x i64> @smull_v2i32_v2i64(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i32>, <2 x i32>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp1 = load <2 x i32>, ptr %A + %tmp2 = load <2 x i32>, ptr %B %tmp3 = sext <2 x i32> %tmp1 to <2 x i64> %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = mul <2 x i64> %tmp3, %tmp4 ret <2 x i64> %tmp5 } -define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind { +define <8 x i32> @smull_zext_v8i8_v8i32(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_v8i8_v8i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] @@ -55,15 +55,15 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind { ; CHECK-NEXT: smull2 v1.4s, v0.8h, v2.8h ; CHECK-NEXT: smull v0.4s, v0.4h, v2.4h ; CHECK-NEXT: ret - %load.A = load <8 x i8>, <8 x i8>* %A - %load.B = load <8 x i16>, <8 x i16>* %B + %load.A = load <8 x i8>, ptr %A + %load.B = load <8 x i16>, ptr %B %zext.A = zext <8 
x i8> %load.A to <8 x i32> %sext.B = sext <8 x i16> %load.B to <8 x i32> %res = mul <8 x i32> %zext.A, %sext.B ret <8 x i32> %res } -define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i8>* %B) nounwind { +define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_v8i8_v8i32_sext_first_operand: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x1] @@ -72,15 +72,15 @@ define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i ; CHECK-NEXT: smull2 v1.4s, v2.8h, v0.8h ; CHECK-NEXT: smull v0.4s, v2.4h, v0.4h ; CHECK-NEXT: ret - %load.A = load <8 x i16>, <8 x i16>* %A - %load.B = load <8 x i8>, <8 x i8>* %B + %load.A = load <8 x i16>, ptr %A + %load.B = load <8 x i8>, ptr %B %sext.A = sext <8 x i16> %load.A to <8 x i32> %zext.B = zext <8 x i8> %load.B to <8 x i32> %res = mul <8 x i32> %sext.A, %zext.B ret <8 x i32> %res } -define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(<8 x i16>* %A, <8 x i16>* %B) nounwind { +define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_v8i8_v8i32_top_bit_is_1: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] @@ -93,16 +93,16 @@ define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(<8 x i16>* %A, <8 x i16>* % ; CHECK-NEXT: mul v1.4s, v3.4s, v1.4s ; CHECK-NEXT: mul v0.4s, v0.4s, v2.4s ; CHECK-NEXT: ret - %load.A = load <8 x i16>, <8 x i16>* %A + %load.A = load <8 x i16>, ptr %A %or.A = or <8 x i16> %load.A, - %load.B = load <8 x i16>, <8 x i16>* %B + %load.B = load <8 x i16>, ptr %B %zext.A = zext <8 x i16> %or.A to <8 x i32> %sext.B = sext <8 x i16> %load.B to <8 x i32> %res = mul <8 x i32> %zext.A, %sext.B ret <8 x i32> %res } -define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind { +define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr s0, [x0] @@ -110,15 +110,15 @@ define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind { ; CHECK-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret - %load.A = load <4 x i8>, <4 x i8>* %A - %load.B = load <4 x i16>, <4 x i16>* %B + %load.A = load <4 x i8>, ptr %A + %load.B = load <4 x i16>, ptr %B %zext.A = zext <4 x i8> %load.A to <4 x i32> %sext.B = sext <4 x i16> %load.B to <4 x i32> %res = mul <4 x i32> %zext.A, %sext.B ret <4 x i32> %res } -define <2 x i64> @smull_zext_v2i32_v2i64(<2 x i16>* %A, <2 x i32>* %B) nounwind { +define <2 x i64> @smull_zext_v2i32_v2i64(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x1] @@ -132,15 +132,15 @@ define <2 x i64> @smull_zext_v2i32_v2i64(<2 x i16>* %A, <2 x i32>* %B) nounwind ; CHECK-NEXT: fmov d0, x8 ; CHECK-NEXT: mov v0.d[1], x9 ; CHECK-NEXT: ret - %load.A = load <2 x i16>, <2 x i16>* %A - %load.B = load <2 x i32>, <2 x i32>* %B + %load.A = load <2 x i16>, ptr %A + %load.B = load <2 x i32>, ptr %B %zext.A = zext <2 x i16> %load.A to <2 x i64> %sext.B = sext <2 x i32> %load.B to <2 x i64> %res = mul <2 x i64> %zext.A, %sext.B ret <2 x i64> %res } -define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { +define <2 x i64> @smull_zext_and_v2i32_v2i64(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull_zext_and_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] @@ -148,61 +148,61 @@ define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounw ; CHECK-NEXT: bic v0.2s, 
#128, lsl #24 ; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret - %load.A = load <2 x i32>, <2 x i32>* %A + %load.A = load <2 x i32>, ptr %A %and.A = and <2 x i32> %load.A, - %load.B = load <2 x i32>, <2 x i32>* %B + %load.B = load <2 x i32>, ptr %B %zext.A = zext <2 x i32> %and.A to <2 x i64> %sext.B = sext <2 x i32> %load.B to <2 x i64> %res = mul <2 x i64> %zext.A, %sext.B ret <2 x i64> %res } -define <8 x i16> @umull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { +define <8 x i16> @umull_v8i8_v8i16(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: umull_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: umull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i8>, <8 x i8>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B %tmp3 = zext <8 x i8> %tmp1 to <8 x i16> %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = mul <8 x i16> %tmp3, %tmp4 ret <8 x i16> %tmp5 } -define <4 x i32> @umull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { +define <4 x i32> @umull_v4i16_v4i32(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: umull_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: umull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i16>, <4 x i16>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp1 = load <4 x i16>, ptr %A + %tmp2 = load <4 x i16>, ptr %B %tmp3 = zext <4 x i16> %tmp1 to <4 x i32> %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = mul <4 x i32> %tmp3, %tmp4 ret <4 x i32> %tmp5 } -define <2 x i64> @umull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { +define <2 x i64> @umull_v2i32_v2i64(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: umull_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] ; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i32>, <2 x i32>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp1 = load <2 x i32>, ptr %A + %tmp2 = load <2 x i32>, ptr %B %tmp3 = zext <2 x i32> %tmp1 to <2 x i64> %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = mul <2 x i64> %tmp3, %tmp4 ret <2 x i64> %tmp5 } -define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { +define <8 x i16> @amull_v8i8_v8i16(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: amull_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] @@ -210,8 +210,8 @@ define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-NEXT: smull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: bic v0.8h, #255, lsl #8 ; CHECK-NEXT: ret - %tmp1 = load <8 x i8>, <8 x i8>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B %tmp3 = zext <8 x i8> %tmp1 to <8 x i16> %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = mul <8 x i16> %tmp3, %tmp4 @@ -219,7 +219,7 @@ define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { ret <8 x i16> %and } -define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { +define <4 x i32> @amull_v4i16_v4i32(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: amull_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x0] @@ -228,8 +228,8 @@ define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-NEXT: smull v1.4s, v1.4h, v2.4h ; CHECK-NEXT: and v0.16b, v1.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <4 x i16>, <4 x i16>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp1 = load <4 x i16>, ptr %A + %tmp2 = load <4 x i16>, ptr %B %tmp3 = zext <4 x i16> %tmp1 to <4 
x i32> %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = mul <4 x i32> %tmp3, %tmp4 @@ -237,7 +237,7 @@ define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { ret <4 x i32> %and } -define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { +define <2 x i64> @amull_v2i32_v2i64(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: amull_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x0] @@ -246,8 +246,8 @@ define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-NEXT: smull v1.2d, v1.2s, v2.2s ; CHECK-NEXT: and v0.16b, v1.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <2 x i32>, <2 x i32>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp1 = load <2 x i32>, ptr %A + %tmp2 = load <2 x i32>, ptr %B %tmp3 = zext <2 x i32> %tmp1 to <2 x i64> %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = mul <2 x i64> %tmp3, %tmp4 @@ -255,7 +255,7 @@ define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { ret <2 x i64> %and } -define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @smlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlal_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -263,9 +263,9 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlal v0.8h, v1.8b, v2.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = sext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -273,7 +273,7 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %tmp7 } -define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @smlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlal_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -281,9 +281,9 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = sext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -291,7 +291,7 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %tmp7 } -define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @smlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlal_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -299,9 +299,9 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = sext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -309,7 
+309,7 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ret <2 x i64> %tmp7 } -define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @umlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlal_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -317,9 +317,9 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlal v0.8h, v1.8b, v2.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -327,7 +327,7 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %tmp7 } -define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @umlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlal_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -335,9 +335,9 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -345,7 +345,7 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %tmp7 } -define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @umlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlal_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -353,9 +353,9 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -363,7 +363,7 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ret <2 x i64> %tmp7 } -define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @amlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlal_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -372,9 +372,9 @@ define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: smlal v0.8h, v1.8b, v2.8b ; CHECK-NEXT: bic v0.8h, #255, lsl #8 ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -383,7 
+383,7 @@ define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %and } -define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @amlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlal_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -393,9 +393,9 @@ define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: smlal v2.4s, v1.4h, v3.4h ; CHECK-NEXT: and v0.16b, v2.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -404,7 +404,7 @@ define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %and } -define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @amlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlal_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -414,9 +414,9 @@ define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: smlal v2.2d, v1.2s, v3.2s ; CHECK-NEXT: and v0.16b, v2.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -425,7 +425,7 @@ define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ret <2 x i64> %and } -define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @smlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlsl_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -433,9 +433,9 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlsl v0.8h, v1.8b, v2.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = sext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -443,7 +443,7 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %tmp7 } -define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @smlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlsl_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -451,9 +451,9 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = sext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x 
i32> %tmp4, %tmp5 @@ -461,7 +461,7 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %tmp7 } -define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @smlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: smlsl_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -469,9 +469,9 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: smlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = sext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -479,7 +479,7 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ret <2 x i64> %tmp7 } -define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @umlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlsl_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -487,9 +487,9 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlsl v0.8h, v1.8b, v2.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -497,7 +497,7 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %tmp7 } -define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @umlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlsl_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -505,9 +505,9 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -515,7 +515,7 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %tmp7 } -define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @umlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: umlsl_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -523,9 +523,9 @@ define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: ldr d2, [x2] ; CHECK-NEXT: umlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x 
i64> %tmp4, %tmp5 @@ -533,7 +533,7 @@ define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ret <2 x i64> %tmp7 } -define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { +define <8 x i16> @amlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlsl_v8i8_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -542,9 +542,9 @@ define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ; CHECK-NEXT: smlsl v0.8h, v1.8b, v2.8b ; CHECK-NEXT: bic v0.8h, #255, lsl #8 ; CHECK-NEXT: ret - %tmp1 = load <8 x i16>, <8 x i16>* %A - %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = load <8 x i8>, <8 x i8>* %C + %tmp1 = load <8 x i16>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = load <8 x i8>, ptr %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -553,7 +553,7 @@ define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no ret <8 x i16> %and } -define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { +define <4 x i32> @amlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlsl_v4i16_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -563,9 +563,9 @@ define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ; CHECK-NEXT: smlsl v2.4s, v1.4h, v3.4h ; CHECK-NEXT: and v0.16b, v2.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <4 x i32>, <4 x i32>* %A - %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = load <4 x i16>, <4 x i16>* %C + %tmp1 = load <4 x i32>, ptr %A + %tmp2 = load <4 x i16>, ptr %B + %tmp3 = load <4 x i16>, ptr %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -574,7 +574,7 @@ define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) ret <4 x i32> %and } -define <2 x i64> @amlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { +define <2 x i64> @amlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-LABEL: amlsl_v2i32_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d1, [x1] @@ -584,9 +584,9 @@ define <2 x i64> @amlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) ; CHECK-NEXT: smlsl v2.2d, v1.2s, v3.2s ; CHECK-NEXT: and v0.16b, v2.16b, v0.16b ; CHECK-NEXT: ret - %tmp1 = load <2 x i64>, <2 x i64>* %A - %tmp2 = load <2 x i32>, <2 x i32>* %B - %tmp3 = load <2 x i32>, <2 x i32>* %C + %tmp1 = load <2 x i64>, ptr %A + %tmp2 = load <2 x i32>, ptr %B + %tmp3 = load <2 x i32>, ptr %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -773,7 +773,7 @@ ret <8 x i16> %3 } -define void @distribute(<8 x i16>* %dst, <16 x i8>* %src, i32 %mul) nounwind { +define void @distribute(ptr %dst, ptr %src, i32 %mul) nounwind { ; CHECK-LABEL: distribute: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr q0, [x1] @@ -787,7 +787,7 @@ entry: %0 = trunc i32 %mul to i8 %1 = insertelement <8 x i8> undef, i8 %0, i32 0 %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer - %3 = load <16 x i8>, <16 x i8>* %src, align 1 + %3 = load <16 x i8>, ptr %src, align 1 %4 = bitcast <16 x i8> %3 to <2 x double> %5 = extractelement <2 x double> %4, i32 1 %6 = bitcast double %5 to <8 x i8> @@ -798,7 +798,7 @@ entry: %11 = zext <8 x i8> %10 to <8 x i16> %12 = add <8 x i16> %7, %11 %13 = mul <8 x i16> %12, %8 - store <8 x i16> %13, <8 x i16>* %dst, align 2 + store <8 x 
i16> %13, ptr %dst, align 2 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll index 24b528fe7df01c..cf9ed4d5f0e16a 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll @@ -116,7 +116,7 @@ entry: ; The split bitmask immediates should be hoisted outside loop because they are ; loop invariant. -define void @test8(i64 %a, i64* noalias %src, i64* noalias %dst, i64 %n) { +define void @test8(i64 %a, ptr noalias %src, ptr noalias %dst, i64 %n) { ; CHECK-LABEL: test8: ; CHECK: // %bb.0: // %loop.ph ; CHECK-NEXT: and x9, x0, #0x3ffc00 @@ -150,10 +150,10 @@ loop: br i1 %cmp, label %if.then, label %if.else if.then: - %src.arrayidx = getelementptr inbounds i64, i64* %src, i64 %iv - %val = load i64, i64* %src.arrayidx - %dst.arrayidx = getelementptr inbounds i64, i64* %dst, i64 %iv - store i64 %val, i64* %dst.arrayidx + %src.arrayidx = getelementptr inbounds i64, ptr %src, i64 %iv + %val = load i64, ptr %src.arrayidx + %dst.arrayidx = getelementptr inbounds i64, ptr %dst, i64 %iv + store i64 %val, ptr %dst.arrayidx br label %for.inc if.else: @@ -169,7 +169,7 @@ exit: } ; This constant should not be split because the `and` is not loop invariant. -define i32 @test9(i32* nocapture %x, i32* nocapture readonly %y, i32 %n) { +define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) { ; CHECK-LABEL: test9: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cmp w2, #1 @@ -201,11 +201,11 @@ for.cond.cleanup: ; preds = %for.body, %entry for.body: ; preds = %for.body.preheader, %for.body %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] - %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv - %0 = load i32, i32* %arrayidx, align 4 + %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv + %0 = load i32, ptr %arrayidx, align 4 %and = and i32 %0, 2098176 - %arrayidx2 = getelementptr inbounds i32, i32* %x, i64 %indvars.iv - store i32 %and, i32* %arrayidx2, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv + store i32 %and, ptr %arrayidx2, align 4 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count br i1 %exitcond.not, label %for.cond.cleanup, label %for.body @@ -222,7 +222,7 @@ for.body: ; preds = %for.body.preheader, ; ; In this case, the constant should not be split because it causes more ; instructions. 
-define void @test10(i32* nocapture %x, i32* nocapture readonly %y, i32* nocapture %z) { +define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %z) { ; CHECK-LABEL: test10: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr w8, [x1] @@ -235,12 +235,12 @@ define void @test10(i32* nocapture %x, i32* nocapture readonly %y, i32* nocaptur ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret entry: - %0 = load i32, i32* %y, align 4 + %0 = load i32, ptr %y, align 4 %and = and i32 %0, 2098176 - store i32 %and, i32* %x, align 4 - %1 = load i32, i32* %y, align 4 + store i32 %and, ptr %x, align 4 + %1 = load i32, ptr %y, align 4 %or = or i32 %1, 2098176 - store i32 %or, i32* %z, align 4 + store i32 %or, ptr %z, align 4 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll index 1c093989c3951f..d97d9d1e0e091e 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -10,16 +10,16 @@ ; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2 ; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3 ; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4 -define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) { +define i64 @stp_i64_scale(ptr nocapture %P, i64 %v) { entry: - %arrayidx = getelementptr inbounds i64, i64* %P, i64 3 - store i64 %v, i64* %arrayidx - %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2 - store i64 %v, i64* %arrayidx1 - %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1 - store i64 %v, i64* %arrayidx2 - %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4 - store i64 %v, i64* %arrayidx3 + %arrayidx = getelementptr inbounds i64, ptr %P, i64 3 + store i64 %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2 + store i64 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1 + store i64 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4 + store i64 %v, ptr %arrayidx3 ret i64 %v } @@ -31,16 +31,16 @@ entry: ; CHECK:SU(3): STRWui %1:gpr32, %0:gpr64common, 2 ; CHECK:SU(2): STRWui %1:gpr32, %0:gpr64common, 3 ; CHECK:SU(5): STRWui %1:gpr32, %0:gpr64common, 4 -define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) { +define i32 @stp_i32_scale(ptr nocapture %P, i32 %v) { entry: - %arrayidx = getelementptr inbounds i32, i32* %P, i32 3 - store i32 %v, i32* %arrayidx - %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 2 - store i32 %v, i32* %arrayidx1 - %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 1 - store i32 %v, i32* %arrayidx2 - %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 4 - store i32 %v, i32* %arrayidx3 + %arrayidx = getelementptr inbounds i32, ptr %P, i32 3 + store i32 %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 2 + store i32 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 1 + store i32 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 4 + store i32 %v, ptr %arrayidx3 ret i32 %v } @@ -52,16 +52,16 @@ entry: ; CHECK:SU(3): STURXi %1:gpr64, %0:gpr64common, -8 ; CHECK:SU(4): STURXi %1:gpr64, %0:gpr64common, -16 ; CHECK:SU(5): STURXi %1:gpr64, %0:gpr64common, -32 -define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 { +define void @stp_i64_unscale(ptr nocapture %P, i64 %v) #0 { entry: - %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3 - store i64 %v, i64* %arrayidx - %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 -1 - store i64 %v, i64* %arrayidx1 - %arrayidx2 = getelementptr 
inbounds i64, i64* %P, i64 -2 - store i64 %v, i64* %arrayidx2 - %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 -4 - store i64 %v, i64* %arrayidx3 + %arrayidx = getelementptr inbounds i64, ptr %P, i64 -3 + store i64 %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 -1 + store i64 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 -2 + store i64 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 -4 + store i64 %v, ptr %arrayidx3 ret void } @@ -73,16 +73,16 @@ entry: ; CHECK:SU(3): STURWi %1:gpr32, %0:gpr64common, -4 ; CHECK:SU(4): STURWi %1:gpr32, %0:gpr64common, -8 ; CHECK:SU(5): STURWi %1:gpr32, %0:gpr64common, -16 -define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 { +define void @stp_i32_unscale(ptr nocapture %P, i32 %v) #0 { entry: - %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3 - store i32 %v, i32* %arrayidx - %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 -1 - store i32 %v, i32* %arrayidx1 - %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 -2 - store i32 %v, i32* %arrayidx2 - %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 -4 - store i32 %v, i32* %arrayidx3 + %arrayidx = getelementptr inbounds i32, ptr %P, i32 -3 + store i32 %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 -1 + store i32 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 -2 + store i32 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 -4 + store i32 %v, ptr %arrayidx3 ret void } @@ -94,16 +94,16 @@ entry: ; CHECK:SU(4): STRDui %1:fpr64, %0:gpr64common, 2 ; CHECK:SU(2): STRDui %1:fpr64, %0:gpr64common, 3 ; CHECK:SU(5): STRDui %1:fpr64, %0:gpr64common, 4 -define void @stp_double(double* nocapture %P, double %v) { +define void @stp_double(ptr nocapture %P, double %v) { entry: - %arrayidx = getelementptr inbounds double, double* %P, i64 3 - store double %v, double* %arrayidx - %arrayidx1 = getelementptr inbounds double, double* %P, i64 1 - store double %v, double* %arrayidx1 - %arrayidx2 = getelementptr inbounds double, double* %P, i64 2 - store double %v, double* %arrayidx2 - %arrayidx3 = getelementptr inbounds double, double* %P, i64 4 - store double %v, double* %arrayidx3 + %arrayidx = getelementptr inbounds double, ptr %P, i64 3 + store double %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds double, ptr %P, i64 1 + store double %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds double, ptr %P, i64 2 + store double %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds double, ptr %P, i64 4 + store double %v, ptr %arrayidx3 ret void } @@ -115,16 +115,16 @@ entry: ; CHECK:SU(4): STRSui %1:fpr32, %0:gpr64common, 2 ; CHECK:SU(2): STRSui %1:fpr32, %0:gpr64common, 3 ; CHECK:SU(5): STRSui %1:fpr32, %0:gpr64common, 4 -define void @stp_float(float* nocapture %P, float %v) { +define void @stp_float(ptr nocapture %P, float %v) { entry: - %arrayidx = getelementptr inbounds float, float* %P, i64 3 - store float %v, float* %arrayidx - %arrayidx1 = getelementptr inbounds float, float* %P, i64 1 - store float %v, float* %arrayidx1 - %arrayidx2 = getelementptr inbounds float, float* %P, i64 2 - store float %v, float* %arrayidx2 - %arrayidx3 = getelementptr inbounds float, float* %P, i64 4 - store float %v, float* %arrayidx3 + %arrayidx = getelementptr inbounds float, ptr %P, i64 3 + store float %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds float, ptr %P, i64 1 + store float %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds 
float, ptr %P, i64 2 + store float %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds float, ptr %P, i64 4 + store float %v, ptr %arrayidx3 ret void } @@ -135,16 +135,16 @@ entry: ; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2 :: (volatile ; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1 :: (volatile ; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4 :: (volatile -define i64 @stp_volatile(i64* nocapture %P, i64 %v) { +define i64 @stp_volatile(ptr nocapture %P, i64 %v) { entry: - %arrayidx = getelementptr inbounds i64, i64* %P, i64 3 - store volatile i64 %v, i64* %arrayidx - %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2 - store volatile i64 %v, i64* %arrayidx1 - %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1 - store volatile i64 %v, i64* %arrayidx2 - %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4 - store volatile i64 %v, i64* %arrayidx3 + %arrayidx = getelementptr inbounds i64, ptr %P, i64 3 + store volatile i64 %v, ptr %arrayidx + %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2 + store volatile i64 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1 + store volatile i64 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4 + store volatile i64 %v, ptr %arrayidx3 ret i64 %v } @@ -156,43 +156,43 @@ entry: ; CHECK:SU(10): STRXui %12:gpr64, %0:gpr64common, 1 :: ; CHECK:SU(15): STRXui %17:gpr64, %0:gpr64common, 2 :: ; CHECK:SU(20): STRXui %22:gpr64, %0:gpr64common, 3 :: -define void @stp_i64_with_ld(i64* noalias nocapture %a, i64* noalias nocapture readnone %b, i64* noalias nocapture readnone %c) { +define void @stp_i64_with_ld(ptr noalias nocapture %a, ptr noalias nocapture readnone %b, ptr noalias nocapture readnone %c) { entry: - %arrayidx = getelementptr inbounds i64, i64* %a, i64 8 - %0 = load i64, i64* %arrayidx, align 8 - %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 16 - %1 = load i64, i64* %arrayidx3, align 8 + %arrayidx = getelementptr inbounds i64, ptr %a, i64 8 + %0 = load i64, ptr %arrayidx, align 8 + %arrayidx3 = getelementptr inbounds i64, ptr %a, i64 16 + %1 = load i64, ptr %arrayidx3, align 8 %mul = mul nsw i64 %1, %0 - %2 = load i64, i64* %a, align 8 + %2 = load i64, ptr %a, align 8 %add6 = add nsw i64 %2, %mul - store i64 %add6, i64* %a, align 8 - %arrayidx.1 = getelementptr inbounds i64, i64* %a, i64 9 - %3 = load i64, i64* %arrayidx.1, align 8 - %arrayidx3.1 = getelementptr inbounds i64, i64* %a, i64 17 - %4 = load i64, i64* %arrayidx3.1, align 8 + store i64 %add6, ptr %a, align 8 + %arrayidx.1 = getelementptr inbounds i64, ptr %a, i64 9 + %3 = load i64, ptr %arrayidx.1, align 8 + %arrayidx3.1 = getelementptr inbounds i64, ptr %a, i64 17 + %4 = load i64, ptr %arrayidx3.1, align 8 %mul.1 = mul nsw i64 %4, %3 - %arrayidx5.1 = getelementptr inbounds i64, i64* %a, i64 1 - %5 = load i64, i64* %arrayidx5.1, align 8 + %arrayidx5.1 = getelementptr inbounds i64, ptr %a, i64 1 + %5 = load i64, ptr %arrayidx5.1, align 8 %add6.1 = add nsw i64 %5, %mul.1 - store i64 %add6.1, i64* %arrayidx5.1, align 8 - %arrayidx.2 = getelementptr inbounds i64, i64* %a, i64 10 - %6 = load i64, i64* %arrayidx.2, align 8 - %arrayidx3.2 = getelementptr inbounds i64, i64* %a, i64 18 - %7 = load i64, i64* %arrayidx3.2, align 8 + store i64 %add6.1, ptr %arrayidx5.1, align 8 + %arrayidx.2 = getelementptr inbounds i64, ptr %a, i64 10 + %6 = load i64, ptr %arrayidx.2, align 8 + %arrayidx3.2 = getelementptr inbounds i64, ptr %a, i64 18 + %7 = load i64, ptr %arrayidx3.2, align 8 %mul.2 = mul nsw i64 %7, %6 - 
%arrayidx5.2 = getelementptr inbounds i64, i64* %a, i64 2 - %8 = load i64, i64* %arrayidx5.2, align 8 + %arrayidx5.2 = getelementptr inbounds i64, ptr %a, i64 2 + %8 = load i64, ptr %arrayidx5.2, align 8 %add6.2 = add nsw i64 %8, %mul.2 - store i64 %add6.2, i64* %arrayidx5.2, align 8 - %arrayidx.3 = getelementptr inbounds i64, i64* %a, i64 11 - %9 = load i64, i64* %arrayidx.3, align 8 - %arrayidx3.3 = getelementptr inbounds i64, i64* %a, i64 19 - %10 = load i64, i64* %arrayidx3.3, align 8 + store i64 %add6.2, ptr %arrayidx5.2, align 8 + %arrayidx.3 = getelementptr inbounds i64, ptr %a, i64 11 + %9 = load i64, ptr %arrayidx.3, align 8 + %arrayidx3.3 = getelementptr inbounds i64, ptr %a, i64 19 + %10 = load i64, ptr %arrayidx3.3, align 8 %mul.3 = mul nsw i64 %10, %9 - %arrayidx5.3 = getelementptr inbounds i64, i64* %a, i64 3 - %11 = load i64, i64* %arrayidx5.3, align 8 + %arrayidx5.3 = getelementptr inbounds i64, ptr %a, i64 3 + %11 = load i64, ptr %arrayidx5.3, align 8 %add6.3 = add nsw i64 %11, %mul.3 - store i64 %add6.3, i64* %arrayidx5.3, align 8 + store i64 %add6.3, ptr %arrayidx5.3, align 8 ret void } @@ -206,12 +206,12 @@ entry: ; CHECK:SU(3): STRWui %1:gpr32, %0:gpr64common, 0 ; CHECK:SU(4): %3:gpr32common = nsw ADDWri %2:gpr32common, 5, 0 ; CHECK:SU(5): STRWui %3:gpr32common, %0:gpr64common, 1 -define void @stp_missing_preds_edges(i32* %p, i32 %m, i32 %n) { +define void @stp_missing_preds_edges(ptr %p, i32 %m, i32 %n) { entry: - store i32 %m, i32* %p, align 4 + store i32 %m, ptr %p, align 4 %add = add nsw i32 %n, 5 - %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1 - store i32 %add, i32* %arrayidx1, align 4 + %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1 + store i32 %add, ptr %arrayidx1, align 4 ret void } @@ -232,14 +232,13 @@ entry: ; CHECK-FAST-NOT: Cluster ld/st ; CHECK-FAST:SU(3): STRWui %2:gpr32, %0:gpr64common, 0 :: ; CHECK-FAST:SU(4): %3:gpr32 = LDRWui %1:gpr64common, 0 :: -define i32 @cluster_with_different_preds(i32* %p, i32* %q) { +define i32 @cluster_with_different_preds(ptr %p, ptr %q) { entry: - store i32 3, i32* %p, align 4 - %0 = load i32, i32* %q, align 4 - %add.ptr = getelementptr inbounds i32, i32* %q, i64 1 - %1 = bitcast i32* %add.ptr to i8* - store i8 5, i8* %1, align 1 - %2 = load i32, i32* %add.ptr, align 4 - %add = add nsw i32 %2, %0 + store i32 3, ptr %p, align 4 + %0 = load i32, ptr %q, align 4 + %add.ptr = getelementptr inbounds i32, ptr %q, i64 1 + store i8 5, ptr %add.ptr, align 1 + %1 = load i32, ptr %add.ptr, align 4 + %add = add nsw i32 %1, %0 ret i32 %add } diff --git a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll index a150d0383698e1..be07404f4b2fc2 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll @@ -6,14 +6,14 @@ ; RUN: llc -mtriple=aarch64-none-linux -tail-dup-placement-threshold=4 < %s | FileCheck %s --check-prefix=CHECK-O2 ; RUN: llc -mtriple=aarch64-none-linux -tail-dup-placement-threshold=6 < %s | FileCheck %s --check-prefix=CHECK-O3 -%a = type { %a*, i32, %b } +%a = type { ptr, i32, %b } %b = type { %c } %c = type { i32, i32, [31 x i8] } -@global_ptr = dso_local local_unnamed_addr global %a* null, align 8 +@global_ptr = dso_local local_unnamed_addr global ptr null, align 8 @global_int = dso_local local_unnamed_addr global i32 0, align 4 -define dso_local void @testcase(%a** nocapture %arg){ +define dso_local void @testcase(ptr nocapture %arg){ ; CHECK-O2-LABEL: testcase: ; CHECK-O2: // %bb.0: // 
%entry ; CHECK-O2-NEXT: adrp x8, global_ptr @@ -55,23 +55,22 @@ define dso_local void @testcase(%a** nocapture %arg){ ; CHECK-O3-NEXT: ldr w1, [x9, :lo12:global_int] ; CHECK-O3-NEXT: b externalfunc entry: - %0 = load %a*, %a** @global_ptr, align 8 - %cmp.not = icmp eq %a* %0, null + %0 = load ptr, ptr @global_ptr, align 8 + %cmp.not = icmp eq ptr %0, null br i1 %cmp.not, label %if.end, label %if.then if.then: ; preds = %entry - %1 = getelementptr inbounds %a, %a* %0, i64 0, i32 0 - %2 = load %a*, %a** %1, align 8 - store %a* %2, %a** %arg, align 8 - %.pre = load %a*, %a** @global_ptr, align 8 + %1 = load ptr, ptr %0, align 8 + store ptr %1, ptr %arg, align 8 + %.pre = load ptr, ptr @global_ptr, align 8 br label %if.end if.end: ; preds = %if.then, %entry - %3 = phi %a* [ %.pre, %if.then ], [ null, %entry ] - %4 = load i32, i32* @global_int, align 4 - %5 = getelementptr inbounds %a, %a* %3, i64 0, i32 2, i32 0, i32 1 - tail call void @externalfunc(i32 10, i32 %4, i32* nonnull %5) + %2 = phi ptr [ %.pre, %if.then ], [ null, %entry ] + %3 = load i32, ptr @global_int, align 4 + %4 = getelementptr inbounds %a, ptr %2, i64 0, i32 2, i32 0, i32 1 + tail call void @externalfunc(i32 10, i32 %3, ptr nonnull %4) ret void } -declare dso_local void @externalfunc(i32, i32, i32*) +declare dso_local void @externalfunc(i32, i32, ptr) diff --git a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll index 8a57f9f9dc2d67..28629a8c2f0dd3 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll @@ -31,16 +31,16 @@ if.end3: ; preds = %if.then2, %entry ; CHECK-NOT: and x{{[0-9]+}}, x[[REG1]], #0x08 ; CHECK-NOT: cbz x{{[0-9]+}}, .LBB1_3 -define void @test2(i64 %A, i64* readonly %B) #0 { +define void @test2(i64 %A, ptr readonly %B) #0 { entry: - %tobool = icmp eq i64* %B, null + %tobool = icmp eq ptr %B, null %and = and i64 %A, 8 %tobool1 = icmp eq i64 %and, 0 %or.cond = or i1 %tobool, %tobool1 br i1 %or.cond, label %if.end3, label %if.then2 if.then2: ; preds = %entry - %0 = load i64, i64* %B, align 4 + %0 = load i64, ptr %B, align 4 tail call void @foo(i64 %A, i64 %0) br label %if.end3 diff --git a/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll index 3c986ba2e5139d..8d226398a4f48a 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll @@ -3,34 +3,34 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-gnu" ; Function Attrs: noreturn nounwind -define void @foo(i32* %d) { +define void @foo(ptr %d) { entry: - %0 = ptrtoint i32* %d to i64 + %0 = ptrtoint ptr %d to i64 %1 = and i64 %0, -36028797018963969 - %2 = inttoptr i64 %1 to i32* - %arrayidx5 = getelementptr inbounds i32, i32* %2, i64 1 - %arrayidx6 = getelementptr inbounds i32, i32* %2, i64 2 - %arrayidx7 = getelementptr inbounds i32, i32* %2, i64 3 + %2 = inttoptr i64 %1 to ptr + %arrayidx5 = getelementptr inbounds i32, ptr %2, i64 1 + %arrayidx6 = getelementptr inbounds i32, ptr %2, i64 2 + %arrayidx7 = getelementptr inbounds i32, ptr %2, i64 3 br label %for.cond for.cond: ; preds = %for.cond, %entry - %B.0 = phi i32* [ %d, %entry ], [ %12, %for.cond ] - %3 = ptrtoint i32* %B.0 to i64 + %B.0 = phi ptr [ %d, %entry ], [ %12, %for.cond ] + %3 = ptrtoint ptr %B.0 to i64 %4 = and i64 %3, -36028797018963969 - %5 = inttoptr i64 %4 to i32* - %6 = load i32, i32* %5, 
align 4 - %arrayidx1 = getelementptr inbounds i32, i32* %5, i64 1 - %7 = load i32, i32* %arrayidx1, align 4 - %arrayidx2 = getelementptr inbounds i32, i32* %5, i64 2 - %8 = load i32, i32* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds i32, i32* %5, i64 3 - %9 = load i32, i32* %arrayidx3, align 4 - store i32 %6, i32* %2, align 4 - store i32 %7, i32* %arrayidx5, align 4 - store i32 %8, i32* %arrayidx6, align 4 - store i32 %9, i32* %arrayidx7, align 4 - %10 = ptrtoint i32* %arrayidx1 to i64 + %5 = inttoptr i64 %4 to ptr + %6 = load i32, ptr %5, align 4 + %arrayidx1 = getelementptr inbounds i32, ptr %5, i64 1 + %7 = load i32, ptr %arrayidx1, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %5, i64 2 + %8 = load i32, ptr %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds i32, ptr %5, i64 3 + %9 = load i32, ptr %arrayidx3, align 4 + store i32 %6, ptr %2, align 4 + store i32 %7, ptr %arrayidx5, align 4 + store i32 %8, ptr %arrayidx6, align 4 + store i32 %9, ptr %arrayidx7, align 4 + %10 = ptrtoint ptr %arrayidx1 to i64 %11 = or i64 %10, 36028797018963968 - %12 = inttoptr i64 %11 to i32* + %12 = inttoptr i64 %11 to ptr br label %for.cond } diff --git a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll index 89122d080e4b93..af5f6a9d6924b4 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll @@ -3,7 +3,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" -define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 { +define void @unj(i32 %I, i32 %argj, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 { ; CHECK-LABEL: @unj( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ARGJ:%.*]], 0 @@ -21,23 +21,23 @@ define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias noca ; CHECK-NEXT: [[SUM_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ] ; CHECK-NEXT: [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ] ; CHECK-NEXT: [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = add i32 [[SUM]], 10 ; CHECK-NEXT: [[ADD]] = sub i32 [[SUB]], [[TMP0]] ; CHECK-NEXT: [[INC]] = add nuw i32 [[J]], 1 -; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4 ; CHECK-NEXT: [[SUB_1:%.*]] = add i32 [[SUM_1]], 10 ; CHECK-NEXT: [[ADD_1]] = sub i32 [[SUB_1]], [[TMP1]] ; CHECK-NEXT: [[INC_1]] = add nuw i32 [[J_1]], 1 -; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4 ; CHECK-NEXT: [[SUB_2:%.*]] = add i32 [[SUM_2]], 10 ; CHECK-NEXT: [[ADD_2]] = sub i32 [[SUB_2]], 
[[TMP2]] ; CHECK-NEXT: [[INC_2]] = add nuw i32 [[J_2]], 1 -; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4 ; CHECK-NEXT: [[SUB_3:%.*]] = add i32 [[SUM_3]], 10 ; CHECK-NEXT: [[ADD_3]] = sub i32 [[SUB_3]], [[TMP3]] ; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_3]], 1 @@ -48,13 +48,13 @@ define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias noca ; CHECK-NEXT: [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ] ; CHECK-NEXT: [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ] ; CHECK-NEXT: [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ] -; CHECK-NEXT: store i32 [[ADD_LCSSA]], i32* [[A:%.*]], align 4 -; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 1 -; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX6_1]], align 4 -; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 2 -; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX6_2]], align 4 -; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 3 -; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX6_3]], align 4 +; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr [[A:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 1 +; CHECK-NEXT: store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX6_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 2 +; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX6_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 3 +; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4 ; CHECK-NEXT: br label [[FOR_END_LOOPEXIT:%.*]] ; CHECK: for.end.loopexit: ; CHECK-NEXT: br label [[FOR_END]] @@ -75,8 +75,8 @@ for.outer: for.inner: %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ] %sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ] - %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j - %0 = load i32, i32* %arrayidx, align 4 + %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j + %0 = load i32, ptr %arrayidx, align 4 %sub = add i32 %sum, 10 %add = sub i32 %sub, %0 %inc = add nuw i32 %j, 1 @@ -84,8 +84,8 @@ for.inner: br i1 %exitcond, label %for.latch, label %for.inner for.latch: - %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i - store i32 %add, i32* %arrayidx6, align 4 + %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i + store i32 %add, ptr %arrayidx6, align 4 %add8 = add nuw nsw i32 %i, 1 %exitcond23 = icmp eq i32 %add8, 4 br i1 %exitcond23, label %for.end, label %for.outer diff --git a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll index a71b5e86138df9..463084e6fe6a1b 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll @@ -12,9 +12,9 @@ define void @fun1() local_unnamed_addr { entry: %mul = fmul <4 x double> zeroinitializer, %toi = fptosi <4 x double> %mul to <4 x i64> - %ptr = getelementptr inbounds %struct.a, %struct.a* undef, i64 0, i32 2 + %ptr = getelementptr inbounds %struct.a, ptr undef, i64 0, i32 2 %elem = extractelement <4 x i64> %toi, i32 1 - store i64 %elem, i64* %ptr, align 8 + store i64 %elem, ptr %ptr, align 8 call void 
@llvm.trap() unreachable } diff --git a/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll index e17add462ff8ca..2430551f7a9f4d 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll @@ -5,9 +5,9 @@ ; target triple = "aarch64-unknown-linux-gnu" -define void @test_crash(i8* %dst_ptr) { +define void @test_crash(ptr %dst_ptr) { entry: - %vec_load = load <4 x i16>, <4 x i16>* undef, align 8 + %vec_load = load <4 x i16>, ptr undef, align 8 %0 = sext <4 x i16> %vec_load to <4 x i32> %add71vec = add nsw <4 x i32> %0, %add104vec = add nsw <4 x i32> %add71vec, zeroinitializer @@ -16,13 +16,12 @@ entry: %1 = trunc <4 x i32> %vec to <4 x i16> %2 = shufflevector <4 x i16> %1, <4 x i16> undef, <2 x i32> %3 = sext <2 x i16> %2 to <2 x i32> - %4 = bitcast i8* %dst_ptr to <4 x i8>* - %5 = shufflevector <2 x i32> %3, <2 x i32> poison, <4 x i32> - %6 = shufflevector <4 x i32> undef, <4 x i32> %5, <4 x i32> - %7 = insertelement <4 x i32> %6, i32 undef, i64 3 - %8 = add nsw <4 x i32> %7, zeroinitializer - %9 = select <4 x i1> zeroinitializer, <4 x i32> %8, <4 x i32> undef - %10 = trunc <4 x i32> %9 to <4 x i8> - store <4 x i8> %10, <4 x i8>* %4, align 1 + %4 = shufflevector <2 x i32> %3, <2 x i32> poison, <4 x i32> + %5 = shufflevector <4 x i32> undef, <4 x i32> %4, <4 x i32> + %6 = insertelement <4 x i32> %5, i32 undef, i64 3 + %7 = add nsw <4 x i32> %6, zeroinitializer + %8 = select <4 x i1> zeroinitializer, <4 x i32> %7, <4 x i32> undef + %9 = trunc <4 x i32> %8 to <4 x i8> + store <4 x i8> %9, ptr %dst_ptr, align 1 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll b/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll index a7b20f25557ca3..52457f8d4cab15 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll @@ -8,8 +8,8 @@ define i32 @fun1() { entry: %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> , <16 x i8> undef) %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> - %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1 - store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1 + %scevgep = getelementptr <8 x i8>, ptr undef, i64 1 + store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1 ret i32 undef } @@ -19,8 +19,8 @@ define i32 @fun2() { entry: %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> , <16 x i8> undef) %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> - %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1 - store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1 + %scevgep = getelementptr <8 x i8>, ptr undef, i64 1 + store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1 ret i32 undef } @@ -30,8 +30,8 @@ define i32 @fun3() { entry: %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> , <16 x i8> undef) %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> - %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1 - store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1 + %scevgep = getelementptr <8 x i8>, ptr undef, i64 1 + store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1 ret i32 undef } @@ -41,21 +41,19 @@ define i32 @fun4() { entry: %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> , <16 x i8> undef) %vuzp.i212.1 = shufflevector <16 x 
i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> - %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1 - store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1 + %scevgep = getelementptr <8 x i8>, ptr undef, i64 1 + store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1 ret i32 undef } ; CHECK-LABEL: pr36582: ; Check that this does not ICE. -define void @pr36582(i8* %p1, i32* %p2) { +define void @pr36582(ptr %p1, ptr %p2) { entry: - %x = bitcast i8* %p1 to <8 x i8>* - %wide.vec = load <8 x i8>, <8 x i8>* %x, align 1 + %wide.vec = load <8 x i8>, ptr %p1, align 1 %strided.vec = shufflevector <8 x i8> %wide.vec, <8 x i8> undef, <4 x i32> %y = zext <4 x i8> %strided.vec to <4 x i32> - %z = bitcast i32* %p2 to <4 x i32>* - store <4 x i32> %y, <4 x i32>* %z, align 4 + store <4 x i32> %y, ptr %p2, align 4 ret void } @@ -63,8 +61,8 @@ entry: ; that the vector blend transform does not scramble the pattern. ; CHECK-LABEL: vzipNoBlend: ; CHECK: zip1 -define <8 x i8> @vzipNoBlend(<8 x i8>* %A, <8 x i16>* %B) nounwind { - %t = load <8 x i8>, <8 x i8>* %A +define <8 x i8> @vzipNoBlend(ptr %A, ptr %B) nounwind { + %t = load <8 x i8>, ptr %A %vzip = shufflevector <8 x i8> %t, <8 x i8> , <8 x i32> ret <8 x i8> %vzip } diff --git a/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll b/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll index b51798be169783..3c260629362501 100644 --- a/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll +++ b/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll @@ -10,7 +10,7 @@ define void @test_bitcast_v8f16_to_v4f32(<8 x half> %a) { %x = alloca <4 x float>, align 16 %y = bitcast <8 x half> %a to <4 x float> - store <4 x float> %y, <4 x float>* %x, align 16 + store <4 x float> %y, ptr %x, align 16 ret void } @@ -23,7 +23,7 @@ define void @test_bitcast_v8f16_to_v2f64(<8 x half> %a) { %x = alloca <2 x double>, align 16 %y = bitcast <8 x half> %a to <2 x double> - store <2 x double> %y, <2 x double>* %x, align 16 + store <2 x double> %y, ptr %x, align 16 ret void } @@ -36,7 +36,7 @@ define void @test_bitcast_v8f16_to_fp128(<8 x half> %a) { %x = alloca fp128, align 16 %y = bitcast <8 x half> %a to fp128 - store fp128 %y, fp128* %x, align 16 + store fp128 %y, ptr %x, align 16 ret void } @@ -49,7 +49,7 @@ define void @test_bitcast_v4f16_to_v2f32(<4 x half> %a) { %x = alloca <2 x float>, align 8 %y = bitcast <4 x half> %a to <2 x float> - store <2 x float> %y, <2 x float>* %x, align 8 + store <2 x float> %y, ptr %x, align 8 ret void } @@ -62,6 +62,6 @@ define void @test_bitcast_v4f16_to_v1f64(<4 x half> %a) { %x = alloca <1 x double>, align 8 %y = bitcast <4 x half> %a to <1 x double> - store <1 x double> %y, <1 x double>* %x, align 8 + store <1 x double> %y, ptr %x, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll index 50ec43d8862ac3..e7c3db33814f0b 100644 --- a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll +++ b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll @@ -18,20 +18,19 @@ define win64cc void @pass_va(i32 %count, ...) 
nounwind { ; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret entry: - %ap = alloca i8*, align 8 - %ap1 = bitcast i8** %ap to i8* - call void @llvm.va_start(i8* %ap1) - %ap2 = load i8*, i8** %ap, align 8 - call void @other_func(i8* %ap2) + %ap = alloca ptr, align 8 + call void @llvm.va_start(ptr %ap) + %ap2 = load ptr, ptr %ap, align 8 + call void @other_func(ptr %ap2) ret void } -declare void @other_func(i8*) local_unnamed_addr +declare void @other_func(ptr) local_unnamed_addr -declare void @llvm.va_start(i8*) nounwind -declare void @llvm.va_copy(i8*, i8*) nounwind +declare void @llvm.va_start(ptr) nounwind +declare void @llvm.va_copy(ptr, ptr) nounwind -define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind { +define win64cc ptr @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind { ; CHECK-LABEL: f9: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-16]! // 8-byte Folded Spill @@ -41,14 +40,13 @@ define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 ; CHECK-NEXT: ldr x18, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: - %ap = alloca i8*, align 8 - %ap1 = bitcast i8** %ap to i8* - call void @llvm.va_start(i8* %ap1) - %ap2 = load i8*, i8** %ap, align 8 - ret i8* %ap2 + %ap = alloca ptr, align 8 + call void @llvm.va_start(ptr %ap) + %ap2 = load ptr, ptr %ap, align 8 + ret ptr %ap2 } -define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind { +define win64cc ptr @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind { ; CHECK-LABEL: f8: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-16]! // 8-byte Folded Spill @@ -58,14 +56,13 @@ define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 ; CHECK-NEXT: ldr x18, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: - %ap = alloca i8*, align 8 - %ap1 = bitcast i8** %ap to i8* - call void @llvm.va_start(i8* %ap1) - %ap2 = load i8*, i8** %ap, align 8 - ret i8* %ap2 + %ap = alloca ptr, align 8 + call void @llvm.va_start(ptr %ap) + %ap2 = load ptr, ptr %ap, align 8 + ret ptr %ap2 } -define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind { +define win64cc ptr @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind { ; CHECK-LABEL: f7: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-32]! 
// 8-byte Folded Spill @@ -76,9 +73,8 @@ define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 ; CHECK-NEXT: ldr x18, [sp], #32 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: - %ap = alloca i8*, align 8 - %ap1 = bitcast i8** %ap to i8* - call void @llvm.va_start(i8* %ap1) - %ap2 = load i8*, i8** %ap, align 8 - ret i8* %ap2 + %ap = alloca ptr, align 8 + call void @llvm.va_start(ptr %ap) + %ap2 = load ptr, ptr %ap, align 8 + ret ptr %ap2 } diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll index 91c7ee7292c637..0df9131f3c554b 100644 --- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll +++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s | FileCheck %s target triple = "arm64-apple-ios7.0" -define i64 @foo(i64* nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 { +define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 { ; CHECK-LABEL: foo: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: ldr w8, [x0, #4] @@ -13,7 +13,7 @@ define i64 @foo(i64* nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unna ; CHECK-NEXT: ret entry: %0 = lshr i64 %a, 32 - %1 = load i64, i64* %ptr, align 8 + %1 = load i64, ptr %ptr, align 8 %2 = lshr i64 %1, 32 %3 = mul nuw i64 %2, %0 %4 = add i64 %c, %b diff --git a/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll b/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll index a6bc36441b1154..2de708d66f59f2 100644 --- a/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll +++ b/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll @@ -2,22 +2,22 @@ ; Test generated from C code: ; #include -; void *foo() { +; ptr foo() { ; return _AddressOfReturnAddress(); ; } -; int bar(int x(va_list, void*), ...) { +; int bar(int x(va_list, ptr), ...) { ; va_list y; ; va_start(y, x); ; return x(y, _AddressOfReturnAddress()) + 1; ; } -declare void @llvm.va_start(i8*) -declare i8* @llvm.addressofreturnaddress() +declare void @llvm.va_start(ptr) +declare ptr @llvm.addressofreturnaddress() -define dso_local i8* @"foo"() { +define dso_local ptr @"foo"() { entry: - %0 = call i8* @llvm.addressofreturnaddress() - ret i8* %0 + %0 = call ptr @llvm.addressofreturnaddress() + ret ptr %0 ; CHECK-LABEL: foo ; CHECK: stp x29, x30, [sp, #-16]! @@ -26,17 +26,16 @@ entry: ; CHECK: ldp x29, x30, [sp], #16 } -define dso_local i32 @"bar"(i32 (i8*, i8*)* %x, ...) { +define dso_local i32 @"bar"(ptr %x, ...) 
{ entry: - %x.addr = alloca i32 (i8*, i8*)*, align 8 - %y = alloca i8*, align 8 - store i32 (i8*, i8*)* %x, i32 (i8*, i8*)** %x.addr, align 8 - %y1 = bitcast i8** %y to i8* - call void @llvm.va_start(i8* %y1) - %0 = load i32 (i8*, i8*)*, i32 (i8*, i8*)** %x.addr, align 8 - %1 = call i8* @llvm.addressofreturnaddress() - %2 = load i8*, i8** %y, align 8 - %call = call i32 %0(i8* %2, i8* %1) + %x.addr = alloca ptr, align 8 + %y = alloca ptr, align 8 + store ptr %x, ptr %x.addr, align 8 + call void @llvm.va_start(ptr %y) + %0 = load ptr, ptr %x.addr, align 8 + %1 = call ptr @llvm.addressofreturnaddress() + %2 = load ptr, ptr %y, align 8 + %call = call i32 %0(ptr %2, ptr %1) %add = add nsw i32 %call, 1 ret i32 %add diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll index d5c6eacc742353..2580d3532ba0db 100644 --- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll +++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll @@ -9,66 +9,66 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) { ; CHECK-LABEL: test_lsl_arith: - %rhs1 = load volatile i32, i32* @var32 + %rhs1 = load volatile i32, ptr @var32 %shift1 = shl i32 %rhs1, 18 %val1 = add i32 %lhs32, %shift1 - store volatile i32 %val1, i32* @var32 + store volatile i32 %val1, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18 - %rhs2 = load volatile i32, i32* @var32 + %rhs2 = load volatile i32, ptr @var32 %shift2 = shl i32 %rhs2, 31 %val2 = add i32 %shift2, %lhs32 - store volatile i32 %val2, i32* @var32 + store volatile i32 %val2, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31 - %rhs3 = load volatile i32, i32* @var32 + %rhs3 = load volatile i32, ptr @var32 %shift3 = shl i32 %rhs3, 5 %val3 = sub i32 %lhs32, %shift3 - store volatile i32 %val3, i32* @var32 + store volatile i32 %val3, ptr @var32 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5 ; Subtraction is not commutative! - %rhs4 = load volatile i32, i32* @var32 + %rhs4 = load volatile i32, ptr @var32 %shift4 = shl i32 %rhs4, 19 %val4 = sub i32 %shift4, %lhs32 - store volatile i32 %val4, i32* @var32 + store volatile i32 %val4, ptr @var32 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19 - %lhs4a = load volatile i32, i32* @var32 + %lhs4a = load volatile i32, ptr @var32 %shift4a = shl i32 %lhs4a, 15 %val4a = sub i32 0, %shift4a - store volatile i32 %val4a, i32* @var32 + store volatile i32 %val4a, ptr @var32 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15 - %rhs5 = load volatile i64, i64* @var64 + %rhs5 = load volatile i64, ptr @var64 %shift5 = shl i64 %rhs5, 18 %val5 = add i64 %lhs64, %shift5 - store volatile i64 %val5, i64* @var64 + store volatile i64 %val5, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18 - %rhs6 = load volatile i64, i64* @var64 + %rhs6 = load volatile i64, ptr @var64 %shift6 = shl i64 %rhs6, 31 %val6 = add i64 %shift6, %lhs64 - store volatile i64 %val6, i64* @var64 + store volatile i64 %val6, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31 - %rhs7 = load volatile i64, i64* @var64 + %rhs7 = load volatile i64, ptr @var64 %shift7 = shl i64 %rhs7, 5 %val7 = sub i64 %lhs64, %shift7 - store volatile i64 %val7, i64* @var64 + store volatile i64 %val7, ptr @var64 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5 ; Subtraction is not commutative! 
- %rhs8 = load volatile i64, i64* @var64 + %rhs8 = load volatile i64, ptr @var64 %shift8 = shl i64 %rhs8, 19 %val8 = sub i64 %shift8, %lhs64 - store volatile i64 %val8, i64* @var64 + store volatile i64 %val8, ptr @var64 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19 - %lhs8a = load volatile i64, i64* @var64 + %lhs8a = load volatile i64, ptr @var64 %shift8a = shl i64 %lhs8a, 60 %val8a = sub i64 0, %shift8a - store volatile i64 %val8a, i64* @var64 + store volatile i64 %val8a, ptr @var64 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60 ret void @@ -80,54 +80,54 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) { %shift1 = lshr i32 %rhs32, 18 %val1 = add i32 %lhs32, %shift1 - store volatile i32 %val1, i32* @var32 + store volatile i32 %val1, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18 %shift2 = lshr i32 %rhs32, 31 %val2 = add i32 %shift2, %lhs32 - store volatile i32 %val2, i32* @var32 + store volatile i32 %val2, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31 %shift3 = lshr i32 %rhs32, 5 %val3 = sub i32 %lhs32, %shift3 - store volatile i32 %val3, i32* @var32 + store volatile i32 %val3, ptr @var32 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5 ; Subtraction is not commutative! %shift4 = lshr i32 %rhs32, 19 %val4 = sub i32 %shift4, %lhs32 - store volatile i32 %val4, i32* @var32 + store volatile i32 %val4, ptr @var32 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19 %shift4a = lshr i32 %lhs32, 15 %val4a = sub i32 0, %shift4a - store volatile i32 %val4a, i32* @var32 + store volatile i32 %val4a, ptr @var32 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15 %shift5 = lshr i64 %rhs64, 18 %val5 = add i64 %lhs64, %shift5 - store volatile i64 %val5, i64* @var64 + store volatile i64 %val5, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18 %shift6 = lshr i64 %rhs64, 31 %val6 = add i64 %shift6, %lhs64 - store volatile i64 %val6, i64* @var64 + store volatile i64 %val6, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31 %shift7 = lshr i64 %rhs64, 5 %val7 = sub i64 %lhs64, %shift7 - store volatile i64 %val7, i64* @var64 + store volatile i64 %val7, ptr @var64 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5 ; Subtraction is not commutative! %shift8 = lshr i64 %rhs64, 19 %val8 = sub i64 %shift8, %lhs64 - store volatile i64 %val8, i64* @var64 + store volatile i64 %val8, ptr @var64 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19 %shift8a = lshr i64 %lhs64, 45 %val8a = sub i64 0, %shift8a - store volatile i64 %val8a, i64* @var64 + store volatile i64 %val8a, ptr @var64 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45 ret void @@ -139,54 +139,54 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) { %shift1 = ashr i32 %rhs32, 18 %val1 = add i32 %lhs32, %shift1 - store volatile i32 %val1, i32* @var32 + store volatile i32 %val1, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18 %shift2 = ashr i32 %rhs32, 31 %val2 = add i32 %shift2, %lhs32 - store volatile i32 %val2, i32* @var32 + store volatile i32 %val2, ptr @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31 %shift3 = ashr i32 %rhs32, 5 %val3 = sub i32 %lhs32, %shift3 - store volatile i32 %val3, i32* @var32 + store volatile i32 %val3, ptr @var32 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5 ; Subtraction is not commutative! 
%shift4 = ashr i32 %rhs32, 19 %val4 = sub i32 %shift4, %lhs32 - store volatile i32 %val4, i32* @var32 + store volatile i32 %val4, ptr @var32 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19 %shift4a = ashr i32 %lhs32, 15 %val4a = sub i32 0, %shift4a - store volatile i32 %val4a, i32* @var32 + store volatile i32 %val4a, ptr @var32 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15 %shift5 = ashr i64 %rhs64, 18 %val5 = add i64 %lhs64, %shift5 - store volatile i64 %val5, i64* @var64 + store volatile i64 %val5, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18 %shift6 = ashr i64 %rhs64, 31 %val6 = add i64 %shift6, %lhs64 - store volatile i64 %val6, i64* @var64 + store volatile i64 %val6, ptr @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31 %shift7 = ashr i64 %rhs64, 5 %val7 = sub i64 %lhs64, %shift7 - store volatile i64 %val7, i64* @var64 + store volatile i64 %val7, ptr @var64 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5 ; Subtraction is not commutative! %shift8 = ashr i64 %rhs64, 19 %val8 = sub i64 %shift8, %lhs64 - store volatile i64 %val8, i64* @var64 + store volatile i64 %val8, ptr @var64 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19 %shift8a = ashr i64 %lhs64, 45 %val8a = sub i64 0, %shift8a - store volatile i64 %val8a, i64* @var64 + store volatile i64 %val8a, ptr @var64 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45 ret void @@ -202,42 +202,42 @@ define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) { ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13 t2: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 %shift2 = lshr i32 %rhs32, 20 %tst2 = icmp ne i32 %lhs32, %shift2 br i1 %tst2, label %t3, label %end ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20 t3: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 %shift3 = ashr i32 %rhs32, 9 %tst3 = icmp ne i32 %lhs32, %shift3 br i1 %tst3, label %t4, label %end ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9 t4: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 %shift4 = shl i64 %rhs64, 43 %tst4 = icmp uge i64 %lhs64, %shift4 br i1 %tst4, label %t5, label %end ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43 t5: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 %shift5 = lshr i64 %rhs64, 20 %tst5 = icmp ne i64 %lhs64, %shift5 br i1 %tst5, label %t6, label %end ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20 t6: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 %shift6 = ashr i64 %rhs64, 59 %tst6 = icmp ne i64 %lhs64, %shift6 br i1 %tst6, label %t7, label %end ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59 t7: - store volatile i32 %v, i32* @var32 + store volatile i32 %v, ptr @var32 br label %end end: diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll index 3020576e0bbf0e..3848a3304c7dca 100644 --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -25,13 +25,13 @@ define void @add_small() { ; CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret - %val32 = load i32, i32* @var_i32 + %val32 = load i32, ptr @var_i32 %newval32 = add i32 %val32, 4095 - store i32 %newval32, i32* @var_i32 + store i32 %newval32, ptr @var_i32 - %val64 = load i64, i64* @var_i64 + %val64 = load i64, ptr @var_i64 %newval64 = add i64 %val64, 52 - store i64 %newval64, i64* @var_i64 + store i64 %newval64, ptr @var_i64 ret void } @@ -45,7 +45,7 @@ define void @add_small() { ; whereas this can be achieved with: ; wA = 
ldrb ; xC = add xA, #12 ; <- xA implicitly zero extend wA. -define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) { +define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) { ; CHECK-LABEL: add_small_imm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -56,15 +56,15 @@ define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) { ; CHECK-NEXT: ret entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %promoted = zext i8 %t to i64 %zextt = zext i8 %t to i32 %add = add nuw i32 %zextt, %b %add2 = add nuw i64 %promoted, 12 - store i32 %add, i32* %addr + store i32 %add, ptr %addr - store i64 %add2, i64* %q + store i64 %add2, ptr %q ret void } @@ -84,13 +84,13 @@ define void @add_med() { ; CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret - %val32 = load i32, i32* @var_i32 + %val32 = load i32, ptr @var_i32 %newval32 = add i32 %val32, 14610432 ; =0xdef000 - store i32 %newval32, i32* @var_i32 + store i32 %newval32, ptr @var_i32 - %val64 = load i64, i64* @var_i64 + %val64 = load i64, ptr @var_i64 %newval64 = add i64 %val64, 16773120 ; =0xfff000 - store i64 %newval64, i64* @var_i64 + store i64 %newval64, ptr @var_i64 ret void } @@ -111,13 +111,13 @@ define void @sub_small() { ; CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret - %val32 = load i32, i32* @var_i32 + %val32 = load i32, ptr @var_i32 %newval32 = sub i32 %val32, 4095 - store i32 %newval32, i32* @var_i32 + store i32 %newval32, ptr @var_i32 - %val64 = load i64, i64* @var_i64 + %val64 = load i64, ptr @var_i64 %newval64 = sub i64 %val64, 52 - store i64 %newval64, i64* @var_i64 + store i64 %newval64, ptr @var_i64 ret void } @@ -138,13 +138,13 @@ define void @sub_med() { ; CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret - %val32 = load i32, i32* @var_i32 + %val32 = load i32, ptr @var_i32 %newval32 = sub i32 %val32, 14610432 ; =0xdef000 - store i32 %newval32, i32* @var_i32 + store i32 %newval32, ptr @var_i32 - %val64 = load i64, i64* @var_i64 + %val64 = load i64, ptr @var_i64 %newval64 = sub i64 %val64, 16773120 ; =0xfff000 - store i64 %newval64, i64* @var_i64 + store i64 %newval64, ptr @var_i64 ret void } @@ -309,39 +309,39 @@ define void @testing() { ; CHECK-NEXT: str w9, [x8] ; CHECK-NEXT: .LBB16_6: // %common.ret ; CHECK-NEXT: ret - %val = load i32, i32* @var_i32 - %val2 = load i32, i32* @var2_i32 + %val = load i32, ptr @var_i32 + %val2 = load i32, ptr @var2_i32 %cmp_pos_small = icmp ne i32 %val, 4095 br i1 %cmp_pos_small, label %ret, label %test2 test2: %newval2 = add i32 %val, 1 - store i32 %newval2, i32* @var_i32 + store i32 %newval2, ptr @var_i32 %cmp_pos_big = icmp ult i32 %val2, 14610432 br i1 %cmp_pos_big, label %ret, label %test3 test3: %newval3 = add i32 %val, 2 - store i32 %newval3, i32* @var_i32 + store i32 %newval3, ptr @var_i32 %cmp_pos_slt = icmp slt i32 %val, 123 br i1 %cmp_pos_slt, label %ret, label %test4 test4: %newval4 = add i32 %val, 3 - store i32 %newval4, i32* @var_i32 + store i32 %newval4, ptr @var_i32 %cmp_pos_sgt = icmp sgt i32 %val2, 321 br i1 %cmp_pos_sgt, label %ret, label %test5 test5: %newval5 = add i32 %val, 4 - store i32 %newval5, i32* @var_i32 + store i32 %newval5, ptr @var_i32 %cmp_neg_uge = icmp sgt i32 %val2, -444 br i1 %cmp_neg_uge, label %ret, label %test6 test6: %newval6 = add i32 %val, 5 - store i32 %newval6, i32* @var_i32 + store i32 %newval6, ptr @var_i32 ret void ret: @@ -350,7 +350,7 @@ ret: declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) -define i1 @sadd_add(i32 %a, i32 %b, i32* %p) { +define i1 @sadd_add(i32 %a, i32 %b, ptr %p) { ; CHECK-LABEL: sadd_add: ; CHECK: // 
%bb.0: ; CHECK-NEXT: mvn w8, w0 @@ -364,13 +364,13 @@ define i1 @sadd_add(i32 %a, i32 %b, i32* %p) { %e0 = extractvalue {i32, i1} %a0, 0 %e1 = extractvalue {i32, i1} %a0, 1 %res = add i32 %e0, 1 - store i32 %res, i32* %p + store i32 %res, ptr %p ret i1 %e1 } declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b) -define i1 @uadd_add(i8 %a, i8 %b, i8* %p) { +define i1 @uadd_add(i8 %a, i8 %b, ptr %p) { ; CHECK-LABEL: uadd_add: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #255 @@ -385,7 +385,7 @@ define i1 @uadd_add(i8 %a, i8 %b, i8* %p) { %e0 = extractvalue {i8, i1} %a0, 0 %e1 = extractvalue {i8, i1} %a0, 1 %res = add i8 %e0, 1 - store i8 %res, i8* %p + store i8 %res, ptr %p ret i1 %e1 } @@ -636,7 +636,7 @@ define dso_local i32 @neigh_periodic_work_tbl_1() { ; CHECK-NEXT: .LBB35_2: // %if.end ; CHECK-NEXT: ret entry: - br i1 icmp slt (i64 add (i64 ptrtoint (i32 ()* @neigh_periodic_work_tbl_1 to i64), i64 75000), i64 0), label %for.cond, label %if.end + br i1 icmp slt (i64 add (i64 ptrtoint (ptr @neigh_periodic_work_tbl_1 to i64), i64 75000), i64 0), label %for.cond, label %if.end for.cond: ; preds = %entry, %for.cond br label %for.cond if.end: ; preds = %entry @@ -676,19 +676,19 @@ define dso_local i32 @_extract_crng_crng() { ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret entry: - br i1 icmp slt (i32 ()* @_extract_crng_crng, i32 ()* null), label %if.then, label %lor.lhs.false + br i1 icmp slt (ptr @_extract_crng_crng, ptr null), label %if.then, label %lor.lhs.false lor.lhs.false: ; preds = %entry - %0 = load i32, i32* @jiffies, align 4 + %0 = load i32, ptr @jiffies, align 4 %idx.ext = sext i32 %0 to i64 %idx.neg = sub nsw i64 0, %idx.ext - %add.ptr = getelementptr i8, i8* getelementptr (i8, i8* bitcast (i32 ()* @_extract_crng_crng to i8*), i64 75000), i64 %idx.neg - %cmp = icmp slt i8* %add.ptr, null + %add.ptr = getelementptr i8, ptr getelementptr (i8, ptr @_extract_crng_crng, i64 75000), i64 %idx.neg + %cmp = icmp slt ptr %add.ptr, null br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %lor.lhs.false, %entry - %1 = load i32, i32* @primary_crng, align 4 + %1 = load i32, ptr @primary_crng, align 4 %tobool.not = icmp eq i32 %1, 0 - %cond = select i1 %tobool.not, i32* null, i32* @input_pool - %call = tail call i32 bitcast (i32 (...)* @crng_reseed to i32 (i32*)*)(i32* noundef %cond) + %cond = select i1 %tobool.not, ptr null, ptr @input_pool + %call = tail call i32 @crng_reseed(ptr noundef %cond) br label %if.end if.end: ; preds = %if.then, %lor.lhs.false ret i32 undef diff --git a/llvm/test/CodeGen/AArch64/alloca.ll b/llvm/test/CodeGen/AArch64/alloca.ll index e7906a1e9d2fd4..ca3a500d79f327 100644 --- a/llvm/test/CodeGen/AArch64/alloca.ll +++ b/llvm/test/CodeGen/AArch64/alloca.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=arm64-apple-ios -disable-post-ra -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK-MACHO ; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-post-ra -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP-ARM64 %s -declare void @use_addr(i8*) +declare void @use_addr(ptr) define void @test_simple_alloca(i64 %n) { ; CHECK-LABEL: test_simple_alloca: @@ -19,7 +19,7 @@ define void @test_simple_alloca(i64 %n) { ; CHECK: sub [[NEWSP:x[0-9]+]], [[TMP]], [[SPDELTA]] ; CHECK: mov sp, [[NEWSP]] - call void @use_addr(i8* %buf) + call void @use_addr(ptr %buf) ; CHECK: bl use_addr ret void @@ -28,7 +28,7 @@ define void @test_simple_alloca(i64 %n) { ; CHECK: ret } -declare void @use_addr_loc(i8*, i64*) +declare void 
@use_addr_loc(ptr, ptr) define i64 @test_alloca_with_local(i64 %n) { ; CHECK-LABEL: test_alloca_with_local: @@ -49,10 +49,10 @@ define i64 @test_alloca_with_local(i64 %n) { ; CHECK: sub {{x[0-9]+}}, x29, #[[LOC_FROM_FP:[0-9]+]] - call void @use_addr_loc(i8* %buf, i64* %loc) + call void @use_addr_loc(ptr %buf, ptr %loc) ; CHECK: bl use_addr - %val = load i64, i64* %loc + %val = load i64, ptr %loc ; CHECK: ldur x0, [x29, #-[[LOC_FROM_FP]]] @@ -99,7 +99,7 @@ define void @test_variadic_alloca(i64 %n, ...) { %addr = alloca i8, i64 %n - call void @use_addr(i8* %addr) + call void @use_addr(ptr %addr) ; CHECK: bl use_addr ret void @@ -132,7 +132,7 @@ define void @test_alloca_large_frame(i64 %n) { %addr1 = alloca i8, i64 %n %addr2 = alloca i64, i64 1000000 - call void @use_addr_loc(i8* %addr1, i64* %addr2) + call void @use_addr_loc(ptr %addr1, ptr %addr2) ret void @@ -145,13 +145,13 @@ define void @test_alloca_large_frame(i64 %n) { ; CHECK-MACHO: ldp x20, x19, [sp], #32 } -declare i8* @llvm.stacksave() -declare void @llvm.stackrestore(i8*) +declare ptr @llvm.stacksave() +declare void @llvm.stackrestore(ptr) define void @test_scoped_alloca(i64 %n) { ; CHECK-LABEL: test_scoped_alloca: - %sp = call i8* @llvm.stacksave() + %sp = call ptr @llvm.stacksave() ; CHECK: mov x29, sp ; CHECK: mov [[SAVED_SP:x[0-9]+]], sp ; CHECK: mov [[OLDSP:x[0-9]+]], sp @@ -161,10 +161,10 @@ define void @test_scoped_alloca(i64 %n) { ; CHECK-DAG: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]] ; CHECK: mov sp, [[NEWSP]] - call void @use_addr(i8* %addr) + call void @use_addr(ptr %addr) ; CHECK: bl use_addr - call void @llvm.stackrestore(i8* %sp) + call void @llvm.stackrestore(ptr %sp) ; CHECK: mov sp, [[SAVED_SP]] ret void diff --git a/llvm/test/CodeGen/AArch64/analyzecmp.ll b/llvm/test/CodeGen/AArch64/analyzecmp.ll index 0b3bcd887b5b6e..3d7644e320cc21 100644 --- a/llvm/test/CodeGen/AArch64/analyzecmp.ll +++ b/llvm/test/CodeGen/AArch64/analyzecmp.ll @@ -7,7 +7,7 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" target triple = "arm64--linux-gnueabi" -define void @test(i64 %a, i64* %ptr1, i64* %ptr2) #0 align 2 { +define void @test(i64 %a, ptr %ptr1, ptr %ptr2) #0 align 2 { entry: %conv = and i64 %a, 4294967295 %add = add nsw i64 %conv, -1 @@ -24,8 +24,8 @@ if.then: exit: %__n = phi i64 [ %add3, %if.then ], [ %div, %entry ] %__n.0 = phi i64 [ %add2, %if.then ], [ %rem, %entry ] - store i64 %__n, i64* %ptr1 - store i64 %__n.0, i64* %ptr2 + store i64 %__n, ptr %ptr1 + store i64 %__n.0, ptr %ptr2 ret void } diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll index d5b9ca253e8dad..f3307144e08df3 100644 --- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll +++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll @@ -47,16 +47,16 @@ define void @new_position(i32 %pos) { ; CHECK-GI-NEXT: ret entry: %idxprom = sext i32 %pos to i64 - %arrayidx = getelementptr inbounds [400 x i8], [400 x i8]* @board, i64 0, i64 %idxprom - %tmp = load i8, i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds [400 x i8], ptr @board, i64 0, i64 %idxprom + %tmp = load i8, ptr %arrayidx, align 1 %.off = add i8 %tmp, -1 %switch = icmp ult i8 %.off, 2 br i1 %switch, label %if.then, label %if.end if.then: ; preds = %entry - %tmp1 = load i32, i32* @next_string, align 4 - %arrayidx8 = getelementptr inbounds [400 x i32], [400 x i32]* @string_number, i64 0, i64 %idxprom - store i32 %tmp1, i32* %arrayidx8, align 4 + %tmp1 = load i32, ptr @next_string, align 4 + %arrayidx8 = getelementptr inbounds [400 x 
i32], ptr @string_number, i64 0, i64 %idxprom + store i32 %tmp1, ptr %arrayidx8, align 4 br label %if.end if.end: ; preds = %if.then, %entry diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll index 9b4a627b1efc80..f4e9551259e4e6 100644 --- a/llvm/test/CodeGen/AArch64/and-sink.ll +++ b/llvm/test/CodeGen/AArch64/and-sink.ll @@ -24,7 +24,7 @@ bb0: ; CHECK-CGP-NEXT: store ; CHECK-CGP-NEXT: br %cmp = icmp eq i32 %and, 0 - store i32 0, i32* @A + store i32 0, ptr @A br i1 %cmp, label %bb1, label %bb2 bb1: ret i32 1 @@ -45,14 +45,14 @@ define dso_local i32 @and_sink2(i32 %a, i1 %c, i1 %c2) { ; CHECK-CGP-LABEL: @and_sink2( ; CHECK-CGP-NOT: and i32 %and = and i32 %a, 4 - store i32 0, i32* @A + store i32 0, ptr @A br i1 %c, label %bb0, label %bb3 bb0: ; CHECK-CGP-LABEL: bb0: ; CHECK-CGP-NOT: and i32 ; CHECK-CGP-NOT: icmp %cmp = icmp eq i32 %and, 0 - store i32 0, i32* @B + store i32 0, ptr @B br i1 %c2, label %bb1, label %bb3 bb1: ; CHECK-CGP-LABEL: bb1: @@ -60,7 +60,7 @@ bb1: ; CHECK-CGP-NEXT: icmp eq i32 ; CHECK-CGP-NEXT: store ; CHECK-CGP-NEXT: br - store i32 0, i32* @C + store i32 0, ptr @C br i1 %cmp, label %bb2, label %bb0 bb2: ret i32 1 @@ -84,7 +84,7 @@ bb0: ; CHECK-CGP-LABEL: bb0: ; CHECK-CGP-NOT: and i32 %cmp = icmp eq i32 %and, 0 - store i32 0, i32* @A + store i32 0, ptr @A br i1 %cmp, label %bb0, label %bb2 bb2: ret i32 0 diff --git a/llvm/test/CodeGen/AArch64/andorbrcompare.ll b/llvm/test/CodeGen/AArch64/andorbrcompare.ll index 8a16d9af1b06d3..a2485495ec72f3 100644 --- a/llvm/test/CodeGen/AArch64/andorbrcompare.ll +++ b/llvm/test/CodeGen/AArch64/andorbrcompare.ll @@ -4,7 +4,7 @@ declare void @dummy() -define i32 @and_eq_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_eq_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_eq_ne_ult: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -46,14 +46,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_ne_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_ne_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_ne_ult_ule: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -95,14 +95,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_ult_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_ult_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_ult_ule_ugt: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -144,14 +144,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_ule_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_ule_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_ule_ugt_uge: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -193,14 +193,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_ugt_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_ugt_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_ugt_uge_slt: ; SDISEL: // 
%bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -242,14 +242,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_uge_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_uge_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_uge_slt_sle: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -291,14 +291,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_slt_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_slt_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_slt_sle_sgt: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -340,14 +340,14 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: ret i32 0 } -define i32 @and_sle_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) { +define i32 @and_sle_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) { ; SDISEL-LABEL: and_sle_sgt_sge: ; SDISEL: // %bb.0: // %entry ; SDISEL-NEXT: cmp w2, w3 @@ -389,7 +389,7 @@ entry: br i1 %o, label %if, label %else if: - store i32 1, i32* %p + store i32 1, ptr %p ret i32 1 else: diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll index 0b11b1555fb88e..e90f89f359ac76 100644 --- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll +++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll @@ -394,7 +394,7 @@ define void @caller_in_block() { ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %1 = call %T_IN_BLOCK @return_in_block() - store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store + store %T_IN_BLOCK %1, ptr @in_block_store ret void } @@ -410,7 +410,7 @@ define void @callee_in_block(%T_IN_BLOCK %a) { ; CHECK-NEXT: str d1, [x8, #8] ; CHECK-NEXT: str d0, [x8] ; CHECK-NEXT: ret - store %T_IN_BLOCK %a, %T_IN_BLOCK* @in_block_store + store %T_IN_BLOCK %a, ptr @in_block_store ret void } @@ -428,7 +428,7 @@ define void @argument_in_block() { ; CHECK-NEXT: bl callee_in_block ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %1 = load %T_IN_BLOCK, %T_IN_BLOCK* @in_block_store + %1 = load %T_IN_BLOCK, ptr @in_block_store call void @callee_in_block(%T_IN_BLOCK %1) ret void } @@ -471,7 +471,7 @@ define void @caller_in_memory() { ; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret %1 = call %T_IN_MEMORY @return_in_memory() - store %T_IN_MEMORY %1, %T_IN_MEMORY* @in_memory_store + store %T_IN_MEMORY %1, ptr @in_memory_store ret void } @@ -488,7 +488,7 @@ define void @callee_in_memory(%T_IN_MEMORY %a) { ; CHECK-NEXT: stp q1, q2, [x8, #32] ; CHECK-NEXT: stp q0, q3, [x8] ; CHECK-NEXT: ret - store %T_IN_MEMORY %a, %T_IN_MEMORY* @in_memory_store + store %T_IN_MEMORY %a, ptr @in_memory_store ret void } @@ -511,7 +511,7 @@ define void @argument_in_memory() { ; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload ; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret - %1 = load %T_IN_MEMORY, %T_IN_MEMORY* @in_memory_store + %1 = load %T_IN_MEMORY, ptr @in_memory_store call void @callee_in_memory(%T_IN_MEMORY %1) ret void } @@ -547,7 +547,7 @@ define void @caller_no_block() { ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %1 = 
call %T_NO_BLOCK @return_no_block() - store %T_NO_BLOCK %1, %T_NO_BLOCK* @no_block_store + store %T_NO_BLOCK %1, ptr @no_block_store ret void } @@ -561,7 +561,7 @@ define void @callee_no_block(%T_NO_BLOCK %a) { ; CHECK-NEXT: str w0, [x8, #8] ; CHECK-NEXT: str d0, [x8] ; CHECK-NEXT: ret - store %T_NO_BLOCK %a, %T_NO_BLOCK* @no_block_store + store %T_NO_BLOCK %a, ptr @no_block_store ret void } @@ -580,7 +580,7 @@ define void @argument_no_block() { ; CHECK-NEXT: bl callee_no_block ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %1 = load %T_NO_BLOCK, %T_NO_BLOCK* @no_block_store + %1 = load %T_NO_BLOCK, ptr @no_block_store call void @callee_no_block(%T_NO_BLOCK %1) ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll index 6fb7c3fb5e0aac..f31f43f1fde45a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll @@ -21,7 +21,7 @@ _ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71: ; preds = %bb.i69, %1 = fdiv float %0, undef %2 = fcmp ult float %1, 0xBF847AE140000000 %storemerge9 = select i1 %2, float %1, float 0.000000e+00 - store float %storemerge9, float* undef, align 4 + store float %storemerge9, ptr undef, align 4 br i1 undef, label %bb42, label %bb47 bb42: ; preds = %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71 diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll index d9d12c3f43e78b..a4720a9a738f72 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll @@ -14,8 +14,8 @@ if.then24: ; preds = %entry unreachable if.else295: ; preds = %entry - call void @llvm.dbg.declare(metadata i32* %do_tab_convert, metadata !14, metadata !16), !dbg !17 - store i32 0, i32* %do_tab_convert, align 4, !dbg !18 + call void @llvm.dbg.declare(metadata ptr %do_tab_convert, metadata !14, metadata !16), !dbg !17 + store i32 0, ptr %do_tab_convert, align 4, !dbg !18 unreachable } diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll index 72213bbcf9675a..7c25e9e609a176 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll @@ -5,8 +5,7 @@ define void @foo(i64 %val) { ; instruction that can handle that. 
; CHECK: stur x0, [sp, #20] %a = alloca [49 x i32], align 4 - %p32 = getelementptr inbounds [49 x i32], [49 x i32]* %a, i64 0, i64 2 - %p = bitcast i32* %p32 to i64* - store i64 %val, i64* %p, align 8 + %p32 = getelementptr inbounds [49 x i32], ptr %a, i64 0, i64 2 + store i64 %val, ptr %p32, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll index e2c39e0b62328b..83a9ae7a1b7b0a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll @@ -5,10 +5,10 @@ define hidden void @t() nounwind { entry: - %cmp = icmp eq i32* null, undef + %cmp = icmp eq ptr null, undef %frombool = zext i1 %cmp to i8 - store i8 %frombool, i8* undef, align 1 - %tmp4 = load i8, i8* undef, align 1 + store i8 %frombool, ptr undef, align 1 + %tmp4 = load i8, ptr undef, align 1 %tobool = trunc i8 %tmp4 to i1 br i1 %tobool, label %land.lhs.true, label %if.end diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll index b69cd242116629..3b6c4fa875e604 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll @@ -18,9 +18,9 @@ for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %0 = shl nsw i64 %indvars.iv, 12 %add = add nsw i64 %0, 34628173824 - %1 = inttoptr i64 %add to i32* - %2 = load volatile i32, i32* %1, align 4096 - store volatile i32 %2, i32* @test_data, align 4 + %1 = inttoptr i64 %add to ptr + %2 = load volatile i32, ptr %1, align 4096 + store volatile i32 %2, ptr @test_data, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 %exitcond = icmp eq i32 %lftr.wideiv, 200 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll index b8855fb5cdb396..b7b111c60bd9ba 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll @@ -13,7 +13,7 @@ lor.lhs.false: br i1 undef, label %return, label %if.end if.end: - %tmp.i = load i64, i64* undef, align 8 + %tmp.i = load i64, ptr undef, align 8 %and.i.i.i = and i64 %tmp.i, -16 br i1 %IsArrow, label %if.else_crit_edge, label %if.end32 @@ -26,7 +26,7 @@ if.end32: %.pn.v = select i1 %0, i320 128, i320 64 %.pn = shl i320 %1, %.pn.v %ins346392 = or i320 %.pn, 0 - store i320 %ins346392, i320* undef, align 8 + store i320 %ins346392, ptr undef, align 8 br i1 undef, label %sw.bb.i.i, label %exit sw.bb.i.i: diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll index dc1dc56eedb99e..626dba4c321272 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll @@ -13,9 +13,9 @@ ; CHECK-NEXT: str [[VAL]], [x0, #8] ; CHECK-NEXT: str [[VAL2]], [x0] -define void @foo(i8* %a) { - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast ([3 x i32]* @b to i8*), i64 12, i1 false) +define void @foo(ptr %a) { + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 @b, i64 12, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind +declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff 
--git a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll index 7da2d2ca513e5a..f859d1f66e60db 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll @@ -2,9 +2,9 @@ ; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK-LINUX ; -define hidden void @t(i64* %addr) optsize ssp { +define hidden void @t(ptr %addr) optsize ssp { entry: - store i64 zext (i32 ptrtoint (i64 (i32)* @x to i32) to i64), i64* %addr, align 8 + store i64 zext (i32 ptrtoint (ptr @x to i32) to i64), ptr %addr, align 8 ; CHECK: adrp x{{[0-9]+}}, _x@GOTPAGE ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, _x@GOTPAGEOFF] ; CHECK-NEXT: and x{{[0-9]+}}, x{{[0-9]+}}, #0xffffffff diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll index bd0028c74528c6..972879f0db3575 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll @@ -11,27 +11,22 @@ @"OBJC_IVAR_$_UIScreen._bounds" = external hidden global i64, section "__DATA, __objc_ivar", align 8 -define hidden %struct.CGRect @t(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp { +define hidden %struct.CGRect @t(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp { entry: ; CHECK-LABEL: t: ; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}} - %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4 - %0 = bitcast %0* %self to i8* - %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar - %add.ptr10.0 = bitcast i8* %add.ptr to double* - %tmp11 = load double, double* %add.ptr10.0, align 8 + %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4 + %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar + %tmp11 = load double, ptr %add.ptr, align 8 %add.ptr.sum = add i64 %ivar, 8 - %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum - %1 = bitcast i8* %add.ptr10.1 to double* - %tmp12 = load double, double* %1, align 8 + %add.ptr10.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum + %tmp12 = load double, ptr %add.ptr10.1, align 8 %add.ptr.sum17 = add i64 %ivar, 16 - %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17 - %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double* - %tmp = load double, double* %add.ptr4.1.0, align 8 + %add.ptr4.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum17 + %tmp = load double, ptr %add.ptr4.1, align 8 %add.ptr4.1.sum = add i64 %ivar, 24 - %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum - %2 = bitcast i8* %add.ptr4.1.1 to double* - %tmp5 = load double, double* %2, align 8 + %add.ptr4.1.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr4.1.sum + %tmp5 = load double, ptr %add.ptr4.1.1, align 8 %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0 %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1 %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll index 369b94be94c514..93eaf3618cbb23 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll @@ -12,19 +12,19 @@ define void @testDouble(double %d) ssp { ; CHECK: fcvtzu w{{[0-9]+}}, d{{[0-9]+}} 
entry: %d.addr = alloca double, align 8 - store double %d, double* %d.addr, align 8 - %0 = load double, double* %d.addr, align 8 - %1 = load double, double* %d.addr, align 8 + store double %d, ptr %d.addr, align 8 + %0 = load double, ptr %d.addr, align 8 + %1 = load double, ptr %d.addr, align 8 %conv = fptoui double %1 to i64 - %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv) - %2 = load double, double* %d.addr, align 8 - %3 = load double, double* %d.addr, align 8 + %call = call i32 (ptr, ...) @printf(ptr @.str, double %0, i64 %conv) + %2 = load double, ptr %d.addr, align 8 + %3 = load double, ptr %d.addr, align 8 %conv1 = fptoui double %3 to i32 - %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1) + %call2 = call i32 (ptr, ...) @printf(ptr @.str1, double %2, i32 %conv1) ret void } -declare i32 @printf(i8*, ...) +declare i32 @printf(ptr, ...) define void @testFloat(float %f) ssp { ; CHECK-LABEL: testFloat: @@ -32,28 +32,28 @@ define void @testFloat(float %f) ssp { ; CHECK: fcvtzu w{{[0-9]+}}, s{{[0-9]+}} entry: %f.addr = alloca float, align 4 - store float %f, float* %f.addr, align 4 - %0 = load float, float* %f.addr, align 4 + store float %f, ptr %f.addr, align 4 + %0 = load float, ptr %f.addr, align 4 %conv = fpext float %0 to double - %1 = load float, float* %f.addr, align 4 + %1 = load float, ptr %f.addr, align 4 %conv1 = fptoui float %1 to i64 - %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1) - %2 = load float, float* %f.addr, align 4 + %call = call i32 (ptr, ...) @printf(ptr @.str2, double %conv, i64 %conv1) + %2 = load float, ptr %f.addr, align 4 %conv2 = fpext float %2 to double - %3 = load float, float* %f.addr, align 4 + %3 = load float, ptr %f.addr, align 4 %conv3 = fptoui float %3 to i32 - %call4 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3) + %call4 = call i32 (ptr, ...) 
@printf(ptr @.str3, double %conv2, i32 %conv3) ret void } -define i32 @main(i32 %argc, i8** %argv) ssp { +define i32 @main(i32 %argc, ptr %argv) ssp { entry: %retval = alloca i32, align 4 %argc.addr = alloca i32, align 4 - %argv.addr = alloca i8**, align 8 - store i32 0, i32* %retval - store i32 %argc, i32* %argc.addr, align 4 - store i8** %argv, i8*** %argv.addr, align 8 + %argv.addr = alloca ptr, align 8 + store i32 0, ptr %retval + store i32 %argc, ptr %argc.addr, align 4 + store ptr %argv, ptr %argv.addr, align 8 call void @testDouble(double 1.159198e+01) call void @testFloat(float 0x40272F1800000000) ret i32 0 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll index 997431bda56042..b87fe926fb32c0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll @@ -1,21 +1,21 @@ ; RUN: llc < %s -mtriple=arm64-apple-ios ; rdar://11849816 -@shlib_path_substitutions = external hidden unnamed_addr global i8**, align 8 +@shlib_path_substitutions = external hidden unnamed_addr global ptr, align 8 -declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone +declare i64 @llvm.objectsize.i64(ptr, i1) nounwind readnone -declare noalias i8* @xmalloc(i64) optsize +declare noalias ptr @xmalloc(i64) optsize -declare i64 @strlen(i8* nocapture) nounwind readonly optsize +declare i64 @strlen(ptr nocapture) nounwind readonly optsize -declare i8* @__strcpy_chk(i8*, i8*, i64) nounwind optsize +declare ptr @__strcpy_chk(ptr, ptr, i64) nounwind optsize -declare i8* @__strcat_chk(i8*, i8*, i64) nounwind optsize +declare ptr @__strcat_chk(ptr, ptr, i64) nounwind optsize -declare noalias i8* @xstrdup(i8*) optsize +declare noalias ptr @xstrdup(ptr) optsize -define i8* @dyld_fix_path(i8* %path) nounwind optsize ssp { +define ptr @dyld_fix_path(ptr %path) nounwind optsize ssp { entry: br i1 undef, label %if.end56, label %for.cond @@ -29,7 +29,7 @@ for.cond10: ; preds = %for.cond br i1 undef, label %if.end56, label %for.body14 for.body14: ; preds = %for.cond10 - %call22 = tail call i64 @strlen(i8* undef) nounwind optsize + %call22 = tail call i64 @strlen(ptr undef) nounwind optsize %sext = shl i64 %call22, 32 %conv30 = ashr exact i64 %sext, 32 %add29 = sub i64 0, %conv30 @@ -37,20 +37,20 @@ for.body14: ; preds = %for.cond10 %add31 = shl i64 %sub, 32 %sext59 = add i64 %add31, 4294967296 %conv33 = ashr exact i64 %sext59, 32 - %call34 = tail call noalias i8* @xmalloc(i64 %conv33) nounwind optsize + %call34 = tail call noalias ptr @xmalloc(i64 %conv33) nounwind optsize br i1 undef, label %cond.false45, label %cond.true43 cond.true43: ; preds = %for.body14 unreachable cond.false45: ; preds = %for.body14 - %add.ptr = getelementptr inbounds i8, i8* %path, i64 %conv30 + %add.ptr = getelementptr inbounds i8, ptr %path, i64 %conv30 unreachable if.end56: ; preds = %for.cond10, %entry - ret i8* null + ret ptr null } -declare i32 @strncmp(i8* nocapture, i8* nocapture, i64) nounwind readonly optsize +declare i32 @strncmp(ptr nocapture, ptr nocapture, i64) nounwind readonly optsize -declare i8* @strcpy(i8*, i8* nocapture) nounwind +declare ptr @strcpy(ptr, ptr nocapture) nounwind diff --git a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll index 4d78b331353032..f28702bdbdd0cb 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll +++ 
b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll @@ -7,7 +7,7 @@ entry: br i1 undef, label %CF, label %CF77 CF: ; preds = %CF, %CF76 - store float %B26, float* undef + store float %B26, ptr undef br i1 undef, label %CF, label %CF77 CF77: ; preds = %CF diff --git a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll index 9b1dec1ac892d9..29f5cf8e72b64a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll @@ -32,6 +32,6 @@ CF83: ; preds = %CF define void @_Z12my_example2bv() nounwind noinline ssp { entry: %0 = fptosi <2 x double> undef to <2 x i32> - store <2 x i32> %0, <2 x i32>* undef, align 8 + store <2 x i32> %0, ptr undef, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll b/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll index c13b65d34a1a10..da121b97b55d55 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll @@ -3,7 +3,7 @@ ;CHECK-LABEL: Shuff: ;CHECK: tbl.8b ;CHECK: ret -define <8 x i8 > @Shuff(<8 x i8> %in, <8 x i8>* %out) nounwind ssp { +define <8 x i8 > @Shuff(<8 x i8> %in, ptr %out) nounwind ssp { %value = shufflevector <8 x i8> %in, <8 x i8> zeroinitializer, <8 x i32> ret <8 x i8> %value } diff --git a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll index 1ace2461e6412a..03393ad6aef5c8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll @@ -4,7 +4,7 @@ ; CHECK-LABEL: @test_i128_align define dso_local i128 @test_i128_align(i32, i128 %arg, i32 %after) { - store i32 %after, i32* @var, align 4 + store i32 %after, ptr @var, align 4 ; CHECK-DAG: str w4, [{{x[0-9]+}}, :lo12:var] ret i128 %arg @@ -14,7 +14,7 @@ define dso_local i128 @test_i128_align(i32, i128 %arg, i32 %after) { ; CHECK-LABEL: @test_i64x2_align define [2 x i64] @test_i64x2_align(i32, [2 x i64] %arg, i32 %after) { - store i32 %after, i32* @var, align 4 + store i32 %after, ptr @var, align 4 ; CHECK-DAG: str w3, [{{x[0-9]+}}, :lo12:var] ret [2 x i64] %arg @@ -35,22 +35,22 @@ define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %shor ; CHECK-DAG: ldrb w[[ext5:[0-9]+]], [sp] %ext_bool = zext i1 %bool to i64 - store volatile i64 %ext_bool, i64* @var64, align 8 + store volatile i64 %ext_bool, ptr @var64, align 8 ; CHECK: str x[[ext5]], [{{x[0-9]+}}, :lo12:var64] %ext_char = zext i8 %char to i64 - store volatile i64 %ext_char, i64* @var64, align 8 + store volatile i64 %ext_char, ptr @var64, align 8 ; CHECK: str x[[ext3]], [{{x[0-9]+}}, :lo12:var64] %ext_short = zext i16 %short to i64 - store volatile i64 %ext_short, i64* @var64, align 8 + store volatile i64 %ext_short, ptr @var64, align 8 ; CHECK: str x[[ext2]], [{{x[0-9]+}}, :lo12:var64] %ext_int = zext i32 %int to i64 - store volatile i64 %ext_int, i64* @var64, align 8 + store volatile i64 %ext_int, ptr @var64, align 8 ; CHECK: str x[[ext1]], [{{x[0-9]+}}, :lo12:var64] - store volatile i64 %long, i64* @var64, align 8 + store volatile i64 %long, ptr @var64, align 8 ; CHECK: str x[[ext4]], [{{x[0-9]+}}, :lo12:var64] ret void @@ -61,22 +61,22 @@ define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %shor define dso_local void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) { %ext_bool = zext i1 %bool to i64 - store volatile i64 %ext_bool, i64* @var64 + store volatile i64 
%ext_bool, ptr @var64 ; CHECK: and [[EXT:x[0-9]+]], x0, #0x1 ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64] %ext_char = sext i8 %char to i64 - store volatile i64 %ext_char, i64* @var64 + store volatile i64 %ext_char, ptr @var64 ; CHECK: sxtb [[EXT:x[0-9]+]], w1 ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64] %ext_short = zext i16 %short to i64 - store volatile i64 %ext_short, i64* @var64 + store volatile i64 %ext_short, ptr @var64 ; CHECK: and [[EXT:x[0-9]+]], x2, #0xffff ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64] %ext_int = zext i32 %int to i64 - store volatile i64 %ext_int, i64* @var64 + store volatile i64 %ext_int, ptr @var64 ; CHECK: mov w[[EXT:[0-9]+]], w3 ; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64] diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll index b2ac6fa3143514..e8c1c124c06bca 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll @@ -4,7 +4,7 @@ ; rdar://13625505 ; Here we have 9 fixed integer arguments the 9th argument in on stack, the ; varargs start right after at 8-byte alignment. -define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp { +define void @fn9(ptr %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp { ; CHECK-LABEL: fn9: ; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #64 @@ -34,31 +34,30 @@ define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, %7 = alloca i32, align 4 %8 = alloca i32, align 4 %9 = alloca i32, align 4 - %args = alloca i8*, align 8 + %args = alloca ptr, align 8 %a10 = alloca i32, align 4 %a11 = alloca i32, align 4 %a12 = alloca i32, align 4 - store i32 %a2, i32* %2, align 4 - store i32 %a3, i32* %3, align 4 - store i32 %a4, i32* %4, align 4 - store i32 %a5, i32* %5, align 4 - store i32 %a6, i32* %6, align 4 - store i32 %a7, i32* %7, align 4 - store i32 %a8, i32* %8, align 4 - store i32 %a9, i32* %9, align 4 - store i32 %a9, i32* %a1 - %10 = bitcast i8** %args to i8* - call void @llvm.va_start(i8* %10) - %11 = va_arg i8** %args, i32 - store i32 %11, i32* %a10, align 4 - %12 = va_arg i8** %args, i32 - store i32 %12, i32* %a11, align 4 - %13 = va_arg i8** %args, i32 - store i32 %13, i32* %a12, align 4 + store i32 %a2, ptr %2, align 4 + store i32 %a3, ptr %3, align 4 + store i32 %a4, ptr %4, align 4 + store i32 %a5, ptr %5, align 4 + store i32 %a6, ptr %6, align 4 + store i32 %a7, ptr %7, align 4 + store i32 %a8, ptr %8, align 4 + store i32 %a9, ptr %9, align 4 + store i32 %a9, ptr %a1 + call void @llvm.va_start(ptr %args) + %10 = va_arg ptr %args, i32 + store i32 %10, ptr %a10, align 4 + %11 = va_arg ptr %args, i32 + store i32 %11, ptr %a11, align 4 + %12 = va_arg ptr %args, i32 + store i32 %12, ptr %a12, align 4 ret void } -declare void @llvm.va_start(i8*) nounwind +declare void @llvm.va_start(ptr) nounwind define i32 @main() nounwind ssp { ; CHECK-LABEL: main: @@ -111,37 +110,37 @@ define i32 @main() nounwind ssp { %a10 = alloca i32, align 4 %a11 = alloca i32, align 4 %a12 = alloca i32, align 4 - store i32 1, i32* %a1, align 4 - store i32 2, i32* %a2, align 4 - store i32 3, i32* %a3, align 4 - store i32 4, i32* %a4, align 4 - store i32 5, i32* %a5, align 4 - store i32 6, i32* %a6, align 4 - store i32 7, i32* %a7, align 4 - store i32 8, i32* %a8, align 4 - store i32 9, i32* %a9, align 4 - store i32 10, i32* %a10, align 4 - store i32 11, i32* %a11, align 4 - store i32 12, 
i32* %a12, align 4 - %1 = load i32, i32* %a1, align 4 - %2 = load i32, i32* %a2, align 4 - %3 = load i32, i32* %a3, align 4 - %4 = load i32, i32* %a4, align 4 - %5 = load i32, i32* %a5, align 4 - %6 = load i32, i32* %a6, align 4 - %7 = load i32, i32* %a7, align 4 - %8 = load i32, i32* %a8, align 4 - %9 = load i32, i32* %a9, align 4 - %10 = load i32, i32* %a10, align 4 - %11 = load i32, i32* %a11, align 4 - %12 = load i32, i32* %a12, align 4 - call void (i32*, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(i32* %a1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12) + store i32 1, ptr %a1, align 4 + store i32 2, ptr %a2, align 4 + store i32 3, ptr %a3, align 4 + store i32 4, ptr %a4, align 4 + store i32 5, ptr %a5, align 4 + store i32 6, ptr %a6, align 4 + store i32 7, ptr %a7, align 4 + store i32 8, ptr %a8, align 4 + store i32 9, ptr %a9, align 4 + store i32 10, ptr %a10, align 4 + store i32 11, ptr %a11, align 4 + store i32 12, ptr %a12, align 4 + %1 = load i32, ptr %a1, align 4 + %2 = load i32, ptr %a2, align 4 + %3 = load i32, ptr %a3, align 4 + %4 = load i32, ptr %a4, align 4 + %5 = load i32, ptr %a5, align 4 + %6 = load i32, ptr %a6, align 4 + %7 = load i32, ptr %a7, align 4 + %8 = load i32, ptr %a8, align 4 + %9 = load i32, ptr %a9, align 4 + %10 = load i32, ptr %a10, align 4 + %11 = load i32, ptr %a11, align 4 + %12 = load i32, ptr %a12, align 4 + call void (ptr, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(ptr %a1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12) ret i32 0 } ;rdar://13668483 @.str = private unnamed_addr constant [4 x i8] c"fmt\00", align 1 -define void @foo(i8* %fmt, ...) nounwind { +define void @foo(ptr %fmt, ...) nounwind { ; CHECK-LABEL: foo: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: sub sp, sp, #48 @@ -156,17 +155,16 @@ define void @foo(i8* %fmt, ...) nounwind { ; CHECK-NEXT: str q0, [sp], #48 ; CHECK-NEXT: ret entry: - %fmt.addr = alloca i8*, align 8 - %args = alloca i8*, align 8 + %fmt.addr = alloca ptr, align 8 + %args = alloca ptr, align 8 %vc = alloca i32, align 4 %vv = alloca <4 x i32>, align 16 - store i8* %fmt, i8** %fmt.addr, align 8 - %args1 = bitcast i8** %args to i8* - call void @llvm.va_start(i8* %args1) - %0 = va_arg i8** %args, i32 - store i32 %0, i32* %vc, align 4 - %1 = va_arg i8** %args, <4 x i32> - store <4 x i32> %1, <4 x i32>* %vv, align 16 + store ptr %fmt, ptr %fmt.addr, align 8 + call void @llvm.va_start(ptr %args) + %0 = va_arg ptr %args, i32 + store i32 %0, ptr %vc, align 4 + %1 = va_arg ptr %args, <4 x i32> + store <4 x i32> %1, ptr %vv, align 16 ret void } @@ -191,11 +189,11 @@ define void @bar(i32 %x, <4 x i32> %y) nounwind { entry: %x.addr = alloca i32, align 4 %y.addr = alloca <4 x i32>, align 16 - store i32 %x, i32* %x.addr, align 4 - store <4 x i32> %y, <4 x i32>* %y.addr, align 16 - %0 = load i32, i32* %x.addr, align 4 - %1 = load <4 x i32>, <4 x i32>* %y.addr, align 16 - call void (i8*, ...) @foo(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1) + store i32 %x, ptr %x.addr, align 4 + store <4 x i32> %y, ptr %y.addr, align 16 + %0 = load i32, ptr %x.addr, align 4 + %1 = load <4 x i32>, ptr %y.addr, align 16 + call void (ptr, ...) @foo(ptr @.str, i32 %0, <4 x i32> %1) ret void } @@ -203,7 +201,7 @@ entry: ; When passing 16-byte aligned small structs as vararg, make sure the caller ; side is 16-byte aligned on stack. %struct.s41 = type { i32, i16, i32, i16 } -define void @foo2(i8* %fmt, ...) 
nounwind { +define void @foo2(ptr %fmt, ...) nounwind { ; CHECK-LABEL: foo2: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: sub sp, sp, #48 @@ -218,29 +216,25 @@ define void @foo2(i8* %fmt, ...) nounwind { ; CHECK-NEXT: str q0, [sp], #48 ; CHECK-NEXT: ret entry: - %fmt.addr = alloca i8*, align 8 - %args = alloca i8*, align 8 + %fmt.addr = alloca ptr, align 8 + %args = alloca ptr, align 8 %vc = alloca i32, align 4 %vs = alloca %struct.s41, align 16 - store i8* %fmt, i8** %fmt.addr, align 8 - %args1 = bitcast i8** %args to i8* - call void @llvm.va_start(i8* %args1) - %0 = va_arg i8** %args, i32 - store i32 %0, i32* %vc, align 4 - %ap.cur = load i8*, i8** %args - %1 = getelementptr i8, i8* %ap.cur, i32 15 - %2 = ptrtoint i8* %1 to i64 + store ptr %fmt, ptr %fmt.addr, align 8 + call void @llvm.va_start(ptr %args) + %0 = va_arg ptr %args, i32 + store i32 %0, ptr %vc, align 4 + %ap.cur = load ptr, ptr %args + %1 = getelementptr i8, ptr %ap.cur, i32 15 + %2 = ptrtoint ptr %1 to i64 %3 = and i64 %2, -16 - %ap.align = inttoptr i64 %3 to i8* - %ap.next = getelementptr i8, i8* %ap.align, i32 16 - store i8* %ap.next, i8** %args - %4 = bitcast i8* %ap.align to %struct.s41* - %5 = bitcast %struct.s41* %vs to i8* - %6 = bitcast %struct.s41* %4 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %5, i8* align 16 %6, i64 16, i1 false) + %ap.align = inttoptr i64 %3 to ptr + %ap.next = getelementptr i8, ptr %ap.align, i32 16 + store ptr %ap.next, ptr %args + call void @llvm.memcpy.p0.p0.i64(ptr align 16 %vs, ptr align 16 %ap.align, i64 16, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind +declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind define void @bar2(i32 %x, i128 %s41.coerce) nounwind { ; CHECK-LABEL: bar2: @@ -264,12 +258,10 @@ define void @bar2(i32 %x, i128 %s41.coerce) nounwind { entry: %x.addr = alloca i32, align 4 %s41 = alloca %struct.s41, align 16 - store i32 %x, i32* %x.addr, align 4 - %0 = bitcast %struct.s41* %s41 to i128* - store i128 %s41.coerce, i128* %0, align 1 - %1 = load i32, i32* %x.addr, align 4 - %2 = bitcast %struct.s41* %s41 to i128* - %3 = load i128, i128* %2, align 1 - call void (i8*, ...) @foo2(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3) + store i32 %x, ptr %x.addr, align 4 + store i128 %s41.coerce, ptr %s41, align 1 + %0 = load i32, ptr %x.addr, align 4 + %1 = load i128, ptr %s41, align 1 + call void (ptr, ...) @foo2(ptr @.str, i32 %0, i128 %1) ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-abi.ll b/llvm/test/CodeGen/AArch64/arm64-abi.ll index ba17810e32a6cf..4168fdfda0954a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi.ll @@ -75,7 +75,7 @@ declare double @ext([2 x float]) ; rdar://12656141 ; 16-byte vector should be aligned at 16-byte when passing on stack. ; A double argument will be passed on stack, so vecotr should be at sp+16. 
-define double @fixed_4i(<4 x i32>* nocapture %in) nounwind { +define double @fixed_4i(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: fixed_4i ; CHECK: str [[REG_1:q[0-9]+]], [sp, #16] @@ -83,7 +83,7 @@ entry: ; FAST: sub sp, sp ; FAST: mov x[[ADDR:[0-9]+]], sp ; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16] - %0 = load <4 x i32>, <4 x i32>* %in, align 16 + %0 = load <4 x i32>, ptr %in, align 16 %call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3) ret double %call } @@ -104,7 +104,7 @@ entry: %conv1 = fpext float %add to double %add2 = fadd double %conv1, %d7 %add3 = fadd double %add2, %d8 - store double %add3, double* @g_d, align 8 + store double %add3, ptr @g_d, align 8 ret void } @@ -123,13 +123,13 @@ entry: %add3 = fadd double %conv2, %conv1 %conv4 = sitofp i32 %i9 to double %add5 = fadd double %conv4, %add3 - store double %add5, double* @g_d, align 8 + store double %add5, ptr @g_d, align 8 ret void } ; rdar://12648441 ; Check alignment on stack for v64, f64, i64, f32, i32. -define double @test3(<2 x i32>* nocapture %in) nounwind { +define double @test3(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: test3 ; CHECK: str [[REG_1:d[0-9]+]], [sp, #8] @@ -137,7 +137,7 @@ entry: ; FAST: sub sp, sp, #{{[0-9]+}} ; FAST: mov x[[ADDR:[0-9]+]], sp ; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8] - %0 = load <2 x i32>, <2 x i32>* %in, align 8 + %0 = load <2 x i32>, ptr %in, align 8 %call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3) @@ -146,13 +146,13 @@ entry: declare double @args_vec_2i(double, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, <2 x i32>, i8 signext) -define double @test4(double* nocapture %in) nounwind { +define double @test4(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: test4 ; CHECK: str [[REG_1:d[0-9]+]], [sp, #8] ; CHECK: str [[REG_2:w[0-9]+]], [sp] ; CHECK: mov w0, #3 - %0 = load double, double* %in, align 8 + %0 = load double, ptr %in, align 8 %call = tail call double @args_f64(double 3.000000e+00, double %0, double %0, double %0, double %0, double %0, double %0, double %0, float 3.000000e+00, double %0, i8 signext 3) @@ -161,13 +161,13 @@ entry: declare double @args_f64(double, double, double, double, double, double, double, double, float, double, i8 signext) -define i64 @test5(i64* nocapture %in) nounwind { +define i64 @test5(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: test5 ; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16] ; CHECK: str [[REG_1:x[0-9]+]], [sp, #8] ; CHECK: str [[REG_2:w[0-9]+]], [sp] - %0 = load i64, i64* %in, align 8 + %0 = load i64, ptr %in, align 8 %call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3) ret i64 %call @@ -175,13 +175,13 @@ entry: declare i64 @args_i64(i64, i64, i64, i64, i64, i64, i64, i64, i32, i64, i8 signext) -define i32 @test6(float* nocapture %in) nounwind { +define i32 @test6(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: test6 ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8] ; CHECK: str [[REG_1:s[0-9]+]], [sp, #4] ; CHECK: strh [[REG_3:w[0-9]+]], [sp] - %0 = load float, float* %in, align 4 + %0 = load float, ptr %in, align 4 %call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, 
float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, i16 signext 3, float %0, @@ -192,13 +192,13 @@ declare i32 @args_f32(i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, i16 signext, float, i8 signext) -define i32 @test7(i32* nocapture %in) nounwind { +define i32 @test7(ptr nocapture %in) nounwind { entry: ; CHECK-LABEL: test7 ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8] ; CHECK: str [[REG_1:w[0-9]+]], [sp, #4] ; CHECK: strh [[REG_3:w[0-9]+]], [sp] - %0 = load i32, i32* %in, align 4 + %0 = load i32, ptr %in, align 4 %call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4) ret i32 %call @@ -206,7 +206,7 @@ entry: declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32, i8 signext) -define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind { +define i32 @test8(i32 %argc, ptr nocapture %argv) nounwind { entry: ; CHECK-LABEL: test8 ; CHECK: str w8, [sp] diff --git a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll index 66bd2ef5ef3949..089e171e5a4a79 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll @@ -58,8 +58,8 @@ entry: ; CHECK-LABEL: caller38 ; CHECK: ldr x1, ; CHECK: ldr x2, - %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 - %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 + %0 = load i64, ptr @g38, align 4 + %1 = load i64, ptr @g38_2, align 4 %call = tail call i32 @f38(i32 3, i64 %0, i64 %1) #5 ret i32 %call } @@ -75,8 +75,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] ; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] - %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 - %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 + %0 = load i64, ptr @g38, align 4 + %1 = load i64, ptr @g38_2, align 4 %call = tail call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %0, i64 %1) #5 ret i32 %call @@ -111,8 +111,8 @@ entry: ; CHECK-LABEL: caller39 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 - %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 + %0 = load i128, ptr @g39, align 16 + %1 = load i128, ptr @g39_2, align 16 %call = tail call i32 @f39(i32 3, i128 %0, i128 %1) #5 ret i32 %call } @@ -129,8 +129,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] ; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] - %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 - %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 + %0 = load i128, ptr @g39, align 16 + %1 = load i128, ptr @g39_2, align 16 %call = tail call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %0, i128 %1) #5 ret i32 %call @@ -167,8 +167,8 @@ entry: ; CHECK-LABEL: caller40 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 - %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 + %0 = load [2 x i64], ptr @g40, align 4 + %1 = load [2 x i64], ptr @g40_2, align 4 %call = tail call i32 @f40(i32 3, [2 x i64] %0, [2 x i64] %1) #5 ret i32 %call } @@ -185,8 +185,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] ; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] - %0 
= load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 - %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 + %0 = load [2 x i64], ptr @g40, align 4 + %1 = load [2 x i64], ptr @g40_2, align 4 %call = tail call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %0, [2 x i64] %1) #5 ret i32 %call @@ -221,8 +221,8 @@ entry: ; CHECK-LABEL: caller41 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 - %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 + %0 = load i128, ptr @g41, align 16 + %1 = load i128, ptr @g41_2, align 16 %call = tail call i32 @f41(i32 3, i128 %0, i128 %1) #5 ret i32 %call } @@ -239,15 +239,15 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] ; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] - %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 - %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 + %0 = load i128, ptr @g41, align 16 + %1 = load i128, ptr @g41_2, align 16 %call = tail call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %0, i128 %1) #5 ret i32 %call } ; structs with size of 22 bytes, passed indirectly in x1 and x2 -define i32 @f42(i32 %i, %struct.s42* nocapture %s1, %struct.s42* nocapture %s2) #2 { +define i32 @f42(i32 %i, ptr nocapture %s1, ptr nocapture %s2) #2 { entry: ; CHECK-LABEL: f42 ; CHECK: ldr w[[A:[0-9]+]], [x1] @@ -259,15 +259,13 @@ entry: ; FAST: ldr w[[B:[0-9]+]], [x2] ; FAST: add w[[C:[0-9]+]], w[[A]], w0 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]] - %i1 = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 0 - %0 = load i32, i32* %i1, align 4, !tbaa !0 - %i2 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 0 - %1 = load i32, i32* %i2, align 4, !tbaa !0 - %s = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 1 - %2 = load i16, i16* %s, align 2, !tbaa !3 + %0 = load i32, ptr %s1, align 4, !tbaa !0 + %1 = load i32, ptr %s2, align 4, !tbaa !0 + %s = getelementptr inbounds %struct.s42, ptr %s1, i64 0, i32 1 + %2 = load i16, ptr %s, align 2, !tbaa !3 %conv = sext i16 %2 to i32 - %s5 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 1 - %3 = load i16, i16* %s5, align 2, !tbaa !3 + %s5 = getelementptr inbounds %struct.s42, ptr %s2, i64 0, i32 1 + %3 = load i16, ptr %s5, align 2, !tbaa !3 %conv6 = sext i16 %3 to i32 %add = add i32 %0, %i %add3 = add i32 %add, %1 @@ -300,19 +298,17 @@ entry: ; FAST: bl _memcpy %tmp = alloca %struct.s42, align 4 %tmp1 = alloca %struct.s42, align 4 - %0 = bitcast %struct.s42* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4 - %1 = bitcast %struct.s42* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4 - %call = call i32 @f42(i32 3, %struct.s42* %tmp, %struct.s42* %tmp1) #5 + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 @g42, i64 24, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp1, ptr align 4 @g42_2, i64 24, i1 false), !tbaa.struct !4 + %call = call i32 @f42(i32 3, ptr %tmp, ptr %tmp1) #5 ret i32 %call } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #4 +declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, 
i64, i1) #4 declare i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, - i32 %i7, i32 %i8, i32 %i9, %struct.s42* nocapture %s1, - %struct.s42* nocapture %s2) #2 + i32 %i7, i32 %i8, i32 %i9, ptr nocapture %s1, + ptr nocapture %s2) #2 define i32 @caller42_stack() #3 { entry: @@ -349,18 +345,16 @@ entry: ; FAST: str {{x[0-9]+}}, [sp, #16] %tmp = alloca %struct.s42, align 4 %tmp1 = alloca %struct.s42, align 4 - %0 = bitcast %struct.s42* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4 - %1 = bitcast %struct.s42* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 @g42, i64 24, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp1, ptr align 4 @g42_2, i64 24, i1 false), !tbaa.struct !4 %call = call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, - i32 8, i32 9, %struct.s42* %tmp, %struct.s42* %tmp1) #5 + i32 8, i32 9, ptr %tmp, ptr %tmp1) #5 ret i32 %call } ; structs with size of 22 bytes, alignment of 16 ; passed indirectly in x1 and x2 -define i32 @f43(i32 %i, %struct.s43* nocapture %s1, %struct.s43* nocapture %s2) #2 { +define i32 @f43(i32 %i, ptr nocapture %s1, ptr nocapture %s2) #2 { entry: ; CHECK-LABEL: f43 ; CHECK: ldr w[[A:[0-9]+]], [x1] @@ -372,15 +366,13 @@ entry: ; FAST: ldr w[[B:[0-9]+]], [x2] ; FAST: add w[[C:[0-9]+]], w[[A]], w0 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]] - %i1 = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 0 - %0 = load i32, i32* %i1, align 4, !tbaa !0 - %i2 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 0 - %1 = load i32, i32* %i2, align 4, !tbaa !0 - %s = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 1 - %2 = load i16, i16* %s, align 2, !tbaa !3 + %0 = load i32, ptr %s1, align 4, !tbaa !0 + %1 = load i32, ptr %s2, align 4, !tbaa !0 + %s = getelementptr inbounds %struct.s43, ptr %s1, i64 0, i32 1 + %2 = load i16, ptr %s, align 2, !tbaa !3 %conv = sext i16 %2 to i32 - %s5 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 1 - %3 = load i16, i16* %s5, align 2, !tbaa !3 + %s5 = getelementptr inbounds %struct.s43, ptr %s2, i64 0, i32 1 + %3 = load i16, ptr %s5, align 2, !tbaa !3 %conv6 = sext i16 %3 to i32 %add = add i32 %0, %i %add3 = add i32 %add, %1 @@ -415,17 +407,15 @@ entry: ; FAST: mov x2, sp %tmp = alloca %struct.s43, align 16 %tmp1 = alloca %struct.s43, align 16 - %0 = bitcast %struct.s43* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4 - %1 = bitcast %struct.s43* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4 - %call = call i32 @f43(i32 3, %struct.s43* %tmp, %struct.s43* %tmp1) #5 + call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp, ptr align 16 @g43, i64 32, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp1, ptr align 16 @g43_2, i64 32, i1 false), !tbaa.struct !4 + %call = call i32 @f43(i32 3, ptr %tmp, ptr %tmp1) #5 ret i32 %call } declare i32 @f43_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, - i32 %i7, i32 %i8, i32 %i9, %struct.s43* nocapture %s1, - %struct.s43* nocapture %s2) #2 + i32 %i7, i32 %i8, i32 %i9, 
ptr nocapture %s1, + ptr nocapture %s2) #2 define i32 @caller43_stack() #3 { entry: @@ -464,12 +454,10 @@ entry: ; FAST: str x[[B]], [sp, #16] %tmp = alloca %struct.s43, align 16 %tmp1 = alloca %struct.s43, align 16 - %0 = bitcast %struct.s43* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4 - %1 = bitcast %struct.s43* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp, ptr align 16 @g43, i64 32, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp1, ptr align 16 @g43_2, i64 32, i1 false), !tbaa.struct !4 %call = call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, - i32 8, i32 9, %struct.s43* %tmp, %struct.s43* %tmp1) #5 + i32 8, i32 9, ptr %tmp, ptr %tmp1) #5 ret i32 %call } @@ -492,7 +480,7 @@ entry: ; Load/Store opt is disabled with -O0, so the i128 is split. ; FAST: str {{x[0-9]+}}, [x[[ADDR]], #8] ; FAST: str {{x[0-9]+}}, [x[[ADDR]]] - %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 + %0 = load i128, ptr @g41, align 16 %call = tail call i32 @callee_i128_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 %0, i32 8) #5 ret i32 %call @@ -513,7 +501,7 @@ entry: ; FAST: mov x[[R0:[0-9]+]], sp ; FAST: mov w[[R1:[0-9]+]], #8 ; FAST: str w[[R1]], [x[[R0]]] - %0 = load i64, i64* bitcast (%struct.s41* @g41 to i64*), align 16 + %0 = load i64, ptr @g41, align 16 %call = tail call i32 @callee_i64(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i64 %0, i32 8) #5 ret i32 %call diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll index da1f366757a830..d593272be1aa23 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll @@ -1,7 +1,7 @@ ; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-enable-gep-opt=false %s -o - | FileCheck %s ; -@block = common global i8* null, align 8 +@block = common global ptr null, align 8 define i32 @fct(i32 %i1, i32 %i2) { ; CHECK: @fct @@ -11,12 +11,12 @@ define i32 @fct(i32 %i1, i32 %i2) { ; _CHECK-NOT: , sxtw] entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8*, i8** @block, align 8 - %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8, i8* %arrayidx, align 1 + %0 = load ptr, ptr @block, align 8 + %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom + %1 = load i8, ptr %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 - %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8, i8* %arrayidx2, align 1 + %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1 + %2 = load i8, ptr %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %if.end, label %if.then @@ -29,11 +29,11 @@ if.end: ; preds = %entry %inc = add nsw i32 %i1, 1 %inc9 = add nsw i32 %i2, 1 %idxprom10 = sext i32 %inc to i64 - %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10 - %3 = load i8, i8* %arrayidx11, align 1 + %arrayidx11 = getelementptr inbounds i8, ptr %0, i64 %idxprom10 + %3 = load i8, ptr %arrayidx11, align 1 %idxprom12 = sext i32 %inc9 to i64 - %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12 - %4 = load i8, i8* %arrayidx13, align 1 + %arrayidx13 = getelementptr inbounds i8, ptr %0, i64 %idxprom12 + %4 = load i8, ptr %arrayidx13, align 1 
%cmp16 = icmp eq i8 %3, %4 br i1 %cmp16, label %if.end23, label %if.then18 @@ -46,11 +46,11 @@ if.end23: ; preds = %if.end %inc24 = add nsw i32 %i1, 2 %inc25 = add nsw i32 %i2, 2 %idxprom26 = sext i32 %inc24 to i64 - %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26 - %5 = load i8, i8* %arrayidx27, align 1 + %arrayidx27 = getelementptr inbounds i8, ptr %0, i64 %idxprom26 + %5 = load i8, ptr %arrayidx27, align 1 %idxprom28 = sext i32 %inc25 to i64 - %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28 - %6 = load i8, i8* %arrayidx29, align 1 + %arrayidx29 = getelementptr inbounds i8, ptr %0, i64 %idxprom28 + %6 = load i8, ptr %arrayidx29, align 1 %cmp32 = icmp eq i8 %5, %6 br i1 %cmp32, label %return, label %if.then34 @@ -71,12 +71,12 @@ define i32 @fct1(i32 %i1, i32 %i2) optsize { ; CHECK: , sxtw] entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8*, i8** @block, align 8 - %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8, i8* %arrayidx, align 1 + %0 = load ptr, ptr @block, align 8 + %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom + %1 = load i8, ptr %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 - %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8, i8* %arrayidx2, align 1 + %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1 + %2 = load i8, ptr %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %if.end, label %if.then @@ -89,11 +89,11 @@ if.end: ; preds = %entry %inc = add nsw i32 %i1, 1 %inc9 = add nsw i32 %i2, 1 %idxprom10 = sext i32 %inc to i64 - %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10 - %3 = load i8, i8* %arrayidx11, align 1 + %arrayidx11 = getelementptr inbounds i8, ptr %0, i64 %idxprom10 + %3 = load i8, ptr %arrayidx11, align 1 %idxprom12 = sext i32 %inc9 to i64 - %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12 - %4 = load i8, i8* %arrayidx13, align 1 + %arrayidx13 = getelementptr inbounds i8, ptr %0, i64 %idxprom12 + %4 = load i8, ptr %arrayidx13, align 1 %cmp16 = icmp eq i8 %3, %4 br i1 %cmp16, label %if.end23, label %if.then18 @@ -106,11 +106,11 @@ if.end23: ; preds = %if.end %inc24 = add nsw i32 %i1, 2 %inc25 = add nsw i32 %i2, 2 %idxprom26 = sext i32 %inc24 to i64 - %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26 - %5 = load i8, i8* %arrayidx27, align 1 + %arrayidx27 = getelementptr inbounds i8, ptr %0, i64 %idxprom26 + %5 = load i8, ptr %arrayidx27, align 1 %idxprom28 = sext i32 %inc25 to i64 - %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28 - %6 = load i8, i8* %arrayidx29, align 1 + %arrayidx29 = getelementptr inbounds i8, ptr %0, i64 %idxprom28 + %6 = load i8, ptr %arrayidx29, align 1 %cmp32 = icmp eq i8 %5, %6 br i1 %cmp32, label %return, label %if.then34 @@ -126,7 +126,7 @@ return: ; preds = %if.end23, %if.then3 ; CHECK: @test ; CHECK-NOT: , uxtw #2] -define i32 @test(i32* %array, i8 zeroext %c, i32 %arg) { +define i32 @test(ptr %array, i8 zeroext %c, i32 %arg) { entry: %conv = zext i8 %c to i32 %add = sub i32 0, %arg @@ -135,9 +135,9 @@ entry: if.then: ; preds = %entry %idxprom = zext i8 %c to i64 - %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom - %0 = load volatile i32, i32* %arrayidx, align 4 - %1 = load volatile i32, i32* %arrayidx, align 4 + %arrayidx = getelementptr inbounds i32, ptr %array, i64 %idxprom + %0 = load volatile i32, ptr %arrayidx, align 4 + %1 = load volatile i32, ptr %arrayidx, align 4 %add3 = add nsw i32 %1, %0 br label %if.end @@ -150,7 +150,7 @@ 
if.end: ; preds = %entry, %if.then ; CHECK: @test2 ; CHECK: , uxtw #2] ; CHECK: , uxtw #2] -define i32 @test2(i32* %array, i8 zeroext %c, i32 %arg) optsize { +define i32 @test2(ptr %array, i8 zeroext %c, i32 %arg) optsize { entry: %conv = zext i8 %c to i32 %add = sub i32 0, %arg @@ -159,9 +159,9 @@ entry: if.then: ; preds = %entry %idxprom = zext i8 %c to i64 - %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom - %0 = load volatile i32, i32* %arrayidx, align 4 - %1 = load volatile i32, i32* %arrayidx, align 4 + %arrayidx = getelementptr inbounds i32, ptr %array, i64 %idxprom + %0 = load volatile i32, ptr %arrayidx, align 4 + %1 = load volatile i32, ptr %arrayidx, align 4 %add3 = add nsw i32 %1, %0 br label %if.end diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll index 09fd578f8ccc13..3163ca0fb891b8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll @@ -5,7 +5,7 @@ ; way of the NEXT patterns. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" -@block = common global i8* null, align 8 +@block = common global ptr null, align 8 define zeroext i8 @fullGtU(i32 %i1, i32 %i2) { ; CHECK-LABEL: fullGtU: @@ -40,12 +40,12 @@ define zeroext i8 @fullGtU(i32 %i1, i32 %i2) { ; CHECK-NEXT: ret entry: %idxprom = sext i32 %i1 to i64 - %tmp = load i8*, i8** @block, align 8 - %arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %idxprom - %tmp1 = load i8, i8* %arrayidx, align 1 + %tmp = load ptr, ptr @block, align 8 + %arrayidx = getelementptr inbounds i8, ptr %tmp, i64 %idxprom + %tmp1 = load i8, ptr %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 - %arrayidx2 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom1 - %tmp2 = load i8, i8* %arrayidx2, align 1 + %arrayidx2 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom1 + %tmp2 = load i8, ptr %arrayidx2, align 1 %cmp = icmp eq i8 %tmp1, %tmp2 br i1 %cmp, label %if.end, label %if.then @@ -58,11 +58,11 @@ if.end: ; preds = %entry %inc = add nsw i32 %i1, 1 %inc10 = add nsw i32 %i2, 1 %idxprom11 = sext i32 %inc to i64 - %arrayidx12 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom11 - %tmp3 = load i8, i8* %arrayidx12, align 1 + %arrayidx12 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom11 + %tmp3 = load i8, ptr %arrayidx12, align 1 %idxprom13 = sext i32 %inc10 to i64 - %arrayidx14 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom13 - %tmp4 = load i8, i8* %arrayidx14, align 1 + %arrayidx14 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom13 + %tmp4 = load i8, ptr %arrayidx14, align 1 %cmp17 = icmp eq i8 %tmp3, %tmp4 br i1 %cmp17, label %if.end25, label %if.then19 @@ -75,11 +75,11 @@ if.end25: ; preds = %if.end %inc26 = add nsw i32 %i1, 2 %inc27 = add nsw i32 %i2, 2 %idxprom28 = sext i32 %inc26 to i64 - %arrayidx29 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom28 - %tmp5 = load i8, i8* %arrayidx29, align 1 + %arrayidx29 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom28 + %tmp5 = load i8, ptr %arrayidx29, align 1 %idxprom30 = sext i32 %inc27 to i64 - %arrayidx31 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom30 - %tmp6 = load i8, i8* %arrayidx31, align 1 + %arrayidx31 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom30 + %tmp6 = load i8, ptr %arrayidx31, align 1 %cmp34 = icmp eq i8 %tmp5, %tmp6 br i1 %cmp34, label %return, label %if.then36 diff --git 
a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll index 447095284cef7b..cc9b47c049d562 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll @@ -5,48 +5,48 @@ @object = external hidden global i64, section "__DATA, __objc_ivar", align 8 ; base + offset (imm9) -define void @t1(i64* %object) { +define void @t1(ptr %object) { ; CHECK-LABEL: t1: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr xzr, [x0, #8] ; CHECK-NEXT: ret - %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 1 - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 1 + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } ; base + offset (> imm9) -define void @t2(i64* %object) { +define void @t2(ptr %object) { ; CHECK-LABEL: t2: ; CHECK: // %bb.0: ; CHECK-NEXT: sub x8, x0, #264 ; CHECK-NEXT: ldr xzr, [x8] ; CHECK-NEXT: ret - %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 -33 - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 -33 + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } ; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes) -define void @t3(i64* %object) { +define void @t3(ptr %object) { ; CHECK-LABEL: t3: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr xzr, [x0, #32760] ; CHECK-NEXT: ret - %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4095 - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4095 + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } ; base + unsigned offset (> imm12 * size of type in bytes) -define void @t4(i64* %object) { +define void @t4(ptr %object) { ; CHECK-LABEL: t4: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32768 ; CHECK-NEXT: ldr xzr, [x0, x8] ; CHECK-NEXT: ret - %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4096 - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4096 + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } @@ -58,22 +58,22 @@ define void @t5(i64 %a) { ; CHECK-NEXT: add x8, x8, :lo12:object ; CHECK-NEXT: ldr xzr, [x8, x0, lsl #3] ; CHECK-NEXT: ret - %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %incdec.ptr = getelementptr inbounds i64, ptr @object, i64 %a + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } ; base + reg + imm -define void @t6(i64 %a, i64* %object) { +define void @t6(i64 %a, ptr %object) { ; CHECK-LABEL: t6: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32768 ; CHECK-NEXT: add x9, x1, x0, lsl #3 ; CHECK-NEXT: ldr xzr, [x9, x8] ; CHECK-NEXT: ret - %tmp1 = getelementptr inbounds i64, i64* %object, i64 %a - %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096 - %tmp = load volatile i64, i64* %incdec.ptr, align 8 + %tmp1 = getelementptr inbounds i64, ptr %object, i64 %a + %incdec.ptr = getelementptr inbounds i64, ptr %tmp1, i64 4096 + %tmp = load volatile i64, ptr %incdec.ptr, align 8 ret void } @@ -85,8 +85,8 @@ define void @t7(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x8] ; CHECK-NEXT: ret %1 = add i64 %a, 65535 ;0xffff - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -97,8 +97,8 @@ define void @t8(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x8] ; 
CHECK-NEXT: ret %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -109,8 +109,8 @@ define void @t9(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x8] ; CHECK-NEXT: ret %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -121,8 +121,8 @@ define void @t10(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x8] ; CHECK-NEXT: ret %1 = add i64 %a, 81909218222800896 ;0x123000000000000 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -134,8 +134,8 @@ define void @t11(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x8] ; CHECK-NEXT: ret %1 = add i64 %a, 19088743 ;0x1234567 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -147,8 +147,8 @@ define void @t12(i64 %a) { ; CHECK-NEXT: ldr xzr, [x8] ; CHECK-NEXT: ret %1 = add i64 %a, 4095 ;0xfff - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -159,8 +159,8 @@ define void @t13(i64 %a) { ; CHECK-NEXT: ldr xzr, [x8] ; CHECK-NEXT: ret %1 = add i64 %a, -4095 ;-0xfff - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -171,8 +171,8 @@ define void @t14(i64 %a) { ; CHECK-NEXT: ldr xzr, [x8] ; CHECK-NEXT: ret %1 = add i64 %a, 1191936 ;0x123000 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -183,8 +183,8 @@ define void @t15(i64 %a) { ; CHECK-NEXT: ldr xzr, [x8] ; CHECK-NEXT: ret %1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -194,8 +194,8 @@ define void @t16(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, #28672] ; CHECK-NEXT: ret %1 = add i64 %a, 28672 ;0x7000 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } @@ -205,7 +205,7 @@ define void @t17(i64 %a) { ; CHECK-NEXT: ldur xzr, [x0, #-256] ; CHECK-NEXT: ret %1 = add i64 %a, -256 ;-0x100 - %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64, i64* %2, align 8 + %2 = inttoptr i64 %1 to ptr + %3 = load volatile i64, ptr %2, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll index 83ac21c165f972..0a91ada527d828 100644 --- a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll +++ b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll @@ -5,7 +5,7 @@ ; aligned. 
@T3_retval = common global <16 x float> zeroinitializer, align 16 -define void @test(<16 x float>* noalias sret(<16 x float>) %agg.result) nounwind ssp { +define void @test(ptr noalias sret(<16 x float>) %agg.result) nounwind ssp { entry: ; CHECK: test ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp, #32] @@ -13,9 +13,9 @@ entry: ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE:x[0-9]+]], #32] ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE]]] %retval = alloca <16 x float>, align 16 - %0 = load <16 x float>, <16 x float>* @T3_retval, align 16 - store <16 x float> %0, <16 x float>* %retval - %1 = load <16 x float>, <16 x float>* %retval - store <16 x float> %1, <16 x float>* %agg.result, align 16 + %0 = load <16 x float>, ptr @T3_retval, align 16 + store <16 x float> %0, ptr %retval + %1 = load <16 x float>, ptr %retval + store <16 x float> %1, ptr %agg.result, align 16 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll index 0267acdda658b7..58446f39c4a33b 100644 --- a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll +++ b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll @@ -10,18 +10,17 @@ define i32 @foo(i32 %a) nounwind { %i = alloca i32, align 4 %arr2 = alloca [32 x i32], align 4 %j = alloca i32, align 4 - store i32 %a, i32* %a.addr, align 4 - %tmp = load i32, i32* %a.addr, align 4 + store i32 %a, ptr %a.addr, align 4 + %tmp = load i32, ptr %a.addr, align 4 %tmp1 = zext i32 %tmp to i64 %v = mul i64 4, %tmp1 %vla = alloca i8, i64 %v, align 4 - %tmp2 = bitcast i8* %vla to i32* - %tmp3 = load i32, i32* %a.addr, align 4 - store i32 %tmp3, i32* %i, align 4 - %tmp4 = load i32, i32* %a.addr, align 4 - store i32 %tmp4, i32* %j, align 4 - %tmp5 = load i32, i32* %j, align 4 - store i32 %tmp5, i32* %retval - %x = load i32, i32* %retval + %tmp3 = load i32, ptr %a.addr, align 4 + store i32 %tmp3, ptr %i, align 4 + %tmp4 = load i32, ptr %a.addr, align 4 + store i32 %tmp4, ptr %j, align 4 + %tmp5 = load i32, ptr %j, align 4 + store i32 %tmp5, ptr %retval + %x = load i32, ptr %retval ret i32 %x } diff --git a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll index f528c9cfabf4c5..4bc3ba0f0a9664 100644 --- a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll +++ b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll @@ -2,10 +2,10 @@ ; ModuleID = 'and-cbz-extr-mr.bc' target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" -define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 { +define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, ptr %str1, ptr %str2, ptr %str3, ptr %str4, ptr %str5, ptr %str6, ptr %str7, ptr %str8, ptr %str9, ptr %str10, ptr %str11, ptr %str12, ptr %str13, i32 %int1, ptr %str14) unnamed_addr #0 align 2 { ; CHECK: _foo: entry: - %tobool = icmp eq i8* %str14, null + %tobool = icmp eq ptr %str14, null br i1 %tobool, label %return, label %if.end ; CHECK: %if.end @@ -16,19 +16,19 @@ if.end: ; preds = %entry br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i, !prof !1 land.rhs.i: ; preds = %if.end - %cmp.i.i.i = icmp eq i8* %str12, %str13 + %cmp.i.i.i = icmp eq ptr %str12, %str13 br i1 %cmp.i.i.i, label %if.then3, label 
%lor.rhs.i.i.i lor.rhs.i.i.i: ; preds = %land.rhs.i - %cmp.i13.i.i.i = icmp eq i8* %str10, %str11 + %cmp.i13.i.i.i = icmp eq ptr %str10, %str11 br i1 %cmp.i13.i.i.i, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, label %if.end5 _ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit: ; preds = %lor.rhs.i.i.i - %cmp.i.i.i.i = icmp eq i8* %str8, %str9 + %cmp.i.i.i.i = icmp eq ptr %str8, %str9 br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5 if.then3: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i - %tmp11 = load i8, i8* %str14, align 8 + %tmp11 = load i8, ptr %str14, align 8 %tmp12 = and i8 %tmp11, 2 %tmp13 = icmp ne i8 %tmp12, 0 br label %return @@ -39,22 +39,22 @@ if.end5: ; preds = %_ZNK7WebCore4Node10 br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i19, !prof !1 land.rhs.i19: ; preds = %if.end5 - %cmp.i.i.i18 = icmp eq i8* %str6, %str7 + %cmp.i.i.i18 = icmp eq ptr %str6, %str7 br i1 %cmp.i.i.i18, label %if.then7, label %lor.rhs.i.i.i23 lor.rhs.i.i.i23: ; preds = %land.rhs.i19 - %cmp.i13.i.i.i22 = icmp eq i8* %str3, %str4 + %cmp.i13.i.i.i22 = icmp eq ptr %str3, %str4 br i1 %cmp.i13.i.i.i22, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, label %if.end12 _ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28: ; preds = %lor.rhs.i.i.i23 - %cmp.i.i.i.i26 = icmp eq i8* %str1, %str2 + %cmp.i.i.i.i26 = icmp eq ptr %str1, %str2 br i1 %cmp.i.i.i.i26, label %if.then7, label %if.end12 if.then7: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %land.rhs.i19 br i1 %isTextField, label %if.then9, label %if.end12 if.then9: ; preds = %if.then7 - %tmp23 = load i8, i8* %str5, align 8 + %tmp23 = load i8, ptr %str5, align 8 %tmp24 = and i8 %tmp23, 2 %tmp25 = icmp ne i8 %tmp24, 0 br label %return diff --git a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll index 2bf13606ba7d06..9a5069b8973a93 100644 --- a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll +++ b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll @@ -5,7 +5,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" ; Function Attrs: nounwind ssp uwtable define i32 @test1() #0 { %tmp1 = alloca i8 - %tmp2 = icmp eq i8* %tmp1, null + %tmp2 = icmp eq ptr %tmp1, null %tmp3 = zext i1 %tmp2 to i32 ret i32 %tmp3 diff --git a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll index df4a9010dfa9cd..9cbbabed349364 100644 --- a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll @@ -4,7 +4,7 @@ declare i32 @test(i32) local_unnamed_addr declare i32 @test1(i64) local_unnamed_addr -define i32 @assertzext(i32 %n, i1 %a, i32* %b) local_unnamed_addr { +define i32 @assertzext(i32 %n, i1 %a, ptr %b) local_unnamed_addr { ; CHECK-LABEL: assertzext: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x30, x19, [sp, #-16]! 
// 16-byte Folded Spill @@ -34,7 +34,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - store i32 0, i32* %b, align 4 + store i32 0, ptr %b, align 4 br label %if.end if.end: ; preds = %if.then, %entry diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll index c5884aecc706e9..37c61d0a4a0fb6 100644 --- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll +++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll @@ -5,7 +5,7 @@ @var = global i128 0 -define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) { +define i128 @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) { ; NOOUTLINE-LABEL: val_compare_and_swap: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB0_1: // =>This Inner Loop Header: Depth=1 @@ -51,12 +51,12 @@ define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) { ; LSE-NEXT: mov x0, x2 ; LSE-NEXT: mov x1, x3 ; LSE-NEXT: ret - %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire + %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval acquire acquire %val = extractvalue { i128, i1 } %pair, 0 ret i128 %val } -define i128 @val_compare_and_swap_seqcst(i128* %p, i128 %oldval, i128 %newval) { +define i128 @val_compare_and_swap_seqcst(ptr %p, i128 %oldval, i128 %newval) { ; NOOUTLINE-LABEL: val_compare_and_swap_seqcst: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB1_1: // =>This Inner Loop Header: Depth=1 @@ -102,12 +102,12 @@ define i128 @val_compare_and_swap_seqcst(i128* %p, i128 %oldval, i128 %newval) { ; LSE-NEXT: mov x0, x2 ; LSE-NEXT: mov x1, x3 ; LSE-NEXT: ret - %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval seq_cst seq_cst + %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval seq_cst seq_cst %val = extractvalue { i128, i1 } %pair, 0 ret i128 %val } -define i128 @val_compare_and_swap_release(i128* %p, i128 %oldval, i128 %newval) { +define i128 @val_compare_and_swap_release(ptr %p, i128 %oldval, i128 %newval) { ; NOOUTLINE-LABEL: val_compare_and_swap_release: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB2_1: // =>This Inner Loop Header: Depth=1 @@ -153,12 +153,12 @@ define i128 @val_compare_and_swap_release(i128* %p, i128 %oldval, i128 %newval) ; LSE-NEXT: mov x0, x2 ; LSE-NEXT: mov x1, x3 ; LSE-NEXT: ret - %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release monotonic + %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval release monotonic %val = extractvalue { i128, i1 } %pair, 0 ret i128 %val } -define i128 @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval) { +define i128 @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval) { ; NOOUTLINE-LABEL: val_compare_and_swap_monotonic: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB3_1: // =>This Inner Loop Header: Depth=1 @@ -204,12 +204,12 @@ define i128 @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval ; LSE-NEXT: mov x0, x2 ; LSE-NEXT: mov x1, x3 ; LSE-NEXT: ret - %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic monotonic + %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval monotonic monotonic %val = extractvalue { i128, i1 } %pair, 0 ret i128 %val } -define void @fetch_and_nand(i128* %p, i128 %bits) { +define void @fetch_and_nand(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_nand: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB4_1: // %atomicrmw.start @@ -267,12 +267,12 @@ define void @fetch_and_nand(i128* %p, i128 %bits) { ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw nand i128* %p, i128 %bits release - store i128 
%val, i128* @var, align 16 + %val = atomicrmw nand ptr %p, i128 %bits release + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_or(i128* %p, i128 %bits) { +define void @fetch_and_or(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_or: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB5_1: // %atomicrmw.start @@ -324,12 +324,12 @@ define void @fetch_and_or(i128* %p, i128 %bits) { ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw or i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw or ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_add(i128* %p, i128 %bits) { +define void @fetch_and_add(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_add: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB6_1: // %atomicrmw.start @@ -380,12 +380,12 @@ define void @fetch_and_add(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw add i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw add ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_sub(i128* %p, i128 %bits) { +define void @fetch_and_sub(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_sub: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB7_1: // %atomicrmw.start @@ -436,12 +436,12 @@ define void @fetch_and_sub(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw sub i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw sub ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_min(i128* %p, i128 %bits) { +define void @fetch_and_min(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_min: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB8_1: // %atomicrmw.start @@ -498,12 +498,12 @@ define void @fetch_and_min(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw min i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw min ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_max(i128* %p, i128 %bits) { +define void @fetch_and_max(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_max: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB9_1: // %atomicrmw.start @@ -560,12 +560,12 @@ define void @fetch_and_max(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw max i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw max ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_umin(i128* %p, i128 %bits) { +define void @fetch_and_umin(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: fetch_and_umin: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB10_1: // %atomicrmw.start @@ -622,12 +622,12 @@ define void @fetch_and_umin(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw umin i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw umin ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define void @fetch_and_umax(i128* %p, i128 %bits) { +define void @fetch_and_umax(ptr %p, i128 %bits) { ; NOOUTLINE-LABEL: 
fetch_and_umax: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB11_1: // %atomicrmw.start @@ -684,12 +684,12 @@ define void @fetch_and_umax(i128* %p, i128 %bits) { ; LSE-NEXT: ldr x8, [x8, :got_lo12:var] ; LSE-NEXT: stp x4, x5, [x8] ; LSE-NEXT: ret - %val = atomicrmw umax i128* %p, i128 %bits seq_cst - store i128 %val, i128* @var, align 16 + %val = atomicrmw umax ptr %p, i128 %bits seq_cst + store i128 %val, ptr @var, align 16 ret void } -define i128 @atomic_load_seq_cst(i128* %p) { +define i128 @atomic_load_seq_cst(ptr %p) { ; NOOUTLINE-LABEL: atomic_load_seq_cst: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: mov x8, x0 @@ -720,11 +720,11 @@ define i128 @atomic_load_seq_cst(i128* %p) { ; LSE-NEXT: mov x0, x2 ; LSE-NEXT: mov x1, x3 ; LSE-NEXT: ret - %r = load atomic i128, i128* %p seq_cst, align 16 + %r = load atomic i128, ptr %p seq_cst, align 16 ret i128 %r } -define i128 @atomic_load_relaxed(i64, i64, i128* %p) { +define i128 @atomic_load_relaxed(i64, i64, ptr %p) { ; NOOUTLINE-LABEL: atomic_load_relaxed: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB13_1: // %atomicrmw.start @@ -751,12 +751,12 @@ define i128 @atomic_load_relaxed(i64, i64, i128* %p) { ; LSE-NEXT: mov x1, #0 ; LSE-NEXT: casp x0, x1, x0, x1, [x2] ; LSE-NEXT: ret - %r = load atomic i128, i128* %p monotonic, align 16 + %r = load atomic i128, ptr %p monotonic, align 16 ret i128 %r } -define void @atomic_store_seq_cst(i128 %in, i128* %p) { +define void @atomic_store_seq_cst(i128 %in, ptr %p) { ; NOOUTLINE-LABEL: atomic_store_seq_cst: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB14_1: // %atomicrmw.start @@ -794,11 +794,11 @@ define void @atomic_store_seq_cst(i128 %in, i128* %p) { ; LSE-NEXT: b.ne .LBB14_1 ; LSE-NEXT: // %bb.2: // %atomicrmw.end ; LSE-NEXT: ret - store atomic i128 %in, i128* %p seq_cst, align 16 + store atomic i128 %in, ptr %p seq_cst, align 16 ret void } -define void @atomic_store_release(i128 %in, i128* %p) { +define void @atomic_store_release(i128 %in, ptr %p) { ; NOOUTLINE-LABEL: atomic_store_release: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB15_1: // %atomicrmw.start @@ -836,11 +836,11 @@ define void @atomic_store_release(i128 %in, i128* %p) { ; LSE-NEXT: b.ne .LBB15_1 ; LSE-NEXT: // %bb.2: // %atomicrmw.end ; LSE-NEXT: ret - store atomic i128 %in, i128* %p release, align 16 + store atomic i128 %in, ptr %p release, align 16 ret void } -define void @atomic_store_relaxed(i128 %in, i128* %p) { +define void @atomic_store_relaxed(i128 %in, ptr %p) { ; NOOUTLINE-LABEL: atomic_store_relaxed: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB16_1: // %atomicrmw.start @@ -878,13 +878,13 @@ define void @atomic_store_relaxed(i128 %in, i128* %p) { ; LSE-NEXT: b.ne .LBB16_1 ; LSE-NEXT: // %bb.2: // %atomicrmw.end ; LSE-NEXT: ret - store atomic i128 %in, i128* %p unordered, align 16 + store atomic i128 %in, ptr %p unordered, align 16 ret void } ; Since we store the original value to ensure no tearing for the unsuccessful ; case, the register used must not be xzr. 
-define void @cmpxchg_dead(i128* %ptr, i128 %desired, i128 %new) { +define void @cmpxchg_dead(ptr %ptr, i128 %desired, i128 %new) { ; NOOUTLINE-LABEL: cmpxchg_dead: ; NOOUTLINE: // %bb.0: ; NOOUTLINE-NEXT: .LBB17_1: // =>This Inner Loop Header: Depth=1 @@ -927,6 +927,6 @@ define void @cmpxchg_dead(i128* %ptr, i128 %desired, i128 %new) { ; LSE-NEXT: // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3 ; LSE-NEXT: casp x2, x3, x4, x5, [x0] ; LSE-NEXT: ret - cmpxchg i128* %ptr, i128 %desired, i128 %new monotonic monotonic + cmpxchg ptr %ptr, i128 %desired, i128 %new monotonic monotonic ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/arm64-atomic.ll index cfdbcf9bfac99e..739fc8bbcaf072 100644 --- a/llvm/test/CodeGen/AArch64/arm64-atomic.ll +++ b/llvm/test/CodeGen/AArch64/arm64-atomic.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck -enable-var-scope %s ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone -mattr=+outline-atomics | FileCheck -enable-var-scope %s -check-prefix=OUTLINE-ATOMICS -define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 { +define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) #0 { ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq ; CHECK-LABEL: val_compare_and_swap: ; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0 @@ -15,12 +15,12 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 { ; CHECK-NEXT: [[FAILBB]]: ; CHECK-NEXT: clrex ; CHECK-NEXT: ret - %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire + %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire %val = extractvalue { i32, i1 } %pair, 0 ret i32 %val } -define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 { +define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) #0 { ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq ; CHECK-LABEL: val_compare_and_swap_from_load: ; CHECK-NEXT: ldr [[NEW:w[0-9]+]], [x2] @@ -36,13 +36,13 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 { ; CHECK-NEXT: clrex ; CHECK-NEXT: mov x0, x[[RESULT]] ; CHECK-NEXT: ret - %new = load i32, i32* %pnew - %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire + %new = load i32, ptr %pnew + %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire %val = extractvalue { i32, i1 } %pair, 0 ret i32 %val } -define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 { +define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) #0 { ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq_rel ; CHECK-LABEL: val_compare_and_swap_rel: ; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0 @@ -56,12 +56,12 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 { ; CHECK-NEXT: [[FAILBB]]: ; CHECK-NEXT: clrex ; CHECK-NEXT: ret - %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic + %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel monotonic %val = extractvalue { i32, i1 } %pair, 0 ret i32 %val } -define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 { +define i64 @val_compare_and_swap_64(ptr %p, i64 %cmp, i64 %new) #0 { ; OUTLINE-ATOMICS: bl __aarch64_cas8_relax ; CHECK-LABEL: val_compare_and_swap_64: ; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0 @@ -75,12 +75,12 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 { ; CHECK-NEXT: [[FAILBB]]: ; CHECK-NEXT: clrex ; CHECK-NEXT: ret - %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic + %pair = cmpxchg ptr %p, i64 
%cmp, i64 %new monotonic monotonic %val = extractvalue { i64, i1 } %pair, 0 ret i64 %val } -define i32 @fetch_and_nand(i32* %p) #0 { +define i32 @fetch_and_nand(ptr %p) #0 { ; CHECK-LABEL: fetch_and_nand: ; CHECK: [[TRYBB:.?LBB[0-9_]+]]: ; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0] @@ -90,11 +90,11 @@ define i32 @fetch_and_nand(i32* %p) #0 { ; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0] ; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]] ; CHECK: mov x0, x[[DEST_REG]] - %val = atomicrmw nand i32* %p, i32 7 release + %val = atomicrmw nand ptr %p, i32 7 release ret i32 %val } -define i64 @fetch_and_nand_64(i64* %p) #0 { +define i64 @fetch_and_nand_64(ptr %p) #0 { ; CHECK-LABEL: fetch_and_nand_64: ; CHECK: mov x[[ADDR:[0-9]+]], x0 ; CHECK: [[TRYBB:.?LBB[0-9_]+]]: @@ -104,11 +104,11 @@ define i64 @fetch_and_nand_64(i64* %p) #0 { ; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]] ; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]] - %val = atomicrmw nand i64* %p, i64 7 acq_rel + %val = atomicrmw nand ptr %p, i64 7 acq_rel ret i64 %val } -define i32 @fetch_and_or(i32* %p) #0 { +define i32 @fetch_and_or(ptr %p) #0 { ; OUTLINE-ATOMICS: bl __aarch64_ldset4_acq_rel ; CHECK-LABEL: fetch_and_or: ; CHECK: mov [[OLDVAL_REG:w[0-9]+]], #5 @@ -119,11 +119,11 @@ define i32 @fetch_and_or(i32* %p) #0 { ; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0] ; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]] ; CHECK: mov x0, x[[DEST_REG]] - %val = atomicrmw or i32* %p, i32 5 seq_cst + %val = atomicrmw or ptr %p, i32 5 seq_cst ret i32 %val } -define i64 @fetch_and_or_64(i64* %p) #0 { +define i64 @fetch_and_or_64(ptr %p) #0 { ; OUTLINE-ATOMICS: bl __aarch64_ldset8_relax ; CHECK: fetch_and_or_64: ; CHECK: mov x[[ADDR:[0-9]+]], x0 @@ -132,7 +132,7 @@ define i64 @fetch_and_or_64(i64* %p) #0 { ; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0x7 ; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]] ; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]] - %val = atomicrmw or i64* %p, i64 7 monotonic + %val = atomicrmw or ptr %p, i64 7 monotonic ret i64 %val } @@ -157,31 +157,31 @@ define void @seq_cst_fence() #0 { ; CHECK: dmb ish{{$}} } -define i32 @atomic_load(i32* %p) #0 { - %r = load atomic i32, i32* %p seq_cst, align 4 +define i32 @atomic_load(ptr %p) #0 { + %r = load atomic i32, ptr %p seq_cst, align 4 ret i32 %r ; CHECK-LABEL: atomic_load: ; CHECK: ldar } -define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 { +define i8 @atomic_load_relaxed_8(ptr %p, i32 %off32) #0 { ; CHECK-LABEL: atomic_load_relaxed_8: - %ptr_unsigned = getelementptr i8, i8* %p, i32 4095 - %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1 + %ptr_unsigned = getelementptr i8, ptr %p, i32 4095 + %val_unsigned = load atomic i8, ptr %ptr_unsigned monotonic, align 1 ; CHECK: ldrb {{w[0-9]+}}, [x0, #4095] - %ptr_regoff = getelementptr i8, i8* %p, i32 %off32 - %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1 + %ptr_regoff = getelementptr i8, ptr %p, i32 %off32 + %val_regoff = load atomic i8, ptr %ptr_regoff unordered, align 1 %tot1 = add i8 %val_unsigned, %val_regoff ; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw] - %ptr_unscaled = getelementptr i8, i8* %p, i32 -256 - %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1 + %ptr_unscaled = getelementptr i8, ptr %p, i32 -256 + %val_unscaled = load atomic i8, ptr %ptr_unscaled monotonic, align 1 %tot2 = add i8 %tot1, %val_unscaled ; CHECK: ldurb {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. 
ADD imm) - %val_random = load atomic i8, i8* %ptr_random unordered, align 1 + %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm) + %val_random = load atomic i8, ptr %ptr_random unordered, align 1 %tot3 = add i8 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]] @@ -189,24 +189,24 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 { ret i8 %tot3 } -define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 { +define i16 @atomic_load_relaxed_16(ptr %p, i32 %off32) #0 { ; CHECK-LABEL: atomic_load_relaxed_16: - %ptr_unsigned = getelementptr i16, i16* %p, i32 4095 - %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2 + %ptr_unsigned = getelementptr i16, ptr %p, i32 4095 + %val_unsigned = load atomic i16, ptr %ptr_unsigned monotonic, align 2 ; CHECK: ldrh {{w[0-9]+}}, [x0, #8190] - %ptr_regoff = getelementptr i16, i16* %p, i32 %off32 - %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2 + %ptr_regoff = getelementptr i16, ptr %p, i32 %off32 + %val_regoff = load atomic i16, ptr %ptr_regoff unordered, align 2 %tot1 = add i16 %val_unsigned, %val_regoff ; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1] - %ptr_unscaled = getelementptr i16, i16* %p, i32 -128 - %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2 + %ptr_unscaled = getelementptr i16, ptr %p, i32 -128 + %val_unscaled = load atomic i16, ptr %ptr_unscaled monotonic, align 2 %tot2 = add i16 %tot1, %val_unscaled ; CHECK: ldurh {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm) - %val_random = load atomic i16, i16* %ptr_random unordered, align 2 + %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm) + %val_random = load atomic i16, ptr %ptr_random unordered, align 2 %tot3 = add i16 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]] @@ -214,24 +214,24 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 { ret i16 %tot3 } -define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 { +define i32 @atomic_load_relaxed_32(ptr %p, i32 %off32) #0 { ; CHECK-LABEL: atomic_load_relaxed_32: - %ptr_unsigned = getelementptr i32, i32* %p, i32 4095 - %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4 + %ptr_unsigned = getelementptr i32, ptr %p, i32 4095 + %val_unsigned = load atomic i32, ptr %ptr_unsigned monotonic, align 4 ; CHECK: ldr {{w[0-9]+}}, [x0, #16380] - %ptr_regoff = getelementptr i32, i32* %p, i32 %off32 - %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4 + %ptr_regoff = getelementptr i32, ptr %p, i32 %off32 + %val_regoff = load atomic i32, ptr %ptr_regoff unordered, align 4 %tot1 = add i32 %val_unsigned, %val_regoff ; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2] - %ptr_unscaled = getelementptr i32, i32* %p, i32 -64 - %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4 + %ptr_unscaled = getelementptr i32, ptr %p, i32 -64 + %val_unscaled = load atomic i32, ptr %ptr_unscaled monotonic, align 4 %tot2 = add i32 %tot1, %val_unscaled ; CHECK: ldur {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm) - %val_random = load atomic i32, i32* %ptr_random unordered, align 4 + %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. 
ADD imm) + %val_random = load atomic i32, ptr %ptr_random unordered, align 4 %tot3 = add i32 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]] @@ -239,24 +239,24 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 { ret i32 %tot3 } -define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 { +define i64 @atomic_load_relaxed_64(ptr %p, i32 %off32) #0 { ; CHECK-LABEL: atomic_load_relaxed_64: - %ptr_unsigned = getelementptr i64, i64* %p, i32 4095 - %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8 + %ptr_unsigned = getelementptr i64, ptr %p, i32 4095 + %val_unsigned = load atomic i64, ptr %ptr_unsigned monotonic, align 8 ; CHECK: ldr {{x[0-9]+}}, [x0, #32760] - %ptr_regoff = getelementptr i64, i64* %p, i32 %off32 - %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8 + %ptr_regoff = getelementptr i64, ptr %p, i32 %off32 + %val_regoff = load atomic i64, ptr %ptr_regoff unordered, align 8 %tot1 = add i64 %val_unsigned, %val_regoff ; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3] - %ptr_unscaled = getelementptr i64, i64* %p, i32 -32 - %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8 + %ptr_unscaled = getelementptr i64, ptr %p, i32 -32 + %val_unscaled = load atomic i64, ptr %ptr_unscaled monotonic, align 8 %tot2 = add i64 %tot1, %val_unscaled ; CHECK: ldur {{x[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm) - %val_random = load atomic i64, i64* %ptr_random unordered, align 8 + %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm) + %val_random = load atomic i64, ptr %ptr_random unordered, align 8 %tot3 = add i64 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]] @@ -265,96 +265,96 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 { } -define void @atomc_store(i32* %p) #0 { - store atomic i32 4, i32* %p seq_cst, align 4 +define void @atomc_store(ptr %p) #0 { + store atomic i32 4, ptr %p seq_cst, align 4 ret void ; CHECK-LABEL: atomc_store: ; CHECK: stlr } -define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 { +define void @atomic_store_relaxed_8(ptr %p, i32 %off32, i8 %val) #0 { ; CHECK-LABEL: atomic_store_relaxed_8: - %ptr_unsigned = getelementptr i8, i8* %p, i32 4095 - store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1 + %ptr_unsigned = getelementptr i8, ptr %p, i32 4095 + store atomic i8 %val, ptr %ptr_unsigned monotonic, align 1 ; CHECK: strb {{w[0-9]+}}, [x0, #4095] - %ptr_regoff = getelementptr i8, i8* %p, i32 %off32 - store atomic i8 %val, i8* %ptr_regoff unordered, align 1 + %ptr_regoff = getelementptr i8, ptr %p, i32 %off32 + store atomic i8 %val, ptr %ptr_regoff unordered, align 1 ; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw] - %ptr_unscaled = getelementptr i8, i8* %p, i32 -256 - store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1 + %ptr_unscaled = getelementptr i8, ptr %p, i32 -256 + store atomic i8 %val, ptr %ptr_unscaled monotonic, align 1 ; CHECK: sturb {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm) - store atomic i8 %val, i8* %ptr_random unordered, align 1 + %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. 
ADD imm) + store atomic i8 %val, ptr %ptr_random unordered, align 1 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]] ret void } -define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 { +define void @atomic_store_relaxed_16(ptr %p, i32 %off32, i16 %val) #0 { ; CHECK-LABEL: atomic_store_relaxed_16: - %ptr_unsigned = getelementptr i16, i16* %p, i32 4095 - store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2 + %ptr_unsigned = getelementptr i16, ptr %p, i32 4095 + store atomic i16 %val, ptr %ptr_unsigned monotonic, align 2 ; CHECK: strh {{w[0-9]+}}, [x0, #8190] - %ptr_regoff = getelementptr i16, i16* %p, i32 %off32 - store atomic i16 %val, i16* %ptr_regoff unordered, align 2 + %ptr_regoff = getelementptr i16, ptr %p, i32 %off32 + store atomic i16 %val, ptr %ptr_regoff unordered, align 2 ; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1] - %ptr_unscaled = getelementptr i16, i16* %p, i32 -128 - store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2 + %ptr_unscaled = getelementptr i16, ptr %p, i32 -128 + store atomic i16 %val, ptr %ptr_unscaled monotonic, align 2 ; CHECK: sturh {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm) - store atomic i16 %val, i16* %ptr_random unordered, align 2 + %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm) + store atomic i16 %val, ptr %ptr_random unordered, align 2 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]] ret void } -define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 { +define void @atomic_store_relaxed_32(ptr %p, i32 %off32, i32 %val) #0 { ; CHECK-LABEL: atomic_store_relaxed_32: - %ptr_unsigned = getelementptr i32, i32* %p, i32 4095 - store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4 + %ptr_unsigned = getelementptr i32, ptr %p, i32 4095 + store atomic i32 %val, ptr %ptr_unsigned monotonic, align 4 ; CHECK: str {{w[0-9]+}}, [x0, #16380] - %ptr_regoff = getelementptr i32, i32* %p, i32 %off32 - store atomic i32 %val, i32* %ptr_regoff unordered, align 4 + %ptr_regoff = getelementptr i32, ptr %p, i32 %off32 + store atomic i32 %val, ptr %ptr_regoff unordered, align 4 ; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2] - %ptr_unscaled = getelementptr i32, i32* %p, i32 -64 - store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4 + %ptr_unscaled = getelementptr i32, ptr %p, i32 -64 + store atomic i32 %val, ptr %ptr_unscaled monotonic, align 4 ; CHECK: stur {{w[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm) - store atomic i32 %val, i32* %ptr_random unordered, align 4 + %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. 
ADD imm) + store atomic i32 %val, ptr %ptr_random unordered, align 4 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: str {{w[0-9]+}}, [x[[ADDR]]] ret void } -define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 { +define void @atomic_store_relaxed_64(ptr %p, i32 %off32, i64 %val) #0 { ; OUTLINE-ATOMICS: bl __aarch64_ldadd4_acq_rel ; CHECK-LABEL: atomic_store_relaxed_64: - %ptr_unsigned = getelementptr i64, i64* %p, i32 4095 - store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8 + %ptr_unsigned = getelementptr i64, ptr %p, i32 4095 + store atomic i64 %val, ptr %ptr_unsigned monotonic, align 8 ; CHECK: str {{x[0-9]+}}, [x0, #32760] - %ptr_regoff = getelementptr i64, i64* %p, i32 %off32 - store atomic i64 %val, i64* %ptr_regoff unordered, align 8 + %ptr_regoff = getelementptr i64, ptr %p, i32 %off32 + store atomic i64 %val, ptr %ptr_regoff unordered, align 8 ; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3] - %ptr_unscaled = getelementptr i64, i64* %p, i32 -32 - store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8 + %ptr_unscaled = getelementptr i64, ptr %p, i32 -32 + store atomic i64 %val, ptr %ptr_unscaled monotonic, align 8 ; CHECK: stur {{x[0-9]+}}, [x0, #-256] - %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm) - store atomic i64 %val, i64* %ptr_random unordered, align 8 + %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm) + store atomic i64 %val, ptr %ptr_random unordered, align 8 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: str {{x[0-9]+}}, [x[[ADDR]]] @@ -371,13 +371,13 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 { define i32 @next_id() nounwind optsize ssp align 2 { entry: - %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst + %0 = atomicrmw add ptr @counter, i32 1 seq_cst %add.i = add i32 %0, 1 %tobool = icmp eq i32 %add.i, 0 br i1 %tobool, label %if.else, label %return if.else: ; preds = %entry - %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst + %1 = atomicrmw add ptr @counter, i32 1 seq_cst %add.i2 = add i32 %1, 1 br label %return diff --git a/llvm/test/CodeGen/AArch64/arm64-bcc.ll b/llvm/test/CodeGen/AArch64/arm64-bcc.ll index 66d2f52ab969e2..08e7e9f57b6411 100644 --- a/llvm/test/CodeGen/AArch64/arm64-bcc.ll +++ b/llvm/test/CodeGen/AArch64/arm64-bcc.ll @@ -24,13 +24,12 @@ entry: ; Checks for compfail when optimizing csincr-cbz sequence -define { i64, i1 } @foo(i64* , %Sstruct* , i1, i64) { +define { i64, i1 } @foo(ptr , ptr , i1, i64) { entry: %.sroa.0 = alloca i72, align 16 - %.count.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 0, i32 0 - %4 = load i64, i64* %.count.value, align 8 - %.repeatedValue.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 1, i32 0 - %5 = load i32, i32* %.repeatedValue.value, align 8 + %4 = load i64, ptr %1, align 8 + %.repeatedValue.value = getelementptr inbounds %Sstruct, ptr %1, i64 0, i32 1, i32 0 + %5 = load i32, ptr %.repeatedValue.value, align 8 %6 = icmp eq i64 %4, 0 br label %7 diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll index 5cd96d09b14d0f..f5aa4c666a5681 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll @@ -2,1322 +2,1321 @@ ; RUN: 
llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O0 -fast-isel=true -o - | FileCheck %s ; CHECK-LABEL: test_i64_f64: -define void @test_i64_f64(double* %p, i64* %q) { +define void @test_i64_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v1i64: -define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) { +define void @test_i64_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v2f32: -define void @test_i64_v2f32(<2 x float>* %p, i64* %q) { +define void @test_i64_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v2i32: -define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) { +define void @test_i64_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v4f16: -define void @test_i64_v4f16(<4 x half>* %p, i64* %q) { +define void @test_i64_v4f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x half>, <4 x half>* %p + %1 = load <4 x half>, ptr %p %2 = fadd <4 x half> %1, %1 %3 = bitcast <4 x half> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v4i16: -define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) { +define void @test_i64_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v8i8: -define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { +define void @test_i64_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to i64 %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_f64_i64: -define void @test_f64_i64(i64* %p, double* %q) { +define void @test_f64_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v1i64: -define void @test_f64_v1i64(<1 x i64>* %p, double* %q) { +define void @test_f64_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to double %4 = fadd double 
%3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v2f32: -define void @test_f64_v2f32(<2 x float>* %p, double* %q) { +define void @test_f64_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v2i32: -define void @test_f64_v2i32(<2 x i32>* %p, double* %q) { +define void @test_f64_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v4i16: -define void @test_f64_v4i16(<4 x i16>* %p, double* %q) { +define void @test_f64_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v4f16: -define void @test_f64_v4f16(<4 x half>* %p, double* %q) { +define void @test_f64_v4f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK: str - %1 = load <4 x half>, <4 x half>* %p + %1 = load <4 x half>, ptr %p %2 = fadd <4 x half> %1, %1 %3 = bitcast <4 x half> %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v8i8: -define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { +define void @test_f64_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to double %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_i64: -define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { +define void @test_v1i64_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_f64: -define void @test_v1i64_f64(double* %p, <1 x i64>* %q) { +define void @test_v1i64_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: str - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v2f32: -define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) { +define void @test_v1i64_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v2i32: -define void 
@test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) { +define void @test_v1i64_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v4f16: -define void @test_v1i64_v4f16(<4 x half>* %p, <1 x i64>* %q) { +define void @test_v1i64_v4f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x half>, <4 x half>* %p + %1 = load <4 x half>, ptr %p %2 = fadd <4 x half> %1, %1 %3 = bitcast <4 x half> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v4i16: -define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) { +define void @test_v1i64_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v8i8: -define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) { +define void @test_v1i64_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_i64: -define void @test_v2f32_i64(i64* %p, <2 x float>* %q) { +define void @test_v2f32_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_f64: -define void @test_v2f32_f64(double* %p, <2 x float>* %q) { +define void @test_v2f32_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v1i64: -define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) { +define void @test_v2f32_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v2i32: -define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) { +define void @test_v2f32_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + 
store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v4i16: -define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) { +define void @test_v2f32_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v4f16: -define void @test_v2f32_v4f16(<4 x half>* %p, <2 x float>* %q) { +define void @test_v2f32_v4f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <4 x half>, <4 x half>* %p + %1 = load <4 x half>, ptr %p %2 = fadd <4 x half> %1, %1 %3 = bitcast <4 x half> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v8i8: -define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) { +define void @test_v2f32_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_i64: -define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) { +define void @test_v2i32_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_f64: -define void @test_v2i32_f64(double* %p, <2 x i32>* %q) { +define void @test_v2i32_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v1i64: -define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) { +define void @test_v2i32_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v2f32: -define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) { +define void @test_v2i32_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v4i16: -define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) { +define void @test_v2i32_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <4 x i16>, <4 x i16>* 
%p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v8i8: -define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) { +define void @test_v2i32_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_i64: -define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) { +define void @test_v4i16_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_f64: -define void @test_v4i16_f64(double* %p, <4 x i16>* %q) { +define void @test_v4i16_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v1i64: -define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) { +define void @test_v4i16_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v2f32: -define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) { +define void @test_v4i16_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v2i32: -define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) { +define void @test_v4i16_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v4f16: -define void @test_v4i16_v4f16(<4 x half>* %p, <4 x i16>* %q) { +define void @test_v4i16_v4f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <4 x half>, <4 x half>* %p + %1 = load <4 x half>, ptr %p %2 = fadd <4 x half> %1, %1 %3 = bitcast <4 x half> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v8i8: -define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) { +define void @test_v4i16_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { 
v{{[0-9]+}}.8b } ; CHECK: rev16 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_i64: -define void @test_v4f16_i64(i64* %p, <4 x half>* %q) { +define void @test_v4f16_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_f64: -define void @test_v4f16_f64(double* %p, <4 x half>* %q) { +define void @test_v4f16_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_v1i64: -define void @test_v4f16_v1i64(<1 x i64>* %p, <4 x half>* %q) { +define void @test_v4f16_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_v2f32: -define void @test_v4f16_v2f32(<2 x float>* %p, <4 x half>* %q) { +define void @test_v4f16_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_v2i32: -define void @test_v4f16_v2i32(<2 x i32>* %p, <4 x half>* %q) { +define void @test_v4f16_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_v4i16: -define void @test_v4f16_v4i16(<4 x i16>* %p, <4 x half>* %q) { +define void @test_v4f16_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f16_v8i8: -define void @test_v4f16_v8i8(<8 x i8>* %p, <4 x half>* %q) { +define void @test_v4f16_v8i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev16 v{{[0-9]+}}.8b ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 
= add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <4 x half> %4 = fadd <4 x half> %3, %3 - store <4 x half> %4, <4 x half>* %q + store <4 x half> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_i64: -define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) { +define void @test_v8i8_i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_f64: -define void @test_v8i8_f64(double* %p, <8 x i8>* %q) { +define void @test_v8i8_f64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v1i64: -define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) { +define void @test_v8i8_v1i64(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v2f32: -define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) { +define void @test_v8i8_v2f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v2i32: -define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) { +define void @test_v8i8_v2i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v4i16: -define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) { +define void @test_v8i8_v4i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev16 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v2f64: -define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) { +define void @test_f128_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: ext ; CHECK: str - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v2i64: -define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) { +define void @test_f128_v2i64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: ext ; CHECK: str - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> 
%1, %1 %3 = bitcast <2 x i64> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v4f32: -define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) { +define void @test_f128_v4f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: str q - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v4i32: -define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) { +define void @test_f128_v4i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: str - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v8i16: -define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) { +define void @test_f128_v8i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: str - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v16i8: -define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) { +define void @test_f128_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: ext ; CHECK: str q - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to fp128 %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_f128: -define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) { +define void @test_v2f64_f128(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v2i64: -define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) { +define void @test_v2f64_v2i64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v4f32: -define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) { +define void @test_v2f64_v4f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v4i32: -define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) { +define void @test_v2f64_v4i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; 
CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v8i16: -define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) { +define void @test_v2f64_v8i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v16i8: -define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) { +define void @test_v2f64_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_f128: -define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) { +define void @test_v2i64_f128(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v2f64: -define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) { +define void @test_v2i64_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v4f32: -define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) { +define void @test_v2i64_v4f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK-NOT: rev ; CHECK: fadd ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: add ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v4i32: -define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) { +define void @test_v2i64_v4i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v8i16: -define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) { +define void @test_v2i64_v8i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x 
i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v16i8: -define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) { +define void @test_v2i64_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_f128: -define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) { +define void @test_v4f32_f128(ptr %p, ptr %q) { ; CHECK: ldr q ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v2f64: -define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) { +define void @test_v4f32_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v2i64: -define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) { +define void @test_v4f32_v2i64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: fadd ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v4i32: -define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) { +define void @test_v4f32_v4i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v8i16: -define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) { +define void @test_v4f32_v8i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v16i8: -define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) { +define void @test_v4f32_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: 
test_v4i32_f128: -define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) { +define void @test_v4i32_f128(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v2f64: -define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) { +define void @test_v4i32_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v2i64: -define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) { +define void @test_v4i32_v2i64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v4f32: -define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) { +define void @test_v4i32_v4f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v8i16: -define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) { +define void @test_v4i32_v8i16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v16i8: -define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) { +define void @test_v4i32_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_f128: -define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) { +define void @test_v8i16_f128(ptr %p, ptr %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v2f64: -define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) { +define void @test_v8i16_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x 
double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v2i64: -define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) { +define void @test_v8i16_v2i64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v4f32: -define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) { +define void @test_v8i16_v4f32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v4i32: -define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) { +define void @test_v8i16_v4i32(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v8f16: -define void @test_v8i16_v8f16(<8 x half>* %p, <8 x i16>* %q) { +define void @test_v8i16_v8f16(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK-NOT: rev ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <8 x half>, <8 x half>* %p + %1 = load <8 x half>, ptr %p %2 = fadd <8 x half> %1, %1 %3 = bitcast <8 x half> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v16i8: -define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) { +define void @test_v8i16_v16i8(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev16 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_f128: -define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) { +define void @test_v16i8_f128(ptr %p, ptr %q) { ; CHECK: ldr q ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <16 x i8> %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v2f64: -define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) { +define void @test_v16i8_v2f64(ptr %p, ptr %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v2i64: 
-define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-  %1 = load <2 x i64>, <2 x i64>* %p
+  %1 = load <2 x i64>, ptr %p
   %2 = add <2 x i64> %1, %1
   %3 = bitcast <2 x i64> %2 to <16 x i8>
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4f32:
-define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-  %1 = load <4 x float>, <4 x float>* %p
+  %1 = load <4 x float>, ptr %p
   %2 = fadd <4 x float> %1, %1
   %3 = bitcast <4 x float> %2 to <16 x i8>
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4i32:
-define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-  %1 = load <4 x i32>, <4 x i32>* %p
+  %1 = load <4 x i32>, ptr %p
   %2 = add <4 x i32> %1, %1
   %3 = bitcast <4 x i32> %2 to <16 x i8>
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v8f16:
-define void @test_v16i8_v8f16(<8 x half>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v8f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev16 v{{[0-9]+}}.16b
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-  %1 = load <8 x half>, <8 x half>* %p
+  %1 = load <8 x half>, ptr %p
   %2 = fadd <8 x half> %1, %1
   %3 = bitcast <8 x half> %2 to <16 x i8>
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v8i16:
-define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev16 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-  %1 = load <8 x i16>, <8 x i16>* %p
+  %1 = load <8 x i16>, ptr %p
   %2 = add <8 x i16> %1, %1
   %3 = bitcast <8 x i16> %2 to <16 x i8>
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_v4f16_struct:
 %struct.struct1 = type { half, half, half, half }
-define %struct.struct1 @test_v4f16_struct(%struct.struct1* %ret) {
+define %struct.struct1 @test_v4f16_struct(ptr %ret) {
 entry:
 ; CHECK: ld1 { {{v[0-9]+}}.4h }
 ; CHECK-NOT: rev
-  %0 = bitcast %struct.struct1* %ret to <4 x half>*
-  %1 = load <4 x half>, <4 x half>* %0, align 2
-  %2 = extractelement <4 x half> %1, i32 0
-  %.fca.0.insert = insertvalue %struct.struct1 undef, half %2, 0
+  %0 = load <4 x half>, ptr %ret, align 2
+  %1 = extractelement <4 x half> %0, i32 0
+  %.fca.0.insert = insertvalue %struct.struct1 undef, half %1, 0
   ret %struct.struct1 %.fca.0.insert
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
index b387209d5132ac..c6f955f05837d1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
@@ -14,16 +14,16 @@
 ; }
 ;}
 
-define void @_Z4testii(i32 %a, i32 %b) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z4testii(i32 %a, i32 %b) #0 personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_Z3fooi(i32 %a)
           to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = tail call i8* @__cxa_begin_catch(i8* %1) #2
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = tail call ptr @__cxa_begin_catch(ptr %1) #2
 
   invoke void @_Z3fooi(i32 %b)
           to label %invoke.cont2 unwind label %lpad1
@@ -35,19 +35,19 @@ try.cont: ; preds = %entry, %invoke.cont
   ret void
 
 lpad1:                                            ; preds = %lpad
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
   invoke void @__cxa_end_catch()
           to label %eh.resume unwind label %terminate.lpad
 
 eh.resume:                                        ; preds = %lpad1
-  resume { i8*, i32 } %3
+  resume { ptr, i32 } %3
 
 terminate.lpad:                                   ; preds = %lpad1
-  %4 = landingpad { i8*, i32 }
-          catch i8* null
-  %5 = extractvalue { i8*, i32 } %4, 0
-  tail call void @__clang_call_terminate(i8* %5) #3
+  %4 = landingpad { ptr, i32 }
+          catch ptr null
+  %5 = extractvalue { ptr, i32 } %4, 0
+  tail call void @__clang_call_terminate(ptr %5) #3
   unreachable
 }
@@ -55,13 +55,13 @@ declare void @_Z3fooi(i32) #0
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 
 ; Function Attrs: noinline noreturn nounwind
-define linkonce_odr hidden void @__clang_call_terminate(i8*) #1 {
-  %2 = tail call i8* @__cxa_begin_catch(i8* %0) #2
+define linkonce_odr hidden void @__clang_call_terminate(ptr) #1 {
+  %2 = tail call ptr @__cxa_begin_catch(ptr %0) #2
   tail call void @_ZSt9terminatev() #3
   unreachable
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
index e5e16848a4b0cd..0e424f1e506f17 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
@@ -5,10 +5,10 @@
 target datalayout = "E-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64_be-arm-none-eabi"
 
-%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr) nounwind
 
 define double @callee(i32 %a, ...) {
 ; CHECK: stp
@@ -19,40 +19,37 @@ define double @callee(i32 %a, ...) {
 ; CHECK: stp
 entry:
   %vl = alloca %struct.__va_list, align 8
-  %vl1 = bitcast %struct.__va_list* %vl to i8*
-  call void @llvm.va_start(i8* %vl1)
-  %vr_offs_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 4
-  %vr_offs = load i32, i32* %vr_offs_p, align 4
+  call void @llvm.va_start(ptr %vl)
+  %vr_offs_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 4
+  %vr_offs = load i32, ptr %vr_offs_p, align 4
   %0 = icmp sgt i32 %vr_offs, -1
   br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
 
 vaarg.maybe_reg:                                  ; preds = %entry
   %new_reg_offs = add i32 %vr_offs, 16
-  store i32 %new_reg_offs, i32* %vr_offs_p, align 4
+  store i32 %new_reg_offs, ptr %vr_offs_p, align 4
   %inreg = icmp slt i32 %new_reg_offs, 1
   br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
 
 vaarg.in_reg:                                     ; preds = %vaarg.maybe_reg
-  %reg_top_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 2
-  %reg_top = load i8*, i8** %reg_top_p, align 8
+  %reg_top_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 2
+  %reg_top = load ptr, ptr %reg_top_p, align 8
   %1 = sext i32 %vr_offs to i64
-  %2 = getelementptr i8, i8* %reg_top, i64 %1
-  %3 = ptrtoint i8* %2 to i64
+  %2 = getelementptr i8, ptr %reg_top, i64 %1
+  %3 = ptrtoint ptr %2 to i64
   %align_be = add i64 %3, 8
-  %4 = inttoptr i64 %align_be to i8*
+  %4 = inttoptr i64 %align_be to ptr
   br label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %vaarg.maybe_reg, %entry
-  %stack_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 0
-  %stack = load i8*, i8** %stack_p, align 8
-  %new_stack = getelementptr i8, i8* %stack, i64 8
-  store i8* %new_stack, i8** %stack_p, align 8
+  %stack = load ptr, ptr %vl, align 8
+  %new_stack = getelementptr i8, ptr %stack, i64 8
+  store ptr %new_stack, ptr %vl, align 8
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.on_stack, %vaarg.in_reg
-  %.sink = phi i8* [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
-  %5 = bitcast i8* %.sink to double*
-  %6 = load double, double* %5, align 8
-  call void @llvm.va_end(i8* %vl1)
-  ret double %6
+  %.sink = phi ptr [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
+  %5 = load double, ptr %.sink, align 8
+  call void @llvm.va_end(ptr %vl)
+  ret double %5
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
index a1dec896d34a2d..f1dccae36b21ce 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
@@ -7,1266 +7,1266 @@
 ; CHECK-LABEL: test_i64_f64:
 declare i64 @test_i64_f64_helper(double %p)
-define void @test_i64_f64(double* %p, i64* %q) {
+define void @test_i64_f64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-  %1 = load double, double* %p
+  %1 = load double, ptr %p
   %2 = fadd double %1, %1
   %3 = call i64 @test_i64_f64_helper(double %2)
   br label %return_bb
 return_bb:
   %4 = add i64 %3, %3
-  store i64 %4, i64* %q
+  store i64 %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_i64_v1i64:
 declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
-define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+define void @test_i64_v1i64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-  %1 = load <1 x i64>, <1 x i64>* %p
+  %1 = load <1 x i64>, ptr %p
   %2 = add <1 x i64> %1, %1
   %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
   br label %return_bb
 return_bb:
   %4 = add i64 %3, %3
-  store i64 %4, i64* %q
+  store i64 %4, ptr %q
   ret void
 }
 
 ; CHECK-LABEL: test_i64_v2f32:
 declare i64 @test_i64_v2f32_helper(<2 x float> %p)
-define void @test_i64_v2f32(<2 x float>* %p, i64* %q) { +define void @test_i64_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v2i32: declare i64 @test_i64_v2i32_helper(<2 x i32> %p) -define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) { +define void @test_i64_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2) br label %return_bb return_bb: %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v4i16: declare i64 @test_i64_v4i16_helper(<4 x i16> %p) -define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) { +define void @test_i64_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_i64_v8i8: declare i64 @test_i64_v8i8_helper(<8 x i8> %p) -define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { +define void @test_i64_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2) br label %return_bb return_bb: %4 = add i64 %3, %3 - store i64 %4, i64* %q + store i64 %4, ptr %q ret void } ; CHECK-LABEL: test_f64_i64: declare double @test_f64_i64_helper(i64 %p) -define void @test_f64_i64(i64* %p, double* %q) { +define void @test_f64_i64(ptr %p, ptr %q) { ; CHECK-NOT: rev - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = call double @test_f64_i64_helper(i64 %2) br label %return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v1i64: declare double @test_f64_v1i64_helper(<1 x i64> %p) -define void @test_f64_v1i64(<1 x i64>* %p, double* %q) { +define void @test_f64_v1i64(ptr %p, ptr %q) { ; CHECK-NOT: rev - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = call double @test_f64_v1i64_helper(<1 x i64> %2) br label %return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v2f32: declare double @test_f64_v2f32_helper(<2 x float> %p) -define void @test_f64_v2f32(<2 x float>* %p, double* %q) { +define void @test_f64_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call double @test_f64_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v2i32: declare double @test_f64_v2i32_helper(<2 x i32> %p) -define void @test_f64_v2i32(<2 x i32>* %p, double* %q) { +define void @test_f64_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call double @test_f64_v2i32_helper(<2 x i32> %2) br label 
%return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v4i16: declare double @test_f64_v4i16_helper(<4 x i16> %p) -define void @test_f64_v4i16(<4 x i16>* %p, double* %q) { +define void @test_f64_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call double @test_f64_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_f64_v8i8: declare double @test_f64_v8i8_helper(<8 x i8> %p) -define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { +define void @test_f64_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call double @test_f64_v8i8_helper(<8 x i8> %2) br label %return_bb return_bb: %4 = fadd double %3, %3 - store double %4, double* %q + store double %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_i64: declare <1 x i64> @test_v1i64_i64_helper(i64 %p) -define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { +define void @test_v1i64_i64(ptr %p, ptr %q) { ; CHECK-NOT: rev - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_f64: declare <1 x i64> @test_v1i64_f64_helper(double %p) -define void @test_v1i64_f64(double* %p, <1 x i64>* %q) { +define void @test_v1i64_f64(ptr %p, ptr %q) { ; CHECK-NOT: rev - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = call <1 x i64> @test_v1i64_f64_helper(double %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v2f32: declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p) -define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) { +define void @test_v1i64_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v2i32: declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p) -define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) { +define void @test_v1i64_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v1i64_v4i16: declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p) -define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) { +define void @test_v1i64_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr 
%q ret void } ; CHECK-LABEL: test_v1i64_v8i8: declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p) -define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) { +define void @test_v1i64_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2) br label %return_bb return_bb: %4 = add <1 x i64> %3, %3 - store <1 x i64> %4, <1 x i64>* %q + store <1 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_i64: declare <2 x float> @test_v2f32_i64_helper(i64 %p) -define void @test_v2f32_i64(i64* %p, <2 x float>* %q) { +define void @test_v2f32_i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_f64: declare <2 x float> @test_v2f32_f64_helper(double %p) -define void @test_v2f32_f64(double* %p, <2 x float>* %q) { +define void @test_v2f32_f64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = call <2 x float> @test_v2f32_f64_helper(double %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v1i64: declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p) -define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) { +define void @test_v2f32_v1i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v2i32: declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p) -define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) { +define void @test_v2f32_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v4i16: declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p) -define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) { +define void @test_v2f32_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f32_v8i8: declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p) -define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) { +define void @test_v2f32_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 
x i8> %2) br label %return_bb return_bb: %4 = fadd <2 x float> %3, %3 - store <2 x float> %4, <2 x float>* %q + store <2 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_i64: declare <2 x i32> @test_v2i32_i64_helper(i64 %p) -define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) { +define void @test_v2i32_i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_f64: declare <2 x i32> @test_v2i32_f64_helper(double %p) -define void @test_v2i32_f64(double* %p, <2 x i32>* %q) { +define void @test_v2i32_f64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = call <2 x i32> @test_v2i32_f64_helper(double %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v1i64: declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p) -define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) { +define void @test_v2i32_v1i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v2f32: declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p) -define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) { +define void @test_v2i32_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v4i16: declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p) -define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) { +define void @test_v2i32_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i32_v8i8: declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p) -define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) { +define void @test_v2i32_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2) br label %return_bb return_bb: %4 = add <2 x i32> %3, %3 - store <2 x i32> %4, <2 x i32>* %q + store <2 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_i64: declare <4 x i16> @test_v4i16_i64_helper(i64 %p) -define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) { +define void @test_v4i16_i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = 
call <4 x i16> @test_v4i16_i64_helper(i64 %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_f64: declare <4 x i16> @test_v4i16_f64_helper(double %p) -define void @test_v4i16_f64(double* %p, <4 x i16>* %q) { +define void @test_v4i16_f64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load double, double* %p + %1 = load double, ptr %p %2 = fadd double %1, %1 %3 = call <4 x i16> @test_v4i16_f64_helper(double %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v1i64: declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p) -define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) { +define void @test_v4i16_v1i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v2f32: declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p) -define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) { +define void @test_v4i16_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v2i32: declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p) -define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) { +define void @test_v4i16_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i16_v8i8: declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p) -define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) { +define void @test_v4i16_v8i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <8 x i8>, <8 x i8>* %p + %1 = load <8 x i8>, ptr %p %2 = add <8 x i8> %1, %1 %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2) br label %return_bb return_bb: %4 = add <4 x i16> %3, %3 - store <4 x i16> %4, <4 x i16>* %q + store <4 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_i64: declare <8 x i8> @test_v8i8_i64_helper(i64 %p) -define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) { +define void @test_v8i8_i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load i64, i64* %p + %1 = load i64, ptr %p %2 = add i64 %1, %1 %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_f64: declare <8 x i8> @test_v8i8_f64_helper(double %p) -define void @test_v8i8_f64(double* %p, <8 x i8>* %q) { +define void @test_v8i8_f64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load double, double* %p + %1 = load double, ptr %p %2 = 
fadd double %1, %1 %3 = call <8 x i8> @test_v8i8_f64_helper(double %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v1i64: declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p) -define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) { +define void @test_v8i8_v1i64(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <1 x i64>, <1 x i64>* %p + %1 = load <1 x i64>, ptr %p %2 = add <1 x i64> %1, %1 %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v2f32: declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p) -define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) { +define void @test_v8i8_v2f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <2 x float>, <2 x float>* %p + %1 = load <2 x float>, ptr %p %2 = fadd <2 x float> %1, %1 %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v2i32: declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p) -define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) { +define void @test_v8i8_v2i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <2 x i32>, <2 x i32>* %p + %1 = load <2 x i32>, ptr %p %2 = add <2 x i32> %1, %1 %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i8_v4i16: declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p) -define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) { +define void @test_v8i8_v4i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <4 x i16>, <4 x i16>* %p + %1 = load <4 x i16>, ptr %p %2 = add <4 x i16> %1, %1 %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2) br label %return_bb return_bb: %4 = add <8 x i8> %3, %3 - store <8 x i8> %4, <8 x i8>* %q + store <8 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v2f64: declare fp128 @test_f128_v2f64_helper(<2 x double> %p) -define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) { +define void @test_f128_v2f64(ptr %p, ptr %q) { ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v2i64: declare fp128 @test_f128_v2i64_helper(<2 x i64> %p) -define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) { +define void @test_f128_v2i64(ptr %p, ptr %q) { ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v4f32: declare fp128 @test_f128_v4f32_helper(<4 x float> %p) -define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) { +define void @test_f128_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x float>, <4 x 
float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v4i32: declare fp128 @test_f128_v4i32_helper(<4 x i32> %p) -define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) { +define void @test_f128_v4i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v8i16: declare fp128 @test_f128_v8i16_helper(<8 x i16> %p) -define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) { +define void @test_f128_v8i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_f128_v16i8: declare fp128 @test_f128_v16i8_helper(<16 x i8> %p) -define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) { +define void @test_f128_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = fadd fp128 %3, %3 - store fp128 %4, fp128* %q + store fp128 %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_f128: declare <2 x double> @test_v2f64_f128_helper(fp128 %p) -define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) { +define void @test_v2f64_f128(ptr %p, ptr %q) { ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v2i64: declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p) -define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) { +define void @test_v2f64_v2i64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v4f32: declare <2 x double> @test_v2f64_v4f32_helper(<4 x float> %p) -define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) { +define void @test_v2f64_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v4i32: declare <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %p) -define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) { +define void @test_v2f64_v4i32(ptr %p, ptr %q) { 
; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v8i16: declare <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %p) -define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) { +define void @test_v2f64_v8i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: ext - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2f64_v16i8: declare <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %p) -define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) { +define void @test_v2f64_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = fadd <2 x double> %3, %3 - store <2 x double> %4, <2 x double>* %q + store <2 x double> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_f128: declare <2 x i64> @test_v2i64_f128_helper(fp128 %p) -define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) { +define void @test_v2i64_f128(ptr %p, ptr %q) { ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v2f64: declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p) -define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) { +define void @test_v2i64_v2f64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v4f32: declare <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %p) -define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) { +define void @test_v2i64_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v4i32: declare <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %p) -define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) { +define void @test_v2i64_v4i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + 
store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v8i16: declare <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %p) -define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) { +define void @test_v2i64_v8i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: ext - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v2i64_v16i8: declare <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %p) -define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) { +define void @test_v2i64_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = add <2 x i64> %3, %3 - store <2 x i64> %4, <2 x i64>* %q + store <2 x i64> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_f128: declare <4 x float> @test_v4f32_f128_helper(fp128 %p) -define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) { +define void @test_v4f32_f128(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v2f64: declare <4 x float> @test_v4f32_v2f64_helper(<2 x double> %p) -define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) { +define void @test_v4f32_v2f64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v2i64: declare <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %p) -define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) { +define void @test_v4f32_v2i64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v4i32: declare <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %p) -define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) { +define void @test_v4f32_v4i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v8i16: declare <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %p) -define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) { +define void 
@test_v4f32_v8i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr %p %2 = add <8 x i16> %1, %1 %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4f32_v16i8: declare <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %p) -define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) { +define void @test_v4f32_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = fadd <4 x float> %3, %3 - store <4 x float> %4, <4 x float>* %q + store <4 x float> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_f128: declare <4 x i32> @test_v4i32_f128_helper(fp128 %p) -define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) { +define void @test_v4i32_f128(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v2f64: declare <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %p) -define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) { +define void @test_v4i32_v2f64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v2i64: declare <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %p) -define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) { +define void @test_v4i32_v2i64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v4f32: declare <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %p) -define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) { +define void @test_v4i32_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v8i16: declare <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %p) -define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) { +define void @test_v4i32_v8i16(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <8 x i16>, <8 x i16>* %p + %1 = load <8 x i16>, ptr 
%p %2 = add <8 x i16> %1, %1 %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v4i32_v16i8: declare <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %p) -define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) { +define void @test_v4i32_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = add <4 x i32> %3, %3 - store <4 x i32> %4, <4 x i32>* %q + store <4 x i32> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_f128: declare <8 x i16> @test_v8i16_f128_helper(fp128 %p) -define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) { +define void @test_v8i16_f128(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v2f64: declare <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %p) -define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) { +define void @test_v8i16_v2f64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v2i64: declare <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %p) -define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) { +define void @test_v8i16_v2i64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v4f32: declare <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %p) -define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) { +define void @test_v8i16_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v8i16_v4i32: declare <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %p) -define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) { +define void @test_v8i16_v4i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret 
void } ; CHECK-LABEL: test_v8i16_v16i8: declare <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %p) -define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) { +define void @test_v8i16_v16i8(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <16 x i8>, <16 x i8>* %p + %1 = load <16 x i8>, ptr %p %2 = add <16 x i8> %1, %1 %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2) br label %return_bb return_bb: %4 = add <8 x i16> %3, %3 - store <8 x i16> %4, <8 x i16>* %q + store <8 x i16> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_f128: declare <16 x i8> @test_v16i8_f128_helper(fp128 %p) -define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) { +define void @test_v16i8_f128(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load fp128, fp128* %p + %1 = load fp128, ptr %p %2 = fadd fp128 %1, %1 %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2) br label %return_bb return_bb: %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v2f64: declare <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %p) -define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) { +define void @test_v16i8_v2f64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <2 x double>, <2 x double>* %p + %1 = load <2 x double>, ptr %p %2 = fadd <2 x double> %1, %1 %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2) br label %return_bb return_bb: %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v2i64: declare <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %p) -define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) { +define void @test_v16i8_v2i64(ptr %p, ptr %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <2 x i64>, <2 x i64>* %p + %1 = load <2 x i64>, ptr %p %2 = add <2 x i64> %1, %1 %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2) br label %return_bb return_bb: %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v4f32: declare <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %p) -define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) { +define void @test_v16i8_v4f32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <4 x float>, <4 x float>* %p + %1 = load <4 x float>, ptr %p %2 = fadd <4 x float> %1, %1 %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2) br label %return_bb return_bb: %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v4i32: declare <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %p) -define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) { +define void @test_v16i8_v4i32(ptr %p, ptr %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <4 x i32>, <4 x i32>* %p + %1 = load <4 x i32>, ptr %p %2 = add <4 x i32> %1, %1 %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2) br label %return_bb return_bb: %4 = add <16 x i8> %3, %3 - store <16 x i8> %4, <16 x i8>* %q + store <16 x i8> %4, ptr %q ret void } ; CHECK-LABEL: test_v16i8_v8i16: declare <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %p) -define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) { +define void @test_v16i8_v8i16(ptr %p, ptr %q) { 
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-  %1 = load <8 x i16>, <8 x i16>* %p
+  %1 = load <8 x i16>, ptr %p
   %2 = add <8 x i16> %1, %1
   %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
   br label %return_bb
 return_bb:
   %4 = add <16 x i8> %3, %3
-  store <16 x i8> %4, <16 x i8>* %q
+  store <16 x i8> %4, ptr %q
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll b/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
index f2b6829316007f..7102e5488a8dd1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
@@ -3,12 +3,12 @@
 ; Make sure large offsets aren't mistaken for valid immediate offsets.
 ;
-define void @f(i32* nocapture %p) {
+define void @f(ptr nocapture %p) {
 entry:
-  %a = ptrtoint i32* %p to i64
+  %a = ptrtoint ptr %p to i64
   %ao = add i64 %a, 25769803792
-  %b = inttoptr i64 %ao to i32*
-  store volatile i32 0, i32* %b, align 4
-  store volatile i32 0, i32* %b, align 4
+  %b = inttoptr i64 %ao to ptr
+  store volatile i32 0, ptr %b, align 4
+  store volatile i32 0, ptr %b, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-stack.ll b/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
index c9acbc5f054cfc..a51a9027965302 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
@@ -13,9 +13,8 @@ target triple = "arm64-apple-macosx10"
 define void @foo() nounwind ssp {
 entry:
   %buffer = alloca [33554432 x i8], align 1
-  %arraydecay = getelementptr inbounds [33554432 x i8], [33554432 x i8]* %buffer, i64 0, i64 0
-  call void @doit(i8* %arraydecay) nounwind
+  call void @doit(ptr %buffer) nounwind
   ret void
 }
 
-declare void @doit(i8*)
+declare void @doit(ptr)
diff --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
index cf72e4b1fce9b4..caa5a7f9ead14a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -7,7 +7,7 @@
 %struct.Z = type { i8, i8, [2 x i8], i16 }
 %struct.A = type { i64, i8 }
 
-define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind optsize ssp {
+define void @foo(ptr nocapture %x, ptr nocapture %y) nounwind optsize ssp {
 ; LLC-LABEL: foo:
 ; LLC: // %bb.0:
 ; LLC-NEXT: ldr w8, [x0]
@@ -15,21 +15,19 @@ define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind opts
 ; LLC-NEXT: strb w8, [x1, #4]
 ; LLC-NEXT: ret
 ; OPT-LABEL: @foo(
-; OPT-NEXT: [[TMP:%.*]] = bitcast %struct.X* [[X:%.*]] to i32*
-; OPT-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 4
-; OPT-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_Y:%.*]], %struct.Y* [[Y:%.*]], i64 0, i32 1
+; OPT-NEXT: [[TMP1:%.*]] = load i32, ptr [[X:%.*]], align 4
+; OPT-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_Y:%.*]], ptr [[Y:%.*]], i64 0, i32 1
 ; OPT-NEXT: [[BF_CLEAR:%.*]] = lshr i32 [[TMP1]], 3
 ; OPT-NEXT: [[BF_CLEAR_LOBIT:%.*]] = and i32 [[BF_CLEAR]], 1
 ; OPT-NEXT: [[FROMBOOL:%.*]] = trunc i32 [[BF_CLEAR_LOBIT]] to i8
-; OPT-NEXT: store i8 [[FROMBOOL]], i8* [[B]], align 1
+; OPT-NEXT: store i8 [[FROMBOOL]], ptr [[B]], align 1
 ; OPT-NEXT: ret void
-  %tmp = bitcast %struct.X* %x to i32*
-  %tmp1 = load i32, i32* %tmp, align 4
-  %b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1
+  %tmp1 = load i32, ptr %x, align 4
+  %b = getelementptr inbounds %struct.Y, ptr %y, i64 0, i32 1
   %bf.clear = lshr i32 %tmp1, 3
   %bf.clear.lobit = and i32 %bf.clear, 1
   %frombool = trunc i32 %bf.clear.lobit to i8
-  store i8 %frombool, i8* %b, align 1
+  store i8 %frombool, ptr %b, align 1
   ret void
 }
 
@@ -65,7 +63,7 @@ define i32 @bar(i64 %cav1.coerce) nounwind {
   ret i32 %tmp1
 }
 
-define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind optsize ssp {
+define void @fct1(ptr nocapture %x, ptr nocapture %y) nounwind optsize ssp {
 ; LLC-LABEL: fct1:
 ; LLC: // %bb.0:
 ; LLC-NEXT: ldr x8, [x0]
@@ -73,19 +71,15 @@ define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind opt
 ; LLC-NEXT: str x8, [x1]
 ; LLC-NEXT: ret
 ; OPT-LABEL: @fct1(
-; OPT-NEXT: [[TMP:%.*]] = bitcast %struct.Z* [[X:%.*]] to i64*
-; OPT-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP]], align 4
-; OPT-NEXT: [[B1:%.*]] = bitcast %struct.A* [[Y:%.*]] to i64*
+; OPT-NEXT: [[TMP1:%.*]] = load i64, ptr [[X:%.*]], align 4
 ; OPT-NEXT: [[BF_CLEAR:%.*]] = lshr i64 [[TMP1]], 3
 ; OPT-NEXT: [[BF_CLEAR_LOBIT:%.*]] = and i64 [[BF_CLEAR]], 1
-; OPT-NEXT: store i64 [[BF_CLEAR_LOBIT]], i64* [[B1]], align 8
+; OPT-NEXT: store i64 [[BF_CLEAR_LOBIT]], ptr [[Y:%.*]], align 8
 ; OPT-NEXT: ret void
-  %tmp = bitcast %struct.Z* %x to i64*
-  %tmp1 = load i64, i64* %tmp, align 4
-  %b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0
+  %tmp1 = load i64, ptr %x, align 4
   %bf.clear = lshr i64 %tmp1, 3
   %bf.clear.lobit = and i64 %bf.clear, 1
-  store i64 %bf.clear.lobit, i64* %b, align 8
+  store i64 %bf.clear.lobit, ptr %y, align 8
   ret void
 }
 
@@ -117,7 +111,7 @@ define i64 @fct3(i64 %cav1.coerce) nounwind {
   ret i64 %tmp1
 }
 
-define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct4(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct4:
 ; LLC: // %bb.0: // %entry
 ; LLC-NEXT: ldr x8, [x0]
@@ -126,24 +120,24 @@ define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT: ret
 ; OPT-LABEL: @fct4(
 ; OPT-NEXT: entry:
-; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -16777216
 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 16777215
 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]]
-; OPT-NEXT: store i64 [[OR]], i64* [[Y]], align 8
+; OPT-NEXT: store i64 [[OR]], ptr [[Y]], align 8
 ; OPT-NEXT: ret void
 entry:
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -16777216
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 16777215
   %or = or i64 %and, %and1
-  store i64 %or, i64* %y, align 8
+  store i64 %or, ptr %y, align 8
   ret void
 }
 
-define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct5(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct5:
 ; LLC: // %bb.0: // %entry
 ; LLC-NEXT: ldr w8, [x0]
@@ -152,25 +146,25 @@ define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT: ret
 ; OPT-LABEL: @fct5(
 ; OPT-NEXT: entry:
-; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]]
-; OPT-NEXT: store i32 [[OR]], i32* [[Y]], align 8
+; OPT-NEXT: store i32 [[OR]], ptr [[Y]], align 8
 ; OPT-NEXT: ret void
 entry:
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
%shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 - store i32 %or, i32* %y, align 8 + store i32 %or, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some low bits -define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct6(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct6: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -180,29 +174,29 @@ define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct6( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[SHR1:%.*]] = lshr i32 [[OR]], 2 -; OPT-NEXT: store i32 [[SHR1]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHR1]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %shr1 = lshr i32 %or, 2 - store i32 %shr1, i32* %y, align 8 + store i32 %shr1, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits -define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct7(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct7: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -212,30 +206,30 @@ define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct7( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i32 [[OR]], 2 -; OPT-NEXT: store i32 [[SHL]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHL]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsl is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %shl = shl i32 %or, 2 - store i32 %shl, i32* %y, align 8 + store i32 %shl, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some low bits ; (i64 version) -define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct8(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct8: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -245,30 +239,30 @@ define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct8( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[SHR1:%.*]] = lshr i64 [[OR]], 2 -; OPT-NEXT: store i64 [[SHR1]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 
[[SHR1]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %shr1 = lshr i64 %or, 2 - store i64 %shr1, i64* %y, align 8 + store i64 %shr1, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits ; (i64 version) -define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct9(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct9: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -278,29 +272,29 @@ define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct9( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i64 [[OR]], 2 -; OPT-NEXT: store i64 [[SHL]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[SHL]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %shl = shl i64 %or, 2 - store i64 %shl, i64* %y, align 8 + store i64 %shl, ptr %y, align 8 ret void } ; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr) ; (i32 version) -define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct10(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct10: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -310,27 +304,27 @@ define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct10( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[X:%.*]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i32 [[OR]], 2 -; OPT-NEXT: store i32 [[SHL]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHL]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsl is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -8 %and1 = and i32 %x, 7 %or = or i32 %and, %and1 %shl = shl i32 %or, 2 - store i32 %shl, i32* %y, align 8 + store i32 %shl, ptr %y, align 8 ret void } ; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr) ; (i64 version) -define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct11(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct11: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -340,21 +334,21 @@ define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct11( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -8 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[X:%.*]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: 
[[SHL:%.*]] = shl i64 [[OR]], 2 -; OPT-NEXT: store i64 [[SHL]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[SHL]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsl is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -8 %and1 = and i64 %x, 7 %or = or i64 %and, %and1 %shl = shl i64 %or, 2 - store i64 %shl, i64* %y, align 8 + store i64 %shl, ptr %y, align 8 ret void } @@ -374,7 +368,7 @@ define zeroext i1 @fct12bis(i32 %tmp2) unnamed_addr nounwind ssp align 2 { ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits -define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct12(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct12: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -384,28 +378,28 @@ define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct12( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i32 [[OR]], 2 ; OPT-NEXT: [[SHR2:%.*]] = lshr i32 [[SHL]], 4 -; OPT-NEXT: store i32 [[SHR2]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHR2]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %shl = shl i32 %or, 2 %shr2 = lshr i32 %shl, 4 - store i32 %shr2, i32* %y, align 8 + store i32 %shr2, ptr %y, align 8 ret void } -define void @fct12_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct12_mask(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct12_mask: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -416,32 +410,32 @@ define void @fct12_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint s ; LLC-NEXT: ret ; OPT-LABEL: @fct12_mask( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], 2 ; OPT-NEXT: [[MASK:%.*]] = and i32 [[LSHR]], 268435455 -; OPT-NEXT: store i32 [[MASK]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[MASK]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %lshr = lshr i32 %or, 2 %mask = and i32 %lshr, 268435455 - store i32 %mask, i32* %y, align 8 + store i32 %mask, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits ; (i64 version) -define void @fct13(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct13(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct13: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -451,28 +445,28 @@ define void @fct13(i64* nocapture %y, 
i64 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct13( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i64 [[OR]], 2 ; OPT-NEXT: [[SHR2:%.*]] = lshr i64 [[SHL]], 4 -; OPT-NEXT: store i64 [[SHR2]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[SHR2]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %shl = shl i64 %or, 2 %shr2 = lshr i64 %shl, 4 - store i64 %shr2, i64* %y, align 8 + store i64 %shr2, ptr %y, align 8 ret void } -define void @fct13_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct13_mask(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct13_mask: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -483,32 +477,32 @@ define void @fct13_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s ; LLC-NEXT: ret ; OPT-LABEL: @fct13_mask( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -8 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[LSHR:%.*]] = lshr i64 [[OR]], 2 ; OPT-NEXT: [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975 -; OPT-NEXT: store i64 [[MASK]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[MASK]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %lshr = lshr i64 %or, 2 %mask = and i64 %lshr, 1152921504606846975 - store i64 %mask, i64* %y, align 8 + store i64 %mask, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits -define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehint ssp { +define void @fct14(ptr nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct14: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -520,7 +514,7 @@ define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehi ; LLC-NEXT: ret ; OPT-LABEL: @fct14( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], -256 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 255 @@ -531,12 +525,12 @@ define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehi ; OPT-NEXT: [[AND3:%.*]] = and i32 [[SHR1]], 7 ; OPT-NEXT: [[OR1:%.*]] = or i32 [[AND2]], [[AND3]] ; OPT-NEXT: [[SHL1:%.*]] = shl i32 [[OR1]], 2 -; OPT-NEXT: store i32 [[SHL1]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHL1]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm ; lsl is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, -256 
%shr = lshr i32 %x, 16 %and1 = and i32 %shr, 255 @@ -547,14 +541,14 @@ entry: %and3 = and i32 %shr1, 7 %or1 = or i32 %and2, %and3 %shl1 = shl i32 %or1, 2 - store i32 %shl1, i32* %y, align 8 + store i32 %shl1, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits ; (i64 version) -define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehint ssp { +define void @fct15(ptr nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct15: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -566,7 +560,7 @@ define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehi ; LLC-NEXT: ret ; OPT-LABEL: @fct15( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], -256 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 255 @@ -577,12 +571,12 @@ define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehi ; OPT-NEXT: [[AND3:%.*]] = and i64 [[SHR1]], 7 ; OPT-NEXT: [[OR1:%.*]] = or i64 [[AND2]], [[AND3]] ; OPT-NEXT: [[SHL1:%.*]] = shl i64 [[OR1]], 2 -; OPT-NEXT: store i64 [[SHL1]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[SHL1]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; lsr is an alias of ubfm ; lsl is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, -256 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 255 @@ -593,13 +587,13 @@ entry: %and3 = and i64 %shr1, 7 %or1 = or i64 %and2, %and3 %shl1 = shl i64 %or1, 2 - store i64 %shl1, i64* %y, align 8 + store i64 %shl1, ptr %y, align 8 ret void } ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits and a masking operation has to be kept -define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct16(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct16: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -612,30 +606,30 @@ define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct16( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], 1737056 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i32 [[OR]], 2 ; OPT-NEXT: [[SHR2:%.*]] = lshr i32 [[SHL]], 4 -; OPT-NEXT: store i32 [[SHR2]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[SHR2]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; Create the constant ; Do the masking ; lsr is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, 1737056 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %shl = shl i32 %or, 2 %shr2 = lshr i32 %shl, 4 - store i32 %shr2, i32* %y, align 8 + store i32 %shr2, ptr %y, align 8 ret void } -define void @fct16_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp { +define void @fct16_mask(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct16_mask: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr w8, [x0] @@ -648,27 +642,27 @@ define void @fct16_mask(i32* nocapture %y, i32 %x) nounwind optsize 
inlinehint s ; LLC-NEXT: ret ; OPT-LABEL: @fct16_mask( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i32 [[TMP0]], 1737056 ; OPT-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i32 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i32 [[AND]], [[AND1]] ; OPT-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], 2 ; OPT-NEXT: [[MASK:%.*]] = and i32 [[LSHR]], 268435455 -; OPT-NEXT: store i32 [[MASK]], i32* [[Y]], align 8 +; OPT-NEXT: store i32 [[MASK]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; Create the constant ; Do the masking ; lsr is an alias of ubfm - %0 = load i32, i32* %y, align 8 + %0 = load i32, ptr %y, align 8 %and = and i32 %0, 1737056 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 %or = or i32 %and, %and1 %lshr = lshr i32 %or, 2 %mask = and i32 %lshr, 268435455 - store i32 %mask, i32* %y, align 8 + store i32 %mask, ptr %y, align 8 ret void } @@ -676,7 +670,7 @@ entry: ; Check if we can still catch bfm instruction when we drop some high bits ; and some low bits and a masking operation has to be kept ; (i64 version) -define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct17(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct17: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -689,30 +683,30 @@ define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-NEXT: ret ; OPT-LABEL: @fct17( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], 1737056 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[SHL:%.*]] = shl i64 [[OR]], 2 ; OPT-NEXT: [[SHR2:%.*]] = lshr i64 [[SHL]], 4 -; OPT-NEXT: store i64 [[SHR2]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[SHR2]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; Create the constant ; Do the masking ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = load i64, ptr %y, align 8 %and = and i64 %0, 1737056 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %shl = shl i64 %or, 2 %shr2 = lshr i64 %shl, 4 - store i64 %shr2, i64* %y, align 8 + store i64 %shr2, ptr %y, align 8 ret void } -define void @fct17_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp { +define void @fct17_mask(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp { ; LLC-LABEL: fct17_mask: ; LLC: // %bb.0: // %entry ; LLC-NEXT: ldr x8, [x0] @@ -725,27 +719,27 @@ define void @fct17_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s ; LLC-NEXT: ret ; OPT-LABEL: @fct17_mask( ; OPT-NEXT: entry: -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8 +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8 ; OPT-NEXT: [[AND:%.*]] = and i64 [[TMP0]], 1737056 ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 16 ; OPT-NEXT: [[AND1:%.*]] = and i64 [[SHR]], 7 ; OPT-NEXT: [[OR:%.*]] = or i64 [[AND]], [[AND1]] ; OPT-NEXT: [[LSHR:%.*]] = lshr i64 [[OR]], 2 ; OPT-NEXT: [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975 -; OPT-NEXT: store i64 [[MASK]], i64* [[Y]], align 8 +; OPT-NEXT: store i64 [[MASK]], ptr [[Y]], align 8 ; OPT-NEXT: ret void entry: ; Create the constant ; Do the masking ; lsr is an alias of ubfm - %0 = load i64, i64* %y, align 8 + %0 = 
load i64, ptr %y, align 8 %and = and i64 %0, 1737056 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 %or = or i64 %and, %and1 %lshr = lshr i64 %or, 2 %mask = and i64 %lshr, 1152921504606846975 - store i64 %mask, i64* %y, align 8 + store i64 %mask, ptr %y, align 8 ret void } @@ -809,8 +803,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp { ; OPT-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[X_SROA_5_0_EXTRACT_SHIFT]], 0 ; OPT-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] ; OPT: if.then: -; OPT-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[X_SROA_5_0_EXTRACT_SHIFT]] -; OPT-NEXT: [[TMP0:%.*]] = load i8, i8* [[ARRAYIDX3]], align 1 +; OPT-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[X_SROA_5_0_EXTRACT_SHIFT]] +; OPT-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX3]], align 1 ; OPT-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32 ; OPT-NEXT: br label [[RETURN:%.*]] ; OPT: if.end: @@ -821,8 +815,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp { ; OPT: if.then7: ; OPT-NEXT: [[TMP2:%.*]] = lshr i64 [[ARG1]], 32 ; OPT-NEXT: [[IDXPROM10:%.*]] = and i64 [[TMP2]], 65535 -; OPT-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[IDXPROM10]] -; OPT-NEXT: [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX11]], align 1 +; OPT-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[IDXPROM10]] +; OPT-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX11]], align 1 ; OPT-NEXT: [[CONV12:%.*]] = zext i8 [[TMP3]] to i32 ; OPT-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV12]], 16 ; OPT-NEXT: br label [[RETURN]] @@ -834,8 +828,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp { ; OPT: if.then17: ; OPT-NEXT: [[TMP6:%.*]] = lshr i64 [[ARG1]], 16 ; OPT-NEXT: [[IDXPROM20:%.*]] = and i64 [[TMP6]], 65535 -; OPT-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[IDXPROM20]] -; OPT-NEXT: [[TMP7:%.*]] = load i8, i8* [[ARRAYIDX21]], align 1 +; OPT-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[IDXPROM20]] +; OPT-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX21]], align 1 ; OPT-NEXT: [[CONV22:%.*]] = zext i8 [[TMP7]] to i32 ; OPT-NEXT: [[ADD23:%.*]] = add nsw i32 [[CONV22]], 32 ; OPT-NEXT: br label [[RETURN]] @@ -851,8 +845,8 @@ entry: br i1 %tobool, label %if.end, label %if.then if.then: ; preds = %entry - %arrayidx3 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift - %0 = load i8, i8* %arrayidx3, align 1 + %arrayidx3 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift + %0 = load i8, ptr %arrayidx3, align 1 %conv = zext i8 %0 to i32 br label %return @@ -865,8 +859,8 @@ if.then7: ; preds = %if.end ; "and" should be combined to "ubfm" while "ubfm" should be removed by cse. ; So neither of them should be in the assemble code. 
%idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535 - %arrayidx11 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom10 - %1 = load i8, i8* %arrayidx11, align 1 + %arrayidx11 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %idxprom10 + %1 = load i8, ptr %arrayidx11, align 1 %conv12 = zext i8 %1 to i32 %add = add nsw i32 %conv12, 16 br label %return @@ -879,8 +873,8 @@ if.then17: ; preds = %if.end13 ; "and" should be combined to "ubfm" while "ubfm" should be removed by cse. ; So neither of them should be in the assemble code. %idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535 - %arrayidx21 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom20 - %2 = load i8, i8* %arrayidx21, align 1 + %arrayidx21 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %idxprom20 + %2 = load i8, ptr %arrayidx21, align 1 %conv22 = zext i8 %2 to i32 %add23 = add nsw i32 %conv22, 32 br label %return @@ -950,14 +944,14 @@ define i64 @fct21(i64 %x) { ; OPT-NEXT: entry: ; OPT-NEXT: [[SHR:%.*]] = lshr i64 [[X:%.*]], 4 ; OPT-NEXT: [[AND:%.*]] = and i64 [[SHR]], 15 -; OPT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 [[AND]] -; OPT-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; OPT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x [64 x i64]], ptr @arr, i64 0, i64 0, i64 [[AND]] +; OPT-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; OPT-NEXT: ret i64 [[TMP0]] entry: %shr = lshr i64 %x, 4 %and = and i64 %shr, 15 - %arrayidx = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and - %0 = load i64, i64* %arrayidx, align 8 + %arrayidx = getelementptr inbounds [8 x [64 x i64]], ptr @arr, i64 0, i64 0, i64 %and + %0 = load i64, ptr %arrayidx, align 8 ret i64 %0 } @@ -992,7 +986,7 @@ define i16 @test_ignored_rightbits(i32 %dst, i32 %in) { ; The following test excercises the case where we have a BFI ; instruction with the same input in both operands. We need to ; track the useful bits through both operands. 
-define void @sameOperandBFI(i64 %src, i64 %src2, i16 *%ptr) { +define void @sameOperandBFI(i64 %src, i64 %src2, ptr %ptr) { ; LLC-LABEL: sameOperandBFI: ; LLC: // %bb.0: // %entry ; LLC-NEXT: cbnz wzr, .LBB30_2 @@ -1018,7 +1012,7 @@ define void @sameOperandBFI(i64 %src, i64 %src2, i16 *%ptr) { ; OPT-NEXT: [[BFIRHS:%.*]] = shl nuw nsw i32 [[BFISOURCE]], 4 ; OPT-NEXT: [[BFI:%.*]] = or i32 [[BFIRHS]], [[BFISOURCE]] ; OPT-NEXT: [[BFITRUNC:%.*]] = trunc i32 [[BFI]] to i16 -; OPT-NEXT: store i16 [[BFITRUNC]], i16* [[PTR:%.*]], align 4 +; OPT-NEXT: store i16 [[BFITRUNC]], ptr [[PTR:%.*]], align 4 ; OPT-NEXT: br label [[END]] ; OPT: end: ; OPT-NEXT: ret void @@ -1036,7 +1030,7 @@ if.else: %BFIRHS = shl nuw nsw i32 %BFISource, 4 ; ...0ABCD0000 %BFI = or i32 %BFIRHS, %BFISource ; ...0ABCDABCD %BFItrunc = trunc i32 %BFI to i16 - store i16 %BFItrunc, i16* %ptr, align 4 + store i16 %BFItrunc, ptr %ptr, align 4 br label %end end: diff --git a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll index 68b8fcbefc5ab1..69b1872be87db2 100644 --- a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll +++ b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll @@ -24,10 +24,10 @@ entry: ; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g3:[[DEST_LBL]] %recover = alloca i64, align 8 - store volatile i64 ptrtoint (i8* blockaddress(@t, %mylabel) to i64), i64* %recover, align 8 + store volatile i64 ptrtoint (ptr blockaddress(@t, %mylabel) to i64), ptr %recover, align 8 br label %mylabel mylabel: - %tmp = load volatile i64, i64* %recover, align 8 + %tmp = load volatile i64, ptr %recover, align 8 ret i64 %tmp } diff --git a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll index 0dc369c90761f8..9b4660c94790c7 100644 --- a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll +++ b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll @@ -53,15 +53,14 @@ define <8 x i16> @concat_2_build_vector(<4 x i16> %in0) { ; an equivalent integer vector and BITCAST-ing that. This case checks that ; normalizing the vector generates a valid result. The choice of the ; constant prevents earlier passes from replacing the BUILD_VECTOR. 
-define void @widen_f16_build_vector(half* %addr) { +define void @widen_f16_build_vector(ptr %addr) { ; CHECK-LABEL: widen_f16_build_vector: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #13294 ; CHECK-NEXT: movk w8, #13294, lsl #16 ; CHECK-NEXT: str w8, [x0] ; CHECK-NEXT: ret - %1 = bitcast half* %addr to <2 x half>* - store <2 x half> <half 0xH33EE, half 0xH33EE>, <2 x half>* %1, align 2 + store <2 x half> <half 0xH33EE, half 0xH33EE>, ptr %addr, align 2 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll index 7d5684778d0600..63f4cb0c1fdafa 100644 --- a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll +++ b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll @@ -5,9 +5,9 @@ ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+tpidr-el3 | FileCheck --check-prefix=USEEL3 %s ; Function Attrs: nounwind readnone -declare i8* @llvm.thread.pointer() #1 +declare ptr @llvm.thread.pointer() #1 -define i8* @thread_pointer() { +define ptr @thread_pointer() { ; CHECK: thread_pointer: ; CHECK: mrs {{x[0-9]+}}, TPIDR_EL0 ; USEEL1: thread_pointer: @@ -16,6 +16,6 @@ define i8* @thread_pointer() { ; USEEL2: mrs {{x[0-9]+}}, TPIDR_EL2 ; USEEL3: thread_pointer: ; USEEL3: mrs {{x[0-9]+}}, TPIDR_EL3 - %1 = tail call i8* @llvm.thread.pointer() - ret i8* %1 + %1 = tail call ptr @llvm.thread.pointer() + ret ptr %1 } diff --git a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll index 9b6d1f3a1867e6..7745f8dab1c2bf 100644 --- a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll +++ b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s ; RUN: llc -global-isel < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s -@t = weak global i32 ()* null +@t = weak global ptr null @x = external global i32, align 4 define void @t2() { @@ -10,7 +10,7 @@ define void @t2() { ; CHECK: ldr x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF] ; CHECK: ldr x[[DEST:[0-9]+]], [x[[ADDR]]] ; CHECK: br x[[DEST]] - %tmp = load i32 ()*, i32 ()** @t + %tmp = load ptr, ptr @t %tmp.upgrd.2 = tail call i32 %tmp() ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll index 2f5d16b257952d..06c496dbcafe11 100644 --- a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll @@ -3,7 +3,7 @@ ; Zero truncation is not necessary when the values are extended properly ; already.
-@block = common global i8* null, align 8 +@block = common global ptr null, align 8 define zeroext i8 @foo(i32 %i1, i32 %i2) { ; CHECK-LABEL: foo: @@ -11,12 +11,12 @@ define zeroext i8 @foo(i32 %i1, i32 %i2) { ; CHECK-NOT: and entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8*, i8** @block, align 8 - %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8, i8* %arrayidx, align 1 + %0 = load ptr, ptr @block, align 8 + %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom + %1 = load i8, ptr %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 - %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8, i8* %arrayidx2, align 1 + %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1 + %2 = load i8, ptr %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %return, label %if.then diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll index fa2343152f72b2..358a2c34048153 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll @@ -3,10 +3,10 @@ target triple = "arm64-apple-ios7.0.0" @channelColumns = external global i64 @channelTracks = external global i64 -@mazeRoute = external hidden unnamed_addr global i8*, align 8 -@TOP = external global i64* -@BOT = external global i64* -@netsAssign = external global i64* +@mazeRoute = external hidden unnamed_addr global ptr, align 8 +@TOP = external global ptr +@BOT = external global ptr +@netsAssign = external global ptr ; Function from yacr2/maze.c ; The branch at the end of %if.then is driven by %cmp5 and %cmp6. @@ -21,7 +21,7 @@ target triple = "arm64-apple-ios7.0.0" ; CHECK-NEXT: b.lo define i32 @Maze1() nounwind ssp { entry: - %0 = load i64, i64* @channelColumns, align 8, !tbaa !0 + %0 = load i64, ptr @channelColumns, align 8, !tbaa !0 %cmp90 = icmp eq i64 %0, 0 br i1 %cmp90, label %for.end, label %for.body @@ -29,51 +29,51 @@ for.body: ; preds = %for.inc, %entry %1 = phi i64 [ %0, %entry ], [ %37, %for.inc ] %i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ] %numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ] - %2 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 - %arrayidx = getelementptr inbounds i8, i8* %2, i64 %i.092 - %3 = load i8, i8* %arrayidx, align 1, !tbaa !1 + %2 = load ptr, ptr @mazeRoute, align 8, !tbaa !3 + %arrayidx = getelementptr inbounds i8, ptr %2, i64 %i.092 + %3 = load i8, ptr %arrayidx, align 1, !tbaa !1 %tobool = icmp eq i8 %3, 0 br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %4 = load i64*, i64** @TOP, align 8, !tbaa !3 - %arrayidx1 = getelementptr inbounds i64, i64* %4, i64 %i.092 - %5 = load i64, i64* %arrayidx1, align 8, !tbaa !0 - %6 = load i64*, i64** @netsAssign, align 8, !tbaa !3 - %arrayidx2 = getelementptr inbounds i64, i64* %6, i64 %5 - %7 = load i64, i64* %arrayidx2, align 8, !tbaa !0 - %8 = load i64*, i64** @BOT, align 8, !tbaa !3 - %arrayidx3 = getelementptr inbounds i64, i64* %8, i64 %i.092 - %9 = load i64, i64* %arrayidx3, align 8, !tbaa !0 - %arrayidx4 = getelementptr inbounds i64, i64* %6, i64 %9 - %10 = load i64, i64* %arrayidx4, align 8, !tbaa !0 + %4 = load ptr, ptr @TOP, align 8, !tbaa !3 + %arrayidx1 = getelementptr inbounds i64, ptr %4, i64 %i.092 + %5 = load i64, ptr %arrayidx1, align 8, !tbaa !0 + %6 = load ptr, ptr @netsAssign, align 8, !tbaa !3 + %arrayidx2 = getelementptr inbounds i64, ptr %6, i64 %5 + %7 = load i64, ptr %arrayidx2, align 8, !tbaa !0 + %8 = load ptr, ptr 
@BOT, align 8, !tbaa !3 + %arrayidx3 = getelementptr inbounds i64, ptr %8, i64 %i.092 + %9 = load i64, ptr %arrayidx3, align 8, !tbaa !0 + %arrayidx4 = getelementptr inbounds i64, ptr %6, i64 %9 + %10 = load i64, ptr %arrayidx4, align 8, !tbaa !0 %cmp5 = icmp ugt i64 %i.092, 1 %cmp6 = icmp ugt i64 %10, 1 %or.cond = and i1 %cmp5, %cmp6 br i1 %or.cond, label %land.lhs.true7, label %if.else land.lhs.true7: ; preds = %if.then - %11 = load i64, i64* @channelTracks, align 8, !tbaa !0 + %11 = load i64, ptr @channelTracks, align 8, !tbaa !0 %add = add i64 %11, 1 %call = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add, i64 %10, i64 0, i64 %7, i32 -1, i32 -1) %tobool8 = icmp eq i32 %call, 0 br i1 %tobool8, label %land.lhs.true7.if.else_crit_edge, label %if.then9 land.lhs.true7.if.else_crit_edge: ; preds = %land.lhs.true7 - %.pre = load i64, i64* @channelColumns, align 8, !tbaa !0 + %.pre = load i64, ptr @channelColumns, align 8, !tbaa !0 br label %if.else if.then9: ; preds = %land.lhs.true7 - %12 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 - %arrayidx10 = getelementptr inbounds i8, i8* %12, i64 %i.092 - store i8 0, i8* %arrayidx10, align 1, !tbaa !1 - %13 = load i64*, i64** @TOP, align 8, !tbaa !3 - %arrayidx11 = getelementptr inbounds i64, i64* %13, i64 %i.092 - %14 = load i64, i64* %arrayidx11, align 8, !tbaa !0 + %12 = load ptr, ptr @mazeRoute, align 8, !tbaa !3 + %arrayidx10 = getelementptr inbounds i8, ptr %12, i64 %i.092 + store i8 0, ptr %arrayidx10, align 1, !tbaa !1 + %13 = load ptr, ptr @TOP, align 8, !tbaa !3 + %arrayidx11 = getelementptr inbounds i64, ptr %13, i64 %i.092 + %14 = load i64, ptr %arrayidx11, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %14) - %15 = load i64*, i64** @BOT, align 8, !tbaa !3 - %arrayidx12 = getelementptr inbounds i64, i64* %15, i64 %i.092 - %16 = load i64, i64* %arrayidx12, align 8, !tbaa !0 + %15 = load ptr, ptr @BOT, align 8, !tbaa !3 + %arrayidx12 = getelementptr inbounds i64, ptr %15, i64 %i.092 + %16 = load i64, ptr %arrayidx12, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %16) br label %for.inc @@ -84,23 +84,23 @@ if.else: ; preds = %land.lhs.true7.if.e br i1 %or.cond89, label %land.lhs.true16, label %if.else24 land.lhs.true16: ; preds = %if.else - %18 = load i64, i64* @channelTracks, align 8, !tbaa !0 + %18 = load i64, ptr @channelTracks, align 8, !tbaa !0 %add17 = add i64 %18, 1 %call18 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add17, i64 %10, i64 0, i64 %7, i32 1, i32 -1) %tobool19 = icmp eq i32 %call18, 0 br i1 %tobool19, label %if.else24, label %if.then20 if.then20: ; preds = %land.lhs.true16 - %19 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 - %arrayidx21 = getelementptr inbounds i8, i8* %19, i64 %i.092 - store i8 0, i8* %arrayidx21, align 1, !tbaa !1 - %20 = load i64*, i64** @TOP, align 8, !tbaa !3 - %arrayidx22 = getelementptr inbounds i64, i64* %20, i64 %i.092 - %21 = load i64, i64* %arrayidx22, align 8, !tbaa !0 + %19 = load ptr, ptr @mazeRoute, align 8, !tbaa !3 + %arrayidx21 = getelementptr inbounds i8, ptr %19, i64 %i.092 + store i8 0, ptr %arrayidx21, align 1, !tbaa !1 + %20 = load ptr, ptr @TOP, align 8, !tbaa !3 + %arrayidx22 = getelementptr inbounds i64, ptr %20, i64 %i.092 + %21 = load i64, ptr %arrayidx22, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %21) - %22 = load i64*, i64** @BOT, align 8, !tbaa !3 - %arrayidx23 = getelementptr inbounds i64, i64* %22, i64 %i.092 - %23 = load i64, i64* %arrayidx23, align 8, !tbaa !0 + %22 = load ptr, ptr @BOT, align 8, !tbaa !3 + %arrayidx23 = 
getelementptr inbounds i64, ptr %22, i64 %i.092 + %23 = load i64, ptr %arrayidx23, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %23) br label %for.inc @@ -108,7 +108,7 @@ if.else24: ; preds = %land.lhs.true16, %i br i1 %cmp5, label %land.lhs.true26, label %if.else36 land.lhs.true26: ; preds = %if.else24 - %24 = load i64, i64* @channelTracks, align 8, !tbaa !0 + %24 = load i64, ptr @channelTracks, align 8, !tbaa !0 %cmp27 = icmp ult i64 %7, %24 br i1 %cmp27, label %land.lhs.true28, label %if.else36 @@ -119,26 +119,26 @@ land.lhs.true28: ; preds = %land.lhs.true26 br i1 %tobool31, label %if.else36, label %if.then32 if.then32: ; preds = %land.lhs.true28 - %25 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 - %arrayidx33 = getelementptr inbounds i8, i8* %25, i64 %i.092 - store i8 0, i8* %arrayidx33, align 1, !tbaa !1 - %26 = load i64*, i64** @TOP, align 8, !tbaa !3 - %arrayidx34 = getelementptr inbounds i64, i64* %26, i64 %i.092 - %27 = load i64, i64* %arrayidx34, align 8, !tbaa !0 + %25 = load ptr, ptr @mazeRoute, align 8, !tbaa !3 + %arrayidx33 = getelementptr inbounds i8, ptr %25, i64 %i.092 + store i8 0, ptr %arrayidx33, align 1, !tbaa !1 + %26 = load ptr, ptr @TOP, align 8, !tbaa !3 + %arrayidx34 = getelementptr inbounds i64, ptr %26, i64 %i.092 + %27 = load i64, ptr %arrayidx34, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %27) - %28 = load i64*, i64** @BOT, align 8, !tbaa !3 - %arrayidx35 = getelementptr inbounds i64, i64* %28, i64 %i.092 - %29 = load i64, i64* %arrayidx35, align 8, !tbaa !0 + %28 = load ptr, ptr @BOT, align 8, !tbaa !3 + %arrayidx35 = getelementptr inbounds i64, ptr %28, i64 %i.092 + %29 = load i64, ptr %arrayidx35, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %29) br label %for.inc if.else36: ; preds = %land.lhs.true28, %land.lhs.true26, %if.else24 - %30 = load i64, i64* @channelColumns, align 8, !tbaa !0 + %30 = load i64, ptr @channelColumns, align 8, !tbaa !0 %cmp37 = icmp ult i64 %i.092, %30 br i1 %cmp37, label %land.lhs.true38, label %if.else48 land.lhs.true38: ; preds = %if.else36 - %31 = load i64, i64* @channelTracks, align 8, !tbaa !0 + %31 = load i64, ptr @channelTracks, align 8, !tbaa !0 %cmp39 = icmp ult i64 %7, %31 br i1 %cmp39, label %land.lhs.true40, label %if.else48 @@ -149,16 +149,16 @@ land.lhs.true40: ; preds = %land.lhs.true38 br i1 %tobool43, label %if.else48, label %if.then44 if.then44: ; preds = %land.lhs.true40 - %32 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 - %arrayidx45 = getelementptr inbounds i8, i8* %32, i64 %i.092 - store i8 0, i8* %arrayidx45, align 1, !tbaa !1 - %33 = load i64*, i64** @TOP, align 8, !tbaa !3 - %arrayidx46 = getelementptr inbounds i64, i64* %33, i64 %i.092 - %34 = load i64, i64* %arrayidx46, align 8, !tbaa !0 + %32 = load ptr, ptr @mazeRoute, align 8, !tbaa !3 + %arrayidx45 = getelementptr inbounds i8, ptr %32, i64 %i.092 + store i8 0, ptr %arrayidx45, align 1, !tbaa !1 + %33 = load ptr, ptr @TOP, align 8, !tbaa !3 + %arrayidx46 = getelementptr inbounds i64, ptr %33, i64 %i.092 + %34 = load i64, ptr %arrayidx46, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %34) - %35 = load i64*, i64** @BOT, align 8, !tbaa !3 - %arrayidx47 = getelementptr inbounds i64, i64* %35, i64 %i.092 - %36 = load i64, i64* %arrayidx47, align 8, !tbaa !0 + %35 = load ptr, ptr @BOT, align 8, !tbaa !3 + %arrayidx47 = getelementptr inbounds i64, ptr %35, i64 %i.092 + %36 = load i64, ptr %arrayidx47, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %36) br label %for.inc @@ -169,7 +169,7 @@ if.else48: ; preds = 
%land.lhs.true40, %l for.inc: ; preds = %if.else48, %if.then44, %if.then32, %if.then20, %if.then9, %for.body %numLeft.1 = phi i32 [ %numLeft.091, %if.then9 ], [ %numLeft.091, %if.then20 ], [ %numLeft.091, %if.then32 ], [ %numLeft.091, %if.then44 ], [ %inc, %if.else48 ], [ %numLeft.091, %for.body ] %inc53 = add i64 %i.092, 1 - %37 = load i64, i64* @channelColumns, align 8, !tbaa !0 + %37 = load i64, ptr @channelColumns, align 8, !tbaa !0 %cmp = icmp ugt i64 %inc53, %37 br i1 %cmp, label %for.end, label %for.body diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll index e36aa946323e7c..789dd66b7103de 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll @@ -426,7 +426,7 @@ if.end: declare i32 @foo() %str1 = type { %str2 } -%str2 = type { [24 x i8], i8*, i32, %str1*, i32, [4 x i8], %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, i8*, i8, i8*, %str1*, i8* } +%str2 = type { [24 x i8], ptr, i32, ptr, i32, [4 x i8], ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8, ptr, ptr, ptr } ; Test case distilled from 126.gcc. ; The phi in sw.bb.i.i gets multiple operands for the %entry predecessor. @@ -449,11 +449,10 @@ if.end85: ret void sw.bb.i.i: - %ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ] - %operands.i.i = getelementptr inbounds %str1, %str1* %ref.tr.i.i, i64 0, i32 0, i32 2 - %arrayidx.i.i = bitcast i32* %operands.i.i to %str1** - %0 = load %str1*, %str1** %arrayidx.i.i, align 8 - %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16 + %ref.tr.i.i = phi ptr [ %0, %sw.bb.i.i ], [ undef, %entry ] + %operands.i.i = getelementptr inbounds %str1, ptr %ref.tr.i.i, i64 0, i32 0, i32 2 + %0 = load ptr, ptr %operands.i.i, align 8 + %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, ptr %0, i64 0, i32 0, i32 0, i64 16 br label %sw.bb.i.i } @@ -690,7 +689,7 @@ define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) { %or = or i1 %c0, %c1 %sel = select i1 %or, i64 0, i64 %r %ext = sext i1 %or to i32 - store volatile i32 %ext, i32* @g + store volatile i32 %ext, ptr @g ret i64 %sel } diff --git a/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll b/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll index d5064f6d16e630..4b9591ffeb2bab 100644 --- a/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll @@ -1,16 +1,16 @@ ; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s ; Check that the peephole optimizer knows about sext and zext instructions. ; CHECK: test1sext -define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind { +define i32 @test1sext(i64 %A, i64 %B, ptr %P, ptr %P2) nounwind { %C = add i64 %A, %B ; CHECK: add x[[SUM:[0-9]+]], x0, x1 %D = trunc i64 %C to i32 %E = shl i64 %C, 32 %F = ashr i64 %E, 32 ; CHECK: sxtw x[[EXT:[0-9]+]], w[[SUM]] - store volatile i64 %F, i64 *%P2 + store volatile i64 %F, ptr %P2 ; CHECK: str x[[EXT]] - store volatile i32 %D, i32* %P + store volatile i32 %D, ptr %P ; Reuse low bits of extended register, don't extend live range of SUM. 
; CHECK: str w[[SUM]] ret i32 %D diff --git a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll index 3d23dcd3cd2941..889a76b37ebe1c 100644 --- a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll +++ b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll @@ -6,18 +6,18 @@ ; so that SelectionDAG can select it with the load. ; ; OPTALL-LABEL: @foo -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 -; OPTALL: store i32 [[ZEXT]], i32* %q +; OPTALL: store i32 [[ZEXT]], ptr %q ; OPTALL: ret -define void @foo(i8* %p, i32* %q) { +define void @foo(ptr %p, ptr %q) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = zext i8 %t to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -26,23 +26,23 @@ false: ; Check that we manage to form a zextload is an operation with only one ; argument to explicitly extend is in the way. ; OPTALL-LABEL: @promoteOneArg -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT]], 2 ; Make sure the operation is not promoted when the promotion pass is disabled. ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], 2 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32 -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteOneArg(i8* %p, i32* %q) { +define void @promoteOneArg(ptr %p, ptr %q) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %add = add nuw i8 %t, 2 %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = zext i8 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -52,22 +52,22 @@ false: ; argument to explicitly extend is in the way. ; Version with sext. ; OPTALL-LABEL: @promoteOneArgSExt -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXT]], 2 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], 2 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32 -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteOneArgSExt(i8* %p, i32* %q) { +define void @promoteOneArgSExt(ptr %p, ptr %q) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %add = add nsw i8 %t, 2 %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = sext i8 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -84,7 +84,7 @@ false: ; transformation, the regular heuristic does not apply the optimization. 
; ; OPTALL-LABEL: @promoteTwoArgZext -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32 @@ -96,17 +96,17 @@ false: ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], %b ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32 ; -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteTwoArgZext(i8* %p, i32* %q, i8 %b) { +define void @promoteTwoArgZext(ptr %p, ptr %q, i8 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %add = add nuw i8 %t, %b %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = zext i8 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -116,7 +116,7 @@ false: ; arguments to explicitly extend is in the way. ; Version with sext. ; OPTALL-LABEL: @promoteTwoArgSExt -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[SEXTLD:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i8 %b to i32 @@ -127,17 +127,17 @@ false: ; ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], %b ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32 -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteTwoArgSExt(i8* %p, i32* %q, i8 %b) { +define void @promoteTwoArgSExt(ptr %p, ptr %q, i8 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %add = add nsw i8 %t, %b %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = sext i8 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -146,7 +146,7 @@ false: ; Check that we do not a zextload if we need to introduce more than ; one additional extension. ; OPTALL-LABEL: @promoteThreeArgZext -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32 @@ -162,18 +162,18 @@ false: ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32 ; -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteThreeArgZext(i8* %p, i32* %q, i8 %b, i8 %c) { +define void @promoteThreeArgZext(ptr %p, ptr %q, i8 %b, i8 %c) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %tmp = add nuw i8 %t, %b %add = add nuw i8 %tmp, %c %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = zext i8 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -182,7 +182,7 @@ false: ; Check that we manage to form a zextload after promoting and merging ; two extensions. 
; OPTALL-LABEL: @promoteMergeExtArgZExt -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i16 %b to i32 @@ -196,18 +196,18 @@ false: ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i16 [[ZEXTLD]], %b ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i16 [[ADD]] to i32 ; -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteMergeExtArgZExt(i8* %p, i32* %q, i16 %b) { +define void @promoteMergeExtArgZExt(ptr %p, ptr %q, i16 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %ext = zext i8 %t to i16 %add = add nuw i16 %ext, %b %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = zext i16 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -217,7 +217,7 @@ false: ; two extensions. ; Version with sext. ; OPTALL-LABEL: @promoteMergeExtArgSExt -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = sext i16 %b to i32 @@ -230,18 +230,18 @@ false: ; DISABLE: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i16 [[ZEXTLD]], %b ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32 -; OPTALL: store i32 [[RES]], i32* %q +; OPTALL: store i32 [[RES]], ptr %q ; OPTALL: ret -define void @promoteMergeExtArgSExt(i8* %p, i32* %q, i16 %b) { +define void @promoteMergeExtArgSExt(ptr %p, ptr %q, i16 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %ext = zext i8 %t to i16 %add = add nsw i16 %ext, %b %a = icmp slt i8 %t, 20 br i1 %a, label %true, label %false true: %s = sext i16 %add to i32 - store i32 %s, i32* %q + store i32 %s, ptr %q ret void false: ret void @@ -277,10 +277,10 @@ false: ; 3 identical zext of %ld. The extensions will be CSE'ed by SDag. ; ; OPTALL-LABEL: @severalPromotions -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1 +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %addr1 ; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 -; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2 +; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, ptr %addr2 ; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64 ; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_3]] @@ -298,10 +298,10 @@ false: ; ; OPTALL: call void @dummy(i64 [[RES]], i64 [[RESZA]], i64 [[RESB]]) ; OPTALL: ret -define void @severalPromotions(i8* %addr1, i32* %addr2, i8 %a, i32 %b) { - %ld = load i8, i8* %addr1 +define void @severalPromotions(ptr %addr1, ptr %addr2, i8 %a, i32 %b) { + %ld = load i8, ptr %addr1 %zextld = zext i8 %ld to i32 - %ld2 = load i32, i32* %addr2 + %ld2 = load i32, ptr %addr2 %add = add nsw i32 %ld2, %zextld %sextadd = sext i32 %add to i64 %zexta = zext i8 %a to i32 @@ -335,51 +335,51 @@ entry: ; to an instruction. ; This used to cause a crash. 
; OPTALL-LABEL: @promotionOfArgEndsUpInValue -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, i16* %addr +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, ptr %addr ; ; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32 -; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i32) +; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i32) ; -; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i16) +; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16) ; DISABLE-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32 ; ; OPTALL-NEXT: ret i32 [[RES]] -define i32 @promotionOfArgEndsUpInValue(i16* %addr) { +define i32 @promotionOfArgEndsUpInValue(ptr %addr) { entry: - %val = load i16, i16* %addr - %add = add nuw nsw i16 %val, zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i16) + %val = load i16, ptr %addr + %add = add nuw nsw i16 %val, zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16) %conv3 = sext i16 %add to i32 ret i32 %conv3 } ; Check that we see that one zext can be derived from the other for free. ; OPTALL-LABEL: @promoteTwoArgZextWithSourceExtendedTwice -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; OPT-NEXT: [[ZEXT32:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; OPT-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b ; OPT-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ZEXT64]], 12 -; OPT-NEXT: store i32 [[RES32]], i32* %addr -; OPT-NEXT: store i64 [[RES64]], i64* %q +; OPT-NEXT: store i32 [[RES32]], ptr %addr +; OPT-NEXT: store i64 [[RES64]], ptr %q ; ; DISABLE-NEXT: [[ZEXT32:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[RES2_32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], 12 -; DISABLE-NEXT: store i32 [[RES32]], i32* %addr +; DISABLE-NEXT: store i32 [[RES32]], ptr %addr ; DISABLE-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i32 [[RES2_32]] to i64 -; DISABLE-NEXT: store i64 [[ZEXT64]], i64* %q +; DISABLE-NEXT: store i64 [[ZEXT64]], ptr %q ; ; OPTALL-NEXT: ret void -define void @promoteTwoArgZextWithSourceExtendedTwice(i8* %p, i64* %q, i32 %b, i32* %addr) { +define void @promoteTwoArgZextWithSourceExtendedTwice(ptr %p, ptr %q, i32 %b, ptr %addr) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nuw i32 %zextt, %b %add2 = add nuw i32 %zextt, 12 - store i32 %add, i32 *%addr + store i32 %add, ptr %addr %s = zext i32 %add2 to i64 - store i64 %s, i64* %q + store i64 %s, ptr %q ret void } @@ -388,7 +388,7 @@ entry: ; all the way through the load we would end up with a free zext and a ; non-free sext (of %b). 
; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64 @@ -403,17 +403,17 @@ entry: ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64 ; -; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %addr, i64 [[IDX64]] -; OPTALL-NEXT: store i32 [[RES32]], i32* [[GEP]] +; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %addr, i64 [[IDX64]] +; OPTALL-NEXT: store i32 [[RES32]], ptr [[GEP]] ; OPTALL-NEXT: ret void -define void @doNotPromoteFreeSExtFromAddrMode(i8* %p, i32 %b, i32* %addr) { +define void @doNotPromoteFreeSExtFromAddrMode(ptr %p, i32 %b, ptr %addr) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nsw i32 %zextt, %b %idx64 = sext i32 %add to i64 - %staddr = getelementptr inbounds i32, i32* %addr, i64 %idx64 - store i32 %add, i32 *%staddr + %staddr = getelementptr inbounds i32, ptr %addr, i64 %idx64 + store i32 %add, ptr %staddr ret void } @@ -422,7 +422,7 @@ entry: ; all the way through the load we would end up with a free zext and a ; non-free sext (of %b). ; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode64 -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64 @@ -436,17 +436,17 @@ entry: ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64 ; -; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i64, i64* %addr, i64 [[IDX64]] -; OPTALL-NEXT: store i64 %stuff, i64* [[GEP]] +; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i64, ptr %addr, i64 [[IDX64]] +; OPTALL-NEXT: store i64 %stuff, ptr [[GEP]] ; OPTALL-NEXT: ret void -define void @doNotPromoteFreeSExtFromAddrMode64(i8* %p, i32 %b, i64* %addr, i64 %stuff) { +define void @doNotPromoteFreeSExtFromAddrMode64(ptr %p, i32 %b, ptr %addr, i64 %stuff) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nsw i32 %zextt, %b %idx64 = sext i32 %add to i64 - %staddr = getelementptr inbounds i64, i64* %addr, i64 %idx64 - store i64 %stuff, i64 *%staddr + %staddr = getelementptr inbounds i64, ptr %addr, i64 %idx64 + store i64 %stuff, ptr %staddr ret void } @@ -455,7 +455,7 @@ entry: ; all the way through the load we would end up with a free zext and a ; non-free sext (of %b). 
; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode128 -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64 @@ -469,17 +469,17 @@ entry: ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64 ; -; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i128, i128* %addr, i64 [[IDX64]] -; OPTALL-NEXT: store i128 %stuff, i128* [[GEP]] +; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i128, ptr %addr, i64 [[IDX64]] +; OPTALL-NEXT: store i128 %stuff, ptr [[GEP]] ; OPTALL-NEXT: ret void -define void @doNotPromoteFreeSExtFromAddrMode128(i8* %p, i32 %b, i128* %addr, i128 %stuff) { +define void @doNotPromoteFreeSExtFromAddrMode128(ptr %p, i32 %b, ptr %addr, i128 %stuff) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nsw i32 %zextt, %b %idx64 = sext i32 %add to i64 - %staddr = getelementptr inbounds i128, i128* %addr, i64 %idx64 - store i128 %stuff, i128 *%staddr + %staddr = getelementptr inbounds i128, ptr %addr, i64 %idx64 + store i128 %stuff, ptr %staddr ret void } @@ -489,7 +489,7 @@ entry: ; all the way through the load we would end up with a free zext and a ; non-free sext (of %b). ; OPTALL-LABEL: @promoteSExtFromAddrMode256 -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; OPT-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64 @@ -499,17 +499,17 @@ entry: ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64 ; -; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i256, i256* %addr, i64 [[IDX64]] -; OPTALL-NEXT: store i256 %stuff, i256* [[GEP]] +; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i256, ptr %addr, i64 [[IDX64]] +; OPTALL-NEXT: store i256 %stuff, ptr [[GEP]] ; OPTALL-NEXT: ret void -define void @promoteSExtFromAddrMode256(i8* %p, i32 %b, i256* %addr, i256 %stuff) { +define void @promoteSExtFromAddrMode256(ptr %p, i32 %b, ptr %addr, i256 %stuff) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nsw i32 %zextt, %b %idx64 = sext i32 %add to i64 - %staddr = getelementptr inbounds i256, i256* %addr, i64 %idx64 - store i256 %stuff, i256 *%staddr + %staddr = getelementptr inbounds i256, ptr %addr, i64 %idx64 + store i256 %stuff, ptr %staddr ret void } @@ -522,7 +522,7 @@ entry: ; expose more opportunities. ; This would need to be fixed at some point. ; OPTALL-LABEL: @doNotPromoteFreeZExtFromAddrMode -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; This transformation should really happen only for stress mode. 
; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 @@ -534,22 +534,22 @@ entry: ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = zext i32 [[RES32]] to i64 ; -; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %addr, i64 [[IDX64]] -; OPTALL-NEXT: store i32 [[RES32]], i32* [[GEP]] +; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %addr, i64 [[IDX64]] +; OPTALL-NEXT: store i32 [[RES32]], ptr [[GEP]] ; OPTALL-NEXT: ret void -define void @doNotPromoteFreeZExtFromAddrMode(i8* %p, i32 %b, i32* %addr) { +define void @doNotPromoteFreeZExtFromAddrMode(ptr %p, i32 %b, ptr %addr) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nuw i32 %zextt, %b %idx64 = zext i32 %add to i64 - %staddr = getelementptr inbounds i32, i32* %addr, i64 %idx64 - store i32 %add, i32 *%staddr + %staddr = getelementptr inbounds i32, ptr %addr, i64 %idx64 + store i32 %add, ptr %staddr ret void } ; OPTALL-LABEL: @doNotPromoteFreeSExtFromShift -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64 @@ -565,9 +565,9 @@ entry: ; ; OPTALL-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = shl i64 [[IDX64]], 12 ; OPTALL-NEXT: ret i64 %staddr -define i64 @doNotPromoteFreeSExtFromShift(i8* %p, i32 %b) { +define i64 @doNotPromoteFreeSExtFromShift(ptr %p, i32 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nsw i32 %zextt, %b %idx64 = sext i32 %add to i64 @@ -577,7 +577,7 @@ entry: ; Same comment as doNotPromoteFreeZExtFromAddrMode. ; OPTALL-LABEL: @doNotPromoteFreeZExtFromShift -; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p +; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p ; ; This transformation should really happen only for stress mode. ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64 @@ -590,9 +590,9 @@ entry: ; ; OPTALL-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = shl i64 [[IDX64]], 12 ; OPTALL-NEXT: ret i64 %staddr -define i64 @doNotPromoteFreeZExtFromShift(i8* %p, i32 %b) { +define i64 @doNotPromoteFreeZExtFromShift(ptr %p, i32 %b) { entry: - %t = load i8, i8* %p + %t = load i8, ptr %p %zextt = zext i8 %t to i32 %add = add nuw i32 %zextt, %b %idx64 = zext i32 %add to i64 @@ -608,9 +608,9 @@ entry: ; sext. ; This would need to be fixed at some point. ; OPTALL-LABEL: @doNotPromoteBecauseOfPairedLoad -; OPTALL: [[LD0:%[a-zA-Z_0-9-]+]] = load i32, i32* %p -; OPTALL: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %p, i64 1 -; OPTALL: [[LD1:%[a-zA-Z_0-9-]+]] = load i32, i32* [[GEP]] +; OPTALL: [[LD0:%[a-zA-Z_0-9-]+]] = load i32, ptr %p +; OPTALL: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %p, i64 1 +; OPTALL: [[LD1:%[a-zA-Z_0-9-]+]] = load i32, ptr [[GEP]] ; ; This transformation should really happen only for stress mode. 
; OPT-NEXT: [[SEXTLD1:%[a-zA-Z_0-9-]+]] = sext i32 [[LD1]] to i64 @@ -623,10 +623,10 @@ entry: ; OPTALL-NEXT: [[ZEXTLD0:%[a-zA-Z_0-9-]+]] = zext i32 [[LD0]] to i64 ; OPTALL-NEXT: [[FINAL:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTRES]], [[ZEXTLD0]] ; OPTALL-NEXT: ret i64 [[FINAL]] -define i64 @doNotPromoteBecauseOfPairedLoad(i32* %p, i32 %cst) { - %ld0 = load i32, i32* %p - %idxLd1 = getelementptr inbounds i32, i32* %p, i64 1 - %ld1 = load i32, i32* %idxLd1 +define i64 @doNotPromoteBecauseOfPairedLoad(ptr %p, i32 %cst) { + %ld0 = load i32, ptr %p + %idxLd1 = getelementptr inbounds i32, ptr %p, i64 1 + %ld1 = load i32, ptr %idxLd1 %res = add nsw i32 %ld1, %cst %sextres = sext i32 %res to i64 %zextLd0 = zext i32 %ld0 to i64 @@ -634,17 +634,17 @@ define i64 @doNotPromoteBecauseOfPairedLoad(i32* %p, i32 %cst) { ret i64 %final } -define i64 @promoteZextShl(i1 %c, i16* %P) { +define i64 @promoteZextShl(i1 %c, ptr %P) { entry: ; OPTALL-LABEL: promoteZextShl ; OPTALL: entry: -; OPT: %[[LD:.*]] = load i16, i16* %P +; OPT: %[[LD:.*]] = load i16, ptr %P ; OPT: %[[EXT:.*]] = zext i16 %[[LD]] to i64 ; OPT: if.then: ; OPT: shl nsw i64 %[[EXT]], 1 ; DISABLE: if.then: ; DISABLE: %r = sext i32 %shl2 to i64 - %ld = load i16, i16* %P + %ld = load i16, ptr %P br i1 %c, label %end, label %if.then if.then: %z = zext i16 %ld to i32 diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll index 05f467e1934fd0..330f47da6310a4 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll @@ -8,13 +8,13 @@ ; to remove arbitrary values, so we have to live with garbage values. ; -%"class.H4ISP::H4ISPDevice" = type { i32 (%"class.H4ISP::H4ISPDevice"*, i32, i8*, i8*)*, i8*, i32*, %"class.H4ISP::H4ISPCameraManager"* } +%"class.H4ISP::H4ISPDevice" = type { ptr, ptr, ptr, ptr } %"class.H4ISP::H4ISPCameraManager" = type opaque -declare i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"*) +declare i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(ptr) -@pH4ISPDevice = hidden global %"class.H4ISP::H4ISPDevice"* null, align 8 +@pH4ISPDevice = hidden global ptr null, align 8 ; CHECK-LABEL: _foo: ; CHECK: ret @@ -23,14 +23,14 @@ define void @foo() { entry: br label %if.then83 if.then83: ; preds = %if.end81 - %tmp = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 - %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"* %tmp) #19 + %tmp = load ptr, ptr @pH4ISPDevice, align 8 + %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(ptr %tmp) #19 tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"() - %tmp2 = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 + %tmp2 = load ptr, ptr @pH4ISPDevice, align 8 tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"() - %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3 - %tmp3 = load %"class.H4ISP::H4ISPCameraManager"*, %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8 - %tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null + %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", ptr %tmp2, i64 0, i32 3 + %tmp3 = 
load ptr, ptr %pCameraManager.i268, align 8 + %tobool.i269 = icmp eq ptr %tmp3, null br i1 %tobool.i269, label %if.then83, label %end end: ret void diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll index 962e36ddb61a7a..acc0df12a94e84 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll @@ -7,7 +7,7 @@ ; at least provide a wrong one (with the offset folded ; into the definition). -%struct.anon = type { i32*, i32** } +%struct.anon = type { ptr, ptr } @pptp_wan_head = internal global %struct.anon zeroinitializer, align 8 @@ -16,8 +16,8 @@ ; CHECK-NOT: AdrpAddStr define i32 @pptp_wan_init() { entry: - store i32* null, i32** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 0), align 8 - store i32** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 0), i32*** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 1), align 8 + store ptr null, ptr @pptp_wan_head, align 8 + store ptr @pptp_wan_head, ptr getelementptr inbounds (%struct.anon, ptr @pptp_wan_head, i64 0, i32 1), align 8 ret i32 0 } diff --git a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll index bbb1ce4aced72c..79807730746bc3 100644 --- a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll @@ -9,14 +9,14 @@ define i32 @test1() nounwind { ; CHECK-NEXT: movk w8, #1039, lsl #16 ; CHECK-NEXT: ldp w9, w10, [x8, #4] ; CHECK: ldr w8, [x8, #12] - %at = inttoptr i64 68141056 to %T* - %o1 = getelementptr %T, %T* %at, i32 0, i32 1 - %t1 = load i32, i32* %o1 - %o2 = getelementptr %T, %T* %at, i32 0, i32 2 - %t2 = load i32, i32* %o2 + %at = inttoptr i64 68141056 to ptr + %o1 = getelementptr %T, ptr %at, i32 0, i32 1 + %t1 = load i32, ptr %o1 + %o2 = getelementptr %T, ptr %at, i32 0, i32 2 + %t2 = load i32, ptr %o2 %a1 = add i32 %t1, %t2 - %o3 = getelementptr %T, %T* %at, i32 0, i32 3 - %t3 = load i32, i32* %o3 + %o3 = getelementptr %T, ptr %at, i32 0, i32 3 + %t3 = load i32, ptr %o3 %a2 = add i32 %a1, %t3 ret i32 %a2 } diff --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll index 9910a4e0f73917..e6b05f7182f8c4 100644 --- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll +++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s -define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) { +define <4 x i16> @fptosi_v4f64_to_v4i16(ptr %ptr) { ; CHECK-LABEL: fptosi_v4f64_to_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] @@ -12,12 +12,12 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) { ; CHECK-NEXT: xtn v1.2s, v1.2d ; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x double>, <4 x double>* %ptr + %tmp1 = load <4 x double>, ptr %ptr %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16> ret <4 x i16> %tmp2 } -define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) { +define <8 x i8> @fptosi_v4f64_to_v4i8(ptr %ptr) { ; CHECK-LABEL: fptosi_v4f64_to_v4i8: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0, #32] @@ -34,12 +34,12 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) { ; CHECK-NEXT: uzp1 v1.4h, v2.4h, v3.4h ; CHECK-NEXT: uzp1 v0.8b, v1.8b, v0.8b ; CHECK-NEXT: ret - %tmp1 = load <8 x double>, <8 x double>* %ptr + %tmp1 = load <8 x double>, ptr %ptr %tmp2 = fptosi <8 
x double> %tmp1 to <8 x i8> ret <8 x i8> %tmp2 } -define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) { +define <4 x half> @uitofp_v4i64_to_v4f16(ptr %ptr) { ; CHECK-LABEL: uitofp_v4i64_to_v4f16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] @@ -49,24 +49,24 @@ define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) { ; CHECK-NEXT: fcvtn2 v0.4s, v1.2d ; CHECK-NEXT: fcvtn v0.4h, v0.4s ; CHECK-NEXT: ret - %tmp1 = load <4 x i64>, <4 x i64>* %ptr + %tmp1 = load <4 x i64>, ptr %ptr %tmp2 = uitofp <4 x i64> %tmp1 to <4 x half> ret <4 x half> %tmp2 } -define <4 x i16> @trunc_v4i64_to_v4i16(<4 x i64>* %ptr) { +define <4 x i16> @trunc_v4i64_to_v4i16(ptr %ptr) { ; CHECK-LABEL: trunc_v4i64_to_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s ; CHECK-NEXT: xtn v0.4h, v0.4s ; CHECK-NEXT: ret - %tmp1 = load <4 x i64>, <4 x i64>* %ptr + %tmp1 = load <4 x i64>, ptr %ptr %tmp2 = trunc <4 x i64> %tmp1 to <4 x i16> ret <4 x i16> %tmp2 } -define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) { +define <4 x i16> @fptoui_v4f64_to_v4i16(ptr %ptr) { ; CHECK-LABEL: fptoui_v4f64_to_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x0] @@ -76,7 +76,7 @@ define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) { ; CHECK-NEXT: xtn v1.2s, v1.2d ; CHECK-NEXT: uzp1 v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret - %tmp1 = load <4 x double>, <4 x double>* %ptr + %tmp1 = load <4 x double>, ptr %ptr %tmp2 = fptoui <4 x double> %tmp1 to <4 x i16> ret <4 x i16> %tmp2 } diff --git a/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll b/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll index 1803787d729f9a..9d71338c9c97a7 100644 --- a/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll +++ b/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll @@ -7,140 +7,133 @@ ; We use dummy inline asm to force LLVM to generate a COPY between the registers ; we want by clobbering all the others. 
-define void @test_D1D2_from_D0D1(i8* %addr) #0 { +define void @test_D1D2_from_D0D1(ptr %addr) #0 { ; CHECK-LABEL: test_D1D2_from_D0D1: ; CHECK: mov.8b v2, v1 ; CHECK: mov.8b v1, v0 entry: - %addr_v8i8 = bitcast i8* %addr to <8 x i8>* - %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8) + %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr) %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0 %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1 tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) ret void } -define void @test_D0D1_from_D1D2(i8* %addr) #0 { +define void @test_D0D1_from_D1D2(ptr %addr) #0 { ; CHECK-LABEL: test_D0D1_from_D1D2: ; CHECK: mov.8b v0, v1 ; CHECK: mov.8b v1, v2 entry: - %addr_v8i8 = bitcast i8* %addr to <8 x i8>* - %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8) + %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr) %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0 %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1 tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) ret void } -define void @test_D0D1_from_D31D0(i8* %addr) #0 { +define void @test_D0D1_from_D31D0(ptr %addr) #0 { ; CHECK-LABEL: test_D0D1_from_D31D0: ; CHECK: mov.8b v1, v0 ; CHECK: mov.8b v0, v31 entry: - %addr_v8i8 = bitcast i8* %addr to <8 x i8>* - %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8) + %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr) %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0 %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1 tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"() - tail call void 
@llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) ret void } -define void @test_D31D0_from_D0D1(i8* %addr) #0 { +define void @test_D31D0_from_D0D1(ptr %addr) #0 { ; CHECK-LABEL: test_D31D0_from_D0D1: ; CHECK: mov.8b v31, v0 ; CHECK: mov.8b v0, v1 entry: - %addr_v8i8 = bitcast i8* %addr to <8 x i8>* - %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8) + %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr) %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0 %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1 tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"() - tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr) + tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr) ret void } -define void @test_D2D3D4_from_D0D1D2(i8* %addr) #0 { +define void @test_D2D3D4_from_D0D1D2(ptr %addr) #0 { ; CHECK-LABEL: test_D2D3D4_from_D0D1D2: ; CHECK: mov.8b v4, v2 ; CHECK: mov.8b v3, v1 ; CHECK: mov.8b v2, v0 entry: - %addr_v8i8 = bitcast i8* %addr to <8 x i8>* - %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %addr_v8i8) + %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %addr) %vec0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 0 %vec1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 1 %vec2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 2 tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr) + tail call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, ptr %addr) tail call void asm sideeffect "", "~{v0},~{v1},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr) + tail call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, ptr %addr) ret void 
} -define void @test_Q0Q1Q2_from_Q1Q2Q3(i8* %addr) #0 { +define void @test_Q0Q1Q2_from_Q1Q2Q3(ptr %addr) #0 { ; CHECK-LABEL: test_Q0Q1Q2_from_Q1Q2Q3: ; CHECK: mov.16b v0, v1 ; CHECK: mov.16b v1, v2 ; CHECK: mov.16b v2, v3 entry: - %addr_v16i8 = bitcast i8* %addr to <16 x i8>* - %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %addr_v16i8) + %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %addr) %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0 %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1 %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2 tail call void asm sideeffect "", "~{v0},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr) + tail call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, ptr %addr) tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr) + tail call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, ptr %addr) ret void } -define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(i8* %addr) #0 { +define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(ptr %addr) #0 { ; CHECK-LABEL: test_Q1Q2Q3Q4_from_Q30Q31Q0Q1: ; CHECK: mov.16b v4, v1 ; CHECK: mov.16b v3, v0 ; CHECK: mov.16b v2, v31 ; CHECK: mov.16b v1, v30 - %addr_v16i8 = bitcast i8* %addr to <16 x i8>* - %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>* %addr_v16i8) + %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %addr) %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0 %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1 %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2 %vec3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 3 tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29}"() - tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr) + tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, ptr %addr) tail call void asm sideeffect "", "~{v0},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() - tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr) + tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, ptr %addr) ret void } -declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*) -declare { <8 x i8>, <8 x i8>, 
<8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>*) -declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*) -declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>*) +declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr) +declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr) +declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr) +declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr) -declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) -declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*) -declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*) -declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*) +declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr) +declare void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr) +declare void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr) +declare void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr) diff --git a/llvm/test/CodeGen/AArch64/arm64-cse.ll b/llvm/test/CodeGen/AArch64/arm64-cse.ll index 00f519a9427217..9ea51161dad0ea 100644 --- a/llvm/test/CodeGen/AArch64/arm64-cse.ll +++ b/llvm/test/CodeGen/AArch64/arm64-cse.ll @@ -5,7 +5,7 @@ target triple = "arm64-apple-ios" ; rdar://12462006 ; CSE between "icmp reg reg" and "sub reg reg". ; Both can be in the same basic block or in different basic blocks. -define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind { +define ptr @t1(ptr %base, ptr nocapture %offset, i32 %size) nounwind { ; CHECK-LABEL: t1: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: ldr w9, [x1] @@ -20,7 +20,7 @@ define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind { ; CHECK-NEXT: str w9, [x1] ; CHECK-NEXT: ret entry: - %0 = load i32, i32* %offset, align 4 + %0 = load i32, ptr %offset, align 4 %cmp = icmp slt i32 %0, %size %s = sub nsw i32 %0, %size br i1 %cmp, label %return, label %if.end @@ -29,17 +29,17 @@ if.end: %sub = sub nsw i32 %0, %size %s2 = sub nsw i32 %s, %size %s3 = sub nsw i32 %sub, %s2 - store i32 %s3, i32* %offset, align 4 - %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub + store i32 %s3, ptr %offset, align 4 + %add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub br label %return return: - %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ] - ret i8* %retval.0 + %retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ] + ret ptr %retval.0 } ; CSE between "icmp reg imm" and "sub reg imm". 
-define i8* @t2(i8* %base, i32* nocapture %offset) nounwind { +define ptr @t2(ptr %base, ptr nocapture %offset) nounwind { ; CHECK-LABEL: t2: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: ldr w8, [x1] @@ -53,17 +53,17 @@ define i8* @t2(i8* %base, i32* nocapture %offset) nounwind { ; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: ret entry: - %0 = load i32, i32* %offset, align 4 + %0 = load i32, ptr %offset, align 4 %cmp = icmp slt i32 %0, 1 br i1 %cmp, label %return, label %if.end if.end: %sub = sub nsw i32 %0, 1 - store i32 %sub, i32* %offset, align 4 - %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub + store i32 %sub, ptr %offset, align 4 + %add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub br label %return return: - %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ] - ret i8* %retval.0 + %retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ] + ret ptr %retval.0 } diff --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll index e04b42d637207b..5dd826d9bf5493 100644 --- a/llvm/test/CodeGen/AArch64/arm64-csel.ll +++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll @@ -379,7 +379,7 @@ define i64 @foo23(i64 %x) { ret i64 %res } -define i16 @foo24(i8* nocapture readonly %A, i8* nocapture readonly %B) { +define i16 @foo24(ptr nocapture readonly %A, ptr nocapture readonly %B) { ; CHECK-LABEL: foo24: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] @@ -390,17 +390,17 @@ define i16 @foo24(i8* nocapture readonly %A, i8* nocapture readonly %B) { ; CHECK-NEXT: cinc w0, w8, hi ; CHECK-NEXT: ret entry: - %0 = load i8, i8* %A, align 1 + %0 = load i8, ptr %A, align 1 %cmp = icmp ugt i8 %0, 3 %conv1 = zext i1 %cmp to i16 - %1 = load i8, i8* %B, align 1 + %1 = load i8, ptr %B, align 1 %cmp4 = icmp ugt i8 %1, 33 %conv5 = zext i1 %cmp4 to i16 %add = add nuw nsw i16 %conv5, %conv1 ret i16 %add } -define i64 @foo25(i64* nocapture readonly %A, i64* nocapture readonly %B) { +define i64 @foo25(ptr nocapture readonly %A, ptr nocapture readonly %B) { ; CHECK-LABEL: foo25: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr x8, [x1] @@ -411,10 +411,10 @@ define i64 @foo25(i64* nocapture readonly %A, i64* nocapture readonly %B) { ; CHECK-NEXT: cinc x0, x8, hi ; CHECK-NEXT: ret entry: - %0 = load i64, i64* %A, align 1 + %0 = load i64, ptr %A, align 1 %cmp = icmp ugt i64 %0, 3 %conv1 = zext i1 %cmp to i64 - %1 = load i64, i64* %B, align 1 + %1 = load i64, ptr %B, align 1 %cmp4 = icmp ugt i64 %1, 33 %conv5 = zext i1 %cmp4 to i64 %add = add nuw nsw i64 %conv5, %conv1 diff --git a/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll b/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll index e02a3a8ba7fd61..9a485f175b2c40 100644 --- a/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll +++ b/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll @@ -20,6 +20,6 @@ define void @test1() { entry: tail call void asm sideeffect "nop", "~{x20},~{x21},~{x22},~{x23}"() nounwind - store i32 0, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @G, i64 0, i64 0), align 4 + store i32 0, ptr @G, align 4 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll index 3cee66c2d29ff3..31a2c74d3bd15f 100644 --- a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll +++ b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll @@ -78,8 +78,8 @@ define dso_local void @callee() { ; CHECK-SAVED-ALL-NEXT: stp x11, x10, [sp ; CHECK-SAVED-ALL-NEXT: stp x9, x8, [sp - %val = load volatile [30 x i64], [30 x i64]* @var - store volatile [30 x 
i64] %val, [30 x i64]* @var + %val = load volatile [30 x i64], ptr @var + store volatile [30 x i64] %val, ptr @var ; CHECK-SAVED-ALL: ldp x9, x8, [sp ; CHECK-SAVED-ALL-NEXT: ldp x11, x10, [sp @@ -104,7 +104,7 @@ define dso_local void @callee() { define dso_local void @caller() { ; CHECK-LABEL: caller - %val = load volatile [30 x i64], [30 x i64]* @var + %val = load volatile [30 x i64], ptr @var ; CHECK-SAVED-X8: adrp x8, var ; CHECK-SAVED-X9: adrp x9, var ; CHECK-SAVED-X10: adrp x10, var @@ -139,7 +139,7 @@ define dso_local void @caller() { call void @callee() ; CHECK: bl callee - store volatile [30 x i64] %val, [30 x i64]* @var + store volatile [30 x i64] %val, ptr @var ; CHECK-SAVED-ALL-DAG: str x9 ; CHECK-SAVED-ALL-DAG: str x10 ; CHECK-SAVED-ALL-DAG: str x11 diff --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll index 37f3504be935f3..efe1e203e2c998 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll @@ -2,7 +2,7 @@ target datalayout = "e-i64:64-n32:64-S128" target triple = "arm64-apple-ios" -%"struct.SU" = type { i32, %"struct.SU"*, i32*, i32, i32, %"struct.BO", i32, [5 x i8] } +%"struct.SU" = type { i32, ptr, ptr, i32, i32, %"struct.BO", i32, [5 x i8] } %"struct.BO" = type { %"struct.RE" } %"struct.RE" = type { i32, i32, i32, i32 } @@ -15,14 +15,13 @@ target triple = "arm64-apple-ios" ; CHECK-NOT: ldr ; CHECK: str wzr ; CHECK-NOT: str -define void @test(%"struct.SU"* nocapture %su) { +define void @test(ptr nocapture %su) { entry: - %r1 = getelementptr inbounds %"struct.SU", %"struct.SU"* %su, i64 1, i32 5 - %r2 = bitcast %"struct.BO"* %r1 to i48* - %r3 = load i48, i48* %r2, align 8 + %r1 = getelementptr inbounds %"struct.SU", ptr %su, i64 1, i32 5 + %r3 = load i48, ptr %r1, align 8 %r4 = and i48 %r3, -4294967296 %r5 = or i48 0, %r4 - store i48 %r5, i48* %r2, align 8 + store i48 %r5, ptr %r1, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll index 72d94ae13b0fcb..7e72e8de01f4ff 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll @@ -12,28 +12,25 @@ ; CHECK: fadd {{s[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]] ; CHECK: fadd {{s[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]] ; CHECK: ret -define void @test(%class.Complex* nocapture %out, i64 %out_start) { +define void @test(ptr nocapture %out, i64 %out_start) { entry: - %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start - %0 = bitcast %class.Complex* %arrayidx to i64* - %1 = load i64, i64* %0, align 4 - %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32 - %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float - %t0.sroa.2.0.extract.shift = lshr i64 %1, 32 + %arrayidx = getelementptr inbounds %class.Complex, ptr %out, i64 %out_start + %0 = load i64, ptr %arrayidx, align 4 + %t0.sroa.0.0.extract.trunc = trunc i64 %0 to i32 + %1 = bitcast i32 %t0.sroa.0.0.extract.trunc to float + %t0.sroa.2.0.extract.shift = lshr i64 %0, 32 %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32 - %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float + %2 = bitcast i32 %t0.sroa.2.0.extract.trunc to float %add = add i64 %out_start, 8 - %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add - %i.i = getelementptr inbounds 
%class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0 - %4 = load float, float* %i.i, align 4 - %add.i = fadd float %4, %2 + %arrayidx2 = getelementptr inbounds %class.Complex, ptr %out, i64 %add + %3 = load float, ptr %arrayidx2, align 4 + %add.i = fadd float %3, %1 %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0 - %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1 - %5 = load float, float* %r.i, align 4 - %add5.i = fadd float %5, %3 + %r.i = getelementptr inbounds %class.Complex, ptr %arrayidx2, i64 0, i32 1 + %4 = load float, ptr %r.i, align 4 + %add5.i = fadd float %4, %2 %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1 - %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>* - store <2 x float> %retval.sroa.0.4.vec.insert.i, <2 x float>* %ref.tmp.sroa.0.0.cast, align 4 + store <2 x float> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4 ret void } @@ -44,28 +41,25 @@ entry: ; CHECK: add {{w[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]] ; CHECK: add {{w[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]] ; CHECK: ret -define void @test_int(%class.Complex_int* nocapture %out, i64 %out_start) { +define void @test_int(ptr nocapture %out, i64 %out_start) { entry: - %arrayidx = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %out_start - %0 = bitcast %class.Complex_int* %arrayidx to i64* - %1 = load i64, i64* %0, align 4 - %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32 - %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32 - %t0.sroa.2.0.extract.shift = lshr i64 %1, 32 + %arrayidx = getelementptr inbounds %class.Complex_int, ptr %out, i64 %out_start + %0 = load i64, ptr %arrayidx, align 4 + %t0.sroa.0.0.extract.trunc = trunc i64 %0 to i32 + %1 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32 + %t0.sroa.2.0.extract.shift = lshr i64 %0, 32 %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32 - %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32 + %2 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32 %add = add i64 %out_start, 8 - %arrayidx2 = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %add - %i.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 0 - %4 = load i32, i32* %i.i, align 4 - %add.i = add i32 %4, %2 + %arrayidx2 = getelementptr inbounds %class.Complex_int, ptr %out, i64 %add + %3 = load i32, ptr %arrayidx2, align 4 + %add.i = add i32 %3, %1 %retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0 - %r.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 1 - %5 = load i32, i32* %r.i, align 4 - %add5.i = add i32 %5, %3 + %r.i = getelementptr inbounds %class.Complex_int, ptr %arrayidx2, i64 0, i32 1 + %4 = load i32, ptr %r.i, align 4 + %add5.i = add i32 %4, %2 %retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1 - %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_int* %arrayidx to <2 x i32>* - store <2 x i32> %retval.sroa.0.4.vec.insert.i, <2 x i32>* %ref.tmp.sroa.0.0.cast, align 4 + store <2 x i32> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4 ret void } @@ -76,27 +70,24 @@ entry: ; CHECK: add {{x[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]] ; CHECK: add {{x[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]] ; CHECK: ret -define void @test_long(%class.Complex_long* nocapture %out, i64 %out_start) { +define void @test_long(ptr nocapture %out, i64 %out_start) { 
entry: - %arrayidx = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %out_start - %0 = bitcast %class.Complex_long* %arrayidx to i128* - %1 = load i128, i128* %0, align 4 - %t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64 - %2 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64 - %t0.sroa.2.0.extract.shift = lshr i128 %1, 64 + %arrayidx = getelementptr inbounds %class.Complex_long, ptr %out, i64 %out_start + %0 = load i128, ptr %arrayidx, align 4 + %t0.sroa.0.0.extract.trunc = trunc i128 %0 to i64 + %1 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64 + %t0.sroa.2.0.extract.shift = lshr i128 %0, 64 %t0.sroa.2.0.extract.trunc = trunc i128 %t0.sroa.2.0.extract.shift to i64 - %3 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64 + %2 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64 %add = add i64 %out_start, 8 - %arrayidx2 = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %add - %i.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 0 - %4 = load i64, i64* %i.i, align 4 - %add.i = add i64 %4, %2 + %arrayidx2 = getelementptr inbounds %class.Complex_long, ptr %out, i64 %add + %3 = load i64, ptr %arrayidx2, align 4 + %add.i = add i64 %3, %1 %retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0 - %r.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 1 - %5 = load i64, i64* %r.i, align 4 - %add5.i = add i64 %5, %3 + %r.i = getelementptr inbounds %class.Complex_long, ptr %arrayidx2, i32 0, i32 1 + %4 = load i64, ptr %r.i, align 4 + %add5.i = add i64 %4, %2 %retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1 - %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_long* %arrayidx to <2 x i64>* - store <2 x i64> %retval.sroa.0.4.vec.insert.i, <2 x i64>* %ref.tmp.sroa.0.0.cast, align 4 + store <2 x i64> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll b/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll index 0be3fb12f5adf3..448148fe23f2b7 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll @@ -6,7 +6,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" define i32 @test1() #0 { %tmp1 = alloca i8 %tmp2 = alloca i32, i32 4096 - %tmp3 = icmp eq i8* %tmp1, null + %tmp3 = icmp eq ptr %tmp1, null %tmp4 = zext i1 %tmp3 to i32 ret i32 %tmp4 diff --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll index 0947730ebab0ac..6613f911f82585 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dup.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll @@ -199,90 +199,90 @@ define <4 x float> @v_shuffledupQfloat(float %A) nounwind { ret <4 x float> %tmp2 } -define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind { +define <8 x i8> @vduplane8(ptr %A) nounwind { ; CHECK-LABEL: vduplane8: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.8b v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp1 = load <8 x i8>, ptr %A %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <8 x i8> %tmp2 } -define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind { +define <4 x i16> @vduplane16(ptr %A) nounwind { ; CHECK-LABEL: vduplane16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.4h v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <4 x 
i16>, <4 x i16>* %A + %tmp1 = load <4 x i16>, ptr %A %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ret <4 x i16> %tmp2 } -define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind { +define <2 x i32> @vduplane32(ptr %A) nounwind { ; CHECK-LABEL: vduplane32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.2s v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <2 x i32>, <2 x i32>* %A + %tmp1 = load <2 x i32>, ptr %A %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ret <2 x i32> %tmp2 } -define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind { +define <2 x float> @vduplanefloat(ptr %A) nounwind { ; CHECK-LABEL: vduplanefloat: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.2s v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <2 x float>, <2 x float>* %A + %tmp1 = load <2 x float>, ptr %A %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 > ret <2 x float> %tmp2 } -define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind { +define <16 x i8> @vduplaneQ8(ptr %A) nounwind { ; CHECK-LABEL: vduplaneQ8: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.16b v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp1 = load <8 x i8>, ptr %A %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <16 x i8> %tmp2 } -define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind { +define <8 x i16> @vduplaneQ16(ptr %A) nounwind { ; CHECK-LABEL: vduplaneQ16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.8h v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <4 x i16>, <4 x i16>* %A + %tmp1 = load <4 x i16>, ptr %A %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <8 x i16> %tmp2 } -define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind { +define <4 x i32> @vduplaneQ32(ptr %A) nounwind { ; CHECK-LABEL: vduplaneQ32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.4s v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <2 x i32>, <2 x i32>* %A + %tmp1 = load <2 x i32>, ptr %A %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ret <4 x i32> %tmp2 } -define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind { +define <4 x float> @vduplaneQfloat(ptr %A) nounwind { ; CHECK-LABEL: vduplaneQfloat: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: dup.4s v0, v0[1] ; CHECK-NEXT: ret - %tmp1 = load <2 x float>, <2 x float>* %A + %tmp1 = load <2 x float>, ptr %A %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ret <4 x float> %tmp2 } @@ -445,7 +445,7 @@ define <4 x float> @test_perfectshuffle_dupext_v4f32(<4 x float> %a, <4 x float> ret <4 x float> %r } -define void @disguised_dup(<4 x float> %x, <4 x float>* %p1, <4 x float>* %p2) { +define void @disguised_dup(<4 x float> %x, ptr %p1, ptr %p2) { ; CHECK-LABEL: disguised_dup: ; CHECK: // %bb.0: ; CHECK-NEXT: ext.16b v1, v0, v0, #4 @@ -456,8 +456,8 @@ define void @disguised_dup(<4 x float> %x, <4 x float>* %p1, <4 x float>* %p2) { ; CHECK-NEXT: ret %shuf = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> %dup = shufflevector <4 x float> %shuf, <4 x float> undef, <4 x i32> - store <4 x float> %shuf, <4 x float>* %p1, align 8 - store <4 x float> %dup, <4 x float>* %p2, align 8 + store <4 
x float> %shuf, ptr %p1, align 8
+  store <4 x float> %dup, ptr %p2, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
index 5f5672ec0867e3..034822b700a979 100644
--- a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
@@ -2,7 +2,7 @@
 target triple = "arm64-apple-macosx"
 
 ; CHECK: mm2
-define i32 @mm2(i32* nocapture %p, i32 %n) nounwind uwtable readonly ssp {
+define i32 @mm2(ptr nocapture %p, i32 %n) nounwind uwtable readonly ssp {
 entry:
   br label %do.body
 
@@ -13,9 +13,9 @@ do.body:
   %max.0 = phi i32 [ 0, %entry ], [ %max.1, %do.cond ]
   %min.0 = phi i32 [ 0, %entry ], [ %min.1, %do.cond ]
   %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
-  %p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
-  %0 = load i32, i32* %p.addr.0, align 4
+  %p.addr.0 = phi ptr [ %p, %entry ], [ %incdec.ptr, %do.cond ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.0, i64 1
+  %0 = load i32, ptr %p.addr.0, align 4
   %cmp = icmp sgt i32 %0, %max.0
   br i1 %cmp, label %do.cond, label %if.else
 
@@ -400,7 +400,7 @@ entry:
   br label %for.body
 
 for.body:
-  %x0 = load i32, i32* undef, align 4
+  %x0 = load i32, ptr undef, align 4
   br i1 undef, label %if.then.i146, label %is_sbox.exit155
 
 if.then.i146:
@@ -412,12 +412,12 @@ if.then.i146:
 is_sbox.exit155:                        ; preds = %if.then.i146, %for.body
   %seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ]
   %idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64
-  %arrayidx18.i154 = getelementptr inbounds i32, i32* null, i64 %idxprom15.i152
-  %x1 = load i32, i32* %arrayidx18.i154, align 4
+  %arrayidx18.i154 = getelementptr inbounds i32, ptr null, i64 %idxprom15.i152
+  %x1 = load i32, ptr %arrayidx18.i154, align 4
   br i1 undef, label %for.body51, label %for.body
 
 for.body51:                             ; preds = %is_sbox.exit155
-  call fastcc void @get_switch_type(i32 %x1, i32 undef, i16 signext undef, i16 signext undef, i16* undef)
+  call fastcc void @get_switch_type(i32 %x1, i32 undef, i16 signext undef, i16 signext undef, ptr undef)
   unreachable
 }
-declare fastcc void @get_switch_type(i32, i32, i16 signext, i16 signext, i16* nocapture) nounwind ssp
+declare fastcc void @get_switch_type(i32, i32, i16 signext, i16 signext, ptr nocapture) nounwind ssp
diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll
index d59d5821ebf367..c38ab076e4ea53 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll
@@ -1,92 +1,92 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd:
 ;CHECK: {{ext.8b.*#3}}
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32>
   ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextRd(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRd:
 ;CHECK: {{ext.8b.*#5}}
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32>
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextq(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextq:
 ;CHECK: {{ext.16b.*3}}
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32>
   ret <16 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextRq(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRq:
 ;CHECK: {{ext.16b.*7}}
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32>
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @test_vextd16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd16:
 ;CHECK: {{ext.8b.*#6}}
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32>
   ret <4 x i16> %tmp3
 }
 
-define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @test_vextq32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextq32:
 ;CHECK: {{ext.16b.*12}}
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32>
   ret <4 x i32> %tmp3
 }
 
 ; Undef shuffle indices should not prevent matching to VEXT:
 
-define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd_undef:
 ;CHECK: {{ext.8b.*}}
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32>
   ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd_undef2(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd_undef2:
 ;CHECK: {{ext.8b.*#6}}
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32>
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextRq_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRq_undef:
 ;CHECK: {{ext.16b.*#7}}
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32>
   ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vextRq_undef2(ptr %A) nounwind {
 ;CHECK-LABEL: test_vextRq_undef2:
 ;CHECK: {{ext.16b.*#10}}
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %vext = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32>
   ret <8 x i16> %vext;
 }
@@ -95,11 +95,11 @@ define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
 ; chosen to reach lowering phase as a BUILD_VECTOR.
 
 ; An undef in the shuffle list should still be optimizable
-define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i16> @test_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_undef:
 ;CHECK: zip1.4h
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32>
   ret <4 x i16> %tmp3
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-extend.ll b/llvm/test/CodeGen/AArch64/arm64-extend.ll
index 0ef68f8a530194..54661ff14697c4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extend.ll
@@ -8,8 +8,8 @@ define i64 @foo(i32 %i) {
 ; CHECK: ldrsw x0, [x[[REG1]], w0, sxtw #2]
 ; CHECK: ret
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @array, i64 0, i64 %idxprom
-  %tmp1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @array, i64 0, i64 %idxprom
+  %tmp1 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %tmp1 to i64
   ret i64 %conv
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
index 5dd8cb282321e8..5963d98ec3240b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://12771555
 
-define void @foo(i16* %ptr, i32 %a) nounwind {
+define void @foo(ptr %ptr, i32 %a) nounwind {
 entry:
 ; CHECK-LABEL: foo:
   %tmp1 = icmp ult i32 %a, 100
@@ -9,7 +9,7 @@ entry:
 bb1:
 ; CHECK: %bb1
 ; CHECK: ldrh [[REG:w[0-9]+]]
-  %tmp2 = load i16, i16* %ptr, align 2
+  %tmp2 = load i16, ptr %ptr, align 2
   br label %bb2
 bb2:
 ; CHECK-NOT: and {{w[0-9]+}}, [[REG]], #0xffff
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
index 1985a72b6bd8ab..4140e7633e38f0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
@@ -13,7 +13,7 @@ entry:
 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
 ; CHECK: ldr w0, [x[[REG3]]]
 ; CHECK: ret
-  %0 = load i32, i32* getelementptr inbounds ([5001 x i32], [5001 x i32]* @sortlist, i32 0, i64 5000), align 4
+  %0 = load i32, ptr getelementptr inbounds ([5001 x i32], ptr @sortlist, i32 0, i64 5000), align 4
   ret i32 %0
 }
 
@@ -26,13 +26,13 @@ entry:
 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
 ; CHECK: ldr x0, [x[[REG3]]]
 ; CHECK: ret
-  %0 = load i64, i64* getelementptr inbounds ([5001 x i64], [5001 x i64]* @sortlist2, i32 0, i64 5000), align 4
+  %0 = load i64, ptr getelementptr inbounds ([5001 x i64], ptr @sortlist2, i32 0, i64 5000), align 4
   ret i64 %0
 }
 
 ; Load an address with a ridiculously large offset.
 ; rdar://12505553
-@pd2 = common global i8* null, align 8
+@pd2 = common global ptr null, align 8
 
 define signext i8 @foo3() nounwind ssp {
 entry:
@@ -40,8 +40,8 @@ entry:
 ; CHECK: mov x[[REG:[0-9]+]], #12274
 ; CHECK: movk x[[REG]], #29646, lsl #16
 ; CHECK: movk x[[REG]], #2874, lsl #32
-  %0 = load i8*, i8** @pd2, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234
-  %1 = load i8, i8* %arrayidx, align 1
+  %0 = load ptr, ptr @pd2, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 12345678901234
+  %1 = load i8, ptr %arrayidx, align 1
   ret i8 %1
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
index 1b65d9cbce14a5..c15a5dbbf672b7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
@@ -4,10 +4,10 @@
 %struct.S1Ty = type { i64 }
 %struct.S2Ty = type { %struct.S1Ty, %struct.S1Ty }
 
-define void @takeS1(%struct.S1Ty* %V) nounwind {
+define void @takeS1(ptr %V) nounwind {
 entry:
-  %V.addr = alloca %struct.S1Ty*, align 8
-  store %struct.S1Ty* %V, %struct.S1Ty** %V.addr, align 8
+  %V.addr = alloca ptr, align 8
+  store ptr %V, ptr %V.addr, align 8
   ret void
 }
 
@@ -18,7 +18,7 @@ entry:
 ; CHECK: mov [[REG:x[0-9]+]], sp
 ; CHECK-NEXT: add x0, [[REG]], #8
   %E = alloca %struct.S2Ty, align 4
-  %B = getelementptr inbounds %struct.S2Ty, %struct.S2Ty* %E, i32 0, i32 1
-  call void @takeS1(%struct.S1Ty* %B)
+  %B = getelementptr inbounds %struct.S2Ty, ptr %E, i32 0, i32 1
+  call void @takeS1(ptr %B)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
index d563ccb851ce10..04617d1c89f1b1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
@@ -2,8 +2,8 @@
 
 define void @branch1() nounwind uwtable ssp {
   %x = alloca i32, align 4
-  store i32 0, i32* %x, align 4
-  %1 = load i32, i32* %x, align 4
+  store i32 0, ptr %x, align 4
+  %1 = load i32, ptr %x, align 4
   %2 = icmp ne i32 %1, 0
   br i1 %2, label %3, label %4
 
@@ -19,41 +19,41 @@ define void @branch2() nounwind uwtable ssp {
   %x = alloca i32, align 4
   %y = alloca i32, align 4
   %z = alloca i32, align 4
-  store i32 0, i32* %1
-  store i32 1, i32* %y, align 4
-  store i32 1, i32* %x, align 4
-  store i32 0, i32* %z, align 4
-  %2 = load i32, i32* %x, align 4
+  store i32 0, ptr %1
+  store i32 1, ptr %y, align 4
+  store i32 1, ptr %x, align 4
+  store i32 0, ptr %z, align 4
+  %2 = load i32, ptr %x, align 4
   %3 = icmp ne i32 %2, 0
   br i1 %3, label %4, label %5
;