@@ -0,0 +1,170 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
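
// These tests exercise the overloaded __riscv_vzext_vf4 intrinsic, which
// zero-extends each element to four times its original width (u8 -> u32,
// u16 -> u64), across the supported LMUL combinations.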

// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
  return __riscv_vzext_vf4(op1, vl);
}
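
// Masked variants. The trailing `i64 3` operand of the masked intrinsic call
// is the policy operand (TAIL_AGNOSTIC | MASK_AGNOSTIC), since the plain _m
// API carries no explicit tail/mask policy.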

// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
  return __riscv_vzext_vf4(mask, op1, vl);
}

@@ -0,0 +1,80 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
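
// These tests exercise the overloaded __riscv_vzext_vf8 intrinsic, which
// zero-extends each element to eight times its original width (u8 -> u64);
// an 8x widening only leaves destination LMULs m1 through m8.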

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8(op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8(op1, vl);
}
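
// Masked variants; as in the vf4 tests, the trailing `i64 3` policy operand
// encodes TAIL_AGNOSTIC | MASK_AGNOSTIC.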

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8(mask, op1, vl);
}

@@ -0,0 +1,152 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
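
// These tests exercise the explicit (non-overloaded) policy variants of
// __riscv_vsext_vf8, which sign-extends each element to eight times its
// original width (i8 -> i64). The _tu variant lowers to the unmasked
// intrinsic with `maskedoff` as the tail-passthru operand.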

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m1_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m2_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m4_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m8_tu(maskedoff, op1, vl);
}
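
// Masked policy variants. The trailing policy operand encodes the tail/mask
// behavior: 2 for _tum (tail undisturbed, mask agnostic), 0 for _tumu (tail
// and mask undisturbed), and 1 for _mu (tail agnostic, mask undisturbed).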

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m4_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl);
}

@@ -0,0 +1,152 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
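
// These tests exercise the explicit (non-overloaded) policy variants of
// __riscv_vzext_vf8, which zero-extends each element to eight times its
// original width (u8 -> u64). The _tu variant lowers to the unmasked
// intrinsic with `maskedoff` as the tail-passthru operand.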

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m1_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m2_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m4_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m8_tu(maskedoff, op1, vl);
}
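
// Masked policy variants: policy operand 2 = _tum, 0 = _tumu, 1 = _mu.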

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl);
}

@@ -0,0 +1,152 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
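
// These tests exercise the overloaded policy variants of __riscv_vsext_vf8:
// the element type and LMUL are deduced from the arguments, so only the
// tail/mask policy suffix (_tu, _tum, _tumu, _mu) is spelled out.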

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
}
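
// Masked policy variants; the trailing policy operand is 2 for _tum,
// 0 for _tumu, and 1 for _mu.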

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
}

@@ -0,0 +1,152 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
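
// These tests exercise the overloaded policy variants of __riscv_vzext_vf8;
// as with the vsext tests, only the policy suffix is spelled out and the
// types are deduced from the arguments.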

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_tu(maskedoff, op1, vl);
}
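
// Masked policy variants; the trailing policy operand is 2 for _tum,
// 0 for _tumu, and 1 for _mu.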

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
  return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
  return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
  return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
  return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl);
}