// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +xandesvdot -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>
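
// The tests below cover the non-overloaded __riscv_nds_vd4dots_vv_* intrinsics
// from the Andes XAndesVDot extension. Per the generated assertions, each call
// lowers to the @llvm.riscv.nds.vd4dots intrinsic, a signed four-way dot
// product: groups of four quarter-SEW-width elements of vs1 and vs2 are
// multiplied, summed, and accumulated into the SEW-wide elements of vd. The
// trailing "i64 3" operand is the tail/mask policy (tail agnostic, mask
// agnostic).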

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.nds.vd4dots.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_nds_vd4dots_vv_i32mf2(vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32mf2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.nds.vd4dots.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_nds_vd4dots_vv_i32m1(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m1(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.nds.vd4dots.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_nds_vd4dots_vv_i32m2(vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.nds.vd4dots.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_nds_vd4dots_vv_i32m4(vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.nds.vd4dots.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_nds_vd4dots_vv_i32m8(vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m8(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.nds.vd4dots.nxv1i64.nxv4i16.nxv4i16.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_nds_vd4dots_vv_i64m1(vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m1(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.nds.vd4dots.nxv2i64.nxv8i16.nxv8i16.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_nds_vd4dots_vv_i64m2(vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.nds.vd4dots.nxv4i64.nxv16i16.nxv16i16.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_nds_vd4dots_vv_i64m4(vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.nds.vd4dots.nxv8i64.nxv32i16.nxv32i16.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_nds_vd4dots_vv_i64m8(vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m8(vd, vs1, vs2, vl);
}

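// Masked variants: the _m intrinsics additionally take a vbool mask (with the
// SEW/LMUL ratio of vd, e.g. vbool64_t for i32mf2) and lower to the
// corresponding @llvm.riscv.nds.vd4dots.mask intrinsic with the same policy
// operand.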
// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.nds.vd4dots.mask.nxv1i32.nxv4i8.nxv4i8.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_nds_vd4dots_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32mf2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.nds.vd4dots.mask.nxv2i32.nxv8i8.nxv8i8.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_nds_vd4dots_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m1_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.nds.vd4dots.mask.nxv4i32.nxv16i8.nxv16i8.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_nds_vd4dots_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.nds.vd4dots.mask.nxv8i32.nxv32i8.nxv32i8.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_nds_vd4dots_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.nds.vd4dots.mask.nxv16i32.nxv64i8.nxv64i8.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_nds_vd4dots_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i32m8_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.nds.vd4dots.mask.nxv1i64.nxv4i16.nxv4i16.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_nds_vd4dots_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m1_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.nds.vd4dots.mask.nxv2i64.nxv8i16.nxv8i16.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_nds_vd4dots_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.nds.vd4dots.mask.nxv4i64.nxv16i16.nxv16i16.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_nds_vd4dots_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vd4dots_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.nds.vd4dots.mask.nxv8i64.nxv32i16.nxv32i16.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_nds_vd4dots_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
  return __riscv_nds_vd4dots_vv_i64m8_m(mask, vd, vs1, vs2, vl);
}