// __riscv_vsadd: saturating signed integer add (overloaded RVV intrinsics).
// Vector-vector (vs2 + vs1) and vector-scalar (vs2 + rs1) overloads for every
// SEW (8/16/32/64) and LMUL (mf8..m8); results clamp to the signed range
// instead of wrapping — per the RVV C intrinsics specification.
vint8mf8_t __riscv_vsadd(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl);
vint8mf8_t __riscv_vsadd(vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vsadd(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl);
vint8mf4_t __riscv_vsadd(vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vsadd(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl);
vint8mf2_t __riscv_vsadd(vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vsadd(vint8m1_t vs2, vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vsadd(vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vsadd(vint8m2_t vs2, vint8m2_t vs1, size_t vl);
vint8m2_t __riscv_vsadd(vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vsadd(vint8m4_t vs2, vint8m4_t vs1, size_t vl);
vint8m4_t __riscv_vsadd(vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vsadd(vint8m8_t vs2, vint8m8_t vs1, size_t vl);
vint8m8_t __riscv_vsadd(vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vsadd(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl);
vint16mf4_t __riscv_vsadd(vint16mf4_t vs2, int16_t rs1, size_t vl);
vint16mf2_t __riscv_vsadd(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl);
vint16mf2_t __riscv_vsadd(vint16mf2_t vs2, int16_t rs1, size_t vl);
vint16m1_t __riscv_vsadd(vint16m1_t vs2, vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vsadd(vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vsadd(vint16m2_t vs2, vint16m2_t vs1, size_t vl);
vint16m2_t __riscv_vsadd(vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vsadd(vint16m4_t vs2, vint16m4_t vs1, size_t vl);
vint16m4_t __riscv_vsadd(vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vsadd(vint16m8_t vs2, vint16m8_t vs1, size_t vl);
vint16m8_t __riscv_vsadd(vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vsadd(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl);
vint32mf2_t __riscv_vsadd(vint32mf2_t vs2, int32_t rs1, size_t vl);
vint32m1_t __riscv_vsadd(vint32m1_t vs2, vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vsadd(vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vsadd(vint32m2_t vs2, vint32m2_t vs1, size_t vl);
vint32m2_t __riscv_vsadd(vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vsadd(vint32m4_t vs2, vint32m4_t vs1, size_t vl);
vint32m4_t __riscv_vsadd(vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vsadd(vint32m8_t vs2, vint32m8_t vs1, size_t vl);
vint32m8_t __riscv_vsadd(vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vsadd(vint64m1_t vs2, vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vsadd(vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vsadd(vint64m2_t vs2, vint64m2_t vs1, size_t vl);
vint64m2_t __riscv_vsadd(vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vsadd(vint64m4_t vs2, vint64m4_t vs1, size_t vl);
vint64m4_t __riscv_vsadd(vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vsadd(vint64m8_t vs2, vint64m8_t vs1, size_t vl);
vint64m8_t __riscv_vsadd(vint64m8_t vs2, int64_t rs1, size_t vl);
// __riscv_vssub: saturating signed integer subtract (overloaded RVV
// intrinsics). Computes vs2 - vs1 (or vs2 - rs1) with the result clamped to
// the signed range; same SEW/LMUL coverage as vsadd above.
vint8mf8_t __riscv_vssub(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl);
vint8mf8_t __riscv_vssub(vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vssub(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl);
vint8mf4_t __riscv_vssub(vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vssub(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl);
vint8mf2_t __riscv_vssub(vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vssub(vint8m1_t vs2, vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vssub(vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vssub(vint8m2_t vs2, vint8m2_t vs1, size_t vl);
vint8m2_t __riscv_vssub(vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vssub(vint8m4_t vs2, vint8m4_t vs1, size_t vl);
vint8m4_t __riscv_vssub(vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vssub(vint8m8_t vs2, vint8m8_t vs1, size_t vl);
vint8m8_t __riscv_vssub(vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vssub(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl);
vint16mf4_t __riscv_vssub(vint16mf4_t vs2, int16_t rs1, size_t vl);
vint16mf2_t __riscv_vssub(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl);
vint16mf2_t __riscv_vssub(vint16mf2_t vs2, int16_t rs1, size_t vl);
vint16m1_t __riscv_vssub(vint16m1_t vs2, vint16m1_t vs1, size_t vl);
vint16m1_t __riscv_vssub(vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vssub(vint16m2_t vs2, vint16m2_t vs1, size_t vl);
vint16m2_t __riscv_vssub(vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vssub(vint16m4_t vs2, vint16m4_t vs1, size_t vl);
vint16m4_t __riscv_vssub(vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vssub(vint16m8_t vs2, vint16m8_t vs1, size_t vl);
vint16m8_t __riscv_vssub(vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vssub(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl);
vint32mf2_t __riscv_vssub(vint32mf2_t vs2, int32_t rs1, size_t vl);
vint32m1_t __riscv_vssub(vint32m1_t vs2, vint32m1_t vs1, size_t vl);
vint32m1_t __riscv_vssub(vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vssub(vint32m2_t vs2, vint32m2_t vs1, size_t vl);
vint32m2_t __riscv_vssub(vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vssub(vint32m4_t vs2, vint32m4_t vs1, size_t vl);
vint32m4_t __riscv_vssub(vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vssub(vint32m8_t vs2, vint32m8_t vs1, size_t vl);
vint32m8_t __riscv_vssub(vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vssub(vint64m1_t vs2, vint64m1_t vs1, size_t vl);
vint64m1_t __riscv_vssub(vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vssub(vint64m2_t vs2, vint64m2_t vs1, size_t vl);
vint64m2_t __riscv_vssub(vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vssub(vint64m4_t vs2, vint64m4_t vs1, size_t vl);
vint64m4_t __riscv_vssub(vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vssub(vint64m8_t vs2, vint64m8_t vs1, size_t vl);
vint64m8_t __riscv_vssub(vint64m8_t vs2, int64_t rs1, size_t vl);
// __riscv_vsaddu: saturating unsigned integer add (overloaded RVV
// intrinsics). Results clamp to the unsigned range; unsigned counterpart of
// vsadd with identical SEW/LMUL coverage.
vuint8mf8_t __riscv_vsaddu(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
vuint8mf8_t __riscv_vsaddu(vuint8mf8_t vs2, uint8_t rs1, size_t vl);
vuint8mf4_t __riscv_vsaddu(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
vuint8mf4_t __riscv_vsaddu(vuint8mf4_t vs2, uint8_t rs1, size_t vl);
vuint8mf2_t __riscv_vsaddu(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
vuint8mf2_t __riscv_vsaddu(vuint8mf2_t vs2, uint8_t rs1, size_t vl);
vuint8m1_t __riscv_vsaddu(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vsaddu(vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vsaddu(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
vuint8m2_t __riscv_vsaddu(vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vsaddu(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
vuint8m4_t __riscv_vsaddu(vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vsaddu(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
vuint8m8_t __riscv_vsaddu(vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vsaddu(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
vuint16mf4_t __riscv_vsaddu(vuint16mf4_t vs2, uint16_t rs1, size_t vl);
vuint16mf2_t __riscv_vsaddu(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
vuint16mf2_t __riscv_vsaddu(vuint16mf2_t vs2, uint16_t rs1, size_t vl);
vuint16m1_t __riscv_vsaddu(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vsaddu(vuint16m1_t vs2, uint16_t rs1, size_t vl);
vuint16m2_t __riscv_vsaddu(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
vuint16m2_t __riscv_vsaddu(vuint16m2_t vs2, uint16_t rs1, size_t vl);
vuint16m4_t __riscv_vsaddu(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
vuint16m4_t __riscv_vsaddu(vuint16m4_t vs2, uint16_t rs1, size_t vl);
vuint16m8_t __riscv_vsaddu(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl);
vuint16m8_t __riscv_vsaddu(vuint16m8_t vs2, uint16_t rs1, size_t vl);
vuint32mf2_t __riscv_vsaddu(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
vuint32mf2_t __riscv_vsaddu(vuint32mf2_t vs2, uint32_t rs1, size_t vl);
vuint32m1_t __riscv_vsaddu(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vsaddu(vuint32m1_t vs2, uint32_t rs1, size_t vl);
vuint32m2_t __riscv_vsaddu(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
vuint32m2_t __riscv_vsaddu(vuint32m2_t vs2, uint32_t rs1, size_t vl);
vuint32m4_t __riscv_vsaddu(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
vuint32m4_t __riscv_vsaddu(vuint32m4_t vs2, uint32_t rs1, size_t vl);
vuint32m8_t __riscv_vsaddu(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
vuint32m8_t __riscv_vsaddu(vuint32m8_t vs2, uint32_t rs1, size_t vl);
vuint64m1_t __riscv_vsaddu(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vsaddu(vuint64m1_t vs2, uint64_t rs1, size_t vl);
vuint64m2_t __riscv_vsaddu(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
vuint64m2_t __riscv_vsaddu(vuint64m2_t vs2, uint64_t rs1, size_t vl);
vuint64m4_t __riscv_vsaddu(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
vuint64m4_t __riscv_vsaddu(vuint64m4_t vs2, uint64_t rs1, size_t vl);
vuint64m8_t __riscv_vsaddu(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
vuint64m8_t __riscv_vsaddu(vuint64m8_t vs2, uint64_t rs1, size_t vl);
// __riscv_vssubu: saturating unsigned integer subtract (overloaded RVV
// intrinsics). Computes vs2 - vs1 (or vs2 - rs1); results that would go
// below zero clamp to 0. Unsigned counterpart of vssub.
vuint8mf8_t __riscv_vssubu(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
vuint8mf8_t __riscv_vssubu(vuint8mf8_t vs2, uint8_t rs1, size_t vl);
vuint8mf4_t __riscv_vssubu(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
vuint8mf4_t __riscv_vssubu(vuint8mf4_t vs2, uint8_t rs1, size_t vl);
vuint8mf2_t __riscv_vssubu(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
vuint8mf2_t __riscv_vssubu(vuint8mf2_t vs2, uint8_t rs1, size_t vl);
vuint8m1_t __riscv_vssubu(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
vuint8m1_t __riscv_vssubu(vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vssubu(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
vuint8m2_t __riscv_vssubu(vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vssubu(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
vuint8m4_t __riscv_vssubu(vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vssubu(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
vuint8m8_t __riscv_vssubu(vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vssubu(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
vuint16mf4_t __riscv_vssubu(vuint16mf4_t vs2, uint16_t rs1, size_t vl);
vuint16mf2_t __riscv_vssubu(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
vuint16mf2_t __riscv_vssubu(vuint16mf2_t vs2, uint16_t rs1, size_t vl);
vuint16m1_t __riscv_vssubu(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl);
vuint16m1_t __riscv_vssubu(vuint16m1_t vs2, uint16_t rs1, size_t vl);
vuint16m2_t __riscv_vssubu(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
vuint16m2_t __riscv_vssubu(vuint16m2_t vs2, uint16_t rs1, size_t vl);
vuint16m4_t __riscv_vssubu(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
vuint16m4_t __riscv_vssubu(vuint16m4_t vs2, uint16_t rs1, size_t vl);
vuint16m8_t __riscv_vssubu(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl);
vuint16m8_t __riscv_vssubu(vuint16m8_t vs2, uint16_t rs1, size_t vl);
vuint32mf2_t __riscv_vssubu(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
vuint32mf2_t __riscv_vssubu(vuint32mf2_t vs2, uint32_t rs1, size_t vl);
vuint32m1_t __riscv_vssubu(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
vuint32m1_t __riscv_vssubu(vuint32m1_t vs2, uint32_t rs1, size_t vl);
vuint32m2_t __riscv_vssubu(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
vuint32m2_t __riscv_vssubu(vuint32m2_t vs2, uint32_t rs1, size_t vl);
vuint32m4_t __riscv_vssubu(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
vuint32m4_t __riscv_vssubu(vuint32m4_t vs2, uint32_t rs1, size_t vl);
vuint32m8_t __riscv_vssubu(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
vuint32m8_t __riscv_vssubu(vuint32m8_t vs2, uint32_t rs1, size_t vl);
vuint64m1_t __riscv_vssubu(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
vuint64m1_t __riscv_vssubu(vuint64m1_t vs2, uint64_t rs1, size_t vl);
vuint64m2_t __riscv_vssubu(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
vuint64m2_t __riscv_vssubu(vuint64m2_t vs2, uint64_t rs1, size_t vl);
vuint64m4_t __riscv_vssubu(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
vuint64m4_t __riscv_vssubu(vuint64m4_t vs2, uint64_t rs1, size_t vl);
vuint64m8_t __riscv_vssubu(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
vuint64m8_t __riscv_vssubu(vuint64m8_t vs2, uint64_t rs1, size_t vl);
// Masked variants: each overload below takes a mask operand (vm) as its first
// argument; only elements whose mask bit is set are computed.
// Masked __riscv_vsadd: saturating signed add restricted to active elements
// selected by the vbool mask vm (mask type matches each type's SEW/LMUL).
vint8mf8_t __riscv_vsadd(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
                         size_t vl);
vint8mf8_t __riscv_vsadd(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vsadd(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
                         size_t vl);
vint8mf4_t __riscv_vsadd(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vsadd(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
                         size_t vl);
vint8mf2_t __riscv_vsadd(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vsadd(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vsadd(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vsadd(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl);
vint8m2_t __riscv_vsadd(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vsadd(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl);
vint8m4_t __riscv_vsadd(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vsadd(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl);
vint8m8_t __riscv_vsadd(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vsadd(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1,
                          size_t vl);
vint16mf4_t __riscv_vsadd(vbool64_t vm, vint16mf4_t vs2, int16_t rs1,
                          size_t vl);
vint16mf2_t __riscv_vsadd(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
                          size_t vl);
vint16mf2_t __riscv_vsadd(vbool32_t vm, vint16mf2_t vs2, int16_t rs1,
                          size_t vl);
vint16m1_t __riscv_vsadd(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
                         size_t vl);
vint16m1_t __riscv_vsadd(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vsadd(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
                         size_t vl);
vint16m2_t __riscv_vsadd(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vsadd(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
                         size_t vl);
vint16m4_t __riscv_vsadd(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vsadd(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
                         size_t vl);
vint16m8_t __riscv_vsadd(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vsadd(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
                          size_t vl);
vint32mf2_t __riscv_vsadd(vbool64_t vm, vint32mf2_t vs2, int32_t rs1,
                          size_t vl);
vint32m1_t __riscv_vsadd(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
                         size_t vl);
vint32m1_t __riscv_vsadd(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vsadd(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
                         size_t vl);
vint32m2_t __riscv_vsadd(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vsadd(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
                         size_t vl);
vint32m4_t __riscv_vsadd(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vsadd(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
                         size_t vl);
vint32m8_t __riscv_vsadd(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vsadd(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
                         size_t vl);
vint64m1_t __riscv_vsadd(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vsadd(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
                         size_t vl);
vint64m2_t __riscv_vsadd(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vsadd(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
                         size_t vl);
vint64m4_t __riscv_vsadd(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vsadd(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
                         size_t vl);
vint64m8_t __riscv_vsadd(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl);
// Masked __riscv_vssub: saturating signed subtract restricted to active
// elements selected by the vbool mask vm.
vint8mf8_t __riscv_vssub(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
                         size_t vl);
vint8mf8_t __riscv_vssub(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl);
vint8mf4_t __riscv_vssub(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
                         size_t vl);
vint8mf4_t __riscv_vssub(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl);
vint8mf2_t __riscv_vssub(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
                         size_t vl);
vint8mf2_t __riscv_vssub(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl);
vint8m1_t __riscv_vssub(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl);
vint8m1_t __riscv_vssub(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl);
vint8m2_t __riscv_vssub(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl);
vint8m2_t __riscv_vssub(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl);
vint8m4_t __riscv_vssub(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl);
vint8m4_t __riscv_vssub(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl);
vint8m8_t __riscv_vssub(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl);
vint8m8_t __riscv_vssub(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl);
vint16mf4_t __riscv_vssub(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1,
                          size_t vl);
vint16mf4_t __riscv_vssub(vbool64_t vm, vint16mf4_t vs2, int16_t rs1,
                          size_t vl);
vint16mf2_t __riscv_vssub(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
                          size_t vl);
vint16mf2_t __riscv_vssub(vbool32_t vm, vint16mf2_t vs2, int16_t rs1,
                          size_t vl);
vint16m1_t __riscv_vssub(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
                         size_t vl);
vint16m1_t __riscv_vssub(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl);
vint16m2_t __riscv_vssub(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
                         size_t vl);
vint16m2_t __riscv_vssub(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl);
vint16m4_t __riscv_vssub(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
                         size_t vl);
vint16m4_t __riscv_vssub(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl);
vint16m8_t __riscv_vssub(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
                         size_t vl);
vint16m8_t __riscv_vssub(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl);
vint32mf2_t __riscv_vssub(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
                          size_t vl);
vint32mf2_t __riscv_vssub(vbool64_t vm, vint32mf2_t vs2, int32_t rs1,
                          size_t vl);
vint32m1_t __riscv_vssub(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
                         size_t vl);
vint32m1_t __riscv_vssub(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl);
vint32m2_t __riscv_vssub(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
                         size_t vl);
vint32m2_t __riscv_vssub(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl);
vint32m4_t __riscv_vssub(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
                         size_t vl);
vint32m4_t __riscv_vssub(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl);
vint32m8_t __riscv_vssub(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
                         size_t vl);
vint32m8_t __riscv_vssub(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl);
vint64m1_t __riscv_vssub(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
                         size_t vl);
vint64m1_t __riscv_vssub(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl);
vint64m2_t __riscv_vssub(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
                         size_t vl);
vint64m2_t __riscv_vssub(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl);
vint64m4_t __riscv_vssub(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
                         size_t vl);
vint64m4_t __riscv_vssub(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl);
vint64m8_t __riscv_vssub(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
                         size_t vl);
vint64m8_t __riscv_vssub(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl);
// Masked __riscv_vsaddu: saturating unsigned add restricted to active
// elements selected by the vbool mask vm.
vuint8mf8_t __riscv_vsaddu(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
                           size_t vl);
vuint8mf8_t __riscv_vsaddu(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1,
                           size_t vl);
vuint8mf4_t __riscv_vsaddu(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
                           size_t vl);
vuint8mf4_t __riscv_vsaddu(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1,
                           size_t vl);
vuint8mf2_t __riscv_vsaddu(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
                           size_t vl);
vuint8mf2_t __riscv_vsaddu(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1,
                           size_t vl);
vuint8m1_t __riscv_vsaddu(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
                          size_t vl);
vuint8m1_t __riscv_vsaddu(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vsaddu(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
                          size_t vl);
vuint8m2_t __riscv_vsaddu(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vsaddu(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
                          size_t vl);
vuint8m4_t __riscv_vsaddu(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vsaddu(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
                          size_t vl);
vuint8m8_t __riscv_vsaddu(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vsaddu(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1,
                            size_t vl);
vuint16mf4_t __riscv_vsaddu(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1,
                            size_t vl);
vuint16mf2_t __riscv_vsaddu(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1,
                            size_t vl);
vuint16mf2_t __riscv_vsaddu(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1,
                            size_t vl);
vuint16m1_t __riscv_vsaddu(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
                           size_t vl);
vuint16m1_t __riscv_vsaddu(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m2_t __riscv_vsaddu(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
                           size_t vl);
vuint16m2_t __riscv_vsaddu(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m4_t __riscv_vsaddu(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
                           size_t vl);
vuint16m4_t __riscv_vsaddu(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m8_t __riscv_vsaddu(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
                           size_t vl);
vuint16m8_t __riscv_vsaddu(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1,
                           size_t vl);
vuint32mf2_t __riscv_vsaddu(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1,
                            size_t vl);
vuint32mf2_t __riscv_vsaddu(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1,
                            size_t vl);
vuint32m1_t __riscv_vsaddu(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
                           size_t vl);
vuint32m1_t __riscv_vsaddu(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m2_t __riscv_vsaddu(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
                           size_t vl);
vuint32m2_t __riscv_vsaddu(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m4_t __riscv_vsaddu(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
                           size_t vl);
vuint32m4_t __riscv_vsaddu(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m8_t __riscv_vsaddu(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1,
                           size_t vl);
vuint32m8_t __riscv_vsaddu(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1,
                           size_t vl);
vuint64m1_t __riscv_vsaddu(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
                           size_t vl);
vuint64m1_t __riscv_vsaddu(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m2_t __riscv_vsaddu(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
                           size_t vl);
vuint64m2_t __riscv_vsaddu(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m4_t __riscv_vsaddu(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
                           size_t vl);
vuint64m4_t __riscv_vsaddu(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m8_t __riscv_vsaddu(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1,
                           size_t vl);
vuint64m8_t __riscv_vsaddu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1,
                           size_t vl);
// Masked __riscv_vssubu: saturating unsigned subtract restricted to active
// elements selected by the vbool mask vm.
vuint8mf8_t __riscv_vssubu(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
                           size_t vl);
vuint8mf8_t __riscv_vssubu(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1,
                           size_t vl);
vuint8mf4_t __riscv_vssubu(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
                           size_t vl);
vuint8mf4_t __riscv_vssubu(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1,
                           size_t vl);
vuint8mf2_t __riscv_vssubu(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
                           size_t vl);
vuint8mf2_t __riscv_vssubu(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1,
                           size_t vl);
vuint8m1_t __riscv_vssubu(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
                          size_t vl);
vuint8m1_t __riscv_vssubu(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl);
vuint8m2_t __riscv_vssubu(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
                          size_t vl);
vuint8m2_t __riscv_vssubu(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl);
vuint8m4_t __riscv_vssubu(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
                          size_t vl);
vuint8m4_t __riscv_vssubu(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl);
vuint8m8_t __riscv_vssubu(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
                          size_t vl);
vuint8m8_t __riscv_vssubu(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl);
vuint16mf4_t __riscv_vssubu(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1,
                            size_t vl);
vuint16mf4_t __riscv_vssubu(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1,
                            size_t vl);
vuint16mf2_t __riscv_vssubu(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1,
                            size_t vl);
vuint16mf2_t __riscv_vssubu(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1,
                            size_t vl);
vuint16m1_t __riscv_vssubu(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
                           size_t vl);
vuint16m1_t __riscv_vssubu(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m2_t __riscv_vssubu(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
                           size_t vl);
vuint16m2_t __riscv_vssubu(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m4_t __riscv_vssubu(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
                           size_t vl);
vuint16m4_t __riscv_vssubu(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1,
                           size_t vl);
vuint16m8_t __riscv_vssubu(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
                           size_t vl);
vuint16m8_t __riscv_vssubu(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1,
                           size_t vl);
vuint32mf2_t __riscv_vssubu(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1,
                            size_t vl);
vuint32mf2_t __riscv_vssubu(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1,
                            size_t vl);
vuint32m1_t __riscv_vssubu(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
                           size_t vl);
vuint32m1_t __riscv_vssubu(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m2_t __riscv_vssubu(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
                           size_t vl);
vuint32m2_t __riscv_vssubu(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m4_t __riscv_vssubu(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
                           size_t vl);
vuint32m4_t __riscv_vssubu(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1,
                           size_t vl);
vuint32m8_t __riscv_vssubu(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1,
                           size_t vl);
vuint32m8_t __riscv_vssubu(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1,
                           size_t vl);
vuint64m1_t __riscv_vssubu(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
                           size_t vl);
vuint64m1_t __riscv_vssubu(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m2_t __riscv_vssubu(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
                           size_t vl);
vuint64m2_t __riscv_vssubu(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m4_t __riscv_vssubu(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
                           size_t vl);
vuint64m4_t __riscv_vssubu(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1,
                           size_t vl);
vuint64m8_t __riscv_vssubu(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1,
                           size_t vl);
vuint64m8_t __riscv_vssubu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1,
                           size_t vl);
// __riscv_vaadd: averaging signed add (overloaded RVV intrinsics). Each
// overload takes an explicit fixed-point rounding-mode operand `vxrm`
// (values per the RVV spec's vxrm encodings) in addition to vl.
vint8mf8_t __riscv_vaadd(vint8mf8_t vs2, vint8mf8_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf8_t __riscv_vaadd(vint8mf8_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8mf4_t __riscv_vaadd(vint8mf4_t vs2, vint8mf4_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf4_t __riscv_vaadd(vint8mf4_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8mf2_t __riscv_vaadd(vint8mf2_t vs2, vint8mf2_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf2_t __riscv_vaadd(vint8mf2_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8m1_t __riscv_vaadd(vint8m1_t vs2, vint8m1_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m1_t __riscv_vaadd(vint8m1_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m2_t __riscv_vaadd(vint8m2_t vs2, vint8m2_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m2_t __riscv_vaadd(vint8m2_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m4_t __riscv_vaadd(vint8m4_t vs2, vint8m4_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m4_t __riscv_vaadd(vint8m4_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m8_t __riscv_vaadd(vint8m8_t vs2, vint8m8_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m8_t __riscv_vaadd(vint8m8_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint16mf4_t __riscv_vaadd(vint16mf4_t vs2, vint16mf4_t vs1, unsigned int vxrm,
                          size_t vl);
vint16mf4_t __riscv_vaadd(vint16mf4_t vs2, int16_t rs1, unsigned int vxrm,
                          size_t vl);
vint16mf2_t __riscv_vaadd(vint16mf2_t vs2, vint16mf2_t vs1, unsigned int vxrm,
                          size_t vl);
vint16mf2_t __riscv_vaadd(vint16mf2_t vs2, int16_t rs1, unsigned int vxrm,
                          size_t vl);
vint16m1_t __riscv_vaadd(vint16m1_t vs2, vint16m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m1_t __riscv_vaadd(vint16m1_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m2_t __riscv_vaadd(vint16m2_t vs2, vint16m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m2_t __riscv_vaadd(vint16m2_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m4_t __riscv_vaadd(vint16m4_t vs2, vint16m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m4_t __riscv_vaadd(vint16m4_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m8_t __riscv_vaadd(vint16m8_t vs2, vint16m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m8_t __riscv_vaadd(vint16m8_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint32mf2_t __riscv_vaadd(vint32mf2_t vs2, vint32mf2_t vs1, unsigned int vxrm,
                          size_t vl);
vint32mf2_t __riscv_vaadd(vint32mf2_t vs2, int32_t rs1, unsigned int vxrm,
                          size_t vl);
vint32m1_t __riscv_vaadd(vint32m1_t vs2, vint32m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m1_t __riscv_vaadd(vint32m1_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m2_t __riscv_vaadd(vint32m2_t vs2, vint32m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m2_t __riscv_vaadd(vint32m2_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m4_t __riscv_vaadd(vint32m4_t vs2, vint32m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m4_t __riscv_vaadd(vint32m4_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m8_t __riscv_vaadd(vint32m8_t vs2, vint32m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m8_t __riscv_vaadd(vint32m8_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m1_t __riscv_vaadd(vint64m1_t vs2, vint64m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m1_t __riscv_vaadd(vint64m1_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m2_t __riscv_vaadd(vint64m2_t vs2, vint64m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m2_t __riscv_vaadd(vint64m2_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m4_t __riscv_vaadd(vint64m4_t vs2, vint64m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m4_t __riscv_vaadd(vint64m4_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m8_t __riscv_vaadd(vint64m8_t vs2, vint64m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m8_t __riscv_vaadd(vint64m8_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
// __riscv_vasub: averaging signed subtract (overloaded RVV intrinsics).
// Takes the fixed-point rounding mode `vxrm` explicitly, like vaadd above.
vint8mf8_t __riscv_vasub(vint8mf8_t vs2, vint8mf8_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf8_t __riscv_vasub(vint8mf8_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8mf4_t __riscv_vasub(vint8mf4_t vs2, vint8mf4_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf4_t __riscv_vasub(vint8mf4_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8mf2_t __riscv_vasub(vint8mf2_t vs2, vint8mf2_t vs1, unsigned int vxrm,
                         size_t vl);
vint8mf2_t __riscv_vasub(vint8mf2_t vs2, int8_t rs1, unsigned int vxrm,
                         size_t vl);
vint8m1_t __riscv_vasub(vint8m1_t vs2, vint8m1_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m1_t __riscv_vasub(vint8m1_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m2_t __riscv_vasub(vint8m2_t vs2, vint8m2_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m2_t __riscv_vasub(vint8m2_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m4_t __riscv_vasub(vint8m4_t vs2, vint8m4_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m4_t __riscv_vasub(vint8m4_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint8m8_t __riscv_vasub(vint8m8_t vs2, vint8m8_t vs1, unsigned int vxrm,
                        size_t vl);
vint8m8_t __riscv_vasub(vint8m8_t vs2, int8_t rs1, unsigned int vxrm,
                        size_t vl);
vint16mf4_t __riscv_vasub(vint16mf4_t vs2, vint16mf4_t vs1, unsigned int vxrm,
                          size_t vl);
vint16mf4_t __riscv_vasub(vint16mf4_t vs2, int16_t rs1, unsigned int vxrm,
                          size_t vl);
vint16mf2_t __riscv_vasub(vint16mf2_t vs2, vint16mf2_t vs1, unsigned int vxrm,
                          size_t vl);
vint16mf2_t __riscv_vasub(vint16mf2_t vs2, int16_t rs1, unsigned int vxrm,
                          size_t vl);
vint16m1_t __riscv_vasub(vint16m1_t vs2, vint16m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m1_t __riscv_vasub(vint16m1_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m2_t __riscv_vasub(vint16m2_t vs2, vint16m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m2_t __riscv_vasub(vint16m2_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m4_t __riscv_vasub(vint16m4_t vs2, vint16m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m4_t __riscv_vasub(vint16m4_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint16m8_t __riscv_vasub(vint16m8_t vs2, vint16m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint16m8_t __riscv_vasub(vint16m8_t vs2, int16_t rs1, unsigned int vxrm,
                         size_t vl);
vint32mf2_t __riscv_vasub(vint32mf2_t vs2, vint32mf2_t vs1, unsigned int vxrm,
                          size_t vl);
vint32mf2_t __riscv_vasub(vint32mf2_t vs2, int32_t rs1, unsigned int vxrm,
                          size_t vl);
vint32m1_t __riscv_vasub(vint32m1_t vs2, vint32m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m1_t __riscv_vasub(vint32m1_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m2_t __riscv_vasub(vint32m2_t vs2, vint32m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m2_t __riscv_vasub(vint32m2_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m4_t __riscv_vasub(vint32m4_t vs2, vint32m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m4_t __riscv_vasub(vint32m4_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint32m8_t __riscv_vasub(vint32m8_t vs2, vint32m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint32m8_t __riscv_vasub(vint32m8_t vs2, int32_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m1_t __riscv_vasub(vint64m1_t vs2, vint64m1_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m1_t __riscv_vasub(vint64m1_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m2_t __riscv_vasub(vint64m2_t vs2, vint64m2_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m2_t __riscv_vasub(vint64m2_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m4_t __riscv_vasub(vint64m4_t vs2, vint64m4_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m4_t __riscv_vasub(vint64m4_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
vint64m8_t __riscv_vasub(vint64m8_t vs2, vint64m8_t vs1, unsigned int vxrm,
                         size_t vl);
vint64m8_t __riscv_vasub(vint64m8_t vs2, int64_t rs1, unsigned int vxrm,
                         size_t vl);
// __riscv_vaaddu: averaging unsigned add (overloaded RVV intrinsics), with
// explicit fixed-point rounding mode `vxrm`. Unsigned counterpart of vaadd.
// NOTE(review): the uint64 overloads continue past this point in the file.
vuint8mf8_t __riscv_vaaddu(vuint8mf8_t vs2, vuint8mf8_t vs1, unsigned int vxrm,
                           size_t vl);
vuint8mf8_t __riscv_vaaddu(vuint8mf8_t vs2, uint8_t rs1, unsigned int vxrm,
                           size_t vl);
vuint8mf4_t __riscv_vaaddu(vuint8mf4_t vs2, vuint8mf4_t vs1, unsigned int vxrm,
                           size_t vl);
vuint8mf4_t __riscv_vaaddu(vuint8mf4_t vs2, uint8_t rs1, unsigned int vxrm,
                           size_t vl);
vuint8mf2_t __riscv_vaaddu(vuint8mf2_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
                           size_t vl);
vuint8mf2_t __riscv_vaaddu(vuint8mf2_t vs2, uint8_t rs1, unsigned int vxrm,
                           size_t vl);
vuint8m1_t __riscv_vaaddu(vuint8m1_t vs2, vuint8m1_t vs1, unsigned int vxrm,
                          size_t vl);
vuint8m1_t __riscv_vaaddu(vuint8m1_t vs2, uint8_t rs1, unsigned int vxrm,
                          size_t vl);
vuint8m2_t __riscv_vaaddu(vuint8m2_t vs2, vuint8m2_t vs1, unsigned int vxrm,
                          size_t vl);
vuint8m2_t __riscv_vaaddu(vuint8m2_t vs2, uint8_t rs1, unsigned int vxrm,
                          size_t vl);
vuint8m4_t __riscv_vaaddu(vuint8m4_t vs2, vuint8m4_t vs1, unsigned int vxrm,
                          size_t vl);
vuint8m4_t __riscv_vaaddu(vuint8m4_t vs2, uint8_t rs1, unsigned int vxrm,
                          size_t vl);
vuint8m8_t __riscv_vaaddu(vuint8m8_t vs2, vuint8m8_t vs1, unsigned int vxrm,
                          size_t vl);
vuint8m8_t __riscv_vaaddu(vuint8m8_t vs2, uint8_t rs1, unsigned int vxrm,
                          size_t vl);
vuint16mf4_t __riscv_vaaddu(vuint16mf4_t vs2, vuint16mf4_t vs1,
                            unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu(vuint16mf4_t vs2, uint16_t rs1, unsigned int vxrm,
                            size_t vl);
vuint16mf2_t __riscv_vaaddu(vuint16mf2_t vs2, vuint16mf2_t vs1,
                            unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu(vuint16mf2_t vs2, uint16_t rs1, unsigned int vxrm,
                            size_t vl);
vuint16m1_t __riscv_vaaddu(vuint16m1_t vs2, vuint16m1_t vs1, unsigned int vxrm,
                           size_t vl);
vuint16m1_t __riscv_vaaddu(vuint16m1_t vs2, uint16_t rs1, unsigned int vxrm,
                           size_t vl);
vuint16m2_t __riscv_vaaddu(vuint16m2_t vs2, vuint16m2_t vs1, unsigned int vxrm,
                           size_t vl);
vuint16m2_t __riscv_vaaddu(vuint16m2_t vs2, uint16_t rs1, unsigned int vxrm,
                           size_t vl);
vuint16m4_t __riscv_vaaddu(vuint16m4_t vs2, vuint16m4_t vs1, unsigned int vxrm,
                           size_t vl);
vuint16m4_t __riscv_vaaddu(vuint16m4_t vs2, uint16_t rs1, unsigned int vxrm,
                           size_t vl);
vuint16m8_t __riscv_vaaddu(vuint16m8_t vs2, vuint16m8_t vs1, unsigned int vxrm,
                           size_t vl);
vuint16m8_t __riscv_vaaddu(vuint16m8_t vs2, uint16_t rs1, unsigned int vxrm,
                           size_t vl);
vuint32mf2_t __riscv_vaaddu(vuint32mf2_t vs2, vuint32mf2_t vs1,
                            unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu(vuint32mf2_t vs2, uint32_t rs1, unsigned int vxrm,
                            size_t vl);
vuint32m1_t __riscv_vaaddu(vuint32m1_t vs2, vuint32m1_t vs1, unsigned int vxrm,
                           size_t vl);
vuint32m1_t __riscv_vaaddu(vuint32m1_t vs2, uint32_t rs1, unsigned int vxrm,
                           size_t vl);
vuint32m2_t __riscv_vaaddu(vuint32m2_t vs2, vuint32m2_t vs1, unsigned int vxrm,
                           size_t vl);
vuint32m2_t __riscv_vaaddu(vuint32m2_t vs2, uint32_t rs1, unsigned int vxrm,
                           size_t vl);
vuint32m4_t __riscv_vaaddu(vuint32m4_t vs2, vuint32m4_t vs1, unsigned int vxrm,
                           size_t vl);
vuint32m4_t __riscv_vaaddu(vuint32m4_t vs2, uint32_t rs1, unsigned int vxrm,
                           size_t vl);
vuint32m8_t __riscv_vaaddu(vuint32m8_t vs2, vuint32m8_t vs1, unsigned int vxrm,
                           size_t vl);
vuint32m8_t __riscv_vaaddu(vuint32m8_t vs2, uint32_t rs1, unsigned int vxrm,
                           size_t vl);
vuint64m1_t __riscv_vaaddu(vuint64m1_t vs2, vuint64m1_t vs1, unsigned int vxrm,
size_t vl);
vuint64m1_t __riscv_vaaddu(vuint64m1_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vaaddu(vuint64m2_t vs2, vuint64m2_t vs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vaaddu(vuint64m2_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vaaddu(vuint64m4_t vs2, vuint64m4_t vs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vaaddu(vuint64m4_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vaaddu(vuint64m8_t vs2, vuint64m8_t vs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vaaddu(vuint64m8_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
// __riscv_vasubu (unmasked): unsigned averaging subtract, per the RVV
// intrinsics spec. Same overload structure as vaaddu above: u8..u64, all
// LMULs, vector-vector (vs1) and vector-scalar (rs1) forms, with rounding
// mode vxrm and vector length vl.
vuint8mf8_t __riscv_vasubu(vuint8mf8_t vs2, vuint8mf8_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf8_t __riscv_vasubu(vuint8mf8_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf4_t __riscv_vasubu(vuint8mf4_t vs2, vuint8mf4_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf4_t __riscv_vasubu(vuint8mf4_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vasubu(vuint8mf2_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vasubu(vuint8mf2_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vasubu(vuint8m1_t vs2, vuint8m1_t vs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vasubu(vuint8m1_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vasubu(vuint8m2_t vs2, vuint8m2_t vs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vasubu(vuint8m2_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vasubu(vuint8m4_t vs2, vuint8m4_t vs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vasubu(vuint8m4_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint8m8_t __riscv_vasubu(vuint8m8_t vs2, vuint8m8_t vs1, unsigned int vxrm,
size_t vl);
vuint8m8_t __riscv_vasubu(vuint8m8_t vs2, uint8_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf4_t __riscv_vasubu(vuint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu(vuint16mf4_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf2_t __riscv_vasubu(vuint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu(vuint16mf2_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vasubu(vuint16m1_t vs2, vuint16m1_t vs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vasubu(vuint16m1_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vasubu(vuint16m2_t vs2, vuint16m2_t vs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vasubu(vuint16m2_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vasubu(vuint16m4_t vs2, vuint16m4_t vs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vasubu(vuint16m4_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint16m8_t __riscv_vasubu(vuint16m8_t vs2, vuint16m8_t vs1, unsigned int vxrm,
size_t vl);
vuint16m8_t __riscv_vasubu(vuint16m8_t vs2, uint16_t rs1, unsigned int vxrm,
size_t vl);
vuint32mf2_t __riscv_vasubu(vuint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu(vuint32mf2_t vs2, uint32_t rs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vasubu(vuint32m1_t vs2, vuint32m1_t vs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vasubu(vuint32m1_t vs2, uint32_t rs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vasubu(vuint32m2_t vs2, vuint32m2_t vs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vasubu(vuint32m2_t vs2, uint32_t rs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vasubu(vuint32m4_t vs2, vuint32m4_t vs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vasubu(vuint32m4_t vs2, uint32_t rs1, unsigned int vxrm,
size_t vl);
vuint32m8_t __riscv_vasubu(vuint32m8_t vs2, vuint32m8_t vs1, unsigned int vxrm,
size_t vl);
vuint32m8_t __riscv_vasubu(vuint32m8_t vs2, uint32_t rs1, unsigned int vxrm,
size_t vl);
vuint64m1_t __riscv_vasubu(vuint64m1_t vs2, vuint64m1_t vs1, unsigned int vxrm,
size_t vl);
vuint64m1_t __riscv_vasubu(vuint64m1_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vasubu(vuint64m2_t vs2, vuint64m2_t vs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vasubu(vuint64m2_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vasubu(vuint64m4_t vs2, vuint64m4_t vs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vasubu(vuint64m4_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vasubu(vuint64m8_t vs2, vuint64m8_t vs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vasubu(vuint64m8_t vs2, uint64_t rs1, unsigned int vxrm,
size_t vl);
// masked functions: each overload below takes a leading mask operand (vm)
// that selects the active elements
// __riscv_vaadd (masked): signed averaging add with a leading vboolN_t mask
// vm (N matches SEW/LMUL ratio, e.g. vbool64_t for i8mf8). Otherwise mirrors
// the unmasked overload set: i8..i64, all LMULs, vs1/rs1 forms, vxrm, vl.
vint8mf8_t __riscv_vaadd(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vaadd(vbool64_t vm, vint8mf8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd(vbool32_t vm, vint8mf4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd(vbool16_t vm, vint8mf2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd(vbool8_t vm, vint8m1_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd(vbool4_t vm, vint8m2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd(vbool2_t vm, vint8m4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd(vbool1_t vm, vint8m8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd(vbool64_t vm, vint16mf4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd(vbool32_t vm, vint16mf2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd(vbool16_t vm, vint16m1_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd(vbool8_t vm, vint16m2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd(vbool4_t vm, vint16m4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd(vbool2_t vm, vint16m8_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd(vbool64_t vm, vint32mf2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd(vbool32_t vm, vint32m1_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd(vbool16_t vm, vint32m2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd(vbool8_t vm, vint32m4_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd(vbool4_t vm, vint32m8_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd(vbool64_t vm, vint64m1_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd(vbool32_t vm, vint64m2_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd(vbool16_t vm, vint64m4_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd(vbool8_t vm, vint64m8_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
// __riscv_vasub (masked): signed averaging subtract; identical overload
// structure to masked vaadd above (leading vm mask, vs1/rs1 forms, vxrm, vl).
vint8mf8_t __riscv_vasub(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vasub(vbool64_t vm, vint8mf8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub(vbool32_t vm, vint8mf4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub(vbool16_t vm, vint8mf2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub(vbool8_t vm, vint8m1_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub(vbool4_t vm, vint8m2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub(vbool2_t vm, vint8m4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub(vbool1_t vm, vint8m8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub(vbool64_t vm, vint16mf4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub(vbool32_t vm, vint16mf2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub(vbool16_t vm, vint16m1_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub(vbool8_t vm, vint16m2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub(vbool4_t vm, vint16m4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub(vbool2_t vm, vint16m8_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub(vbool64_t vm, vint32mf2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub(vbool32_t vm, vint32m1_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub(vbool16_t vm, vint32m2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub(vbool8_t vm, vint32m4_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub(vbool4_t vm, vint32m8_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub(vbool64_t vm, vint64m1_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub(vbool32_t vm, vint64m2_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub(vbool16_t vm, vint64m4_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub(vbool8_t vm, vint64m8_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
// __riscv_vaaddu (masked): unsigned averaging add with leading vm mask;
// overload set mirrors the unmasked vaaddu group (u8..u64, all LMULs,
// vs1/rs1 forms, vxrm, vl).
vuint8mf8_t __riscv_vaaddu(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vaaddu(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
// __riscv_vasubu (masked): unsigned averaging subtract with leading vm mask;
// overload set mirrors the unmasked vasubu group (u8..u64, all LMULs,
// vs1/rs1 forms, vxrm, vl).
vuint8mf8_t __riscv_vasubu(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vasubu(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1,
unsigned int vxrm, size_t vl);
// __riscv_vsmul (unmasked): signed saturating fractional multiply, per the
// RVV intrinsics spec. Signed element types only (i8..i64, all LMULs);
// vector-vector (vs1) and vector-scalar (rs1) forms, rounding mode vxrm,
// vector length vl.
vint8mf8_t __riscv_vsmul(vint8mf8_t vs2, vint8mf8_t vs1, unsigned int vxrm,
size_t vl);
vint8mf8_t __riscv_vsmul(vint8mf8_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vsmul(vint8mf4_t vs2, vint8mf4_t vs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vsmul(vint8mf4_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vsmul(vint8mf2_t vs2, vint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vsmul(vint8mf2_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vsmul(vint8m1_t vs2, vint8m1_t vs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vsmul(vint8m1_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vsmul(vint8m2_t vs2, vint8m2_t vs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vsmul(vint8m2_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vsmul(vint8m4_t vs2, vint8m4_t vs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vsmul(vint8m4_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint8m8_t __riscv_vsmul(vint8m8_t vs2, vint8m8_t vs1, unsigned int vxrm,
size_t vl);
vint8m8_t __riscv_vsmul(vint8m8_t vs2, int8_t rs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vsmul(vint16mf4_t vs2, vint16mf4_t vs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vsmul(vint16mf4_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vsmul(vint16mf2_t vs2, vint16mf2_t vs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vsmul(vint16mf2_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vsmul(vint16m1_t vs2, vint16m1_t vs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vsmul(vint16m1_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vsmul(vint16m2_t vs2, vint16m2_t vs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vsmul(vint16m2_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vsmul(vint16m4_t vs2, vint16m4_t vs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vsmul(vint16m4_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint16m8_t __riscv_vsmul(vint16m8_t vs2, vint16m8_t vs1, unsigned int vxrm,
size_t vl);
vint16m8_t __riscv_vsmul(vint16m8_t vs2, int16_t rs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vsmul(vint32mf2_t vs2, vint32mf2_t vs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vsmul(vint32mf2_t vs2, int32_t rs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vsmul(vint32m1_t vs2, vint32m1_t vs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vsmul(vint32m1_t vs2, int32_t rs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vsmul(vint32m2_t vs2, vint32m2_t vs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vsmul(vint32m2_t vs2, int32_t rs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vsmul(vint32m4_t vs2, vint32m4_t vs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vsmul(vint32m4_t vs2, int32_t rs1, unsigned int vxrm,
size_t vl);
vint32m8_t __riscv_vsmul(vint32m8_t vs2, vint32m8_t vs1, unsigned int vxrm,
size_t vl);
vint32m8_t __riscv_vsmul(vint32m8_t vs2, int32_t rs1, unsigned int vxrm,
size_t vl);
vint64m1_t __riscv_vsmul(vint64m1_t vs2, vint64m1_t vs1, unsigned int vxrm,
size_t vl);
vint64m1_t __riscv_vsmul(vint64m1_t vs2, int64_t rs1, unsigned int vxrm,
size_t vl);
vint64m2_t __riscv_vsmul(vint64m2_t vs2, vint64m2_t vs1, unsigned int vxrm,
size_t vl);
vint64m2_t __riscv_vsmul(vint64m2_t vs2, int64_t rs1, unsigned int vxrm,
size_t vl);
vint64m4_t __riscv_vsmul(vint64m4_t vs2, vint64m4_t vs1, unsigned int vxrm,
size_t vl);
vint64m4_t __riscv_vsmul(vint64m4_t vs2, int64_t rs1, unsigned int vxrm,
size_t vl);
vint64m8_t __riscv_vsmul(vint64m8_t vs2, vint64m8_t vs1, unsigned int vxrm,
size_t vl);
vint64m8_t __riscv_vsmul(vint64m8_t vs2, int64_t rs1, unsigned int vxrm,
size_t vl);
// masked functions: vsmul overloads below take a leading mask operand (vm)
// that selects the active elements
// __riscv_vsmul (masked): same overload set as the unmasked vsmul group
// above, with a leading vboolN_t mask vm selecting active elements.
vint8mf8_t __riscv_vsmul(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vsmul(vbool64_t vm, vint8mf8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul(vbool32_t vm, vint8mf4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul(vbool16_t vm, vint8mf2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul(vbool8_t vm, vint8m1_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul(vbool4_t vm, vint8m2_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul(vbool2_t vm, vint8m4_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul(vbool1_t vm, vint8m8_t vs2, int8_t rs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul(vbool64_t vm, vint16mf4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vsmul(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
unsigned int vxrm, size_t vl);
// Masked overloads of __riscv_vsmul: signed fixed-point saturating multiply.
// vxrm selects the fixed-point rounding mode; vm is the mask register.
// Each element width/LMUL combination has a vector-vector form (vs1) and a
// vector-scalar form (rs1). NOTE(review): inactive-element policy (agnostic vs
// undisturbed) is determined by the intrinsic naming scheme elsewhere in this
// API — not visible here; confirm against the RVV intrinsics spec.
vint16mf2_t __riscv_vsmul(vbool32_t vm, vint16mf2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul(vbool16_t vm, vint16m1_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul(vbool8_t vm, vint16m2_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul(vbool4_t vm, vint16m4_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul(vbool2_t vm, vint16m8_t vs2, int16_t rs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul(vbool64_t vm, vint32mf2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul(vbool32_t vm, vint32m1_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul(vbool16_t vm, vint32m2_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul(vbool8_t vm, vint32m4_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul(vbool4_t vm, vint32m8_t vs2, int32_t rs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul(vbool64_t vm, vint64m1_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul(vbool32_t vm, vint64m2_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul(vbool16_t vm, vint64m4_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul(vbool8_t vm, vint64m8_t vs2, int64_t rs1,
unsigned int vxrm, size_t vl);
// Unmasked overloads of __riscv_vssra: scaling shift right arithmetic on
// signed elements, with rounding controlled by vxrm. The shift amount is
// either a vector of unsigned elements (vs1) or a single scalar (rs1, size_t).
vint8mf8_t __riscv_vssra(vint8mf8_t vs2, vuint8mf8_t vs1, unsigned int vxrm,
size_t vl);
vint8mf8_t __riscv_vssra(vint8mf8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vssra(vint8mf4_t vs2, vuint8mf4_t vs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vssra(vint8mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vssra(vint8mf2_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vssra(vint8mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vssra(vint8m1_t vs2, vuint8m1_t vs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vssra(vint8m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vssra(vint8m2_t vs2, vuint8m2_t vs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vssra(vint8m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vssra(vint8m4_t vs2, vuint8m4_t vs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vssra(vint8m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m8_t __riscv_vssra(vint8m8_t vs2, vuint8m8_t vs1, unsigned int vxrm,
size_t vl);
vint8m8_t __riscv_vssra(vint8m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vssra(vint16mf4_t vs2, vuint16mf4_t vs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vssra(vint16mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vssra(vint16mf2_t vs2, vuint16mf2_t vs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vssra(vint16mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vssra(vint16m1_t vs2, vuint16m1_t vs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vssra(vint16m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vssra(vint16m2_t vs2, vuint16m2_t vs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vssra(vint16m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vssra(vint16m4_t vs2, vuint16m4_t vs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vssra(vint16m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m8_t __riscv_vssra(vint16m8_t vs2, vuint16m8_t vs1, unsigned int vxrm,
size_t vl);
vint16m8_t __riscv_vssra(vint16m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vssra(vint32mf2_t vs2, vuint32mf2_t vs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vssra(vint32mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vssra(vint32m1_t vs2, vuint32m1_t vs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vssra(vint32m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vssra(vint32m2_t vs2, vuint32m2_t vs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vssra(vint32m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vssra(vint32m4_t vs2, vuint32m4_t vs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vssra(vint32m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m8_t __riscv_vssra(vint32m8_t vs2, vuint32m8_t vs1, unsigned int vxrm,
size_t vl);
vint32m8_t __riscv_vssra(vint32m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint64m1_t __riscv_vssra(vint64m1_t vs2, vuint64m1_t vs1, unsigned int vxrm,
size_t vl);
vint64m1_t __riscv_vssra(vint64m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint64m2_t __riscv_vssra(vint64m2_t vs2, vuint64m2_t vs1, unsigned int vxrm,
size_t vl);
vint64m2_t __riscv_vssra(vint64m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint64m4_t __riscv_vssra(vint64m4_t vs2, vuint64m4_t vs1, unsigned int vxrm,
size_t vl);
vint64m4_t __riscv_vssra(vint64m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint64m8_t __riscv_vssra(vint64m8_t vs2, vuint64m8_t vs1, unsigned int vxrm,
size_t vl);
vint64m8_t __riscv_vssra(vint64m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
// Unmasked overloads of __riscv_vssrl: scaling shift right logical on
// unsigned elements, with rounding controlled by vxrm. Same vector-shift
// (vs1) and scalar-shift (rs1) pairing as vssra above.
vuint8mf8_t __riscv_vssrl(vuint8mf8_t vs2, vuint8mf8_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf8_t __riscv_vssrl(vuint8mf8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf4_t __riscv_vssrl(vuint8mf4_t vs2, vuint8mf4_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf4_t __riscv_vssrl(vuint8mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vssrl(vuint8mf2_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vssrl(vuint8mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vssrl(vuint8m1_t vs2, vuint8m1_t vs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vssrl(vuint8m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vssrl(vuint8m2_t vs2, vuint8m2_t vs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vssrl(vuint8m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vssrl(vuint8m4_t vs2, vuint8m4_t vs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vssrl(vuint8m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m8_t __riscv_vssrl(vuint8m8_t vs2, vuint8m8_t vs1, unsigned int vxrm,
size_t vl);
vuint8m8_t __riscv_vssrl(vuint8m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf4_t __riscv_vssrl(vuint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl(vuint16mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf2_t __riscv_vssrl(vuint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl(vuint16mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vssrl(vuint16m1_t vs2, vuint16m1_t vs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vssrl(vuint16m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vssrl(vuint16m2_t vs2, vuint16m2_t vs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vssrl(vuint16m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vssrl(vuint16m4_t vs2, vuint16m4_t vs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vssrl(vuint16m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m8_t __riscv_vssrl(vuint16m8_t vs2, vuint16m8_t vs1, unsigned int vxrm,
size_t vl);
vuint16m8_t __riscv_vssrl(vuint16m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32mf2_t __riscv_vssrl(vuint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl(vuint32mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vssrl(vuint32m1_t vs2, vuint32m1_t vs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vssrl(vuint32m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vssrl(vuint32m2_t vs2, vuint32m2_t vs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vssrl(vuint32m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vssrl(vuint32m4_t vs2, vuint32m4_t vs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vssrl(vuint32m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m8_t __riscv_vssrl(vuint32m8_t vs2, vuint32m8_t vs1, unsigned int vxrm,
size_t vl);
vuint32m8_t __riscv_vssrl(vuint32m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint64m1_t __riscv_vssrl(vuint64m1_t vs2, vuint64m1_t vs1, unsigned int vxrm,
size_t vl);
vuint64m1_t __riscv_vssrl(vuint64m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vssrl(vuint64m2_t vs2, vuint64m2_t vs1, unsigned int vxrm,
size_t vl);
vuint64m2_t __riscv_vssrl(vuint64m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vssrl(vuint64m4_t vs2, vuint64m4_t vs1, unsigned int vxrm,
size_t vl);
vuint64m4_t __riscv_vssrl(vuint64m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vssrl(vuint64m8_t vs2, vuint64m8_t vs1, unsigned int vxrm,
size_t vl);
vuint64m8_t __riscv_vssrl(vuint64m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
// Masked functions: same vssra/vssrl overloads with a leading mask operand
// (vm). Mask register width matches the data type's element count
// (e.g. vbool64_t for *mf8/64-bit-m1 shapes, vbool1_t for 8-bit m8).
// Masked overloads of __riscv_vssra (scaling shift right arithmetic, signed).
vint8mf8_t __riscv_vssra(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vssra(vbool64_t vm, vint8mf8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra(vbool32_t vm, vint8mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra(vbool16_t vm, vint8mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra(vbool8_t vm, vint8m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra(vbool4_t vm, vint8m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra(vbool2_t vm, vint8m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1,
unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra(vbool1_t vm, vint8m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra(vbool64_t vm, vint16mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra(vbool32_t vm, vint16mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra(vbool16_t vm, vint16m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra(vbool8_t vm, vint16m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra(vbool4_t vm, vint16m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1,
unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra(vbool2_t vm, vint16m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra(vbool64_t vm, vint32mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra(vbool32_t vm, vint32m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra(vbool16_t vm, vint32m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra(vbool8_t vm, vint32m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1,
unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra(vbool4_t vm, vint32m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1,
unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra(vbool64_t vm, vint64m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1,
unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra(vbool32_t vm, vint64m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1,
unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra(vbool16_t vm, vint64m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1,
unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra(vbool8_t vm, vint64m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
// Masked overloads of __riscv_vssrl (scaling shift right logical, unsigned).
vuint8mf8_t __riscv_vssrl(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vssrl(vbool64_t vm, vuint8mf8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl(vbool32_t vm, vuint8mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl(vbool16_t vm, vuint8mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl(vbool8_t vm, vuint8m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl(vbool4_t vm, vuint8m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl(vbool2_t vm, vuint8m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl(vbool1_t vm, vuint8m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl(vbool64_t vm, vuint16mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl(vbool32_t vm, vuint16mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl(vbool16_t vm, vuint16m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl(vbool8_t vm, vuint16m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl(vbool4_t vm, vuint16m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl(vbool2_t vm, vuint16m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl(vbool64_t vm, vuint32mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl(vbool32_t vm, vuint32m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl(vbool16_t vm, vuint32m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl(vbool8_t vm, vuint32m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1,
unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl(vbool4_t vm, vuint32m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl(vbool64_t vm, vuint64m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl(vbool32_t vm, vuint64m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl(vbool16_t vm, vuint64m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1,
unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl(vbool8_t vm, vuint64m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
// Unmasked overloads of __riscv_vnclip: narrowing fixed-point clip from a
// 2*SEW signed source (vs2) to a SEW signed result, with rounding (vxrm)
// and signed saturation. Shift amount is a vector (vs1) or scalar (rs1).
vint8mf8_t __riscv_vnclip(vint16mf4_t vs2, vuint8mf8_t vs1, unsigned int vxrm,
size_t vl);
vint8mf8_t __riscv_vnclip(vint16mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vnclip(vint16mf2_t vs2, vuint8mf4_t vs1, unsigned int vxrm,
size_t vl);
vint8mf4_t __riscv_vnclip(vint16mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vnclip(vint16m1_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vint8mf2_t __riscv_vnclip(vint16m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vnclip(vint16m2_t vs2, vuint8m1_t vs1, unsigned int vxrm,
size_t vl);
vint8m1_t __riscv_vnclip(vint16m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vnclip(vint16m4_t vs2, vuint8m2_t vs1, unsigned int vxrm,
size_t vl);
vint8m2_t __riscv_vnclip(vint16m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vnclip(vint16m8_t vs2, vuint8m4_t vs1, unsigned int vxrm,
size_t vl);
vint8m4_t __riscv_vnclip(vint16m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vnclip(vint32mf2_t vs2, vuint16mf4_t vs1, unsigned int vxrm,
size_t vl);
vint16mf4_t __riscv_vnclip(vint32mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vnclip(vint32m1_t vs2, vuint16mf2_t vs1, unsigned int vxrm,
size_t vl);
vint16mf2_t __riscv_vnclip(vint32m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vnclip(vint32m2_t vs2, vuint16m1_t vs1, unsigned int vxrm,
size_t vl);
vint16m1_t __riscv_vnclip(vint32m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vnclip(vint32m4_t vs2, vuint16m2_t vs1, unsigned int vxrm,
size_t vl);
vint16m2_t __riscv_vnclip(vint32m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vnclip(vint32m8_t vs2, vuint16m4_t vs1, unsigned int vxrm,
size_t vl);
vint16m4_t __riscv_vnclip(vint32m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vnclip(vint64m1_t vs2, vuint32mf2_t vs1, unsigned int vxrm,
size_t vl);
vint32mf2_t __riscv_vnclip(vint64m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vnclip(vint64m2_t vs2, vuint32m1_t vs1, unsigned int vxrm,
size_t vl);
vint32m1_t __riscv_vnclip(vint64m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vnclip(vint64m4_t vs2, vuint32m2_t vs1, unsigned int vxrm,
size_t vl);
vint32m2_t __riscv_vnclip(vint64m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vnclip(vint64m8_t vs2, vuint32m4_t vs1, unsigned int vxrm,
size_t vl);
vint32m4_t __riscv_vnclip(vint64m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
// Unmasked overloads of __riscv_vnclipu: unsigned counterpart of vnclip —
// narrows a 2*SEW unsigned source to SEW with rounding (vxrm) and unsigned
// saturation.
vuint8mf8_t __riscv_vnclipu(vuint16mf4_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vnclipu(vuint16mf4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf4_t __riscv_vnclipu(vuint16mf2_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu(vuint16mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vnclipu(vuint16m1_t vs2, vuint8mf2_t vs1, unsigned int vxrm,
size_t vl);
vuint8mf2_t __riscv_vnclipu(vuint16m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vnclipu(vuint16m2_t vs2, vuint8m1_t vs1, unsigned int vxrm,
size_t vl);
vuint8m1_t __riscv_vnclipu(vuint16m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vnclipu(vuint16m4_t vs2, vuint8m2_t vs1, unsigned int vxrm,
size_t vl);
vuint8m2_t __riscv_vnclipu(vuint16m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vnclipu(vuint16m8_t vs2, vuint8m4_t vs1, unsigned int vxrm,
size_t vl);
vuint8m4_t __riscv_vnclipu(vuint16m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf4_t __riscv_vnclipu(vuint32mf2_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu(vuint32mf2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16mf2_t __riscv_vnclipu(vuint32m1_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu(vuint32m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vnclipu(vuint32m2_t vs2, vuint16m1_t vs1, unsigned int vxrm,
size_t vl);
vuint16m1_t __riscv_vnclipu(vuint32m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vnclipu(vuint32m4_t vs2, vuint16m2_t vs1, unsigned int vxrm,
size_t vl);
vuint16m2_t __riscv_vnclipu(vuint32m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vnclipu(vuint32m8_t vs2, vuint16m4_t vs1, unsigned int vxrm,
size_t vl);
vuint16m4_t __riscv_vnclipu(vuint32m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32mf2_t __riscv_vnclipu(vuint64m1_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu(vuint64m1_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vnclipu(vuint64m2_t vs2, vuint32m1_t vs1, unsigned int vxrm,
size_t vl);
vuint32m1_t __riscv_vnclipu(vuint64m2_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vnclipu(vuint64m4_t vs2, vuint32m2_t vs1, unsigned int vxrm,
size_t vl);
vuint32m2_t __riscv_vnclipu(vuint64m4_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vnclipu(vuint64m8_t vs2, vuint32m4_t vs1, unsigned int vxrm,
size_t vl);
vuint32m4_t __riscv_vnclipu(vuint64m8_t vs2, size_t rs1, unsigned int vxrm,
size_t vl);
// Masked functions: same vnclip/vnclipu overloads with a leading mask
// operand (vm) sized for the *result* type's element count.
// Masked overloads of __riscv_vnclip (signed narrowing fixed-point clip).
vint8mf8_t __riscv_vnclip(vbool64_t vm, vint16mf4_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vnclip(vbool64_t vm, vint16mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip(vbool32_t vm, vint16mf2_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip(vbool32_t vm, vint16mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip(vbool16_t vm, vint16m1_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip(vbool16_t vm, vint16m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip(vbool8_t vm, vint16m2_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip(vbool8_t vm, vint16m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip(vbool4_t vm, vint16m4_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip(vbool4_t vm, vint16m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip(vbool2_t vm, vint16m8_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip(vbool2_t vm, vint16m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip(vbool64_t vm, vint32mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip(vbool32_t vm, vint32m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip(vbool16_t vm, vint32m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip(vbool8_t vm, vint32m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip(vbool4_t vm, vint32m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip(vbool64_t vm, vint64m1_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip(vbool64_t vm, vint64m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip(vbool32_t vm, vint64m2_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip(vbool32_t vm, vint64m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip(vbool16_t vm, vint64m4_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip(vbool16_t vm, vint64m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip(vbool8_t vm, vint64m8_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip(vbool8_t vm, vint64m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
// Masked overloads of __riscv_vnclipu (unsigned narrowing fixed-point clip).
vuint8mf8_t __riscv_vnclipu(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vnclipu(vbool64_t vm, vuint16mf4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu(vbool32_t vm, vuint16mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu(vbool16_t vm, vuint16m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1,
unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu(vbool8_t vm, vuint16m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1,
unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu(vbool4_t vm, vuint16m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1,
unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu(vbool2_t vm, vuint16m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu(vbool64_t vm, vuint32mf2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu(vbool32_t vm, vuint32m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1,
unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu(vbool16_t vm, vuint32m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1,
unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu(vbool8_t vm, vuint32m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1,
unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu(vbool4_t vm, vuint32m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1,
unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu(vbool64_t vm, vuint64m1_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1,
unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu(vbool32_t vm, vuint64m2_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1,
unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu(vbool16_t vm, vuint64m4_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1,
unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu(vbool8_t vm, vuint64m8_t vs2, size_t rs1,
unsigned int vxrm, size_t vl);