@@ -115,6 +115,8 @@ class octuple_to_str<int octuple> {
                         "NoDef")))))));
 }

+def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT GPR:$vl)))>;
+
 // Output pattern for X0 used to represent VLMAX in the pseudo instructions.
 def VLMax : OutPatFrag<(ops), (XLenVT X0)>;

@@ -2132,7 +2134,7 @@ class VPatUnaryNoMask<string intrinsic_name,
                       VReg op2_reg_class> :
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (op2_type op2_reg_class:$rs2),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    (op2_type op2_reg_class:$rs2),
                    GPR:$vl, sew)>;
@@ -2151,7 +2153,7 @@ class VPatUnaryMask<string intrinsic_name,
                    (result_type result_reg_class:$merge),
                    (op2_type op2_reg_class:$rs2),
                    (mask_type V0),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                    (result_type result_reg_class:$merge),
                    (op2_type op2_reg_class:$rs2),
@@ -2162,7 +2164,7 @@ class VPatMaskUnaryNoMask<string intrinsic_name,
                          MTypeInfo mti> :
   Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                 (mti.Mask VR:$rs2),
-                (XLenVT (VLOp GPR:$vl)) )),
+                VLOpFrag )),
                 (!cast<Instruction>(inst#"_M_"#mti.BX)
                 (mti.Mask VR:$rs2),
                 GPR:$vl, mti.SEW)>;
@@ -2174,7 +2176,7 @@ class VPatMaskUnaryMask<string intrinsic_name,
                 (mti.Mask VR:$merge),
                 (mti.Mask VR:$rs2),
                 (mti.Mask V0),
-                (XLenVT (VLOp GPR:$vl)) )),
+                VLOpFrag )),
                 (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                 (mti.Mask VR:$merge),
                 (mti.Mask VR:$rs2),
@@ -2194,7 +2196,7 @@ class VPatUnaryAnyMask<string intrinsic,
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (mask_type VR:$rs2),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
@@ -2212,7 +2214,7 @@ class VPatBinaryNoMask<string intrinsic_name,
   Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst)
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
@@ -2233,7 +2235,7 @@ class VPatBinaryMask<string intrinsic_name,
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_MASK")
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
@@ -2256,7 +2258,7 @@ class VPatTernaryNoMask<string intrinsic,
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
@@ -2280,7 +2282,7 @@ class VPatTernaryMask<string intrinsic,
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
@@ -2300,7 +2302,7 @@ class VPatAMOWDNoMask<string intrinsic_name,
                    GPR:$rs1,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
                    $rs1, $vs2, $vd,
                    GPR:$vl, sew)>;
@@ -2319,7 +2321,7 @@ class VPatAMOWDMask<string intrinsic_name,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
                    (mask_type V0),
-                   (XLenVT (VLOp GPR:$vl)) )),
+                   VLOpFrag )),
                    (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
                    $rs1, $vs2, $vd,
                    (mask_type V0), GPR:$vl, sew)>;
@@ -2329,11 +2331,11 @@ multiclass VPatUnaryS_M<string intrinsic_name,
 {
   foreach mti = AllMasks in {
     def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
-                      (mti.Mask VR:$rs1), (XLenVT (VLOp GPR:$vl)) )),
+                      (mti.Mask VR:$rs1), VLOpFrag )),
                       (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                       GPR:$vl, mti.SEW)>;
     def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
-                      (mti.Mask VR:$rs1), (mti.Mask V0), (XLenVT (VLOp GPR:$vl)) )),
+                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag )),
                       (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                       (mti.Mask V0), GPR:$vl, mti.SEW)>;
   }
@@ -2400,12 +2402,12 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
 {
   foreach vti = AllIntegerVectors in {
     def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
-                          (XLenVT (VLOp GPR:$vl)) )),
+                          VLOpFrag )),
                           (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                           GPR:$vl, vti.SEW)>;
     def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                           (vti.Vector vti.RegClass:$merge),
-                          (vti.Mask V0), (XLenVT (VLOp GPR:$vl)) )),
+                          (vti.Mask V0), VLOpFrag )),
                           (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                           vti.RegClass:$merge, (vti.Mask V0),
                           GPR:$vl, vti.SEW)>;
@@ -2415,7 +2417,7 @@ multiclass VPatNullaryM<string intrinsic, string inst> {
 multiclass VPatNullaryM<string intrinsic, string inst> {
   foreach mti = AllMasks in
     def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
-                        (XLenVT (VLOp GPR:$vl)))),
+                        VLOpFrag)),
                         (!cast<Instruction>(inst#"_M_"#mti.BX)
                         GPR:$vl, mti.SEW)>;
 }
@@ -2454,7 +2456,7 @@ multiclass VPatBinaryCarryIn<string intrinsic,
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
-                        (XLenVT (VLOp GPR:$vl)) )),
+                        VLOpFrag )),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
@@ -2475,7 +2477,7 @@ multiclass VPatBinaryMaskOut<string intrinsic,
   def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                          (op1_type op1_reg_class:$rs1),
                          (op2_type op2_kind:$rs2),
-                         (XLenVT (VLOp GPR:$vl)) )),
+                         VLOpFrag )),
                          (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                          (op1_type op1_reg_class:$rs1),
                          (op2_type op2_kind:$rs2),
@@ -3248,7 +3250,7 @@ foreach vti = AllIntegerVectors in {
   // consistency.
   def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2),
                                          (vti.Vector vti.RegClass:$rs1),
-                                         (XLenVT (VLOp GPR:$vl)) )),
+                                         VLOpFrag )),
             (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs2,
                                                               GPR:$vl,
@@ -3257,7 +3259,7 @@ foreach vti = AllIntegerVectors in {
                                          (vti.Vector vti.RegClass:$rs2),
                                          (vti.Vector vti.RegClass:$rs1),
                                          (vti.Mask V0),
-                                         (XLenVT (VLOp GPR:$vl)) )),
+                                         VLOpFrag )),
             (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
                                          vti.RegClass:$merge,
                                          vti.RegClass:$rs1,
@@ -3269,7 +3271,7 @@ foreach vti = AllIntegerVectors in {
   // Match VSUB with a small immediate to vadd.vi by negating the immediate.
   def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
                                         (vti.Scalar simm5_plus1:$rs2),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
             (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (NegImm simm5_plus1:$rs2),
                                                               GPR:$vl,
@@ -3278,7 +3280,7 @@ foreach vti = AllIntegerVectors in {
                                         (vti.Vector vti.RegClass:$rs1),
                                         (vti.Scalar simm5_plus1:$rs2),
                                         (vti.Mask V0),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
                                         vti.RegClass:$merge,
                                         vti.RegClass:$rs1,
@@ -3885,7 +3887,7 @@ defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Mask (int_riscv_vmsgt (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$rs1),
-                                       (XLenVT (VLOp GPR:$vl)) )),
+                                       VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLT_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs2,
                                                               GPR:$vl,
@@ -3894,7 +3896,7 @@ foreach vti = AllIntegerVectors in {
                                        (vti.Vector vti.RegClass:$rs2),
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Mask V0),
-                                       (XLenVT (VLOp GPR:$vl)) )),
+                                       VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLT_VV_"#vti.LMul.MX#"_MASK")
                                        VR:$merge,
                                        vti.RegClass:$rs1,
@@ -3905,7 +3907,7 @@ foreach vti = AllIntegerVectors in {

   def : Pat<(vti.Mask (int_riscv_vmsgtu (vti.Vector vti.RegClass:$rs2),
                                         (vti.Vector vti.RegClass:$rs1),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLTU_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                vti.RegClass:$rs2,
                                                                GPR:$vl,
@@ -3914,7 +3916,7 @@ foreach vti = AllIntegerVectors in {
                                         (vti.Vector vti.RegClass:$rs2),
                                         (vti.Vector vti.RegClass:$rs1),
                                         (vti.Mask V0),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLTU_VV_"#vti.LMul.MX#"_MASK")
                                         VR:$merge,
                                         vti.RegClass:$rs1,
@@ -3931,7 +3933,7 @@ foreach vti = AllIntegerVectors in {
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
-                                       (XLenVT (VLOp GPR:$vl)) )),
+                                       VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
@@ -3940,7 +3942,7 @@ foreach vti = AllIntegerVectors in {
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        (vti.Mask V0),
-                                       (XLenVT (VLOp GPR:$vl)) )),
+                                       VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
                                        VR:$merge,
                                        vti.RegClass:$rs1,
@@ -3951,7 +3953,7 @@ foreach vti = AllIntegerVectors in {

   def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                         (vti.Scalar simm5_plus1:$rs2),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
@@ -3960,7 +3962,7 @@ foreach vti = AllIntegerVectors in {
                                         (vti.Vector vti.RegClass:$rs1),
                                         (vti.Scalar simm5_plus1:$rs2),
                                         (vti.Mask V0),
-                                        (XLenVT (VLOp GPR:$vl)) )),
+                                        VLOpFrag )),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
                                         VR:$merge,
                                         vti.RegClass:$rs1,
@@ -3972,7 +3974,7 @@ foreach vti = AllIntegerVectors in {
   // Special cases to avoid matching vmsltu.vi 0 (always false) to
   // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
   def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
-                                        (vti.Scalar 0), (XLenVT (VLOp GPR:$vl)) )),
+                                        (vti.Scalar 0), VLOpFrag )),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
@@ -3981,7 +3983,7 @@ foreach vti = AllIntegerVectors in {
                                        (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0),
                                        (vti.Mask V0),
-                                       (XLenVT (VLOp GPR:$vl)) )),
+                                       VLOpFrag )),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
                                        VR:$merge,
                                        vti.RegClass:$rs1,
@@ -4048,7 +4050,7 @@ defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
 //===----------------------------------------------------------------------===//
 foreach vti = AllVectors in {
   def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
-                                           (XLenVT (VLOp GPR:$vl)) )),
+                                           VLOpFrag )),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
             $rs1, GPR:$vl, vti.SEW)>;

@@ -4196,7 +4198,7 @@ foreach fvti = AllFloatVectors in {
   defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
   def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
                                             (fvti.Scalar (fpimm0)),
-                                            (fvti.Mask V0), (XLenVT (VLOp GPR:$vl)) )),
+                                            (fvti.Mask V0), VLOpFrag )),
            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>;
 }

@@ -4357,7 +4359,7 @@ foreach fvti = AllFloatVectors in {
            (instr $rs2, fvti.SEW)>;

   def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
-                         (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)) )),
+                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag )),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),