@@ -4844,7 +4844,8 @@ instruct vaddB_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vaddB_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVB src (LoadVector mem)));
   format %{ "vpaddb $dst,$src,$mem\t! add packedB" %}
   ins_encode %{
@@ -4877,7 +4878,8 @@ instruct vaddS_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vaddS_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVS src (LoadVector mem)));
   format %{ "vpaddw $dst,$src,$mem\t! add packedS" %}
   ins_encode %{
@@ -4911,7 +4913,8 @@ instruct vaddI_reg(vec dst, vec src1, vec src2) %{
 
 
 instruct vaddI_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVI src (LoadVector mem)));
   format %{ "vpaddd $dst,$src,$mem\t! add packedI" %}
   ins_encode %{
@@ -4944,7 +4947,8 @@ instruct vaddL_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vaddL_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVL src (LoadVector mem)));
   format %{ "vpaddq $dst,$src,$mem\t! add packedL" %}
   ins_encode %{
@@ -4977,7 +4981,8 @@ instruct vaddF_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vaddF_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVF src (LoadVector mem)));
   format %{ "vaddps $dst,$src,$mem\t! add packedF" %}
   ins_encode %{
@@ -5010,7 +5015,8 @@ instruct vaddD_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vaddD_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AddVD src (LoadVector mem)));
   format %{ "vaddpd $dst,$src,$mem\t! add packedD" %}
   ins_encode %{
@@ -5045,7 +5051,8 @@ instruct vsubB_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vsubB_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVB src (LoadVector mem)));
   format %{ "vpsubb $dst,$src,$mem\t! sub packedB" %}
   ins_encode %{
@@ -5079,7 +5086,8 @@ instruct vsubS_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vsubS_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVS src (LoadVector mem)));
   format %{ "vpsubw $dst,$src,$mem\t! sub packedS" %}
   ins_encode %{
@@ -5112,7 +5120,8 @@ instruct vsubI_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vsubI_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVI src (LoadVector mem)));
   format %{ "vpsubd $dst,$src,$mem\t! sub packedI" %}
   ins_encode %{
@@ -5146,7 +5155,8 @@ instruct vsubL_reg(vec dst, vec src1, vec src2) %{
 
 
 instruct vsubL_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVL src (LoadVector mem)));
   format %{ "vpsubq $dst,$src,$mem\t! sub packedL" %}
   ins_encode %{
@@ -5179,7 +5189,8 @@ instruct vsubF_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vsubF_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVF src (LoadVector mem)));
   format %{ "vsubps $dst,$src,$mem\t! sub packedF" %}
   ins_encode %{
@@ -5212,7 +5223,8 @@ instruct vsubD_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vsubD_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (SubVD src (LoadVector mem)));
   format %{ "vsubpd $dst,$src,$mem\t! sub packedD" %}
   ins_encode %{
@@ -5360,7 +5372,8 @@ instruct vmulS_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vmulS_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (MulVS src (LoadVector mem)));
   format %{ "vpmullw $dst,$src,$mem\t! mul packedS" %}
   ins_encode %{
@@ -5394,7 +5407,8 @@ instruct vmulI_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vmulI_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (MulVI src (LoadVector mem)));
   format %{ "vpmulld $dst,$src,$mem\t! mul packedI" %}
   ins_encode %{
@@ -5418,7 +5432,8 @@ instruct vmulL_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vmulL_mem(vec dst, vec src, memory mem) %{
-  predicate(VM_Version::supports_avx512dq());
+  predicate(VM_Version::supports_avx512dq() &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (MulVL src (LoadVector mem)));
   format %{ "vpmullq $dst,$src,$mem\t! mul packedL" %}
   ins_encode %{
@@ -5503,7 +5518,8 @@ instruct vmulF_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vmulF_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (MulVF src (LoadVector mem)));
   format %{ "vmulps $dst,$src,$mem\t! mul packedF" %}
   ins_encode %{
@@ -5536,7 +5552,8 @@ instruct vmulD_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vmulD_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (MulVD src (LoadVector mem)));
   format %{ "vmulpd $dst,$src,$mem\t! mul packedD" %}
   ins_encode %{
@@ -5607,7 +5624,8 @@ instruct vdivF_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vdivF_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (DivVF src (LoadVector mem)));
   format %{ "vdivps $dst,$src,$mem\t! div packedF" %}
   ins_encode %{
@@ -5640,7 +5658,8 @@ instruct vdivD_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vdivD_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (DivVD src (LoadVector mem)));
   format %{ "vdivpd $dst,$src,$mem\t! div packedD" %}
   ins_encode %{
@@ -5824,6 +5843,7 @@ instruct vsqrtF_reg(vec dst, vec src) %{
 %}
 
 instruct vsqrtF_mem(vec dst, memory mem) %{
+  predicate(vector_length_in_bytes(n->in(1)) > 8);
   match(Set dst (SqrtVF (LoadVector mem)));
   format %{ "vsqrtps $dst,$mem\t! sqrt packedF" %}
   ins_encode %{
@@ -5847,6 +5867,7 @@ instruct vsqrtD_reg(vec dst, vec src) %{
 %}
 
 instruct vsqrtD_mem(vec dst, memory mem) %{
+  predicate(vector_length_in_bytes(n->in(1)) > 8);
   match(Set dst (SqrtVD (LoadVector mem)));
   format %{ "vsqrtpd $dst,$mem\t! sqrt packedD" %}
   ins_encode %{
@@ -6459,7 +6480,8 @@ instruct vand_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vand_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (AndV src (LoadVector mem)));
   format %{ "vpand $dst,$src,$mem\t! and vectors" %}
   ins_encode %{
@@ -6493,7 +6515,8 @@ instruct vor_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vor_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (OrV src (LoadVector mem)));
   format %{ "vpor $dst,$src,$mem\t! or vectors" %}
   ins_encode %{
@@ -6527,7 +6550,8 @@ instruct vxor_reg(vec dst, vec src1, vec src2) %{
 %}
 
 instruct vxor_mem(vec dst, vec src, memory mem) %{
-  predicate(UseAVX > 0);
+  predicate((UseAVX > 0) &&
+            (vector_length_in_bytes(n->in(1)) > 8));
   match(Set dst (XorV src (LoadVector mem)));
   format %{ "vpxor $dst,$src,$mem\t! xor vectors" %}
   ins_encode %{
@@ -7947,6 +7971,7 @@ instruct vfmaF_reg(vec a, vec b, vec c) %{
 %}
 
 instruct vfmaF_mem(vec a, memory b, vec c) %{
+  predicate(vector_length_in_bytes(n->in(1)) > 8);
   match(Set c (FmaVF c (Binary a (LoadVector b))));
   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packedF" %}
   ins_cost(150);
@@ -7971,6 +7996,7 @@ instruct vfmaD_reg(vec a, vec b, vec c) %{
 %}
 
 instruct vfmaD_mem(vec a, memory b, vec c) %{
+  predicate(vector_length_in_bytes(n->in(1)) > 8);
   match(Set c (FmaVD c (Binary a (LoadVector b))));
   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packedD" %}
   ins_cost(150);
@@ -8048,6 +8074,7 @@ instruct vpternlog(vec dst, vec src2, vec src3, immU8 func) %{
 %}
 
 instruct vpternlog_mem(vec dst, vec src2, memory src3, immU8 func) %{
+  predicate(vector_length_in_bytes(n->in(1)) > 8);
   match(Set dst (MacroLogicV (Binary dst src2) (Binary (LoadVector src3) func)));
   effect(TEMP dst);
   format %{ "vpternlogd $dst,$src2,$src3,$func\t! vector ternary logic" %}