-
Notifications
You must be signed in to change notification settings - Fork 42
/
vproc_alu.sv
1018 lines (943 loc) · 49.2 KB
/
vproc_alu.sv
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright TU Wien
// Licensed under the ISC license, see LICENSE.txt for details
// SPDX-License-Identifier: ISC
`include "vproc_vregshift.svh"
// Vector ALU unit: reads source vector registers, extracts operands,
// computes arithmetic/compare results, and writes destination registers.
// Processes one ALU_OP_W-bit slice of a vector register per cycle.
module vproc_alu #(
parameter int unsigned VREG_W = 128, // width in bits of vector registers
parameter int unsigned VMSK_W = 16, // width of vector register masks (= VREG_W / 8)
parameter int unsigned CFG_VL_W = 7, // width of VL reg in bits (= log2(VREG_W))
parameter int unsigned ALU_OP_W = 64, // ALU operand width in bits
parameter int unsigned XIF_ID_W = 3, // width in bits of instruction IDs
parameter int unsigned XIF_ID_CNT = 8, // total count of instruction IDs
parameter int unsigned MAX_WR_ATTEMPTS = 1, // max required vregfile write attempts
parameter bit BUF_VREG = 1'b1, // insert pipeline stage after vreg read
parameter bit BUF_OPERANDS = 1'b1, // insert pipeline stage after operand extraction
parameter bit BUF_INTERMEDIATE = 1'b1, // insert pipeline stage for intermediate results
parameter bit BUF_RESULTS = 1'b1, // insert pipeline stage after computing result
parameter bit DONT_CARE_ZERO = 1'b0 // initialize don't care values to zero
)(
input logic clk_i,
input logic async_rst_ni, // asynchronous reset, active low
input logic sync_rst_ni, // synchronous reset, active low
// instruction dispatch interface:
input logic [XIF_ID_W-1:0] id_i, // instruction ID
input vproc_pkg::cfg_vsew vsew_i, // selected element width
input vproc_pkg::cfg_emul emul_i, // effective register group multiplier
input logic [CFG_VL_W-1:0] vl_i, // vector length
input logic vl_0_i, // flag: vector length is zero
input logic op_rdy_i, // dispatcher has an op for this unit
output logic op_ack_o, // op accepted this cycle
input vproc_pkg::op_mode_alu mode_i, // ALU operation mode
input vproc_pkg::op_widenarrow widenarrow_i, // single-width/widening/narrowing
input vproc_pkg::op_regs rs1_i, // first source (vreg or scalar)
input vproc_pkg::op_regs rs2_i, // second source
input logic [4:0] vd_i, // destination vreg address
// hazard tracking:
input logic [31:0] vreg_pend_wr_i, // global pending vreg writes
output logic [31:0] vreg_pend_rd_o, // reads pending in this unit
input logic [31:0] vreg_pend_rd_i, // reads pending in other units
output logic [31:0] clear_wr_hazards_o, // write hazards retired this cycle
input logic [XIF_ID_CNT-1:0] instr_spec_i, // per-ID: instruction still speculative
input logic [XIF_ID_CNT-1:0] instr_killed_i, // per-ID: instruction was killed
output logic instr_done_valid_o,
output logic [XIF_ID_W-1:0] instr_done_id_o,
// connections to register file:
input logic [VREG_W-1:0] vreg_mask_i, // v0 mask register content
input logic [VREG_W-1:0] vreg_rd_i,
output logic [4:0] vreg_rd_addr_o,
output logic [VREG_W-1:0] vreg_wr_o,
output logic [4:0] vreg_wr_addr_o,
output logic [VMSK_W-1:0] vreg_wr_mask_o, // byte-enable mask for the write
output logic vreg_wr_en_o
);
import vproc_pkg::*;
// Elaboration-time parameter sanity checks (evaluated once during
// elaboration; $fatal aborts with an explanatory message).
// ALU_OP_W must be a power of two, >= 32, and < VREG_W, so that a vector
// register splits into an integer number (> 1) of operand slices.
if ((ALU_OP_W & (ALU_OP_W - 1)) != 0 || ALU_OP_W < 32 || ALU_OP_W >= VREG_W) begin
$fatal(1, "The vector ALU operand width ALU_OP_W must be at least 32, less than ",
"the vector register width VREG_W and a power of two. ",
"The current value of %d is invalid.", ALU_OP_W);
end
// 2^(MAX_WR_ATTEMPTS-1) must not exceed the cycles-per-vreg ratio, since
// write retries are spaced at exponentially growing delays (see the write
// delay buffer below).
if (MAX_WR_ATTEMPTS < 1 || (1 << (MAX_WR_ATTEMPTS - 1)) > VREG_W / ALU_OP_W) begin
$fatal(1, "The maximum number of write attempts MAX_WR_ATTEMPTS of a unit ",
"must be at least 1 and 2^(MAX_WR_ATTEMPTS-1) must be less than or ",
"equal to the ratio of the vector register width vs the operand width ",
"of that unit. ",
"For the vector ALU MAX_WR_ATTEMPTS is %d and that ratio is %d.",
MAX_WR_ATTEMPTS, VREG_W / ALU_OP_W);
end
// max number of cycles by which a write can be delayed
localparam int unsigned MAX_WR_DELAY = (1 << (MAX_WR_ATTEMPTS - 1)) - 1;
///////////////////////////////////////////////////////////////////////////
// ALU STATE:
// Cycles needed to stream one whole vector register through the unit.
localparam int unsigned ALU_CYCLES_PER_VREG = VREG_W / ALU_OP_W;
// Counter covers position within a vreg plus 3 bits of vreg index
// (up to 8 registers for EMUL_8 register groups).
localparam int unsigned ALU_COUNTER_W = $clog2(ALU_CYCLES_PER_VREG) + 3;
// Cycle counter, viewable either as a flat value or split into the
// vreg index within the register group (mul) and the slice position
// within the current vreg (low).
typedef union packed {
logic [ALU_COUNTER_W-1:0] val;
struct packed {
logic [2:0] mul; // mul part (vreg index)
logic [ALU_COUNTER_W-4:0] low; // counter part in vreg (vreg pos)
} part;
} alu_counter;
// Per-instruction state carried along the pipeline stages.
typedef struct packed {
alu_counter count;
logic first_cycle; // set during the instruction's first cycle only
logic last_cycle; // set during the instruction's final cycle only
logic [XIF_ID_W-1:0] id;
op_mode_alu mode;
cfg_vsew eew; // effective element width
cfg_emul emul; // effective MUL factor
logic [CFG_VL_W-1:0] vl;
logic vl_0;
op_regs rs1;
logic vs1_narrow; // vs1 is half-width (widening op)
logic vs1_fetch; // fetch a new vs1 vreg this cycle
logic vs1_shift; // advance the vs1 operand shift register
op_regs rs2;
logic vs2_narrow; // vs2 is half-width (widening op)
logic vs2_fetch; // fetch a new vs2 vreg this cycle
logic vs2_shift; // advance the vs2 operand shift register
logic v0msk_shift; // advance the v0 mask shift register
logic [4:0] vd;
logic vd_narrow; // vd is half-width (narrowing op)
logic vd_store; // store result to vd this cycle
} alu_state;
logic state_valid_q, state_valid_d;
alu_state state_q, state_d;
logic [31:0] vreg_pend_wr_q, vreg_pend_wr_d; // local copy of global vreg write mask
// Valid flag has both async and sync reset; the state payload itself
// needs no reset since it is qualified by state_valid_q.
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_state_valid
if (~async_rst_ni) begin
state_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_valid_q <= 1'b0;
end else begin
state_valid_q <= state_valid_d;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_state
state_q <= state_d;
vreg_pend_wr_q <= vreg_pend_wr_d;
end
// Current cycle is the instruction's last when the counter bits relevant
// for the configured EMUL (slice position plus vreg-index bits) are all 1.
logic last_cycle;
always_comb begin
last_cycle = DONT_CARE_ZERO ? 1'b0 : 1'bx;
unique case (state_q.emul)
EMUL_1: last_cycle = state_q.count.part.low == '1;
EMUL_2: last_cycle = (state_q.count.part.mul[ 0] == '1) & (state_q.count.part.low == '1);
EMUL_4: last_cycle = (state_q.count.part.mul[1:0] == '1) & (state_q.count.part.low == '1);
EMUL_8: last_cycle = (state_q.count.part.mul[2:0] == '1) & (state_q.count.part.low == '1);
default: ;
endcase
end
logic pipeline_ready;
// Instruction sequencing: accept a new op when idle (or finishing the
// current one) and op_rdy_i is asserted; otherwise advance the cycle
// counter whenever the downstream pipeline can take the next slice.
always_comb begin
op_ack_o = 1'b0;
state_valid_d = state_valid_q;
state_d = state_q;
// pending writes can only be cleared, never added, while an op runs
vreg_pend_wr_d = vreg_pend_wr_q & vreg_pend_wr_i;
if (((~state_valid_q) | (last_cycle & pipeline_ready)) & op_rdy_i) begin
// accept a new instruction and capture its configuration
op_ack_o = 1'b1;
state_d.count.val = '0;
state_valid_d = 1'b1;
state_d.first_cycle = 1'b1;
state_d.id = id_i;
state_d.mode = mode_i;
state_d.emul = emul_i;
state_d.eew = vsew_i;
state_d.vl = vl_i;
state_d.vl_0 = vl_0_i;
state_d.rs1 = rs1_i;
state_d.vs1_narrow = widenarrow_i != OP_SINGLEWIDTH;
state_d.vs1_fetch = rs1_i.vreg;
state_d.vs1_shift = 1'b1;
state_d.rs2 = rs2_i;
state_d.vs2_narrow = widenarrow_i == OP_WIDENING;
state_d.vs2_fetch = rs2_i.vreg;
state_d.vs2_shift = 1'b1;
state_d.v0msk_shift = 1'b1;
state_d.vd = vd_i;
state_d.vd_narrow = widenarrow_i == OP_NARROWING;
state_d.vd_store = 1'b0;
vreg_pend_wr_d = vreg_pend_wr_i;
end
else if (state_valid_q & pipeline_ready) begin
// advance to the next slice of the current instruction
state_d.count.val = state_q.count.val + 1;
state_valid_d = ~last_cycle;
state_d.first_cycle = 1'b0;
state_d.vs1_fetch = 1'b0;
state_d.vs2_fetch = 1'b0;
if (state_q.count.part.low == '1) begin
// current vreg exhausted: move to the next register of the group;
// narrow (half-width) sources advance only every other vreg
if (state_q.rs1.vreg & (~state_q.vs1_narrow | state_q.count.part.mul[0])) begin
state_d.rs1.r.vaddr[2:0] = state_q.rs1.r.vaddr[2:0] + 3'b1;
state_d.vs1_fetch = state_q.rs1.vreg;
end
if (~state_q.vs2_narrow | state_q.count.part.mul[0]) begin
state_d.rs2.r.vaddr[2:0] = state_q.rs2.r.vaddr[2:0] + 3'b1;
state_d.vs2_fetch = state_q.rs2.vreg;
end
// compare ops produce a mask destination and never advance vd here
if (~state_q.mode.cmp & (~state_q.vd_narrow | state_q.count.part.mul[0])) begin
state_d.vd[2:0] = state_q.vd[2:0] + 3'b1;
end
end
// narrow sources shift only every other cycle (half data rate)
state_d.vs1_shift = ~state_q.vs1_narrow | state_q.count.part.low[0];
state_d.vs2_shift = ~state_q.vs2_narrow | state_q.count.part.low[0];
// mask consumption rate depends on element width (1 bit per element)
unique case (state_q.eew)
VSEW_8: state_d.v0msk_shift = 1'b1;
VSEW_16: state_d.v0msk_shift = state_q.count.val[0];
VSEW_32: state_d.v0msk_shift = state_q.count.val[1:0] == '1;
default: ;
endcase
end
end
///////////////////////////////////////////////////////////////////////////
// ALU PIPELINE BUFFERS:
// pass state information along pipeline:
// Per-stage ready/valid handshake signals and the state copy carried by
// each stage: init -> vreg -> vs1 -> vs2 -> ex1 -> ex2 -> res -> vd.
logic state_vreg_ready, state_vs1_ready, state_vs2_ready, state_ex1_ready, state_ex2_ready, state_res_ready, state_vd_ready;
logic state_init_stall, state_vd_stall;
logic state_init_valid, state_vreg_valid_q, state_vs1_valid_q, state_vs2_valid_q, state_ex1_valid_q, state_ex2_valid_q, state_res_valid_q, state_vd_valid_q;
alu_state state_init, state_vreg_q, state_vs1_q, state_vs2_q, state_ex1_q, state_ex2_q, state_res_q, state_vd_q;
always_comb begin
state_init_valid = state_valid_q;
state_init = state_q;
state_init.last_cycle = state_valid_q & last_cycle;
// result is stored when a vreg's worth of results is complete; for
// narrowing ops only every other vreg completes a destination
state_init.vd_store = (state_q.count.part.low == '1) & (~state_q.vd_narrow | state_q.count.part.mul[0]);
end
assign pipeline_ready = state_vreg_ready & ~state_init_stall;
// common vreg read register:
logic [VREG_W-1:0] vreg_rd_q, vreg_rd_d;
// operand shift registers:
logic [VREG_W-1:0] vs1_shift_q, vs1_shift_d;
logic [VREG_W-1:0] vs2_shift_q, vs2_shift_d;
logic [VREG_W-1:0] v0msk_shift_q, v0msk_shift_d;
// temporary buffer for vs1 while fetching vs2:
logic [ALU_OP_W-1:0] vs1_tmp_q, vs1_tmp_d;
// operands and result; operands are 9/8 wide since a carry-chain bit is
// interleaved after every byte (see operand extraction below):
logic [ALU_OP_W*9/8-1:0] operand1_q, operand1_d;
logic [ALU_OP_W*9/8-1:0] operand2_q, operand2_d;
logic [ALU_OP_W /8-1:0] operand_mask_q, operand_mask_d;
logic [ALU_OP_W -1:0] result_alu_q, result_alu_d;
logic [ALU_OP_W /8-1:0] result_cmp_q, result_cmp_d;
logic [ALU_OP_W /8-1:0] result_mask_q, result_mask_d;
// intermediate results:
logic [ALU_OP_W -1:0] operand1_tmp_q, operand1_tmp_d;
logic [ALU_OP_W -1:0] operand2_tmp_q, operand2_tmp_d;
logic [ALU_OP_W /8-1:0] operand_mask_tmp_q, operand_mask_tmp_d;
logic [ALU_OP_W*9/8-1:0] sum_q, sum_d;
logic [ALU_OP_W /8-1:0] cmp_q, cmp_d;
logic [ALU_OP_W /4-1:0] satval_q, satval_d;
logic [ALU_OP_W -1:0] shift_res_q, shift_res_d;
// result shift register:
logic [VREG_W-1:0] vd_alu_shift_q, vd_alu_shift_d;
logic [VMSK_W-1:0] vdmsk_alu_shift_q, vdmsk_alu_shift_d;
logic [VMSK_W-1:0] vd_cmp_shift_q, vd_cmp_shift_d;
logic [VMSK_W-1:0] vdmsk_cmp_q, vdmsk_cmp_d;
// vreg write buffers (shift register used to retry delayed writes)
localparam WRITE_BUFFER_SZ = (MAX_WR_DELAY > 0) ? MAX_WR_DELAY : 1;
logic vreg_wr_en_q [WRITE_BUFFER_SZ], vreg_wr_en_d;
logic vreg_wr_clear_q[WRITE_BUFFER_SZ], vreg_wr_clear_d;
logic [4:0] vreg_wr_addr_q [WRITE_BUFFER_SZ], vreg_wr_addr_d;
logic [VMSK_W-1:0] vreg_wr_mask_q [WRITE_BUFFER_SZ], vreg_wr_mask_d;
logic [VREG_W-1:0] vreg_wr_q [WRITE_BUFFER_SZ], vreg_wr_d;
// hazard clear registers
logic [31:0] clear_wr_hazards_q, clear_wr_hazards_d;
generate
// Optional buffer stage after the vreg file read. Each stage follows
// the same pattern: the valid flag is reset (async+sync), the payload
// registers update only when the stage is ready, and ready propagates
// backwards (a stage is ready when empty or when downstream is ready).
if (BUF_VREG) begin
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_vreg_valid
if (~async_rst_ni) begin
state_vreg_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_vreg_valid_q <= 1'b0;
end
else if (state_vreg_ready) begin
state_vreg_valid_q <= state_init_valid & ~state_init_stall;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_vreg
// Note: state_init_valid is omitted here since vreg buffering
// may need to proceed for one extra cycle after the
// instruction has left state_init
if (state_vreg_ready) begin
state_vreg_q <= state_init;
vreg_rd_q <= vreg_rd_d;
end
end
assign state_vreg_ready = ~state_vreg_valid_q | state_vs1_ready;
end else begin
// unbuffered variant: stage is a combinational pass-through
always_comb begin
state_vreg_valid_q = state_init_valid & ~state_init_stall;
state_vreg_q = state_init;
vreg_rd_q = vreg_rd_d;
end
assign state_vreg_ready = state_vs1_ready;
end
// vs1 stage: captures the vs1 operand shift register (always present,
// since vs1 and vs2 are read from the single vreg port on alternate
// cycles).
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_vs1_valid
if (~async_rst_ni) begin
state_vs1_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_vs1_valid_q <= 1'b0;
end
else if (state_vs1_ready) begin
state_vs1_valid_q <= state_vreg_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_vs1
if (state_vs1_ready & state_vreg_valid_q) begin
state_vs1_q <= state_vreg_q;
vs1_shift_q <= vs1_shift_d;
end
end
assign state_vs1_ready = ~state_vs1_valid_q | state_vs2_ready;
// vs2 stage: captures vs2 and v0 mask shift registers plus the buffered
// vs1 slice (vs1_tmp) fetched in the previous cycle.
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_vs2_valid
if (~async_rst_ni) begin
state_vs2_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_vs2_valid_q <= 1'b0;
end
else if (state_vs2_ready) begin
state_vs2_valid_q <= state_vs1_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_vs2
if (state_vs2_ready & state_vs1_valid_q) begin
state_vs2_q <= state_vs1_q;
vs2_shift_q <= vs2_shift_d;
v0msk_shift_q <= v0msk_shift_d;
vs1_tmp_q <= vs1_tmp_d;
end
end
assign state_vs2_ready = ~state_vs2_valid_q | state_ex1_ready;
// Optional buffer stage after operand extraction (ex1 holds the
// carry-chain-interleaved operands and the operand mask).
if (BUF_OPERANDS) begin
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_ex1_valid
if (~async_rst_ni) begin
state_ex1_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_ex1_valid_q <= 1'b0;
end
else if (state_ex1_ready) begin
state_ex1_valid_q <= state_vs2_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_ex1
if (state_ex1_ready & state_vs2_valid_q) begin
state_ex1_q <= state_vs2_q;
operand1_q <= operand1_d;
operand2_q <= operand2_d;
operand_mask_q <= operand_mask_d;
end
end
assign state_ex1_ready = ~state_ex1_valid_q | state_ex2_ready;
end else begin
// unbuffered variant: combinational pass-through
always_comb begin
state_ex1_valid_q = state_vs2_valid_q;
state_ex1_q = state_vs2_q;
operand1_q = operand1_d;
operand2_q = operand2_d;
operand_mask_q = operand_mask_d;
end
assign state_ex1_ready = state_ex2_ready;
end
// Optional buffer stage for intermediate results (adder sum, compare
// bits, saturation values, shift results) between the two execute steps.
if (BUF_INTERMEDIATE) begin
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_ex2_valid
if (~async_rst_ni) begin
state_ex2_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_ex2_valid_q <= 1'b0;
end
else if (state_ex2_ready) begin
state_ex2_valid_q <= state_ex1_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_ex2
if (state_ex2_ready & state_ex1_valid_q) begin
state_ex2_q <= state_ex1_q;
operand1_tmp_q <= operand1_tmp_d;
operand2_tmp_q <= operand2_tmp_d;
operand_mask_tmp_q <= operand_mask_tmp_d;
sum_q <= sum_d;
cmp_q <= cmp_d;
satval_q <= satval_d;
shift_res_q <= shift_res_d;
end
end
assign state_ex2_ready = ~state_ex2_valid_q | state_res_ready;
end else begin
// unbuffered variant: combinational pass-through
always_comb begin
state_ex2_valid_q = state_ex1_valid_q;
state_ex2_q = state_ex1_q;
operand1_tmp_q = operand1_tmp_d;
operand2_tmp_q = operand2_tmp_d;
operand_mask_tmp_q = operand_mask_tmp_d;
sum_q = sum_d;
cmp_q = cmp_d;
satval_q = satval_d;
shift_res_q = shift_res_d;
end
assign state_ex2_ready = state_res_ready;
end
// Optional buffer stage after the final result computation (ALU result,
// compare result bits, and the result byte mask).
if (BUF_RESULTS) begin
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_res_valid
if (~async_rst_ni) begin
state_res_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_res_valid_q <= 1'b0;
end
else if (state_res_ready) begin
state_res_valid_q <= state_ex2_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_res
if (state_res_ready & state_ex2_valid_q) begin
state_res_q <= state_ex2_q;
result_alu_q <= result_alu_d;
result_cmp_q <= result_cmp_d;
result_mask_q <= result_mask_d;
end
end
assign state_res_ready = ~state_res_valid_q | state_vd_ready;
end else begin
// unbuffered variant: combinational pass-through
always_comb begin
state_res_valid_q = state_ex2_valid_q;
state_res_q = state_ex2_q;
result_alu_q = result_alu_d;
result_cmp_q = result_cmp_d;
result_mask_q = result_mask_d;
end
assign state_res_ready = state_vd_ready;
end
// vd stage (always present): holds the assembled destination vreg data
// and masks; stalls until the write is allowed (see state_vd_stall).
always_ff @(posedge clk_i or negedge async_rst_ni) begin : vproc_alu_stage_vd_valid
if (~async_rst_ni) begin
state_vd_valid_q <= 1'b0;
end
else if (~sync_rst_ni) begin
state_vd_valid_q <= 1'b0;
end
else if (state_vd_ready) begin
state_vd_valid_q <= state_res_valid_q;
end
end
always_ff @(posedge clk_i) begin : vproc_alu_stage_vd
if (state_vd_ready & state_res_valid_q) begin
state_vd_q <= state_res_q;
vd_alu_shift_q <= vd_alu_shift_d;
vdmsk_alu_shift_q <= vdmsk_alu_shift_d;
vd_cmp_shift_q <= vd_cmp_shift_d;
vdmsk_cmp_q <= vdmsk_cmp_d;
end
end
assign state_vd_ready = ~state_vd_valid_q | ~state_vd_stall;
// Write delay shift register: keeps each write request alive for up to
// MAX_WR_DELAY cycles so it can be retried if a write attempt loses
// arbitration for the vreg file port (see the retry mux below).
if (MAX_WR_DELAY > 0) begin
always_ff @(posedge clk_i) begin : vproc_alu_wr_delay
vreg_wr_en_q [0] <= vreg_wr_en_d;
vreg_wr_clear_q[0] <= vreg_wr_clear_d;
vreg_wr_addr_q [0] <= vreg_wr_addr_d;
vreg_wr_mask_q [0] <= vreg_wr_mask_d;
vreg_wr_q [0] <= vreg_wr_d;
for (int i = 1; i < MAX_WR_DELAY; i++) begin
vreg_wr_en_q [i] <= vreg_wr_en_q [i-1];
vreg_wr_clear_q[i] <= vreg_wr_clear_q[i-1];
vreg_wr_addr_q [i] <= vreg_wr_addr_q [i-1];
vreg_wr_mask_q [i] <= vreg_wr_mask_q [i-1];
vreg_wr_q [i] <= vreg_wr_q [i-1];
end
end
end
always_ff @(posedge clk_i) begin
clear_wr_hazards_q <= clear_wr_hazards_d;
end
endgenerate
// Write retry mux: the freshest write wins by default; delayed writes
// are retried only at delay slots i where i+1 is one less than a power
// of two (i = 0, 2, 6, ...), i.e. at exponentially spaced attempts.
// A later (older) matching slot overrides an earlier one.
always_comb begin
vreg_wr_en_o = vreg_wr_en_d;
vreg_wr_addr_o = vreg_wr_addr_d;
vreg_wr_mask_o = vreg_wr_mask_d;
vreg_wr_o = vreg_wr_d;
for (int i = 0; i < MAX_WR_DELAY; i++) begin
if ((((i + 1) & (i + 2)) == 0) & vreg_wr_en_q[i]) begin
vreg_wr_en_o = 1'b1;
vreg_wr_addr_o = vreg_wr_addr_q[i];
vreg_wr_mask_o = vreg_wr_mask_q[i];
vreg_wr_o = vreg_wr_q [i];
end
end
end
// write hazard clearing: one-hot of the vreg whose write hazard retires
// this cycle, taken from the oldest delay slot (or directly if there is
// no delay buffer)
always_comb begin
clear_wr_hazards_d = vreg_wr_clear_d ? (32'b1 << vreg_wr_addr_d ) : 32'b0;
if (MAX_WR_DELAY > 0) begin
clear_wr_hazards_d = vreg_wr_clear_q[MAX_WR_DELAY-1] ? (32'b1 << vreg_wr_addr_q[MAX_WR_DELAY-1]) : 32'b0;
end
end
assign clear_wr_hazards_o = clear_wr_hazards_q;
// Stall vreg reads until pending writes are complete; note that vreg read
// stalling always happens in the init stage, since otherwise a substantial
// amount of state would have to be forwarded (such as vreg_pend_wr_q)
assign state_init_stall = (state_init.vs1_fetch & vreg_pend_wr_q[state_init.rs1.r.vaddr]) |
(state_init.vs2_fetch & vreg_pend_wr_q[state_init.rs2.r.vaddr]) |
(state_init.first_cycle & state_init_masked & vreg_pend_wr_q[0]);
// Stall vreg writes until pending reads of the destination register are
// complete and while the instruction is speculative
assign state_vd_stall = state_vd_q.vd_store & (vreg_pend_rd_i[state_vd_q.vd] | instr_spec_i[state_vd_q.id]);
// instruction completion is reported when the last slice leaves the vd
// stage without being stalled
assign instr_done_valid_o = state_vd_valid_q & state_vd_q.last_cycle & ~state_vd_stall;
assign instr_done_id_o = state_vd_q.id;
// pending vreg reads
// Note: The pipeline might stall while reading a vreg, hence a vreg has to
// be part of the pending reads until the read is complete.
// pending vreg reads
// Note: The pipeline might stall while reading a vreg, hence a vreg has to
// be part of the pending reads until the read is complete.
// For each EMUL / narrow-source combination, build the 32-bit mask of
// source vregs still to be read: registers of the group not yet reached
// by the counter, plus the current one while its fetch is outstanding.
logic [31:0] pend_vs1, pend_vs2;
always_comb begin
pend_vs1 = DONT_CARE_ZERO ? '0 : 'x;
unique case ({state_init.emul, state_init.vs1_narrow})
{EMUL_1, 1'b0}: pend_vs1 = {31'b0, state_init.vs1_fetch} << state_init.rs1.r.vaddr;
{EMUL_2, 1'b1}: pend_vs1 = {31'b0, state_init.vs1_fetch} << state_init.rs1.r.vaddr;
{EMUL_2, 1'b0}: pend_vs1 = (32'h03 & ((32'h02 | {31'b0, state_init.vs1_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs1.r.vaddr[4:1], 1'b0};
{EMUL_4, 1'b1}: pend_vs1 = (32'h03 & ((32'h02 | {31'b0, state_init.vs1_fetch}) << state_init.count.part.mul[2:1])) << {state_init.rs1.r.vaddr[4:1], 1'b0};
{EMUL_4, 1'b0}: pend_vs1 = (32'h0F & ((32'h0E | {31'b0, state_init.vs1_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs1.r.vaddr[4:2], 2'b0};
{EMUL_8, 1'b1}: pend_vs1 = (32'h0F & ((32'h0E | {31'b0, state_init.vs1_fetch}) << state_init.count.part.mul[2:1])) << {state_init.rs1.r.vaddr[4:2], 2'b0};
{EMUL_8, 1'b0}: pend_vs1 = (32'hFF & ((32'hFE | {31'b0, state_init.vs1_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs1.r.vaddr[4:3], 3'b0};
default: ;
endcase
// same construction for the vs2 source group
pend_vs2 = DONT_CARE_ZERO ? '0 : 'x;
unique case ({state_init.emul, state_init.vs2_narrow})
{EMUL_1, 1'b0}: pend_vs2 = {31'b0, state_init.vs2_fetch} << state_init.rs2.r.vaddr;
{EMUL_2, 1'b1}: pend_vs2 = {31'b0, state_init.vs2_fetch} << state_init.rs2.r.vaddr;
{EMUL_2, 1'b0}: pend_vs2 = (32'h03 & ((32'h02 | {31'b0, state_init.vs2_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs2.r.vaddr[4:1], 1'b0};
{EMUL_4, 1'b1}: pend_vs2 = (32'h03 & ((32'h02 | {31'b0, state_init.vs2_fetch}) << state_init.count.part.mul[2:1])) << {state_init.rs2.r.vaddr[4:1], 1'b0};
{EMUL_4, 1'b0}: pend_vs2 = (32'h0F & ((32'h0E | {31'b0, state_init.vs2_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs2.r.vaddr[4:2], 2'b0};
{EMUL_8, 1'b1}: pend_vs2 = (32'h0F & ((32'h0E | {31'b0, state_init.vs2_fetch}) << state_init.count.part.mul[2:1])) << {state_init.rs2.r.vaddr[4:2], 2'b0};
{EMUL_8, 1'b0}: pend_vs2 = (32'hFF & ((32'hFE | {31'b0, state_init.vs2_fetch}) << state_init.count.part.mul[2:0])) << {state_init.rs2.r.vaddr[4:3], 3'b0};
default: ;
endcase
end
// Determine whether there is a pending read of v0 as a mask: the op is
// masked, or uses the mask as carry input or as selector.
logic state_init_masked, state_vreg_masked, state_vs1_masked;
assign state_init_masked = (state_init.mode.masked | (state_init.mode.op_mask == ALU_MASK_CARRY) | (state_init.mode.op_mask == ALU_MASK_SEL));
assign state_vreg_masked = (state_vreg_q.mode.masked | (state_vreg_q.mode.op_mask == ALU_MASK_CARRY) | (state_vreg_q.mode.op_mask == ALU_MASK_SEL));
assign state_vs1_masked = (state_vs1_q.mode.masked | (state_vs1_q.mode.op_mask == ALU_MASK_CARRY) | (state_vs1_q.mode.op_mask == ALU_MASK_SEL));
// Note: vs2 is read in the second cycle; the v0 mask has no extra buffer
// and is always read in state_vs1
assign vreg_pend_rd_o = ((
((state_init_valid & state_init.rs1.vreg ) ? pend_vs1 : '0) |
((state_init_valid & state_init.rs2.vreg ) ? pend_vs2 : '0) |
((state_init_valid & state_init.first_cycle) ? {31'b0, state_init_masked} : '0)
) & ~vreg_pend_wr_q) |
(( state_vreg_valid_q & state_vreg_q.vs2_fetch ) ? (32'h1 << state_vreg_q.rs2.r.vaddr) : '0) |
((~BUF_VREG & state_vs1_valid_q & state_vs1_q.vs2_fetch ) ? (32'h1 << state_vs1_q.rs2.r.vaddr ) : '0) |
(( state_vreg_valid_q & state_vreg_q.first_cycle) ? {31'b0, state_vreg_masked} : '0) |
(( state_vs1_valid_q & state_vs1_q.first_cycle ) ? {31'b0, state_vs1_masked } : '0);
///////////////////////////////////////////////////////////////////////////
// ALU REGISTER READ/WRITE AND CONVERSION
// source register addressing and read:
assign vreg_rd_addr_o = (state_init.count.part.low[0] == 1'b0) ? state_init.rs1.r.vaddr : state_init.rs2.r.vaddr;
assign vreg_rd_d = vreg_rd_i;
// operand shift registers assignment:
fetch_info vs1_info, vs2_info, v0msk_info;
always_comb begin
vs1_info.shift = state_vreg_q.vs1_shift;
vs1_info.fetch = state_vreg_q.vs1_fetch;
vs2_info.shift = state_vs1_q.vs2_shift;
vs2_info.fetch = state_vs1_q.vs2_fetch;
v0msk_info.shift = state_vs1_q.v0msk_shift;
v0msk_info.fetch = state_vs1_q.first_cycle;
end
`VREGSHIFT_OPERAND_NARROW(VREG_W, ALU_OP_W, vs1_info, vreg_rd_q, vs1_shift_q, vs1_shift_d)
`VREGSHIFT_OPERAND_NARROW(VREG_W, ALU_OP_W, vs2_info, vreg_rd_q, vs2_shift_q, vs2_shift_d)
`VREGSHIFT_OPMASK(VREG_W, ALU_OP_W, v0msk_info, state_vs1_q.eew, vreg_mask_i, v0msk_shift_q, v0msk_shift_d)
assign vs1_tmp_d = vs1_shift_q[ALU_OP_W-1:0];
// conversion from source registers to operands:
logic [ALU_OP_W-1:0] operand1, operand2;
vproc_vregunpack #(
.OP_W ( ALU_OP_W ),
.DONT_CARE_ZERO ( DONT_CARE_ZERO )
) alu_vregunpack (
.vsew_i ( state_vs2_q.eew ),
.rs1_i ( state_vs2_q.rs1 ),
.vs1_i ( vs1_tmp_q ),
.vs1_narrow_i ( state_vs2_q.vs1_narrow ),
.vs1_sigext_i ( state_vs2_q.mode.sigext ),
.vs2_i ( vs2_shift_q[ALU_OP_W-1:0] ),
.vs2_narrow_i ( state_vs2_q.vs2_narrow ),
.vs2_sigext_i ( state_vs2_q.mode.sigext ),
.vmsk_i ( v0msk_shift_q[ALU_OP_W/8-1:0] ),
.operand1_o ( operand1 ),
.operand2_o ( ),
.operand_mask_o ( operand_mask_d )
);
always_comb begin
operand2 = vs2_shift_q[ALU_OP_W-1:0];
if (state_vs2_q.vs2_narrow) begin
operand2 = DONT_CARE_ZERO ? '0 : 'x;
unique case (state_vs2_q.eew)
VSEW_16: begin
for (int i = 0; i < ALU_OP_W / 16; i++) begin
operand2[16*i +: 16] = {{8 {state_vs2_q.mode.sigext & vs2_shift_q[8 *i + 7 ]}}, vs2_shift_q[8 *i +: 8 ]};
end
end
VSEW_32: begin
for (int i = 0; i < ALU_OP_W / 32; i++) begin
operand2[32*i +: 32] = {{16{state_vs2_q.mode.sigext & vs2_shift_q[16*i + 15]}}, vs2_shift_q[16*i +: 16]};
end
end
default: ;
endcase
end
end
// subtraction is performed by inverting one operand and injecting a carry
logic state_vs2_subtract;
assign state_vs2_subtract = state_vs2_q.mode.inv_op1 | state_vs2_q.mode.inv_op2;
// Interleave a carry-chain control bit after every operand byte: each
// 32-bit lane becomes 36 bits, with bytes at offsets 1, 10, 19, 28 and
// chain bits at 0, 9, 18, 27. Setting a chain-bit pair to (1,1)/(x,0)
// fractures or joins the adder at that byte boundary depending on EEW.
always_comb begin
operand1_d = DONT_CARE_ZERO ? '0 : 'x;
operand2_d = DONT_CARE_ZERO ? '0 : 'x;
for (int i = 0; i < ALU_OP_W / 32; i++) begin
// operand 1 extraction (optionally inverted for subtraction)
operand1_d[36*i+1 +: 8] = state_vs2_q.mode.inv_op1 ? ~operand1[32*i +: 8] : operand1[32*i +: 8];
operand1_d[36*i+10 +: 8] = state_vs2_q.mode.inv_op1 ? ~operand1[32*i+8 +: 8] : operand1[32*i+8 +: 8];
operand1_d[36*i+19 +: 8] = state_vs2_q.mode.inv_op1 ? ~operand1[32*i+16 +: 8] : operand1[32*i+16 +: 8];
operand1_d[36*i+28 +: 8] = state_vs2_q.mode.inv_op1 ? ~operand1[32*i+24 +: 8] : operand1[32*i+24 +: 8];
// operand 2 extraction (optionally inverted for subtraction)
operand2_d[36*i+1 +: 8] = state_vs2_q.mode.inv_op2 ? ~operand2[32*i +: 8] : operand2[32*i +: 8];
operand2_d[36*i+10 +: 8] = state_vs2_q.mode.inv_op2 ? ~operand2[32*i+8 +: 8] : operand2[32*i+8 +: 8];
operand2_d[36*i+19 +: 8] = state_vs2_q.mode.inv_op2 ? ~operand2[32*i+16 +: 8] : operand2[32*i+16 +: 8];
operand2_d[36*i+28 +: 8] = state_vs2_q.mode.inv_op2 ? ~operand2[32*i+24 +: 8] : operand2[32*i+24 +: 8];
// operand 1 carry logic: element-boundary bits get the carry-in
// (mask bit for carry ops, or 1 for subtraction); interior
// boundaries (for wider EEW) are set to 1 to propagate the carry
operand1_d[36*i ] = ((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4 ]) ^ state_vs2_subtract;
operand1_d[36*i+9 ] = (state_vs2_q.eew == VSEW_8 ) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+1]) ^ state_vs2_subtract) : 1'b1;
operand1_d[36*i+18] = (state_vs2_q.eew != VSEW_32) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+2]) ^ state_vs2_subtract) : 1'b1;
operand1_d[36*i+27] = (state_vs2_q.eew == VSEW_8 ) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+3]) ^ state_vs2_subtract) : 1'b1;
// operand 2 carry logic: complementary pattern; interior
// boundaries are 0 so the pair (1,0) joins adjacent bytes
operand2_d[36*i ] = 1'b1;
operand2_d[36*i+9 ] = (state_vs2_q.eew == VSEW_8 ) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+1]) ^ state_vs2_subtract) : 1'b0;
operand2_d[36*i+18] = (state_vs2_q.eew != VSEW_32) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+2]) ^ state_vs2_subtract) : 1'b0;
operand2_d[36*i+27] = (state_vs2_q.eew == VSEW_8 ) ? (((state_vs2_q.mode.op_mask == ALU_MASK_CARRY) & operand_mask_d[i*4+3]) ^ state_vs2_subtract) : 1'b0;
end
end
// strip the interleaved chain bits again to recover plain 32-bit lanes
logic [ALU_OP_W-1:0] operand1_32, operand2_32;
always_comb begin
for (int i = 0; i < ALU_OP_W / 32; i++) begin
operand1_32[32*i +: 32] = {operand1_q[36*i+28 +: 8], operand1_q[36*i+19 +: 8], operand1_q[36*i+10 +: 8], operand1_q[36*i+1 +: 8]};
operand2_32[32*i +: 32] = {operand2_q[36*i+28 +: 8], operand2_q[36*i+19 +: 8], operand2_q[36*i+10 +: 8], operand2_q[36*i+1 +: 8]};
end
end
assign operand1_tmp_d = operand1_32;
assign operand2_tmp_d = operand2_32;
assign operand_mask_tmp_d = operand_mask_q;
// result byte mask: masked-off bytes and bytes beyond VL are disabled
logic [VREG_W-1:0] vl_mask;
assign vl_mask = state_ex2_q.vl_0 ? {VREG_W{1'b0}} : ({VREG_W{1'b1}} >> (~state_ex2_q.vl));
assign result_mask_d = (state_ex2_q.mode.masked ? operand_mask_tmp_q : {(ALU_OP_W/8){1'b1}}) & vl_mask[state_ex2_q.count.val*ALU_OP_W/8 +: ALU_OP_W/8];
// conversion from results to destination registers:
// conversion from results to destination registers:
logic [ALU_OP_W -1:0] vd_alu;
logic [ALU_OP_W/8-1:0] vdmsk_alu;
vproc_vregpack #(
.OP_W ( ALU_OP_W ),
.DONT_CARE_ZERO ( DONT_CARE_ZERO )
) alu_vregpack (
.vsew_i ( state_res_q.eew ),
.result_i ( result_alu_q ),
.result_narrow_i ( state_res_q.vd_narrow ),
.result_mask_i ( result_mask_q ),
.vd_o ( vd_alu ),
.vdmsk_o ( vdmsk_alu )
);
// result shift register assignment: narrowing results fill the shift
// register at half rate, so it shifts only on alternate cycles
store_info vd_info;
always_comb begin
vd_info.shift = ~state_res_q.vd_narrow | ~state_res_q.count.val[0];
end
`VREGSHIFT_RESULT_NARROW(VREG_W, ALU_OP_W, vd_info, vd_alu, vd_alu_shift_q, vd_alu_shift_d)
`VREGSHIFT_RESMASK_NARROW(VREG_W, ALU_OP_W, vd_info, vdmsk_alu, vdmsk_alu_shift_q, vdmsk_alu_shift_d)
// Inactive elements (tail and masked-off elements) are always handled
// according to the undisturbed policy (i.e., inactive elements are
// not updated). However, mask destination values are the only exception,
// since these can be written at bit granularity and would require a
// dedicated write enable for each bit, rather than a byte enable.
// According to the specification mask destination values are always tail-
// agnostic, hence inactive elements can be left unchanged or overwritten
// with 1s. Hence, mask destination values are written after one vector
// register was processed and all inactive values (according to the mask
// `result_mask_q') are overwritten with 1s.
always_comb begin
vd_cmp_shift_d = DONT_CARE_ZERO ? '0 : 'x;
vdmsk_cmp_d = DONT_CARE_ZERO ? '0 : 'x;
unique case (state_res_q.eew)
VSEW_8: begin
vd_cmp_shift_d[VMSK_W -ALU_OP_W/8 -1:0] = vd_cmp_shift_q[VMSK_W -1:ALU_OP_W/8 ];
for (int i = 0; i < ALU_OP_W / 8 ; i++) begin
vd_cmp_shift_d[VMSK_W -ALU_OP_W/8 +i] = result_cmp_q[ i] | ~result_mask_q[i ];
end
end
VSEW_16: begin
vd_cmp_shift_d[VMSK_W/2-ALU_OP_W/16-1:0] = vd_cmp_shift_q[VMSK_W/2-1:ALU_OP_W/16];
for (int i = 0; i < ALU_OP_W / 16; i++) begin
vd_cmp_shift_d[VMSK_W/2-ALU_OP_W/16+i] = result_cmp_q[2*i] | ~result_mask_q[2*i];
end
end
VSEW_32: begin
vd_cmp_shift_d[VMSK_W/4-ALU_OP_W/32-1:0] = vd_cmp_shift_q[VMSK_W/4-1:ALU_OP_W/32];
if (VMSK_W == 16) begin
vd_cmp_shift_d[VMSK_W/2-1:VMSK_W/4] = '1;
end
for (int i = 0; i < ALU_OP_W / 32; i++) begin
vd_cmp_shift_d[VMSK_W/4-ALU_OP_W/32+i] = result_cmp_q[4*i] | ~result_mask_q[4*i];
end
end
default: ;
endcase
unique case (state_res_q.eew)
VSEW_8: vdmsk_cmp_d = {{(VMSK_W*7 )/8 {1'b0}}, {(VMSK_W+7 )/8 {1'b1}}} << ((VMSK_W/8 ) * state_res_q.count.part.mul);
VSEW_16: vdmsk_cmp_d = {{(VMSK_W*15)/16{1'b0}}, {(VMSK_W+15)/16{1'b1}}} << ((VMSK_W/16) * state_res_q.count.part.mul);
VSEW_32: vdmsk_cmp_d = (VMSK_W == 16) ? 16'h0001 << state_res_q.count.part.mul[2:1] :
{{(VMSK_W*31)/32{1'b0}}, {(VMSK_W+31)/32{1'b1}}} << ((VMSK_W/32) * state_res_q.count.part.mul);
default: ;
endcase
end
//
// Vector register file write-back signals.
// Write enable: the destination-state entry is valid, requests a store,
// the stage is not stalled, and the owning instruction was not killed.
assign vreg_wr_en_d = state_vd_valid_q & state_vd_q.vd_store & ~state_vd_stall & ~instr_killed_i[state_vd_q.id];
// Clear pending-write tracking: for compare (mask-producing) ops only in
// the instruction's last cycle (the mask image accumulates until then),
// otherwise whenever a store is performed.
assign vreg_wr_clear_d = state_vd_valid_q & (state_vd_q.mode.cmp ? state_vd_q.last_cycle : state_vd_q.vd_store) & ~state_vd_stall;
// Destination vector register address.
assign vreg_wr_addr_d = state_vd_q.vd;
// Byte enables: mask-destination enables for compares, shifted ALU enables
// otherwise; all zero while no write is active.
// NOTE(review): gated by the registered output `vreg_wr_en_o' rather than
// `vreg_wr_en_d' -- presumably to match the write-port register timing;
// confirm against the surrounding pipeline registers.
assign vreg_wr_mask_d = vreg_wr_en_o ? (state_vd_q.mode.cmp ? vdmsk_cmp_q : vdmsk_alu_shift_q) : '0;
// Write data: compares replicate the VMSK_W-bit mask image (8x) across the
// write port width; all other ops use the shifted ALU result.
assign vreg_wr_d = state_vd_q.mode.cmp ? {8{vd_cmp_shift_q}} : vd_alu_shift_q;
///////////////////////////////////////////////////////////////////////////
// ALU ARITHMETIC:
// Set when either operand is inverted, i.e. the addition actually
// implements a subtraction (used below to turn carries into borrows at
// element boundaries).
logic state_ex1_subtract;
assign state_ex1_subtract = state_ex1_q.mode.inv_op1 | state_ex1_q.mode.inv_op2;
// 37-bit adder (fracturable 32-bit adder with carry-in and carry-out)
// Operands are pre-formatted with 36 bits per 32-bit data word: each data
// byte is preceded by one carry-chain bit (chain bits at offsets 0, 9, 18,
// 27; data bytes at [8:1], [17:10], [26:19], [35:28] -- the byte sign bits
// read below at offsets 8/17/26/35 confirm this layout).  Prepending a 0
// makes the addition 37 bits wide so that the word's carry-out appears as
// the top bit of `sum37'.
logic [ALU_OP_W*37/32-1:0] sum37;
always_comb begin
    sum37 = DONT_CARE_ZERO ? '0 : 'x;
    for (int i = 0; i < ALU_OP_W / 32; i++) begin
        sum37[37*i +: 37] = {1'b0, operand2_q[36*i +: 36]} + {1'b0, operand1_q[36*i +: 36]};
    end
end
// Per-byte carry-out, operand-sign and result-sign flags used by the
// overflow/compare logic below.  For EEW > 8 the element's single
// carry/sign bit is replicated into all of its byte positions.
logic [ALU_OP_W/8-1:0] carry, sig_op1, sig_op2, sig_res;
always_comb begin
    sum_d = DONT_CARE_ZERO ? '0 : 'x;
    carry = DONT_CARE_ZERO ? '0 : 'x;
    sig_op1 = DONT_CARE_ZERO ? '0 : 'x;
    sig_op2 = DONT_CARE_ZERO ? '0 : 'x;
    sig_res = DONT_CARE_ZERO ? '0 : 'x;
    for (int i = 0; i < ALU_OP_W / 32; i++) begin
        // discard lowest bit of the 37-bit result and fill in carry chain bits
        // (`sum_d' keeps 9 bits per byte: 8 data bits followed by one chain
        // bit at offsets 8, 17, 26, 35 of each 36-bit word)
        sum_d[36*i +: 8] = sum37[37*i+1 +: 8];
        sum_d[36*i+9 +: 8] = sum37[37*i+10 +: 8];
        sum_d[36*i+18 +: 8] = sum37[37*i+19 +: 8];
        sum_d[36*i+27 +: 8] = sum37[37*i+28 +: 8];
        unique case (state_ex1_q.eew)
            VSEW_8: begin
                // every byte is an element: each chain-bit slot receives the
                // byte's carry-out, inverted for subtraction (borrow).
                sum_d [36*i+8 ] = sum37 [37*i+9 ] ^ state_ex1_subtract;
                sum_d [36*i+17 ] = sum37 [37*i+18] ^ state_ex1_subtract;
                sum_d [36*i+26 ] = sum37 [37*i+27] ^ state_ex1_subtract;
                sum_d [36*i+35 ] = sum37 [37*i+36] ^ state_ex1_subtract;
                carry [4 *i +: 4] = {sum37 [37*i+36], sum37 [37*i+27], sum37 [37*i+18], sum37 [37*i+9]};
                sig_op1[4 *i +: 4] = {operand1_q[36*i+35], operand1_q[36*i+26], operand1_q[36*i+17], operand1_q[36*i+8]};
                sig_op2[4 *i +: 4] = {operand2_q[36*i+35], operand2_q[36*i+26], operand2_q[36*i+17], operand2_q[36*i+8]};
                sig_res[4 *i +: 4] = {sum37 [37*i+35], sum37 [37*i+26], sum37 [37*i+17], sum37 [37*i+8]};
            end
            VSEW_16: begin
                // inside an element the chain-bit slot takes the LSB of the
                // next byte (so the 9-bit-per-byte right shift used by the
                // averaging ops stays seamless across byte boundaries);
                // element-boundary slots take carry-out ^ subtract.
                sum_d [36*i+8 ] = sum37 [37*i+10];
                sum_d [36*i+17 ] = sum37 [37*i+18] ^ state_ex1_subtract;
                sum_d [36*i+26 ] = sum37 [37*i+28];
                sum_d [36*i+35 ] = sum37 [37*i+36] ^ state_ex1_subtract;
                // replicate each 16-bit element's flags into both byte slots
                carry [4 *i +: 4] = {{2{sum37 [37*i+36]}}, {2{sum37 [37*i+18]}}};
                sig_op1[4 *i +: 4] = {{2{operand1_q[36*i+35]}}, {2{operand1_q[36*i+17]}}};
                sig_op2[4 *i +: 4] = {{2{operand2_q[36*i+35]}}, {2{operand2_q[36*i+17]}}};
                sig_res[4 *i +: 4] = {{2{sum37 [37*i+35]}}, {2{sum37 [37*i+17]}}};
            end
            VSEW_32: begin
                // single 32-bit element per word: only the top chain-bit
                // slot is an element boundary.
                sum_d [36*i+8 ] = sum37 [37*i+10];
                sum_d [36*i+17 ] = sum37 [37*i+19];
                sum_d [36*i+26 ] = sum37 [37*i+28];
                sum_d [36*i+35 ] = sum37 [37*i+36] ^ state_ex1_subtract;
                // replicate the element's flags into all four byte slots
                carry [4 *i +: 4] = {4{sum37 [37*i+36]}};
                sig_op1[4 *i +: 4] = {4{operand1_q[36*i+35]}};
                sig_op2[4 *i +: 4] = {4{operand2_q[36*i+35]}};
                sig_res[4 *i +: 4] = {4{sum37 [37*i+35]}};
            end
            default: ;
        endcase
    end
end
// signed arithmetic overflow flag (note that subtraction is implemented by
// inverting the subtrahend and adding it with carry to the minuend; hence
// the logic for detecting overflow always follows the rules for addition:
// signed overflow occurs when the operands have equal sign and the sign of
// the result is different)
// Per-byte signed-overflow flag (see rationale in the comment above).
logic [ALU_OP_W/8-1:0] ovflw;
assign ovflw = ~(sig_op1 ^ sig_op2) & (sig_op1 ^ sig_res);
// Select what is captured in the compare register `cmp_d' for use by the
// second execute stage (saturation, min/max/merge selection, compares).
always_comb begin
    cmp_d = DONT_CARE_ZERO ? '0 : 'x;
    unique case (state_ex1_q.mode.opx1.sel)
        // carry-out; inverted for subtraction, yielding the borrow
        ALU_SEL_CARRY: cmp_d = state_ex1_subtract ? ~carry : carry;
        ALU_SEL_OVFLW: cmp_d = ovflw;
        ALU_SEL_LT: cmp_d = ovflw ^ sig_res; // minuend is less than subtrahend
        // capture the mask operand (v0 for vmerge.*); all zeroes when the
        // mask is not selected as operand
        ALU_SEL_MASK: begin
            for (int i = 0; i < ALU_OP_W / 8; i++) begin
                cmp_d[i] = (state_ex1_q.mode.op_mask == ALU_MASK_SEL) & operand_mask_q[i];
            end
        end
        default: ;
    endcase
end
// saturation value generation: generate the sign bit and fill bit for
// saturation values of the result of the addition (used by saturating add
// and subtract instructions); differentiation between signed and unsigned
// mode is done based on whether the carry or the overflow bit is saved in
// the compare register `cmp_q'; for signed overflow the sign bit of the
// result is inverted, while the fill bit (i.e., all other bits of the
// final result) is the initial sign of the result (hence the fill bit
// is always different from the sign bit); for unsigned operations the
// carry bit fills the entire final result (sign bit and fill bit equal)
// Signedness of the saturation logic, derived from which flag is captured
// in the compare register (see the comment above): carry => unsigned,
// signed overflow => signed.
logic mode_signed;
always_comb begin
    mode_signed = DONT_CARE_ZERO ? '0 : 'x;
    unique case (state_ex1_q.mode.opx1.sel)
        ALU_SEL_CARRY: mode_signed = 1'b0; // carry saved -> unsigned saturation
        ALU_SEL_OVFLW: mode_signed = 1'b1; // overflow saved -> signed saturation
        default: ;
    endcase
end
// Generate 2 saturation bits per result byte: {sign bit, fill bit}.
// Signed: the element's sign slot holds the inverted result sign and all
// other (fill) slots hold the original result sign, producing the most
// positive/negative representable value.  Unsigned: the carry-out fills
// every slot (all-0s on underflow, all-1s on overflow).
always_comb begin
    satval_d = DONT_CARE_ZERO ? '0 : 'x;
    unique case (state_ex1_q.eew)
        VSEW_8: begin
            for (int i = 0; i < ALU_OP_W / 8 ; i++) begin
                satval_d[2*i +: 2] = mode_signed ? {~sig_res[ i], sig_res[ i] } : {2{carry[ i]}};
            end
        end
        VSEW_16: begin
            // 4 bits per element: sign slot of the upper byte plus 3 fill slots
            for (int i = 0; i < ALU_OP_W / 16; i++) begin
                satval_d[4*i +: 4] = mode_signed ? {~sig_res[2*i], {3{sig_res[2*i]}}} : {4{carry[2*i]}};
            end
        end
        VSEW_32: begin
            // 8 bits per element: sign slot of the top byte plus 7 fill slots
            for (int i = 0; i < ALU_OP_W / 32; i++) begin
                satval_d[8*i +: 8] = mode_signed ? {~sig_res[4*i], {7{sig_res[4*i]}}} : {8{carry[4*i]}};
            end
        end
        default: ;
    endcase
end
// barrel shifter
// Per-element shifts on the 32-bit-aligned operand views; only the low
// log2(SEW) bits of the shift amount are used (3/4/5 bits for SEW 8/16/32),
// matching the RVV shift-amount truncation rule.
always_comb begin
    shift_res_d = DONT_CARE_ZERO ? '0 : 'x;
    unique case ({state_ex1_q.mode.opx1.shift, state_ex1_q.eew})
        // vsll.* (logical left shift)
        {ALU_SHIFT_VSLL, VSEW_8}: begin
            for (int i = 0; i < ALU_OP_W / 8 ; i++)
                shift_res_d[8 *i +: 8 ] = operand2_32[8 *i +: 8 ] << operand1_32[8 *i +: 3];
        end
        {ALU_SHIFT_VSLL, VSEW_16}: begin
            for (int i = 0; i < ALU_OP_W / 16; i++)
                shift_res_d[16*i +: 16] = operand2_32[16*i +: 16] << operand1_32[16*i +: 4];
        end
        {ALU_SHIFT_VSLL, VSEW_32}: begin
            for (int i = 0; i < ALU_OP_W / 32; i++)
                shift_res_d[32*i +: 32] = operand2_32[32*i +: 32] << operand1_32[32*i +: 5];
        end
        // vsrl.* (logical right shift)
        {ALU_SHIFT_VSRL, VSEW_8}: begin
            for (int i = 0; i < ALU_OP_W / 8 ; i++)
                shift_res_d[8 *i +: 8 ] = operand2_32[8 *i +: 8 ] >> operand1_32[8 *i +: 3];
        end
        {ALU_SHIFT_VSRL, VSEW_16}: begin
            for (int i = 0; i < ALU_OP_W / 16; i++)
                shift_res_d[16*i +: 16] = operand2_32[16*i +: 16] >> operand1_32[16*i +: 4];
        end
        {ALU_SHIFT_VSRL, VSEW_32}: begin
            for (int i = 0; i < ALU_OP_W / 32; i++)
                shift_res_d[32*i +: 32] = operand2_32[32*i +: 32] >> operand1_32[32*i +: 5];
        end
        // vsra.* (arithmetic right shift, sign-extending)
        {ALU_SHIFT_VSRA, VSEW_8}: begin
            for (int i = 0; i < ALU_OP_W / 8 ; i++)
                shift_res_d[8 *i +: 8 ] = $signed(operand2_32[8 *i +: 8 ]) >>> operand1_32[8 *i +: 3];
        end
        {ALU_SHIFT_VSRA, VSEW_16}: begin
            for (int i = 0; i < ALU_OP_W / 16; i++)
                shift_res_d[16*i +: 16] = $signed(operand2_32[16*i +: 16]) >>> operand1_32[16*i +: 4];
        end
        {ALU_SHIFT_VSRA, VSEW_32}: begin
            for (int i = 0; i < ALU_OP_W / 32; i++)
                shift_res_d[32*i +: 32] = $signed(operand2_32[32*i +: 32]) >>> operand1_32[32*i +: 5];
        end
        default: ;
    endcase
end
// arithmetic result
// Second execute stage: select the final arithmetic result per operation.
always_comb begin
    result_alu_d = DONT_CARE_ZERO ? '0 : 'x;
    unique case (state_ex2_q.mode.opx2.res)
        // add and saturating add: the result is replaced by the saturation
        // value if the corresponding bit in the compare register is set;
        // for non-saturating addition (and subtraction) the compare
        // register has to be 0
        ALU_VADD: begin
            for (int i = 0; i < ALU_OP_W / 8; i++) begin
                // satval_q holds {sign, fill} per byte; the byte becomes
                // {sign, 7 x fill} when saturating
                result_alu_d[8*i +: 8] = cmp_q[i] ? {satval_q[2*i+1], {7{satval_q[2*i]}}} : sum_q[9*i +: 8];
            end
        end
        // averaging add: the result is right-shifted by one bit (the carry
        // in of the addition can be used to control rounding behavior);
        // the chain bit at 9*i+8 supplies the shifted-in MSB of each byte
        ALU_VAADD: begin
            for (int i = 0; i < ALU_OP_W / 8; i++) begin
                result_alu_d[8*i +: 8] = sum_q[9*i+1 +: 8];
            end
        end
        // bitwise logic ops on the unmodified (pass-through) operands
        ALU_VAND: result_alu_d = operand2_tmp_q & operand1_tmp_q;
        ALU_VOR: result_alu_d = operand2_tmp_q | operand1_tmp_q;
        ALU_VXOR: result_alu_d = operand2_tmp_q ^ operand1_tmp_q;
        ALU_VSHIFT: result_alu_d = shift_res_q;
        // select either one of the operands based on the register `cmp_q',
        // which holds the result of a comparison for the vmin[u].* and
        // vmax[u].* instructions, the v0 mask for vmerge.*, or all zeroes
        // for the vsext.* and vzext.* instructions which use vs2 as source
        // NOTE(review): operand 1 is used inverted here -- presumably it
        // was inverted upstream for the min/max comparison (subtraction)
        // and the `~' restores the original value; confirm with the
        // first-stage operand preparation.
        ALU_VSEL: begin
            for (int i = 0; i < ALU_OP_W / 8; i++) begin
                result_alu_d[8*i +: 8] = cmp_q[i] ? ~operand1_tmp_q[8*i +: 8] : operand2_tmp_q[8*i +: 8];
            end
        end
        // same selection with the condition inverted
        ALU_VSELN: begin
            for (int i = 0; i < ALU_OP_W / 8; i++) begin
                result_alu_d[8*i +: 8] = cmp_q[i] ? operand2_tmp_q[8*i +: 8] : ~operand1_tmp_q[8*i +: 8];
            end
        end
        default: ;
    endcase
end
// compare result; comparisons are done using the compare register `cmp_q';
// equality (or inequality) is determined by testing whether the sum is 0
logic [ALU_OP_W/8-1:0] neq;
always_comb begin
neq = DONT_CARE_ZERO ? '0 : 'x;
unique case (state_ex2_q.eew)
VSEW_8: begin
for (int i = 0; i < ALU_OP_W / 8 ; i++) begin
neq[i ] = | sum_q[9 *i +: 8];
end
end
VSEW_16: begin
for (int i = 0; i < ALU_OP_W / 16; i++) begin
neq[2*i] = |{sum_q[18*i+9 +: 8], sum_q[18*i +: 8]};
end
end
VSEW_32: begin
for (int i = 0; i < ALU_OP_W / 32; i++) begin
neq[4*i] = |{sum_q[36*i+27 +: 8], sum_q[36*i+18 +: 8], sum_q[36*i+9 +: 8], sum_q[36*i +: 8]};
end
end
default: ;
endcase