/*
* Copyright (C) 2014 Semihalf.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* vr_dpdk_ethdev.c -- DPDK ethernet device
*
*/
#include "vr_dpdk.h"
#include <vr_mpls.h>
#include <rte_eth_bond.h>
#include <rte_errno.h>
#if (RTE_VERSION >= RTE_VERSION_NUM(17, 11, 0, 0))
#include <rte_ethdev_pci.h>
#endif
#include <rte_ethdev.h>
#include <rte_hash_crc.h>
#include <rte_ip.h>
#include <rte_port_ethdev.h>
#include <rte_udp.h>
extern int vr_rxd_sz, vr_txd_sz;
extern unsigned int datapath_offloads;
struct rte_eth_conf ethdev_conf = {
#if (RTE_VERSION >= RTE_VERSION_NUM(17, 2, 0, 0))
.link_speeds = ETH_LINK_SPEED_AUTONEG,
#else
    .link_speed = 0, /* ETH_LINK_SPEED_10[0|00|000], or 0 for autonegotiation */
    .link_duplex = 0, /* ETH_LINK_[HALF_DUPLEX|FULL_DUPLEX], or 0 for autonegotiation */
#endif
.rxmode = { /* Port RX configuration. */
/* The multi-queue packet distribution mode to be used, e.g. RSS. */
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = VR_DEF_MAX_PACKET_SZ, /* Only used if jumbo_frame enabled */
.header_split = 0, /* Disable Header Split */
#if (RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0))
.offloads = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP,
#else
.hw_ip_checksum = 1, /* Enable IP/UDP/TCP checksum offload */
        .hw_vlan_filter = 0, /* Disable VLAN filter */
.hw_vlan_strip = 0, /* Disable VLAN strip (might be enabled with --vlan argument) */
.hw_vlan_extend = 0, /* Disable Extended VLAN */
.jumbo_frame = 1, /* Enable Jumbo Frame Receipt */
.hw_strip_crc = 1, /* Enable CRC stripping by hardware */
.enable_scatter = 0, /* Disable scatter packets rx handler */
#endif
},
.rx_adv_conf = {
.rss_conf = { /* Port RSS configuration */
.rss_key = NULL, /* If not NULL, 40-byte hash key */
.rss_key_len = 0, /* Hash key length in bytes */
/* Hash functions to apply */
.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
},
},
.txmode = { /* Port TX configuration. */
.mq_mode = ETH_MQ_TX_NONE, /* TX multi-queues mode */
#if (RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0))
.offloads = 0,
#endif
/* For i40e specifically */
.pvid = 0,
.hw_vlan_reject_tagged = 0, /* If set, reject sending out tagged pkts */
.hw_vlan_reject_untagged = 0, /* If set, reject sending out untagged pkts */
.hw_vlan_insert_pvid = 0, /* If set, enable port based VLAN insertion */
},
.fdir_conf = {
#if VR_DPDK_USE_HW_FILTERING
.mode = RTE_FDIR_MODE_PERFECT, /* Flow Director mode. */
.status = RTE_FDIR_REPORT_STATUS, /* How to report FDIR hash. */
#else
.mode = RTE_FDIR_MODE_NONE,
.status = RTE_FDIR_NO_REPORT_STATUS,
#endif
.pballoc = RTE_FDIR_PBALLOC_64K, /* Space for FDIR filters. */
/* Offset of flexbytes field in RX packets (in 16-bit word units). */
/* TODO: flow director API has changed since DPDK 1.7 */
// .flexbytes_offset = VR_DPDK_MPLS_OFFSET,
/* RX queue of packets matching a "drop" filter in perfect mode. */
.drop_queue = 0,
.flex_conf = {
},
},
};
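/*
 * ethdev_conf above is the template that vr_dpdk_ethdev_init() below hands
 * to rte_eth_dev_configure(). A minimal (hypothetical) sketch of a caller
 * that needs per-port tweaks before init:
 *
 *     struct rte_eth_conf port_conf = ethdev_conf;
 *     port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;  // e.g. a single-queue NIC
 *     vr_dpdk_ethdev_init(ethdev, &port_conf);
 */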
/* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* RX ring configuration */
static const struct rte_eth_rxconf rx_queue_conf = {
.rx_thresh = {
.pthresh = 8, /* Ring prefetch threshold */
.hthresh = 8, /* Ring host threshold */
.wthresh = 4, /* Ring writeback threshold */
},
/* Do not immediately free RX descriptors */
.rx_free_thresh = VR_DPDK_RX_BURST_SZ,
#if (RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0))
.offloads = 0,
#endif
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
/* TX ring configuration */
static const struct rte_eth_txconf tx_queue_conf = {
.tx_thresh = {
.pthresh = 32, /* Ring prefetch threshold */
.hthresh = 0, /* Ring host threshold */
.wthresh = 0, /* Ring writeback threshold */
},
#if (RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0))
.offloads = 0,
.txq_flags = ETH_TXQ_FLAGS_IGNORE,
#else
.txq_flags = 0, /* Set flags for the Tx queue */
#endif
.tx_free_thresh = 32,
.tx_rs_thresh = 32, /* Use PMD default values */
};
#if VR_DPDK_USE_HW_FILTERING
/* Add hardware filter */
int
vr_dpdk_ethdev_filter_add(struct vr_interface *vif, uint16_t queue_id,
unsigned dst_ip, unsigned mpls_label)
{
struct vr_dpdk_ethdev *ethdev = (struct vr_dpdk_ethdev *)vif->vif_os;
uint8_t port_id = ethdev->ethdev_port_id;
struct rte_fdir_filter filter;
int ret;
/* accept 2-byte labels only */
if (mpls_label > 0xffff)
return -EINVAL;
    if (queue_id >= VR_DPDK_MAX_NB_RX_QUEUES)
        return -EINVAL;
memset(&filter, 0, sizeof(filter));
filter.iptype = RTE_FDIR_IPTYPE_IPV4;
filter.l4type = RTE_FDIR_L4TYPE_UDP;
filter.ip_dst.ipv4_addr = dst_ip;
filter.port_dst = rte_cpu_to_be_16((uint16_t)VR_MPLS_OVER_UDP_DST_PORT);
filter.flex_bytes = rte_cpu_to_be_16((uint16_t)mpls_label);
RTE_LOG_DP(DEBUG, VROUTER, "%s: ip_dst=0x%x port_dst=%d flex_bytes=%d\n", __func__,
(unsigned)dst_ip, (unsigned)VR_MPLS_OVER_UDP_DST_PORT, (unsigned)mpls_label);
if (queue_id >= 0xFF) {
RTE_LOG(ERR, VROUTER, " error adding perfect filter for eth device %"
PRIu8 ": queue ID %" PRIu16 " is out of range\n",
port_id, queue_id);
return -EINVAL;
}
ret = rte_eth_dev_fdir_add_perfect_filter(port_id, &filter, (uint16_t)mpls_label,
(uint8_t)queue_id, 0);
if (ret == 0)
ethdev->ethdev_queue_states[queue_id] = VR_DPDK_QUEUE_FILTERING_STATE;
return ret;
}
#endif
/* Get a ready queue ID */
uint16_t
vr_dpdk_ethdev_ready_queue_id_get(struct vr_interface *vif)
{
uint16_t i;
struct vr_dpdk_ethdev *ethdev = (struct vr_dpdk_ethdev *)vif->vif_os;
for (i = ethdev->ethdev_nb_rss_queues; i < ethdev->ethdev_nb_rx_queues; i++) {
if (ethdev->ethdev_queue_states[i] == VR_DPDK_QUEUE_READY_STATE) {
return i;
}
}
return VR_DPDK_INVALID_QUEUE_ID;
}
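/*
 * Illustrative (hypothetical) pairing of the two helpers above, when
 * VR_DPDK_USE_HW_FILTERING is enabled: pick a ready filtering queue and
 * steer MPLS-over-UDP traffic for a given label to it (dst_ip stands for
 * the destination IPv4 address the filter matches on):
 *
 *     uint16_t queue_id = vr_dpdk_ethdev_ready_queue_id_get(vif);
 *     if (queue_id != VR_DPDK_INVALID_QUEUE_ID)
 *         vr_dpdk_ethdev_filter_add(vif, queue_id, dst_ip, mpls_label);
 */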
/* Release ethdev RX queue */
static void
dpdk_ethdev_rx_queue_release(unsigned lcore_id,
unsigned queue_index __attribute__((unused)),
struct vr_interface *vif)
{
struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif->vif_idx];
struct vr_dpdk_queue_params *rx_queue_params
= &lcore->lcore_rx_queue_params[vif->vif_idx];
/* free the queue */
if (rx_queue->rxq_ops.f_free(rx_queue->q_queue_h)) {
RTE_LOG(ERR, VROUTER, " error freeing lcore %u eth device RX queue\n",
lcore_id);
}
/* reset the queue */
vrouter_put_interface(rx_queue->q_vif);
memset(rx_queue, 0, sizeof(*rx_queue));
memset(rx_queue_params, 0, sizeof(*rx_queue_params));
}
/* Init eth RX queue */
struct vr_dpdk_queue *
vr_dpdk_ethdev_rx_queue_init(unsigned lcore_id, struct vr_interface *vif,
unsigned queue_or_lcore_id)
{
uint16_t rx_queue_id = queue_or_lcore_id;
uint8_t port_id;
unsigned int vif_idx = vif->vif_idx;
const unsigned int socket_id = rte_lcore_to_socket_id(lcore_id);
struct vr_dpdk_ethdev *ethdev;
struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif_idx];
struct vr_dpdk_queue_params *rx_queue_params
= &lcore->lcore_rx_queue_params[vif_idx];
ethdev = (struct vr_dpdk_ethdev *)vif->vif_os;
port_id = ethdev->ethdev_port_id;
/* init queue */
rx_queue->rxq_ops = rte_port_ethdev_reader_ops;
rx_queue->q_queue_h = NULL;
rx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);
/* create the queue */
struct rte_port_ethdev_reader_params reader_params = {
.port_id = port_id,
.queue_id = rx_queue_id,
};
rx_queue->q_queue_h = rx_queue->rxq_ops.f_create(&reader_params, socket_id);
if (rx_queue->q_queue_h == NULL) {
RTE_LOG(ERR, VROUTER, " error creating eth device %" PRIu8
" RX queue %" PRIu16 "\n", port_id, rx_queue_id);
return NULL;
}
/* store queue params */
rx_queue_params->qp_release_op = &dpdk_ethdev_rx_queue_release;
rx_queue_params->qp_ethdev.queue_id = rx_queue_id;
rx_queue_params->qp_ethdev.port_id = port_id;
return rx_queue;
}
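/*
 * The reader created above is polled through the stored ops table rather
 * than via rte_eth_rx_burst() directly. A minimal polling sketch (the real
 * forwarding loop lives outside this file):
 *
 *     struct rte_mbuf *mbufs[VR_DPDK_RX_BURST_SZ];
 *     int nb_rx = rx_queue->rxq_ops.f_rx(rx_queue->q_queue_h, mbufs,
 *             VR_DPDK_RX_BURST_SZ);
 */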
/* Release ethdev TX queue */
static void
dpdk_ethdev_tx_queue_release(unsigned lcore_id, unsigned queue_index,
struct vr_interface *vif)
{
int i;
struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
struct vr_dpdk_queue *tx_queue;
struct vr_dpdk_queue_params *tx_queue_params;
tx_queue = &lcore->lcore_tx_queues[vif->vif_idx][queue_index];
tx_queue_params =
&lcore->lcore_tx_queue_params[vif->vif_idx][queue_index];
/* remove queue params from the list of bonds to TX */
for (i = 0; i < lcore->lcore_nb_bonds_to_tx; i++) {
if (likely(lcore->lcore_bonds_to_tx[i] == tx_queue_params)) {
lcore->lcore_bonds_to_tx[i] = NULL;
lcore->lcore_nb_bonds_to_tx--;
RTE_VERIFY(lcore->lcore_nb_bonds_to_tx <= VR_DPDK_MAX_BONDS);
/* copy the last element to the empty spot */
lcore->lcore_bonds_to_tx[i] = lcore->lcore_bonds_to_tx[lcore->lcore_nb_bonds_to_tx];
break;
}
}
tx_queue->txq_ops.f_tx = NULL;
rte_wmb();
/* flush and free the queue */
if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
RTE_LOG(ERR, VROUTER, " error freeing lcore %u eth device TX queue\n",
lcore_id);
}
/* reset the queue */
vrouter_put_interface(tx_queue->q_vif);
memset(tx_queue, 0, sizeof(*tx_queue));
memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}
/* Init eth TX queue */
struct vr_dpdk_queue *
vr_dpdk_ethdev_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
unsigned queue_or_lcore_id)
{
uint8_t port_id;
uint16_t tx_queue_id = queue_or_lcore_id;
unsigned int vif_idx = vif->vif_idx, dpdk_queue_index;
const unsigned int socket_id = rte_lcore_to_socket_id(lcore_id);
struct vr_dpdk_ethdev *ethdev;
struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
struct vr_dpdk_queue *tx_queue;
struct vr_dpdk_queue_params *tx_queue_params;
ethdev = (struct vr_dpdk_ethdev *)vif->vif_os;
port_id = ethdev->ethdev_port_id;
if (lcore->lcore_hw_queue_to_dpdk_index[vif->vif_idx]) {
dpdk_queue_index =
lcore->lcore_hw_queue_to_dpdk_index[vif->vif_idx][tx_queue_id];
} else {
dpdk_queue_index = 0;
}
tx_queue = &lcore->lcore_tx_queues[vif_idx][dpdk_queue_index];
tx_queue_params = &lcore->lcore_tx_queue_params[vif_idx][dpdk_queue_index];
/* init queue */
tx_queue->txq_ops = rte_port_ethdev_writer_ops;
tx_queue->q_queue_h = NULL;
tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);
/* create the queue */
struct rte_port_ethdev_writer_params writer_params = {
.port_id = port_id,
.queue_id = tx_queue_id,
.tx_burst_sz = VR_DPDK_TX_BURST_SZ,
};
tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
if (tx_queue->q_queue_h == NULL) {
RTE_LOG(ERR, VROUTER, " error creating eth device %" PRIu8
" TX queue %" PRIu16 "\n", port_id, tx_queue_id);
return NULL;
}
/* store queue params */
tx_queue_params->qp_release_op = &dpdk_ethdev_tx_queue_release;
tx_queue_params->qp_ethdev.queue_id = tx_queue_id;
tx_queue_params->qp_ethdev.port_id = port_id;
/* for the queue 0 add queue params to the list of bonds to TX */
if (ethdev->ethdev_nb_slaves > 0 && tx_queue_id == 0) {
/* make sure queue params have been stored */
rte_wmb();
lcore->lcore_bonds_to_tx[lcore->lcore_nb_bonds_to_tx++] = tx_queue_params;
RTE_VERIFY(lcore->lcore_nb_bonds_to_tx <= VR_DPDK_MAX_BONDS);
}
return tx_queue;
}
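/*
 * Likewise, the writer created above buffers packets and transmits them in
 * bursts of tx_burst_sz. A minimal TX sketch using the ops table (f_flush
 * pushes out any partially filled burst):
 *
 *     tx_queue->txq_ops.f_tx(tx_queue->q_queue_h, mbuf);
 *     tx_queue->txq_ops.f_flush(tx_queue->q_queue_h);
 */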
/*
 * vr_max_tx_queues_adjust - bond devices always return dev_info indicating
 * that 512 TX queues are supported, and vrouter clips that down to a maximum
 * of VR_DPDK_MAX_NB_TX_QUEUES. However, if any of the bond slaves does not
 * support that many TX queues, the number needs to be reduced further to a
 * value the NIC can support. Also, bnxt reports max_tx_queues of 170 but
 * fails to configure more than 8 TX queues.
 */
static void
vr_max_tx_queues_adjust(struct vr_dpdk_ethdev *ethdev, uint16_t *nb_tx_q)
{
struct rte_eth_dev_info dev_info;
int i;
VR_DPDK_RTE_ETH_FOREACH_DEV(i)
{
rte_eth_dev_info_get(i, &dev_info);
if (dev_info.driver_name) {
if (strncmp(dev_info.driver_name, "net_bnxt",
strlen("net_bnxt") + 1) == 0) {
if (*nb_tx_q > VR_DPDK_MAX_NB_TX_Q_BNXT) {
RTE_LOG(INFO, VROUTER, "TX queues changed from %d to %d due to port %d\n",
*nb_tx_q, VR_DPDK_MAX_NB_TX_Q_BNXT, i);
*nb_tx_q = VR_DPDK_MAX_NB_TX_Q_BNXT;
}
}
}
}
return;
}
/* Update device info */
static void
dpdk_ethdev_info_update(struct vr_dpdk_ethdev *ethdev)
{
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(ethdev->ethdev_port_id, &dev_info);
ethdev->ethdev_nb_rx_queues = RTE_MIN(dev_info.max_rx_queues,
VR_DPDK_MAX_NB_RX_QUEUES);
if (dev_info.max_tx_queues > VR_DPDK_MAX_NB_TX_QUEUES)
dev_info.max_tx_queues = VR_DPDK_MAX_NB_TX_QUEUES;
/*
* If a device advertises max_tx_queues higher than it
* can actually support, reduce the value to a number
* that it can support.
*/
vr_max_tx_queues_adjust(ethdev, &dev_info.max_tx_queues);
ethdev->ethdev_nb_tx_queues = dev_info.max_tx_queues;
/* Check if we have dedicated an lcore for SR-IOV VF IO. */
if (vr_dpdk.vf_lcore_id) {
ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_tx_queues = 1;
}
ethdev->ethdev_nb_rss_queues = RTE_MIN(RTE_MIN(ethdev->ethdev_nb_rx_queues,
vr_dpdk.nb_fwd_lcores), VR_DPDK_MAX_NB_RSS_QUEUES);
ethdev->ethdev_reta_size = RTE_MIN(dev_info.reta_size,
VR_DPDK_MAX_RETA_SIZE);
/*
* If the NIC driver sets reta_size to a value that is not a power of
* 2, align it as DPDK expects it to be a power of 2.
*/
ethdev->ethdev_reta_size = RTE_ALIGN(ethdev->ethdev_reta_size, RTE_RETA_GROUP_SIZE);
RTE_LOG_DP(DEBUG, VROUTER, "dev_info: driver_name=%s if_index=%u"
" max_rx_queues=%" PRIu16 " max_tx_queues=%" PRIu16
" max_vfs=%" PRIu16 " max_vmdq_pools=%" PRIu16
#if (RTE_VERSION >= RTE_VERSION_NUM(17, 11, 0, 0))
" rx_offload_capa=%" PRIx64 " tx_offload_capa=%" PRIx64 "\n",
#else
" rx_offload_capa=%" PRIx32 " tx_offload_capa=%" PRIx32 "\n",
#endif
dev_info.driver_name, dev_info.if_index,
dev_info.max_rx_queues, dev_info.max_tx_queues,
dev_info.max_vfs, dev_info.max_vmdq_pools,
dev_info.rx_offload_capa, dev_info.tx_offload_capa);
#if !VR_DPDK_USE_HW_FILTERING
/* use RSS queues only */
ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#else
/* we use just RSS queues if the device does not support RETA */
if (ethdev->ethdev_reta_size == 0)
ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#endif
RTE_LOG(INFO, VROUTER, "Using %d TX queues, %d RX queues\n",
ethdev->ethdev_nb_tx_queues,
ethdev->ethdev_nb_rx_queues);
return;
}
/* Setup ethdev hardware queues */
static int
dpdk_ethdev_queues_setup(struct vr_dpdk_ethdev *ethdev)
{
int ret, i;
uint8_t port_id = ethdev->ethdev_port_id;
struct rte_mempool *mempool;
/* configure RX queues */
RTE_LOG_DP(DEBUG, VROUTER, "%s: nb_rx_queues=%u nb_tx_queues=%u\n",
__func__, (unsigned)ethdev->ethdev_nb_rx_queues,
(unsigned)ethdev->ethdev_nb_tx_queues);
for (i = 0; i < VR_DPDK_MAX_NB_RX_QUEUES; i++) {
if (i < ethdev->ethdev_nb_rss_queues) {
mempool = vr_dpdk.rss_mempool;
ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_RSS_STATE;
} else if (i < ethdev->ethdev_nb_rx_queues) {
if (vr_dpdk.nb_free_mempools == 0) {
RTE_LOG(ERR, VROUTER, " error assigning mempool to eth device %"
PRIu8 " RX queue %d\n", port_id, i);
return -ENOMEM;
}
vr_dpdk.nb_free_mempools--;
mempool = vr_dpdk.free_mempools[vr_dpdk.nb_free_mempools];
ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_READY_STATE;
} else {
ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_NONE;
continue;
}
ret = rte_eth_rx_queue_setup(port_id, i, vr_rxd_sz,
SOCKET_ID_ANY, &rx_queue_conf, mempool);
if (ret < 0) {
/* return mempool to the list */
if (mempool != vr_dpdk.rss_mempool)
vr_dpdk.nb_free_mempools++;
RTE_LOG(ERR, VROUTER, " error setting up eth device %" PRIu8 " RX queue %d"
": %s (%d)\n", port_id, i, rte_strerror(-ret), -ret);
return ret;
}
/* map RX queue to stats counter ignoring any errors */
rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
/* save queue mempool pointer */
ethdev->ethdev_mempools[i] = mempool;
}
i = ethdev->ethdev_nb_rx_queues - ethdev->ethdev_nb_rss_queues;
RTE_LOG(INFO, VROUTER, " setup %d RSS queue(s) and %d filtering queue(s)\n",
(int)ethdev->ethdev_nb_rss_queues, i);
/* configure TX queues */
for (i = 0; i < ethdev->ethdev_nb_tx_queues; i++) {
ret = rte_eth_tx_queue_setup(port_id, i, vr_txd_sz,
SOCKET_ID_ANY, &tx_queue_conf);
if (ret < 0) {
RTE_LOG(ERR, VROUTER, " error setting up eth device %" PRIu8 " TX queue %d"
": %s (%d)\n", port_id, i, rte_strerror(-ret), -ret);
return ret;
}
/* map TX queue to stats counter ignoring any errors */
rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
}
return 0;
}
static void
dpdk_ethdev_reta_show(uint8_t port_id, uint16_t reta_size)
{
int nb_entries = reta_size/RTE_RETA_GROUP_SIZE;
struct rte_eth_rss_reta_entry64 reta_entries[nb_entries];
struct rte_eth_rss_reta_entry64 *reta;
uint16_t i, idx, shift;
int ret, entry;
for (entry = 0; entry < nb_entries; entry++) {
reta = &reta_entries[entry];
/* reset RSS redirection table */
memset(reta, 0, sizeof(*reta));
reta->mask = 0xffffffffffffffffULL;
}
ret = rte_eth_dev_rss_reta_query(port_id, reta_entries, reta_size);
    if (ret != 0) {
        RTE_LOG(ERR, VROUTER, "Error getting RSS RETA info: %s (%d)\n",
            rte_strerror(-ret), -ret);
return;
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (!(reta_entries[idx].mask & (1ULL << shift)))
continue;
RTE_LOG_DP(DEBUG, VROUTER, " hash index=%u, queue=%u\n",
i, reta_entries[idx].reta[shift]);
}
}
/* Init RSS */
int
vr_dpdk_ethdev_rss_init(struct vr_dpdk_ethdev *ethdev)
{
int ret, i, j, entry;
uint8_t port_id = ethdev->ethdev_port_id;
int nb_entries = ethdev->ethdev_reta_size/RTE_RETA_GROUP_SIZE;
struct rte_eth_rss_reta_entry64 reta_entries[VR_DPDK_MAX_RETA_ENTRIES];
struct rte_eth_rss_reta_entry64 *reta;
    /* There is nothing to configure if the device does not support RETA.
     * If the device reported multiple RX queues earlier, we assume those
     * queues are preconfigured for RSS by default.
     */
if (ethdev->ethdev_reta_size == 0)
return 0;
RTE_LOG_DP(DEBUG, VROUTER, "%s: RSS RETA BEFORE:\n", __func__);
dpdk_ethdev_reta_show(port_id, ethdev->ethdev_reta_size);
for (entry = 0; entry < nb_entries; entry++) {
reta = &reta_entries[entry];
/* create new RSS redirection table */
memset(reta, 0, sizeof(*reta));
reta->mask = 0xffffffffffffffffULL;
for (i = j = 0; i < RTE_RETA_GROUP_SIZE; i++) {
reta->reta[i] = j++;
if (ethdev->ethdev_queue_states[j] != VR_DPDK_QUEUE_RSS_STATE)
j = 0;
}
}
/* update RSS redirection table */
ret = rte_eth_dev_rss_reta_update(port_id, reta_entries,
ethdev->ethdev_reta_size);
/* no error if the device does not support RETA configuration */
if (ret == -ENOTSUP)
return 0;
if (ret < 0) {
RTE_LOG(ERR, VROUTER, " error initializing ethdev %" PRIu8 " RSS: %s (%d)\n",
port_id, rte_strerror(-ret), -ret);
}
RTE_LOG_DP(DEBUG, VROUTER, "%s: RSS RETA AFTER:\n", __func__);
dpdk_ethdev_reta_show(port_id, ethdev->ethdev_reta_size);
return ret;
}
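/*
 * Worked example of the RETA built above: with 4 RSS queues and an
 * ethdev_reta_size of 128, each 64-entry group is filled with the repeating
 * pattern 0, 1, 2, 3, 0, 1, 2, 3, ... so the NIC spreads flows evenly
 * across the RSS queues.
 */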
/* Return filtering queue mempools to the list of free mempools */
static void
dpdk_ethdev_mempools_free(struct vr_dpdk_ethdev *ethdev)
{
int i;
for (i = ethdev->ethdev_nb_rss_queues; i < ethdev->ethdev_nb_rx_queues; i++) {
if (ethdev->ethdev_mempools[i] != NULL
&& ethdev->ethdev_mempools[i] != vr_dpdk.rss_mempool) {
vr_dpdk.free_mempools[vr_dpdk.nb_free_mempools++] =
ethdev->ethdev_mempools[i];
ethdev->ethdev_mempools[i] = NULL;
ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_READY_STATE;
}
}
}
#if VR_DPDK_USE_HW_FILTERING
/* Init hardware filtering */
int
vr_dpdk_ethdev_filtering_init(struct vr_interface *vif,
struct vr_dpdk_ethdev *ethdev)
{
int ret;
uint8_t port_id = ethdev->ethdev_port_id;
struct rte_fdir_masks masks;
struct rte_eth_fdir fdir_info;
/* probe Flow Director */
memset(&fdir_info, 0, sizeof(fdir_info));
ret = rte_eth_dev_fdir_get_infos(port_id, &fdir_info);
if (ret == 0) {
/* enable hardware filtering */
RTE_LOG(INFO, VROUTER, " enable hardware filtering for ethdev %"
PRIu8 "\n", port_id);
vif->vif_flags |= VIF_FLAG_FILTERING_OFFLOAD;
} else {
vif->vif_flags &= ~VIF_FLAG_FILTERING_OFFLOAD;
/* free filtering mempools */
dpdk_ethdev_mempools_free(ethdev);
/* the ethdev does not support hardware filtering - it's not an error */
return 0;
}
memset(&masks, 0, sizeof(masks));
masks.dst_ipv4_mask = 0xffffffff;
masks.dst_port_mask = 0xffff;
masks.flexbytes = 1;
ret = rte_eth_dev_fdir_set_masks(port_id, &masks);
if (ret < 0) {
RTE_LOG(ERR, VROUTER, " error setting ethdev %" PRIu8
" Flow Director masks: %s (%d)\n", port_id, rte_strerror(-ret), -ret);
}
return ret;
}
#endif
/* Update device bond info */
static void
dpdk_ethdev_bond_info_update(struct vr_dpdk_ethdev *ethdev)
{
int i, slave_port_id;
int port_id = ethdev->ethdev_port_id;
uint16_t mtu = 0;
struct rte_pci_addr *pci_addr;
struct ether_addr bond_mac, mac_addr;
struct ether_addr lacp_mac = { .addr_bytes = {0x01, 0x80, 0xc2, 0, 0, 0x02} };
if (rte_eth_bond_mode_get(port_id) == -1) {
ethdev->ethdev_nb_slaves = -1;
} else {
ethdev->ethdev_nb_slaves = rte_eth_bond_slaves_get(port_id,
ethdev->ethdev_slaves, VR_DPDK_BOND_MAX_SLAVES);
        memset(&bond_mac, 0, sizeof(bond_mac));
rte_eth_macaddr_get(port_id, &bond_mac);
RTE_LOG(INFO, VROUTER, " bond eth device %" PRIu8
" configured MAC " MAC_FORMAT "\n",
port_id, MAC_VALUE(bond_mac.addr_bytes));
        /* log and configure bond members */
for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
slave_port_id = ethdev->ethdev_slaves[i];
if (!rte_eth_devices[port_id].data->mtu) {
rte_eth_dev_get_mtu(slave_port_id, &mtu);
rte_eth_devices[port_id].data->mtu = mtu;
}
memset(&mac_addr, 0, sizeof(mac_addr));
rte_eth_macaddr_get(slave_port_id, &mac_addr);
#if (RTE_VERSION >= RTE_VERSION_NUM(17, 2, 0, 0))
pci_addr = &(RTE_DEV_TO_PCI(rte_eth_devices[slave_port_id].device)->addr);
#else
pci_addr = &rte_eth_devices[slave_port_id].pci_dev->addr;
#endif
RTE_LOG(INFO, VROUTER, " bond member eth device %" PRIu8
" PCI " PCI_PRI_FMT " MAC " MAC_FORMAT "\n",
slave_port_id, pci_addr->domain, pci_addr->bus,
pci_addr->devid, pci_addr->function,
MAC_VALUE(mac_addr.addr_bytes));
/* try to add bond mac and LACP multicast MACs */
if (rte_eth_dev_mac_addr_add(slave_port_id, &bond_mac, 0) == 0
&& rte_eth_dev_set_mc_addr_list(slave_port_id, &lacp_mac, 1) == 0) {
/* disable the promisc mode enabled by default */
                rte_eth_promiscuous_disable(slave_port_id);
RTE_LOG(INFO, VROUTER, " bond member eth device %" PRIu8
" promisc mode disabled\n", slave_port_id);
} else {
RTE_LOG(INFO, VROUTER, " bond member eth device %" PRIu8
": unable to add MAC addresses\n", slave_port_id);
}
}
}
}
/* Check if port_id is a bond slave. */
bool
vr_dpdk_ethdev_bond_port_match(uint8_t port_id, struct vr_dpdk_ethdev *ethdev)
{
int i;
if (ethdev->ethdev_nb_slaves > 0) {
for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
if (port_id == ethdev->ethdev_slaves[i])
break;
}
if (i < ethdev->ethdev_nb_slaves)
return true;
}
return false;
}
/* Init ethernet device */
int
vr_dpdk_ethdev_init(struct vr_dpdk_ethdev *ethdev, struct rte_eth_conf *dev_conf)
{
uint8_t port_id;
int ret;
port_id = ethdev->ethdev_port_id;
ethdev->ethdev_ptr = &rte_eth_devices[port_id];
dpdk_ethdev_info_update(ethdev);
ret = rte_eth_dev_configure(port_id, ethdev->ethdev_nb_rx_queues,
ethdev->ethdev_nb_tx_queues, dev_conf);
if (ret < 0) {
RTE_LOG(ERR, VROUTER, " error configuring eth dev %" PRIu8
": %s (%d)\n",
port_id, rte_strerror(-ret), -ret);
return ret;
}
#if (RTE_VERSION < RTE_VERSION_NUM(17, 2, 0, 0))
/* update device bond information after the device has been configured */
if (ethdev->ethdev_ptr->driver) { /* af_packet has no driver and no bond info */
#else
if (dpdk_find_port_id_by_drv_name() != VR_DPDK_INVALID_PORT_ID) {
#endif
dpdk_ethdev_bond_info_update(ethdev);
}
ret = dpdk_ethdev_queues_setup(ethdev);
if (ret < 0)
return ret;
    /* Promiscuous mode:
     * KNI generates random MACs for e1000e NICs, so we need this
     * option enabled for development on servers with those NICs.
     */
#if VR_DPDK_ENABLE_PROMISC
rte_eth_promiscuous_enable(port_id);
#endif
return 0;
}
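/*
 * Hypothetical bring-up sketch (an assumption; the real call site lives
 * outside this file):
 *
 *     struct rte_eth_conf conf = ethdev_conf;
 *     if (vr_dpdk_ethdev_init(ethdev, &conf) == 0 &&
 *             vr_dpdk_ethdev_rss_init(ethdev) == 0)
 *         rte_eth_dev_start(ethdev->ethdev_port_id);
 */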
/* Release ethernet device */
int
vr_dpdk_ethdev_release(struct vr_dpdk_ethdev *ethdev)
{
ethdev->ethdev_ptr = NULL;
dpdk_ethdev_mempools_free(ethdev);
return 0;
}
/*
* dpdk_mbuf_rss_hash - emulate RSS hash for the mbuf.
*
* Returns:
* 0 if hash was not calculated
* 1 if hash was calculated.
*/
static inline int
dpdk_mbuf_rss_hash(struct rte_mbuf *mbuf, struct vr_ip *ipv4_hdr,
struct vr_ip6 *ipv6_hdr)
{
uint64_t *ip_addr_ptr;
uint32_t *l4_ptr = NULL;
uint32_t hash = 0;
unsigned char ip_proto, i;
if (likely(ipv4_hdr != NULL)) {
        /**
         * Both source and destination IPv4 addresses are 4 bytes long,
         * so they can be hashed at once with a single 8-byte hashing function.
         *
         * We use the SSE4.2 CRC hash. No need to match the NIC's Toeplitz
         * hash ATM.
         */
ip_addr_ptr = (uint64_t *)((uintptr_t)ipv4_hdr +
offsetof(struct vr_ip, ip_saddr));
hash = rte_hash_crc_8byte(*ip_addr_ptr, hash);
if (likely(!vr_ip_fragment(ipv4_hdr))) {
ip_proto = ipv4_hdr->ip_proto;
l4_ptr = (uint32_t *)((uintptr_t)ipv4_hdr + (ipv4_hdr->ip_hl) * IPV4_IHL_MULTIPLIER);
} else {
ip_proto = 0;
}
} else if (ipv6_hdr != NULL) {
        /**
         * Both source and destination IPv6 addresses are 16 bytes long,
         * and the DPDK hashing functions can process 8 bytes at once, so the
         * hashing has to be done in four steps: each address is hashed from
         * its beginning to its middle, and from its middle to its end. In the
         * header the source address comes first, followed by the destination
         * address. This lets us set the pointer to the beginning of the
         * source address and advance it by 64 bits after each hash step.
         */
for (i = 0; i < 4; i++) {
ip_addr_ptr = (uint64_t *)((uintptr_t)ipv6_hdr +
offsetof(struct ipv6_hdr, src_addr) + 8*i);
hash = rte_hash_crc_8byte(*ip_addr_ptr, hash);
}
ip_proto = ipv6_hdr->ip6_nxt;
        /* If an extension header is present, L4 is not hashed. */
l4_ptr = (uint32_t *)((uintptr_t)ipv6_hdr + sizeof(struct ipv6_hdr));
} else {
return 0;
}
switch (ip_proto) {
    case VR_IP_PROTO_TCP:
    case VR_IP_PROTO_UDP:
        hash = rte_hash_crc_4byte(*l4_ptr, hash);
        break;
case VR_IP_PROTO_GRE:
if (likely(l4_ptr != NULL)) {
            struct vr_gre_key *gre_hdr = (struct vr_gre_key *)l4_ptr;
            if (likely(gre_hdr->gre_comm_hdr.gre_flags & VR_GRE_FLAG_KEY)) {
                hash = rte_hash_crc_4byte(gre_hdr->gre_key, hash);
}
}
break;
}
mbuf->ol_flags |= PKT_RX_RSS_HASH;
mbuf->hash.rss = hash;
RTE_LOG_DP(DEBUG, VROUTER, "%s: RSS hash: 0x%x (emulated)\n",
__func__, mbuf->hash.rss);
return 1;
}
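/*
 * In short, the hash emulated above is, as a sketch:
 *
 *     hash = crc32(ip_saddr:ip_daddr, 0)    (8 bytes; 4 x 8 bytes for IPv6)
 *     hash = crc32(sport:dport, hash)       (or the GRE key, if present)
 *
 * It does not need to match the Toeplitz hash a NIC would compute; it only
 * has to be consistent for packets of the same flow.
 */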
/* dpdk_mbuf_parse_and_hash_packets
 *
 * Parse an incoming packet: check the L2 and L3 headers and the encapsulation
 * type, perform a TCP MSS adjust if needed, then call the hashing function.
 *
 * Return:
 *  -1 if the packet length is too short to contain a valid header
 *   0 if there is no need to perform hashing (i.e. unsupported encap type,
 *     or the packet is already hashed)
 *   the result of dpdk_mbuf_rss_hash() if hashing is needed: 1 if the hash
 *   was calculated, 0 if not
 *
 * TODO: if we ever need to set L4 lengths or packet type flags, or other info
 * about received packets, this is a good place to do it.
 */
static int
dpdk_mbuf_parse_and_hash_packets(struct rte_mbuf *mbuf)
{
struct vr_eth *eth_hdr = rte_pktmbuf_mtod(mbuf, struct vr_eth *);
struct vr_ip *ipv4_hdr = NULL;
struct vr_ip *ipv4_inner_hdr = NULL;
struct vr_ip6 *ipv6_hdr = NULL;
struct vr_ip6 *ipv6_inner_hdr = NULL;
struct vr_udp *udp_hdr = NULL;
struct vr_gre *gre_hdr = NULL;
struct vlan_hdr *vlan_hdr;
unsigned int pull_len = VR_ETHER_HLEN, ipv4_len;
int encap_type, helper_ret;
unsigned short gre_udp_encap = 0, gre_hdr_len = VR_GRE_BASIC_HDR_LEN,
eth_proto, udp_port;
uint16_t mbuf_data_len = rte_pktmbuf_data_len(mbuf);
if (unlikely(mbuf_data_len < pull_len))
return -1;
eth_proto = eth_hdr->eth_proto;
    /* Skip the VLAN tag. It may be present if we handle a tagged packet
     * from a VM. */
while (eth_proto == rte_cpu_to_be_16(VR_ETH_PROTO_VLAN)) {
if (unlikely(mbuf_data_len < pull_len + VR_VLAN_HLEN))
return -1;
/* Store the first VLAN TCI for further use. */
if (likely((mbuf->ol_flags & PKT_RX_VLAN) == 0)) {
vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = rte_be_to_cpu_16(vlan_hdr->vlan_tci);
}
eth_proto = ((struct vr_vlan_hdr *)((uintptr_t)eth_hdr + pull_len))->vlan_proto;
pull_len += VR_VLAN_HLEN;
}
if (likely(eth_proto == rte_cpu_to_be_16(VR_ETH_PROTO_IP))) {
ipv4_hdr = (struct vr_ip *)((uintptr_t)eth_hdr + pull_len);
if (unlikely(mbuf_data_len < pull_len + sizeof(struct vr_ip)))
return -1;
ipv4_len = (ipv4_hdr->ip_hl) * IPV4_IHL_MULTIPLIER;
pull_len += ipv4_len;
if (ipv4_hdr->ip_proto == VR_IP_PROTO_GRE) {
gre_hdr = (struct vr_gre *)((uintptr_t)ipv4_hdr + ipv4_len);
if (unlikely(mbuf_data_len < pull_len + VR_GRE_BASIC_HDR_LEN))
return -1;
if (likely(gre_hdr->gre_proto == VR_GRE_PROTO_MPLS_NO)) {
                /* We are not an RFC 1701 compliant receiver. */
if (unlikely(gre_hdr->gre_flags & (~(VR_GRE_FLAG_CSUM |
VR_GRE_FLAG_KEY))))
return 0;
if (gre_hdr->gre_flags & VR_GRE_FLAG_CSUM) {
gre_hdr_len += (VR_GRE_CKSUM_HDR_LEN -
VR_GRE_BASIC_HDR_LEN);
}
if (gre_hdr->gre_flags & VR_GRE_FLAG_KEY) {
gre_hdr_len += (VR_GRE_KEY_HDR_LEN -
VR_GRE_BASIC_HDR_LEN);
}
pull_len += gre_hdr_len;
gre_udp_encap = gre_hdr->gre_proto;
/*
* mbuf->ol_flags & PKT_RX_RSS_HASH is mistakenly set
* by the NIC driver for MPLS over GRE packets. It is
* removed here and will be set after we perform hashing.
*/
mbuf->ol_flags &= ~PKT_RX_RSS_HASH;
/* Go to parsing. */
} else {
return dpdk_mbuf_rss_hash(mbuf, ipv4_hdr, NULL); /* Looks like GRE, but no MPLS. */
}
} else if (ipv4_hdr->ip_proto == VR_IP_PROTO_UDP) {
/* At this point the packet may be:
* IP with inner packet carried in MPLS-over-UDP, or
* IP with inner packet carried in VXLAN, or
* just regular UDP inside IP.
*/
udp_hdr = (struct vr_udp *)((uintptr_t)ipv4_hdr + ipv4_len);
if (likely(vr_ip_transport_header_valid(ipv4_hdr))) {
if (unlikely(mbuf_data_len < pull_len + sizeof(struct vr_udp)))
return -1;
}