netdev-dpdk.c
/*
* Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <config.h>
#include "netdev-dpdk.h"
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <rte_bus_pci.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_eth_ring.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
#include <rte_pci.h>
#include <rte_vhost.h>
#include <rte_version.h>
#include "dirs.h"
#include "dp-packet.h"
#include "dpdk.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/shash.h"
#include "smap.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
#define DPDK_PORT_WATCHDOG_INTERVAL 5
#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
* need to reserve tons of extra space in the mbufs so we can align the
* DMA addresses to 4KB.
* The minimum mbuf size is limited to avoid scatter behaviour and drop in
* performance for standard Ethernet MTU.
*/
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN \
+ (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) \
- ETHER_HDR_LEN - ETHER_CRC_LEN)
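/* For example, with ETHER_HDR_LEN 14, ETHER_CRC_LEN 4 and VLAN_HEADER_LEN 4,
* a standard 1500 byte MTU gives MTU_TO_FRAME_LEN(1500) = 1518 bytes and
* MTU_TO_MAX_FRAME_LEN(1500) = 1526 bytes. */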
#define MBUF_SIZE(mtu) ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) \
+ sizeof(struct dp_packet) \
+ RTE_PKTMBUF_HEADROOM), \
RTE_CACHE_LINE_SIZE)
#define NETDEV_DPDK_MBUF_ALIGN 1024
#define NETDEV_DPDK_MAX_PKT_LEN 9728
/* Min number of packets in the mempool. OVS tries to allocate a mempool with
* roughly estimated number of mbufs: if this fails (because the system doesn't
* have enough hugepages) we keep halving the number until the allocation
* succeeds or we reach MIN_NB_MBUF */
#define MIN_NB_MBUF (4096 * 4)
#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
/*
* DPDK XSTATS Counter names definition
*/
#define XSTAT_RX_64_PACKETS "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS "rx_size_1523_to_max_packets"
#define XSTAT_TX_64_PACKETS "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS "tx_size_1523_to_max_packets"
#define XSTAT_RX_MULTICAST_PACKETS "rx_multicast_packets"
#define XSTAT_TX_MULTICAST_PACKETS "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS "rx_jabber_errors"
#define SOCKET0 0
/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096
#define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not
* yet mapped to another queue. */
#define DPDK_ETH_PORT_ID_INVALID RTE_MAX_ETHPORTS
/* DPDK library uses uint16_t for port_id. */
typedef uint16_t dpdk_port_t;
#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
.header_split = 0, /* Header Split disabled */
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
.hw_strip_crc = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
},
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
};
/*
* These callbacks allow virtio-net devices to be added to vhost ports when
* configuration has been fully completed.
*/
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
.vring_state_changed = vring_state_changed,
.features_changed = NULL
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
enum dpdk_dev_type {
DPDK_DEV_ETH = 0,
DPDK_DEV_VHOST = 1,
};
/* Quality of Service */
/* An instance of a QoS configuration. Always associated with a particular
* network device.
*
* Each QoS implementation subclasses this with whatever additional data it
* needs.
*/
struct qos_conf {
const struct dpdk_qos_ops *ops;
rte_spinlock_t lock;
};
/* A particular implementation of dpdk QoS operations.
*
* The functions below return 0 if successful or a positive errno value on
* failure, except where otherwise noted. All of them must be provided, except
* where otherwise noted.
*/
struct dpdk_qos_ops {
/* Name of the QoS type */
const char *qos_name;
/* Called to construct a qos_conf object. The implementation should make
* the appropriate calls to configure QoS according to 'details'.
*
* The contents of 'details' should be documented as valid for 'ovs_name'
* in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
* (which is built as ovs-vswitchd.conf.db(8)).
*
* This function must return 0 if and only if it sets '*conf' to an
* initialized 'struct qos_conf'.
*
* For all QoS implementations it should always be non-null.
*/
int (*qos_construct)(const struct smap *details, struct qos_conf **conf);
/* Destroys the data structures allocated by the implementation as part of
* 'qos_conf'.
*
* For all QoS implementations it should always be non-null.
*/
void (*qos_destruct)(struct qos_conf *conf);
/* Retrieves details of 'conf' configuration into 'details'.
*
* The contents of 'details' should be documented as valid for 'ovs_name'
* in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
* (which is built as ovs-vswitchd.conf.db(8)).
*/
int (*qos_get)(const struct qos_conf *conf, struct smap *details);
/* Returns true if 'conf' is already configured according to 'details'.
*
* The contents of 'details' should be documented as valid for 'ovs_name'
* in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
* (which is built as ovs-vswitchd.conf.db(8)).
*
* For all QoS implementations it should always be non-null.
*/
bool (*qos_is_equal)(const struct qos_conf *conf,
const struct smap *details);
/* Modify an array of rte_mbufs. The modification is specific to
* each qos implementation.
*
* The function should take an array of mbufs and an int representing
* the current number of mbufs present in the array.
*
* After the function has performed a qos modification to the array of
* mbufs it returns an int representing the number of mbufs now present in
* the array. This value can then be passed to the port send function
* along with the modified array for transmission.
*
* For all QoS implementations it should always be non-null.
*/
int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
int pkt_cnt, bool may_steal);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;
/*
* Array of dpdk_qos_ops, contains pointer to all supported QoS
* operations.
*/
static const struct dpdk_qos_ops *const qos_confs[] = {
&egress_policer_ops,
NULL
};
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
= OVS_LIST_INITIALIZER(&dpdk_list);
static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
= OVS_MUTEX_INITIALIZER;
/* There should be one 'struct dpdk_tx_queue' created for
* each cpu core. */
struct dpdk_tx_queue {
rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
* from concurrent access. It is used only
* if the queue is shared among different
* pmd threads (see 'concurrent_txq'). */
int map; /* Mapping of configured vhost-user queues
* to the queues enabled by the guest. */
};
/* DPDK has no way to remove DPDK ring ethernet devices,
so we have to keep them around once they've been created.
*/
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
= OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
/* For the client rings */
struct rte_ring *cring_tx;
struct rte_ring *cring_rx;
unsigned int user_port_id; /* User given port no, parsed from port name */
dpdk_port_t eth_port_id; /* ethernet device port id */
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
struct rte_meter_srtcm_params app_srtcm_params;
struct rte_meter_srtcm in_policer;
rte_spinlock_t policer_lock;
};
enum dpdk_hw_ol_features {
NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
};
/*
* In order to avoid confusion in variable names, the following naming
* convention should be used, if possible:
*
* 'struct netdev' : 'netdev'
* 'struct netdev_dpdk' : 'dev'
* 'struct netdev_rxq' : 'rxq'
* 'struct netdev_rxq_dpdk' : 'rx'
*
* Example:
* struct netdev *netdev = netdev_from_name(name);
* struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
*
* Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was
* already defined.
*/
struct netdev_dpdk {
PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
dpdk_port_t port_id;
/* If true, device was attached by rte_eth_dev_attach(). */
bool attached;
struct eth_addr hwaddr;
int mtu;
int socket_id;
int buf_size;
int max_packet_len;
enum dpdk_dev_type type;
enum netdev_flags flags;
char *devargs; /* Device arguments for dpdk ports */
struct dpdk_tx_queue *tx_q;
struct rte_eth_link link;
int link_reset_cnt;
/* 4 pad bytes here. */
);
PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
struct rte_mempool *mp;
/* virtio identifier for vhost devices */
ovsrcu_index vid;
/* True if vHost device is 'up' and has been reconfigured at least once */
bool vhost_reconfigured;
/* 3 pad bytes here. */
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
/* Identifier used to distinguish vhost devices from each other. */
char vhost_id[PATH_MAX];
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
struct netdev up;
/* In dpdk_list. */
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
/* QoS configuration and lock for the device */
OVSRCU_TYPE(struct qos_conf *) qos_conf;
/* Ingress Policer */
OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
uint32_t policer_rate;
uint32_t policer_burst;
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
struct netdev_stats stats;
/* Protects stats */
rte_spinlock_t stats_lock;
/* 44 pad bytes here. */
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
/* The following properties cannot be changed when a device is running,
* so we remember the request and update them next time
* netdev_dpdk*_reconfigure() is called */
int requested_mtu;
int requested_n_txq;
int requested_n_rxq;
int requested_rxq_size;
int requested_txq_size;
/* Number of rx/tx descriptors for physical devices */
int rxq_size;
int txq_size;
/* Socket ID detected when vHost device is brought up */
int requested_socket_id;
/* Denotes whether vHost port is client/server mode */
uint64_t vhost_driver_flags;
/* DPDK-ETH Flow control */
struct rte_eth_fc_conf fc_conf;
/* DPDK-ETH hardware offload features,
* from the enum set 'dpdk_hw_ol_features' */
uint32_t hw_ol_features;
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
/* Names of all XSTATS counters */
struct rte_eth_xstat_name *rte_xstats_names;
int rte_xstats_names_size;
int rte_xstats_ids_size;
uint64_t *rte_xstats_ids;
);
};
struct netdev_rxq_dpdk {
struct netdev_rxq up;
dpdk_port_t port_id;
};
static void netdev_dpdk_destruct(struct netdev *netdev);
static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
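/* Returns true if 'class' is one of the DPDK netdev classes, judged by its
* destruct callback. */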
static bool
is_dpdk_class(const struct netdev_class *class)
{
return class->destruct == netdev_dpdk_destruct
|| class->destruct == netdev_dpdk_vhost_destruct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
* aligned at 1k or less. If a declared mbuf size is not a multiple of this
* value, insufficient buffers are allocated to accommodate the packet in its
* entirety. Furthermore, certain drivers need to ensure that there is also
* sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
* frames). If the RX buffer is too small, then the driver enables scatter RX
* behaviour, which reduces performance. To prevent this, use a buffer size
* that is closest to 'mtu', but which satisfies the aforementioned criteria.
*/
static uint32_t
dpdk_buf_size(int mtu)
{
return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
NETDEV_DPDK_MBUF_ALIGN);
}
/* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
*
* Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}
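/* Frees the DPDK mbuf that backs the dp_packet 'p'. */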
void
free_dpdk_buf(struct dp_packet *p)
{
struct rte_mbuf *pkt = (struct rte_mbuf *) p;
rte_pktmbuf_free(pkt);
}
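/* Mempool object iterator callback: initializes the OvS dp_packet part of
* each mbuf once rte_pktmbuf_pool_create() has initialized the DPDK part. */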
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
void *opaque_arg OVS_UNUSED,
void *_p,
unsigned i OVS_UNUSED)
{
struct rte_mbuf *pkt = _p;
dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}
/* Returns a valid pointer when either of the following is true:
* - a new mempool was just created;
* - a matching mempool already exists. */
static struct rte_mempool *
dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
{
char mp_name[RTE_MEMPOOL_NAMESIZE];
const char *netdev_name = netdev_get_name(&dev->up);
int socket_id = dev->requested_socket_id;
uint32_t n_mbufs;
uint32_t hash = hash_string(netdev_name, 0);
struct rte_mempool *mp = NULL;
/*
* XXX: rough estimation of number of mbufs required for this port:
* <packets required to fill the device rxqs>
* + <packets that could be stuck on other ports txqs>
* + <packets in the pmd threads>
* + <additional memory for corner cases>
*/
n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size
+ dev->requested_n_txq * dev->requested_txq_size
+ MIN(RTE_MAX_LCORE, dev->requested_n_rxq) * NETDEV_MAX_BURST
+ MIN_NB_MBUF;
ovs_mutex_lock(&dpdk_mp_mutex);
do {
/* Full DPDK memory pool name must be unique and cannot be
* longer than RTE_MEMPOOL_NAMESIZE. */
int ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"ovs%08x%02d%05d%07u",
hash, socket_id, mtu, n_mbufs);
if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) {
VLOG_DBG("snprintf returned %d. "
"Failed to generate a mempool name for \"%s\". "
"Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
ret, netdev_name, hash, socket_id, mtu, n_mbufs);
break;
}
VLOG_DBG("Port %s: Requesting a mempool of %u mbufs "
"on socket %d for %d Rx and %d Tx queues.",
netdev_name, n_mbufs, socket_id,
dev->requested_n_rxq, dev->requested_n_txq);
mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ,
sizeof (struct dp_packet) - sizeof (struct rte_mbuf),
MBUF_SIZE(mtu) - sizeof(struct dp_packet), socket_id);
if (mp) {
VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
mp_name, n_mbufs);
/* rte_pktmbuf_pool_create has done some initialization of the
* rte_mbuf part of each dp_packet. Some OvS specific fields
* of the packet still need to be initialized by
* ovs_rte_pktmbuf_init. */
rte_mempool_obj_iter(mp, ovs_rte_pktmbuf_init, NULL);
} else if (rte_errno == EEXIST) {
/* A mempool with the same name already exists. We just
* retrieve its pointer to be returned to the caller. */
mp = rte_mempool_lookup(mp_name);
/* As the mempool create returned EEXIST, we can expect that the
* lookup has returned a valid pointer. If for some reason
* that's not the case we keep track of it. */
VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
mp_name, mp);
} else {
VLOG_ERR("Failed mempool \"%s\" create request of %u mbufs",
mp_name, n_mbufs);
}
} while (!mp && rte_errno == ENOMEM && (n_mbufs /= 2) >= MIN_NB_MBUF);
ovs_mutex_unlock(&dpdk_mp_mutex);
return mp;
}
/* Release an existing mempool. */
static void
dpdk_mp_free(struct rte_mempool *mp)
{
if (!mp) {
return;
}
ovs_mutex_lock(&dpdk_mp_mutex);
VLOG_DBG("Releasing \"%s\" mempool", mp->name);
rte_mempool_free(mp);
ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate a new mempool - or re-use an existing one where
* appropriate - on requested_socket_id with a size determined by
* requested_mtu and requested Rx/Tx queues.
* On success - or when re-using an existing mempool - the new configuration
* will be applied.
* On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
OVS_REQUIRES(dev->mutex)
{
uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
struct rte_mempool *mp;
int ret = 0;
mp = dpdk_mp_create(dev, FRAME_LEN_TO_MTU(buf_size));
if (!mp) {
VLOG_ERR("Failed to create memory pool for netdev "
"%s, with MTU %d on socket %d: %s\n",
dev->up.name, dev->requested_mtu, dev->requested_socket_id,
rte_strerror(rte_errno));
ret = rte_errno;
} else {
/* If a new MTU was requested and its rounded value equals the one
* that is currently used, then the existing mempool is returned. */
if (dev->mp != mp) {
/* A new mempool was created, release the previous one. */
dpdk_mp_free(dev->mp);
} else {
ret = EEXIST;
}
dev->mp = mp;
dev->mtu = dev->requested_mtu;
dev->socket_id = dev->requested_socket_id;
dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
}
return ret;
}
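/* Checks the link status of 'dev' and, if it has changed, updates the cached
* link state and bumps the netdev's change sequence number. */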
static void
check_link_status(struct netdev_dpdk *dev)
{
struct rte_eth_link link;
rte_eth_link_get_nowait(dev->port_id, &link);
if (dev->link.link_status != link.link_status) {
netdev_change_seq_changed(&dev->up);
dev->link_reset_cnt++;
dev->link = link;
if (dev->link.link_status) {
VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Up - speed %u Mbps - %s",
dev->port_id, (unsigned) dev->link.link_speed,
(dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
} else {
VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Down", dev->port_id);
}
}
}
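/* Watchdog thread: periodically (every DPDK_PORT_WATCHDOG_INTERVAL seconds)
* checks the link status of all DPDK ethernet devices. */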
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
struct netdev_dpdk *dev;
pthread_detach(pthread_self());
for (;;) {
ovs_mutex_lock(&dpdk_mutex);
LIST_FOR_EACH (dev, list_node, &dpdk_list) {
ovs_mutex_lock(&dev->mutex);
if (dev->type == DPDK_DEV_ETH) {
check_link_status(dev);
}
ovs_mutex_unlock(&dev->mutex);
}
ovs_mutex_unlock(&dpdk_mutex);
xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
}
return NULL;
}
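/* Configures the ethernet device and sets up 'n_rxq' Rx and 'n_txq' Tx
* queues, retrying with fewer queues if the NIC cannot provide the requested
* number. Returns 0 on success or a negative DPDK error code on failure. */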
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
int diag = 0;
int i;
struct rte_eth_conf conf = port_conf;
/* For some NICs (e.g. Niantic), scatter_rx mode needs to be explicitly
* enabled. */
if (dev->mtu > ETHER_MTU) {
conf.rxmode.enable_scatter = 1;
}
conf.rxmode.hw_ip_checksum = (dev->hw_ol_features &
NETDEV_RX_CHECKSUM_OFFLOAD) != 0;
/* A device may report more queues than it makes available (this has
* been observed for Intel xl710, which reserves some of them for
* SRIOV): rte_eth_*_queue_setup will fail if a queue is not
* available. When this happens we can retry the configuration
* and request fewer queues. */
while (n_rxq && n_txq) {
if (diag) {
VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
}
diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
if (diag) {
VLOG_WARN("Interface %s eth_dev setup error %s\n",
dev->up.name, rte_strerror(-diag));
break;
}
diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
if (diag) {
VLOG_ERR("Interface %s MTU (%d) setup error: %s",
dev->up.name, dev->mtu, rte_strerror(-diag));
break;
}
for (i = 0; i < n_txq; i++) {
diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
dev->socket_id, NULL);
if (diag) {
VLOG_INFO("Interface %s txq(%d) setup error: %s",
dev->up.name, i, rte_strerror(-diag));
break;
}
}
if (i != n_txq) {
/* Retry with fewer Tx queues. */
n_txq = i;
continue;
}
for (i = 0; i < n_rxq; i++) {
diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
dev->socket_id, NULL, dev->mp);
if (diag) {
VLOG_INFO("Interface %s rxq(%d) setup error: %s",
dev->up.name, i, rte_strerror(-diag));
break;
}
}
if (i != n_rxq) {
/* Retry with fewer Rx queues. */
n_rxq = i;
continue;
}
dev->up.n_rxq = n_rxq;
dev->up.n_txq = n_txq;
return 0;
}
return diag;
}
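/* Applies the flow control configuration cached in 'dev->fc_conf' to the
* NIC, logging a warning on failure. */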
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
VLOG_WARN("Failed to enable flow control on device %"PRIu8,
dev->port_id);
}
}
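/* Initializes and starts a DPDK ethernet device: checks Rx checksum offload
* support, sets up the Rx/Tx queues, starts the port, enables promiscuous and
* all-multicast modes, and caches the MAC address, link state and flow
* control configuration. Returns 0 on success or a positive errno value on
* failure. */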
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
OVS_REQUIRES(dev->mutex)
{
struct rte_pktmbuf_pool_private *mbp_priv;
struct rte_eth_dev_info info;
struct ether_addr eth_addr;
int diag;
int n_rxq, n_txq;
uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_IPV4_CKSUM;
rte_eth_dev_info_get(dev->port_id, &info);
if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
rx_chksm_offload_capa) {
VLOG_WARN("Rx checksum offload is not supported on port %"PRIu8,
dev->port_id);
dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
} else {
dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
}
n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
if (diag) {
VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
return -diag;
}
diag = rte_eth_dev_start(dev->port_id);
if (diag) {
VLOG_ERR("Interface %s start error: %s", dev->up.name,
rte_strerror(-diag));
return -diag;
}
rte_eth_promiscuous_enable(dev->port_id);
rte_eth_allmulticast_enable(dev->port_id);
memset(&eth_addr, 0x0, sizeof(eth_addr));
rte_eth_macaddr_get(dev->port_id, &eth_addr);
VLOG_INFO_RL(&rl, "Port %"PRIu8": "ETH_ADDR_FMT,
dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
rte_eth_link_get_nowait(dev->port_id, &dev->link);
mbp_priv = rte_mempool_get_priv(dev->mp);
dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
/* Get the Flow control configuration for DPDK-ETH */
diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
if (diag) {
VLOG_DBG("cannot get flow control parameters on port=%"PRIu8", err=%d",
dev->port_id, diag);
}
return 0;
}
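/* Returns the 'struct netdev_dpdk' containing 'netdev'. */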
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
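/* Allocates a zeroed 'struct netdev_dpdk' from DPDK memory. Returns NULL on
* failure. */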
static struct netdev *
netdev_dpdk_alloc(void)
{
struct netdev_dpdk *dev;
dev = dpdk_rte_mzalloc(sizeof *dev);
if (dev) {
return &dev->up;
}
return NULL;
}
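/* Allocates an array of 'n_txqs' Tx queue structures with their locks and
* vhost queue mappings initialized. Returns NULL on allocation failure. */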
static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
struct dpdk_tx_queue *txqs;
unsigned i;
txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
if (txqs) {
for (i = 0; i < n_txqs; i++) {
/* Initialize map for vhost devices. */
txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
rte_spinlock_init(&txqs[i].tx_lock);
}
}
return txqs;
}
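/* Construction code common to physical and vhost DPDK devices: initializes
* locks and default values (MTU, queue counts, queue sizes, offloads) and
* adds the device to the global 'dpdk_list'. */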
static int
common_construct(struct netdev *netdev, dpdk_port_t port_no,
enum dpdk_dev_type type, int socket_id)
OVS_REQUIRES(dpdk_mutex)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
ovs_mutex_init(&dev->mutex);
rte_spinlock_init(&dev->stats_lock);
/* If 'socket_id' is negative, it means that the kernel failed
* to obtain the PCI NUMA info. In that situation, always
* use 'SOCKET0'. */
dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
dev->requested_socket_id = dev->socket_id;
dev->port_id = port_no;
dev->type = type;
dev->flags = 0;
dev->requested_mtu = ETHER_MTU;
dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
ovsrcu_index_init(&dev->vid, -1);
dev->vhost_reconfigured = false;
dev->attached = false;
ovsrcu_init(&dev->qos_conf, NULL);
ovsrcu_init(&dev->ingress_policer, NULL);
dev->policer_rate = 0;
dev->policer_burst = 0;
netdev->n_rxq = 0;
netdev->n_txq = 0;
dev->requested_n_rxq = NR_QUEUE;
dev->requested_n_txq = NR_QUEUE;
dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;
/* Initialize the flow control configuration to zero */
memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
/* Initialize the hardware offload flags to 0 */
dev->hw_ol_features = 0;
dev->flags = NETDEV_UP | NETDEV_PROMISC;
ovs_list_push_back(&dpdk_list, &dev->list_node);
netdev_request_reconfigure(netdev);
dev->rte_xstats_names = NULL;
dev->rte_xstats_names_size = 0;
dev->rte_xstats_ids = NULL;
dev->rte_xstats_ids_size = 0;
return 0;
}
/* dev_name must be the prefix followed by a positive decimal number.
* (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
unsigned int *port_no)
{
const char *cport;
if (strncmp(dev_name, prefix, strlen(prefix))) {
return ENODEV;
}
cport = dev_name + strlen(prefix);
if (str_to_uint(cport, 10, port_no)) {
return 0;
} else {
return ENODEV;
}
}
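/* Construction code common to vhost-user client and server ports: allocates
* the Tx queue array and delegates the rest to common_construct(). */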
static int
vhost_common_construct(struct netdev *netdev)
OVS_REQUIRES(dpdk_mutex)
{
int socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
if (!dev->tx_q) {
return ENOMEM;
}
return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
DPDK_DEV_VHOST, socket_id);
}
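/* Constructs a vhost-user port operating in server mode: the vhost socket
* path is built from the configured socket directory and the port name, then
* registered with the vhost library. */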
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
const char *name = netdev->name;
int err;
/* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
* the file system. '/' or '\' would traverse directories, so they're not
* acceptable in 'name'. */
if (strchr(name, '/') || strchr(name, '\\')) {
VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
"A valid name must not include '/' or '\\'",
name);
return EINVAL;
}
ovs_mutex_lock(&dpdk_mutex);
/* Take the name of the vhost-user port and append it to the location where
* the socket is to be created, then register the socket.
*/
snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
dpdk_get_vhost_sock_dir(), name);
dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
if (err) {
VLOG_ERR("vhost-user socket device setup failure for socket %s\n",