 * Virtual Ethernet interfaces.
 *
 * For each mode, the following tests are run:
 * a. nopoll - soft-irq processing in run-to-completion mode
 * b. poll - using poll() syscall
 * c. Socket Teardown
 *    Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
 *    Configure sockets at indexes 0 and 1, run a traffic on queue ids 0,
 *    then remove xsk sockets from queue 0 on both veth interfaces and
 *    finally run a traffic on queues ids 1
 * g. unaligned mode
 *
 * Total tests: 12
 *
@@ -243,6 +244,9 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size
243244 };
244245 int ret ;
245246
247+ if (umem -> unaligned_mode )
248+ cfg .flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG ;
249+
246250 ret = xsk_umem__create (& umem -> umem , buffer , size ,
247251 & umem -> fq , & umem -> cq , & cfg );
248252 if (ret )
@@ -252,19 +256,6 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size
252256 return 0 ;
253257}
254258
255- static void xsk_populate_fill_ring (struct xsk_umem_info * umem )
256- {
257- int ret , i ;
258- u32 idx = 0 ;
259-
260- ret = xsk_ring_prod__reserve (& umem -> fq , XSK_RING_PROD__DEFAULT_NUM_DESCS , & idx );
261- if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS )
262- exit_with_error (- ret );
263- for (i = 0 ; i < XSK_RING_PROD__DEFAULT_NUM_DESCS ; i ++ )
264- * xsk_ring_prod__fill_addr (& umem -> fq , idx ++ ) = i * umem -> frame_size ;
265- xsk_ring_prod__submit (& umem -> fq , XSK_RING_PROD__DEFAULT_NUM_DESCS );
266- }
267-
268259static int xsk_configure_socket (struct xsk_socket_info * xsk , struct xsk_umem_info * umem ,
269260 struct ifobject * ifobject , u32 qid )
270261{
@@ -477,7 +468,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
477468 struct pkt_stream * pkt_stream ;
478469 u32 i ;
479470
480- pkt_stream = malloc ( sizeof (* pkt_stream ));
471+ pkt_stream = calloc ( 1 , sizeof (* pkt_stream ));
481472 if (!pkt_stream )
482473 exit_with_error (ENOMEM );
483474
@@ -487,7 +478,8 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
487478
488479 pkt_stream -> nb_pkts = nb_pkts ;
489480 for (i = 0 ; i < nb_pkts ; i ++ ) {
490- pkt_stream -> pkts [i ].addr = (i % umem -> num_frames ) * umem -> frame_size ;
481+ pkt_stream -> pkts [i ].addr = (i % umem -> num_frames ) * umem -> frame_size +
482+ DEFAULT_OFFSET ;
491483 pkt_stream -> pkts [i ].len = pkt_len ;
492484 pkt_stream -> pkts [i ].payload = i ;
493485
@@ -500,6 +492,12 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
500492 return pkt_stream ;
501493}
502494
495+ static struct pkt_stream * pkt_stream_clone (struct xsk_umem_info * umem ,
496+ struct pkt_stream * pkt_stream )
497+ {
498+ return pkt_stream_generate (umem , pkt_stream -> nb_pkts , pkt_stream -> pkts [0 ].len );
499+ }
500+
503501static void pkt_stream_replace (struct test_spec * test , u32 nb_pkts , u32 pkt_len )
504502{
505503 struct pkt_stream * pkt_stream ;
@@ -509,6 +507,22 @@ static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
509507 test -> ifobj_rx -> pkt_stream = pkt_stream ;
510508}
511509
510+ static void pkt_stream_replace_half (struct test_spec * test , u32 pkt_len , u32 offset )
511+ {
512+ struct xsk_umem_info * umem = test -> ifobj_tx -> umem ;
513+ struct pkt_stream * pkt_stream ;
514+ u32 i ;
515+
516+ pkt_stream = pkt_stream_clone (umem , test -> pkt_stream_default );
517+ for (i = 0 ; i < test -> pkt_stream_default -> nb_pkts ; i += 2 ) {
518+ pkt_stream -> pkts [i ].addr = (i % umem -> num_frames ) * umem -> frame_size + offset ;
519+ pkt_stream -> pkts [i ].len = pkt_len ;
520+ }
521+
522+ test -> ifobj_tx -> pkt_stream = pkt_stream ;
523+ test -> ifobj_rx -> pkt_stream = pkt_stream ;
524+ }
525+
512526static struct pkt * pkt_generate (struct ifobject * ifobject , u32 pkt_nb )
513527{
514528 struct pkt * pkt = pkt_stream_get_pkt (ifobject -> pkt_stream , pkt_nb );
@@ -570,9 +584,9 @@ static void pkt_dump(void *pkt, u32 len)
570584 fprintf (stdout , "---------------------------------------\n" );
571585}
572586
573- static bool is_pkt_valid (struct pkt * pkt , void * buffer , const struct xdp_desc * desc )
587+ static bool is_pkt_valid (struct pkt * pkt , void * buffer , u64 addr , u32 len )
574588{
575- void * data = xsk_umem__get_data (buffer , desc -> addr );
589+ void * data = xsk_umem__get_data (buffer , addr );
576590 struct iphdr * iphdr = (struct iphdr * )(data + sizeof (struct ethhdr ));
577591
578592 if (!pkt ) {
@@ -586,10 +600,10 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, const struct xdp_desc *d
586600 if (opt_pkt_dump )
587601 pkt_dump (data , PKT_SIZE );
588602
589- if (pkt -> len != desc -> len ) {
603+ if (pkt -> len != len ) {
590604 ksft_test_result_fail
591605 ("ERROR: [%s] expected length [%d], got length [%d]\n" ,
592- __func__ , pkt -> len , desc -> len );
606+ __func__ , pkt -> len , len );
593607 return false;
594608 }
595609
@@ -671,7 +685,7 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
671685
672686 orig = xsk_umem__extract_addr (addr );
673687 addr = xsk_umem__add_offset_to_addr (addr );
674- if (!is_pkt_valid (pkt , xsk -> umem -> buffer , desc ))
688+ if (!is_pkt_valid (pkt , xsk -> umem -> buffer , addr , desc -> len ))
675689 return ;
676690
677691 * xsk_ring_prod__fill_addr (& xsk -> umem -> fq , idx_fq ++ ) = orig ;
@@ -815,13 +829,16 @@ static void tx_stats_validate(struct ifobject *ifobject)
815829
816830static void thread_common_ops (struct test_spec * test , struct ifobject * ifobject )
817831{
832+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE ;
818833 u32 i ;
819834
820835 ifobject -> ns_fd = switch_namespace (ifobject -> nsname );
821836
837+ if (ifobject -> umem -> unaligned_mode )
838+ mmap_flags |= MAP_HUGETLB ;
839+
822840 for (i = 0 ; i < test -> nb_sockets ; i ++ ) {
823841 u64 umem_sz = ifobject -> umem -> num_frames * ifobject -> umem -> frame_size ;
824- int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE ;
825842 u32 ctr = 0 ;
826843 void * bufs ;
827844
@@ -879,6 +896,32 @@ static void *worker_testapp_validate_tx(void *arg)
879896 pthread_exit (NULL );
880897}
881898
899+ static void xsk_populate_fill_ring (struct xsk_umem_info * umem , struct pkt_stream * pkt_stream )
900+ {
901+ u32 idx = 0 , i ;
902+ int ret ;
903+
904+ ret = xsk_ring_prod__reserve (& umem -> fq , XSK_RING_PROD__DEFAULT_NUM_DESCS , & idx );
905+ if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS )
906+ exit_with_error (ENOSPC );
907+ for (i = 0 ; i < XSK_RING_PROD__DEFAULT_NUM_DESCS ; i ++ ) {
908+ u64 addr ;
909+
910+ if (pkt_stream -> use_addr_for_fill ) {
911+ struct pkt * pkt = pkt_stream_get_pkt (pkt_stream , i );
912+
913+ if (!pkt )
914+ break ;
915+ addr = pkt -> addr ;
916+ } else {
917+ addr = (i % umem -> num_frames ) * umem -> frame_size + DEFAULT_OFFSET ;
918+ }
919+
920+ * xsk_ring_prod__fill_addr (& umem -> fq , idx ++ ) = addr ;
921+ }
922+ xsk_ring_prod__submit (& umem -> fq , XSK_RING_PROD__DEFAULT_NUM_DESCS );
923+ }
924+
882925static void * worker_testapp_validate_rx (void * arg )
883926{
884927 struct test_spec * test = (struct test_spec * )arg ;
@@ -889,7 +932,7 @@ static void *worker_testapp_validate_rx(void *arg)
889932 thread_common_ops (test , ifobject );
890933
891934 if (stat_test_type != STAT_TEST_RX_FILL_EMPTY )
892- xsk_populate_fill_ring (ifobject -> umem );
935+ xsk_populate_fill_ring (ifobject -> umem , ifobject -> pkt_stream );
893936
894937 fds .fd = xsk_socket__fd (ifobject -> xsk -> xsk );
895938 fds .events = POLLIN ;
@@ -1033,6 +1076,40 @@ static void testapp_stats(struct test_spec *test)
10331076 test_spec_set_name (test , "STATS" );
10341077}
10351078
1079+ /* Simple test */
1080+ static bool hugepages_present (struct ifobject * ifobject )
1081+ {
1082+ const size_t mmap_sz = 2 * ifobject -> umem -> num_frames * ifobject -> umem -> frame_size ;
1083+ void * bufs ;
1084+
1085+ bufs = mmap (NULL , mmap_sz , PROT_READ | PROT_WRITE ,
1086+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB , -1 , 0 );
1087+ if (bufs == MAP_FAILED )
1088+ return false;
1089+
1090+ munmap (bufs , mmap_sz );
1091+ return true;
1092+ }
1093+
1094+ static bool testapp_unaligned (struct test_spec * test )
1095+ {
1096+ if (!hugepages_present (test -> ifobj_tx )) {
1097+ ksft_test_result_skip ("No 2M huge pages present.\n" );
1098+ return false;
1099+ }
1100+
1101+ test_spec_set_name (test , "UNALIGNED_MODE" );
1102+ test -> ifobj_tx -> umem -> unaligned_mode = true;
1103+ test -> ifobj_rx -> umem -> unaligned_mode = true;
1104+ /* Let half of the packets straddle a buffer boundrary */
1105+ pkt_stream_replace_half (test , PKT_SIZE , test -> ifobj_tx -> umem -> frame_size - 32 );
1106+ test -> ifobj_rx -> pkt_stream -> use_addr_for_fill = true;
1107+ testapp_validate_traffic (test );
1108+
1109+ pkt_stream_restore_default (test );
1110+ return true;
1111+ }
1112+
10361113static void init_iface (struct ifobject * ifobj , const char * dst_mac , const char * src_mac ,
10371114 const char * dst_ip , const char * src_ip , const u16 dst_port ,
10381115 const u16 src_port , thread_func_t func_ptr )
@@ -1084,6 +1161,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
10841161 test_spec_set_name (test , "POLL" );
10851162 testapp_validate_traffic (test );
10861163 break ;
1164+ case TEST_TYPE_UNALIGNED :
1165+ if (!testapp_unaligned (test ))
1166+ return ;
1167+ break ;
10871168 default :
10881169 break ;
10891170 }
0 commit comments