@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
604604 return iov_iter_count (iter );
605605}
606606
607- static bool vhost_exceeds_weight (int pkts , int total_len )
608- {
609- return total_len >= VHOST_NET_WEIGHT ||
610- pkts >= VHOST_NET_PKT_WEIGHT ;
611- }
612-
613607static int get_tx_bufs (struct vhost_net * net ,
614608 struct vhost_net_virtqueue * nvq ,
615609 struct msghdr * msg ,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
779773 int sent_pkts = 0 ;
780774 bool sock_can_batch = (sock -> sk -> sk_sndbuf == INT_MAX );
781775
782- for (;;) {
776+ do {
783777 bool busyloop_intr = false;
784778
785779 if (nvq -> done_idx == VHOST_NET_BATCH )
@@ -845,11 +839,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
845839 vq -> heads [nvq -> done_idx ].id = cpu_to_vhost32 (vq , head );
846840 vq -> heads [nvq -> done_idx ].len = 0 ;
847841 ++ nvq -> done_idx ;
848- if (vhost_exceeds_weight (++ sent_pkts , total_len )) {
849- vhost_poll_queue (& vq -> poll );
850- break ;
851- }
852- }
842+ } while (likely (!vhost_exceeds_weight (vq , ++ sent_pkts , total_len )));
853843
854844 vhost_tx_batch (net , nvq , sock , & msg );
855845}
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
874864 bool zcopy_used ;
875865 int sent_pkts = 0 ;
876866
877- for (;;) {
867+ do {
878868 bool busyloop_intr ;
879869
880870 /* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
951941 else
952942 vhost_zerocopy_signal_used (net , vq );
953943 vhost_net_tx_packet (net );
954- if (unlikely (vhost_exceeds_weight (++ sent_pkts , total_len ))) {
955- vhost_poll_queue (& vq -> poll );
956- break ;
957- }
958- }
944+ } while (likely (!vhost_exceeds_weight (vq , ++ sent_pkts , total_len )));
959945}
960946
961947/* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
11531139 vq -> log : NULL ;
11541140 mergeable = vhost_has_feature (vq , VIRTIO_NET_F_MRG_RXBUF );
11551141
1156- while ((sock_len = vhost_net_rx_peek_head_len (net , sock -> sk ,
1157- & busyloop_intr ))) {
1142+ do {
1143+ sock_len = vhost_net_rx_peek_head_len (net , sock -> sk ,
1144+ & busyloop_intr );
1145+ if (!sock_len )
1146+ break ;
11581147 sock_len += sock_hlen ;
11591148 vhost_len = sock_len + vhost_hlen ;
11601149 headcount = get_rx_bufs (vq , vq -> heads + nvq -> done_idx ,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
12391228 vhost_log_write (vq , vq_log , log , vhost_len ,
12401229 vq -> iov , in );
12411230 total_len += vhost_len ;
1242- if (unlikely (vhost_exceeds_weight (++ recv_pkts , total_len ))) {
1243- vhost_poll_queue (& vq -> poll );
1244- goto out ;
1245- }
1246- }
1231+ } while (likely (!vhost_exceeds_weight (vq , ++ recv_pkts , total_len )));
1232+
12471233 if (unlikely (busyloop_intr ))
12481234 vhost_poll_queue (& vq -> poll );
1249- else
1235+ else if (! sock_len )
12501236 vhost_net_enable_vq (net , vq );
12511237out :
12521238 vhost_net_signal_used (nvq );
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
13381324 vhost_net_buf_init (& n -> vqs [i ].rxq );
13391325 }
13401326 vhost_dev_init (dev , vqs , VHOST_NET_VQ_MAX ,
1341- UIO_MAXIOV + VHOST_NET_BATCH );
1327+ UIO_MAXIOV + VHOST_NET_BATCH ,
1328+ VHOST_NET_PKT_WEIGHT , VHOST_NET_WEIGHT );
13421329
13431330 vhost_poll_init (n -> poll + VHOST_NET_VQ_TX , handle_tx_net , EPOLLOUT , dev );
13441331 vhost_poll_init (n -> poll + VHOST_NET_VQ_RX , handle_rx_net , EPOLLIN , dev );