4343#include <net/tcp.h>
4444#include <linux/ptr_ring.h>
4545#include <net/inet_common.h>
46+ #include <linux/sched/signal.h>
4647
4748#define SOCK_CREATE_FLAG_MASK \
4849 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -523,8 +524,6 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
523524 i = md -> sg_start ;
524525
525526 do {
526- r -> sg_data [i ] = md -> sg_data [i ];
527-
528527 size = (apply && apply_bytes < md -> sg_data [i ].length ) ?
529528 apply_bytes : md -> sg_data [i ].length ;
530529
@@ -535,6 +534,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
535534 }
536535
537536 sk_mem_charge (sk , size );
537+ r -> sg_data [i ] = md -> sg_data [i ];
538538 r -> sg_data [i ].length = size ;
539539 md -> sg_data [i ].length -= size ;
540540 md -> sg_data [i ].offset += size ;
@@ -732,6 +732,26 @@ static int bpf_exec_tx_verdict(struct smap_psock *psock,
732732 return err ;
733733}
734734
/*
 * bpf_wait_data - sleep until ingress data is available on @sk.
 *
 * Adds the caller to @sk's wait queue, sets SOCKWQ_ASYNC_WAITDATA for the
 * duration of the wait, and blocks (for at most @timeo) in sk_wait_event()
 * until either the psock ingress list (@psk->ingress) or the socket's own
 * sk_receive_queue becomes non-empty.  The bit is cleared and the waiter
 * removed before returning.
 *
 * Returns the result of sk_wait_event(); the caller treats a non-zero
 * return as "data became available".
 *
 * NOTE(review): @flags is never used, and *@err is never written by this
 * function, so a caller's "if (err)" check can only observe the value it
 * initialised err with — confirm whether error propagation was intended.
 * NOTE(review): sk_wait_event() presumably drops and re-acquires the
 * socket lock across the sleep (standard kernel behaviour) — verify
 * against the kernel version this targets.
 */
static int bpf_wait_data(struct sock *sk,
			 struct smap_psock *psk, int flags,
			 long timeo, int *err)
{
	int rc;

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, &timeo,
			   !list_empty(&psk->ingress) ||
			   !skb_queue_empty(&sk->sk_receive_queue),
			   &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);

	return rc;
}
754+
735755static int bpf_tcp_recvmsg (struct sock * sk , struct msghdr * msg , size_t len ,
736756 int nonblock , int flags , int * addr_len )
737757{
@@ -755,6 +775,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
755775 return tcp_recvmsg (sk , msg , len , nonblock , flags , addr_len );
756776
757777 lock_sock (sk );
778+ bytes_ready :
758779 while (copied != len ) {
759780 struct scatterlist * sg ;
760781 struct sk_msg_buff * md ;
@@ -809,6 +830,28 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
809830 }
810831 }
811832
833+ if (!copied ) {
834+ long timeo ;
835+ int data ;
836+ int err = 0 ;
837+
838+ timeo = sock_rcvtimeo (sk , nonblock );
839+ data = bpf_wait_data (sk , psock , flags , timeo , & err );
840+
841+ if (data ) {
842+ if (!skb_queue_empty (& sk -> sk_receive_queue )) {
843+ release_sock (sk );
844+ smap_release_sock (psock , sk );
845+ copied = tcp_recvmsg (sk , msg , len , nonblock , flags , addr_len );
846+ return copied ;
847+ }
848+ goto bytes_ready ;
849+ }
850+
851+ if (err )
852+ copied = err ;
853+ }
854+
812855 release_sock (sk );
813856 smap_release_sock (psock , sk );
814857 return copied ;
@@ -1831,7 +1874,7 @@ static int sock_map_update_elem(struct bpf_map *map,
18311874 return err ;
18321875}
18331876
1834- static void sock_map_release (struct bpf_map * map , struct file * map_file )
1877+ static void sock_map_release (struct bpf_map * map )
18351878{
18361879 struct bpf_stab * stab = container_of (map , struct bpf_stab , map );
18371880 struct bpf_prog * orig ;
@@ -1855,7 +1898,7 @@ const struct bpf_map_ops sock_map_ops = {
18551898 .map_get_next_key = sock_map_get_next_key ,
18561899 .map_update_elem = sock_map_update_elem ,
18571900 .map_delete_elem = sock_map_delete_elem ,
1858- .map_release = sock_map_release ,
1901+ .map_release_uref = sock_map_release ,
18591902};
18601903
18611904BPF_CALL_4 (bpf_sock_map_update , struct bpf_sock_ops_kern * , bpf_sock ,
0 commit comments