Skip to content
Permalink
Browse files
virtio/vsock: dequeue callback for SOCK_SEQPACKET
This adds the transport callback and its logic for SEQPACKET dequeue.
The callback fetches RW packets from the socket's rx queue until the
whole record is copied (if the user's buffer is full, the user is not
woken up). This is done so as not to stall the sender: if we woke the
user and it left the syscall, nobody would send a credit update for the
rest of the record, and the sender would wait until the receiver entered
the read syscall again. So if the user buffer is full, we just send a
credit update and drop the data.

Signed-off-by: Arseny Krasnov <arseny.krasnov@kaspersky.com>
  • Loading branch information
Arseny Krasnov authored and intel-lab-lkp committed Apr 13, 2021
1 parent 239d92c commit c5fe1745b6ed6bc01a7f88737bd43fce70349367
Show file tree
Hide file tree
Showing 2 changed files with 78 additions and 0 deletions.
@@ -80,6 +80,11 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags);

/* Dequeue one whole SEQPACKET record from @vsk into @msg.
 *
 * @vsk:       socket to read from
 * @msg:       destination message buffer
 * @flags:     recvmsg() flags (MSG_PEEK is not supported)
 * @msg_ready: set to true once the whole record has been consumed
 *
 * Returns the number of bytes copied, or a negative errno on failure.
 */
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
int flags,
bool *msg_ready);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);

@@ -393,6 +393,67 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
return err;
}

/* Dequeue a whole SEQPACKET record from the rx queue into 'msg'.
 *
 * RW packets are copied until the record boundary (VIRTIO_VSOCK_SEQ_EOR)
 * is seen. If the user buffer fills up before the record ends, the
 * remaining packets of the record are dropped — but credit is still
 * returned to the sender — so the sender is never stalled waiting for a
 * credit update (see commit message).
 *
 * Returns the number of bytes copied (the full record length when
 * MSG_TRUNC is set), or a negative errno on copy failure. '*msg_ready'
 * is true once the whole record has been consumed.
 */
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags,
						 bool *msg_ready)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct virtio_vsock_pkt *pkt;
	int err = 0;
	size_t user_buf_len = msg->msg_iter.count;

	*msg_ready = false;
	spin_lock_bh(&vvs->rx_lock);

	/* err < 0 terminates the loop after a failed copy. */
	while (!*msg_ready && !list_empty(&vvs->rx_queue) && err >= 0) {
		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);

		if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RW) {
			size_t bytes_to_copy;
			size_t pkt_len;

			pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
			bytes_to_copy = min(user_buf_len, pkt_len);

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			if (memcpy_to_msg(msg, pkt->buf, bytes_to_copy)) {
				/* Copy failed: report -EINVAL, but do NOT
				 * break out here — rx_lock must be
				 * reacquired so the packet below is still
				 * dequeued/freed and the final unlock
				 * stays balanced.
				 */
				err = -EINVAL;
			} else {
				/* If user sets 'MSG_TRUNC' we return real
				 * length of message.
				 */
				if (flags & MSG_TRUNC)
					err += pkt_len;
				else
					err += bytes_to_copy;

				user_buf_len -= bytes_to_copy;

				if (pkt->hdr.flags & VIRTIO_VSOCK_SEQ_EOR)
					*msg_ready = true;
			}

			spin_lock_bh(&vvs->rx_lock);
		}

		/* Release the packet's credit and free it in every case,
		 * including non-RW packets and a failed copy.
		 */
		virtio_transport_dec_rx_pkt(vvs, pkt);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	spin_unlock_bh(&vvs->rx_lock);

	/* Tell the sender how much buffer space was freed. */
	virtio_transport_send_credit_update(vsk);

	return err;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
@@ -405,6 +466,18 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

/* Entry point for SEQPACKET record dequeue.
 *
 * MSG_PEEK is not supported for SEQPACKET sockets; every other flag is
 * passed through to the worker. Returns bytes copied or negative errno.
 */
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags, bool *msg_ready)
{
	ssize_t ret = -EOPNOTSUPP;

	if (!(flags & MSG_PEEK))
		ret = virtio_transport_seqpacket_do_dequeue(vsk, msg, flags,
							    msg_ready);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,

0 comments on commit c5fe174

Please sign in to comment.