mptcp: fix race on unaccepted mptcp sockets
When the listener socket owning the relevant request is closed,
it frees the unaccepted subflows, and that causes the later deletion
of the paired MPTCP sockets.

The mptcp socket's worker can run in the time interval between such delete
operations. When that happens, any access to msk->first will cause a
use-after-free (UaF), as the subflow cleanup did not clear that field in
the mptcp socket.
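
To illustrate the hazard, a minimal hypothetical sketch (example_worker_step() is
not a real kernel function; only mptcp_subflow_send_ack() is, see the declaration
in the protocol.h hunk below): once the listener close path has freed the
unaccepted subflow, msk->first dangles and any use of it from the worker is a
use-after-free.

	/* Hypothetical illustration of the race described above; not actual
	 * worker code. If the listener close path already freed the unaccepted
	 * subflow, msk->first still points at the freed socket.
	 */
	static void example_worker_step(struct mptcp_sock *msk)
	{
		struct sock *ssk = msk->first;	/* may point at freed memory */

		if (ssk)
			mptcp_subflow_send_ack(ssk);	/* use-after-free */
	}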

Address the issue by explicitly traversing the listener socket's accept
queue at close time and performing the needed cleanup on each pending
msk.

Note that the locking is a bit tricky, as we need to acquire the msk
socket lock while still holding the subflow socket lock.
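
As a condensed sketch of that lock ordering (mirroring the
mptcp_subflow_queue_clean() hunk in net/mptcp/subflow.c below, with the
accept-queue walk elided):

	/* Sketch of the locking pattern, assuming 'head' was built while
	 * holding the accept-queue lock. The listener ssk lock is dropped
	 * before taking each msk lock to avoid an ABBA deadlock, then
	 * re-acquired nested because the listener msk socket lock is still
	 * held by the caller.
	 */
	release_sock(listener_ssk);

	for (msk = head; msk; msk = next) {
		struct sock *sk = (struct sock *)msk;
		bool slow = lock_sock_fast_nested(sk);

		next = msk->dl_next;
		msk->first = NULL;	/* detach the subflow being freed */
		msk->dl_next = NULL;
		unlock_sock_fast(sk, slow);
	}

	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);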

Fixes: 86e39e0 ("mptcp: keep track of local endpoint still available for each msk")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Paolo Abeni authored and kuba-moo committed Jun 29, 2022
1 parent f745a3e commit 6aeed90
Showing 3 changed files with 59 additions and 0 deletions.
5 changes: 5 additions & 0 deletions net/mptcp/protocol.c
@@ -2331,6 +2331,11 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		kfree_rcu(subflow, rcu);
 	} else {
 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+		if (ssk->sk_state == TCP_LISTEN) {
+			tcp_set_state(ssk, TCP_CLOSE);
+			mptcp_subflow_queue_clean(ssk);
+			inet_csk_listen_stop(ssk);
+		}
 		__tcp_close(ssk, 0);
 
 		/* close acquired an extra ref */
2 changes: 2 additions & 0 deletions net/mptcp/protocol.h
@@ -306,6 +306,7 @@ struct mptcp_sock {
 
 	u32 setsockopt_seq;
 	char ca_name[TCP_CA_NAME_MAX];
+	struct mptcp_sock *dl_next;
 };
 
 #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -608,6 +609,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		     struct mptcp_subflow_context *subflow);
 void mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
52 changes: 52 additions & 0 deletions net/mptcp/subflow.c
@@ -1723,6 +1723,58 @@ static void subflow_state_change(struct sock *sk)
 	}
 }
 
+void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+{
+	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+	struct mptcp_sock *msk, *next, *head = NULL;
+	struct request_sock *req;
+
+	/* build a list of all unaccepted mptcp sockets */
+	spin_lock_bh(&queue->rskq_lock);
+	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+		struct mptcp_subflow_context *subflow;
+		struct sock *ssk = req->sk;
+		struct mptcp_sock *msk;
+
+		if (!sk_is_mptcp(ssk))
+			continue;
+
+		subflow = mptcp_subflow_ctx(ssk);
+		if (!subflow || !subflow->conn)
+			continue;
+
+		/* skip if already in list */
+		msk = mptcp_sk(subflow->conn);
+		if (msk->dl_next || msk == head)
+			continue;
+
+		msk->dl_next = head;
+		head = msk;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
+	if (!head)
+		return;
+
+	/* can't acquire the msk socket lock under the subflow one,
+	 * or will cause ABBA deadlock
+	 */
+	release_sock(listener_ssk);
+
+	for (msk = head; msk; msk = next) {
+		struct sock *sk = (struct sock *)msk;
+		bool slow;
+
+		slow = lock_sock_fast_nested(sk);
+		next = msk->dl_next;
+		msk->first = NULL;
+		msk->dl_next = NULL;
+		unlock_sock_fast(sk, slow);
+	}
+
+	/* we are still under the listener msk socket lock */
+	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+}
+
 static int subflow_ulp_init(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);