Skip to content

Commit 10d6393

Browse files
julianwiedmann authored and davem330 committed
net/af_iucv: support drop monitoring
Change the good paths to use consume_skb() instead of kfree_skb(). This avoids flooding dropwatch with false-positives.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 0033523 commit 10d6393

File tree

1 file changed

+22
-20
lines changed

1 file changed

+22
-20
lines changed

net/iucv/af_iucv.c

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1044,7 +1044,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
10441044
if (err == 0) {
10451045
atomic_dec(&iucv->skbs_in_xmit);
10461046
skb_unlink(skb, &iucv->send_skb_q);
1047-
kfree_skb(skb);
1047+
consume_skb(skb);
10481048
}
10491049

10501050
/* this error should never happen since the */
@@ -1293,7 +1293,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
12931293
}
12941294
}
12951295

1296-
kfree_skb(skb);
1296+
consume_skb(skb);
12971297
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
12981298
atomic_inc(&iucv->msg_recv);
12991299
if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
@@ -1756,7 +1756,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
17561756
spin_unlock_irqrestore(&list->lock, flags);
17571757

17581758
if (this) {
1759-
kfree_skb(this);
1759+
consume_skb(this);
17601760
/* wake up any process waiting for sending */
17611761
iucv_sock_wake_msglim(sk);
17621762
}
@@ -1903,17 +1903,17 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
19031903
{
19041904
struct iucv_sock *iucv = iucv_sk(sk);
19051905

1906-
if (!iucv)
1907-
goto out;
1908-
if (sk->sk_state != IUCV_BOUND)
1909-
goto out;
1906+
if (!iucv || sk->sk_state != IUCV_BOUND) {
1907+
kfree_skb(skb);
1908+
return NET_RX_SUCCESS;
1909+
}
1910+
19101911
bh_lock_sock(sk);
19111912
iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
19121913
sk->sk_state = IUCV_CONNECTED;
19131914
sk->sk_state_change(sk);
19141915
bh_unlock_sock(sk);
1915-
out:
1916-
kfree_skb(skb);
1916+
consume_skb(skb);
19171917
return NET_RX_SUCCESS;
19181918
}
19191919

@@ -1924,16 +1924,16 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
19241924
{
19251925
struct iucv_sock *iucv = iucv_sk(sk);
19261926

1927-
if (!iucv)
1928-
goto out;
1929-
if (sk->sk_state != IUCV_BOUND)
1930-
goto out;
1927+
if (!iucv || sk->sk_state != IUCV_BOUND) {
1928+
kfree_skb(skb);
1929+
return NET_RX_SUCCESS;
1930+
}
1931+
19311932
bh_lock_sock(sk);
19321933
sk->sk_state = IUCV_DISCONN;
19331934
sk->sk_state_change(sk);
19341935
bh_unlock_sock(sk);
1935-
out:
1936-
kfree_skb(skb);
1936+
consume_skb(skb);
19371937
return NET_RX_SUCCESS;
19381938
}
19391939

@@ -1945,16 +1945,18 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
19451945
struct iucv_sock *iucv = iucv_sk(sk);
19461946

19471947
/* other end of connection closed */
1948-
if (!iucv)
1949-
goto out;
1948+
if (!iucv) {
1949+
kfree_skb(skb);
1950+
return NET_RX_SUCCESS;
1951+
}
1952+
19501953
bh_lock_sock(sk);
19511954
if (sk->sk_state == IUCV_CONNECTED) {
19521955
sk->sk_state = IUCV_DISCONN;
19531956
sk->sk_state_change(sk);
19541957
}
19551958
bh_unlock_sock(sk);
1956-
out:
1957-
kfree_skb(skb);
1959+
consume_skb(skb);
19581960
return NET_RX_SUCCESS;
19591961
}
19601962

@@ -2107,7 +2109,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
21072109
case (AF_IUCV_FLAG_WIN):
21082110
err = afiucv_hs_callback_win(sk, skb);
21092111
if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2110-
kfree_skb(skb);
2112+
consume_skb(skb);
21112113
break;
21122114
}
21132115
fallthrough; /* and receive non-zero length data */

0 commit comments

Comments (0)