Commit 766e9037cc139ee25ed93ee5ad11e1450c4b99f6
Committed by
David S. Miller
1 parent
48bccd25df
Exists in
master
and in
39 other branches
net: sk_drops consolidation
sock_queue_rcv_skb() can update sk_drops itself, removing need for callers to take care of it. This is more consistent since sock_queue_rcv_skb() also reads sk_drops when queueing a skb. This adds sk_drops management to many protocols that did not care about it yet. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 9 changed files with 20 additions and 34 deletions Side-by-side Diff
net/core/sock.c
... | ... | @@ -274,7 +274,7 @@ |
274 | 274 | |
275 | 275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
276 | 276 | { |
277 | - int err = 0; | |
277 | + int err; | |
278 | 278 | int skb_len; |
279 | 279 | unsigned long flags; |
280 | 280 | struct sk_buff_head *list = &sk->sk_receive_queue; |
281 | 281 | |
282 | 282 | |
... | ... | @@ -284,17 +284,17 @@ |
284 | 284 | */ |
285 | 285 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
286 | 286 | (unsigned)sk->sk_rcvbuf) { |
287 | - err = -ENOMEM; | |
288 | - goto out; | |
287 | + atomic_inc(&sk->sk_drops); | |
288 | + return -ENOMEM; | |
289 | 289 | } |
290 | 290 | |
291 | 291 | err = sk_filter(sk, skb); |
292 | 292 | if (err) |
293 | - goto out; | |
293 | + return err; | |
294 | 294 | |
295 | 295 | if (!sk_rmem_schedule(sk, skb->truesize)) { |
296 | - err = -ENOBUFS; | |
297 | - goto out; | |
296 | + atomic_inc(&sk->sk_drops); | |
297 | + return -ENOBUFS; | |
298 | 298 | } |
299 | 299 | |
300 | 300 | skb->dev = NULL; |
... | ... | @@ -314,8 +314,7 @@ |
314 | 314 | |
315 | 315 | if (!sock_flag(sk, SOCK_DEAD)) |
316 | 316 | sk->sk_data_ready(sk, skb_len); |
317 | -out: | |
318 | - return err; | |
317 | + return 0; | |
319 | 318 | } |
320 | 319 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
321 | 320 |
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/raw.c
net/ipv4/udp.c
... | ... | @@ -1063,25 +1063,22 @@ |
1063 | 1063 | |
1064 | 1064 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1065 | 1065 | { |
1066 | - int is_udplite = IS_UDPLITE(sk); | |
1067 | - int rc; | |
1066 | + int rc = sock_queue_rcv_skb(sk, skb); | |
1068 | 1067 | |
1069 | - if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | |
1068 | + if (rc < 0) { | |
1069 | + int is_udplite = IS_UDPLITE(sk); | |
1070 | + | |
1070 | 1071 | /* Note that an ENOMEM error is charged twice */ |
1071 | - if (rc == -ENOMEM) { | |
1072 | + if (rc == -ENOMEM) | |
1072 | 1073 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, |
1073 | 1074 | is_udplite); |
1074 | - atomic_inc(&sk->sk_drops); | |
1075 | - } | |
1076 | - goto drop; | |
1075 | + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | |
1076 | + kfree_skb(skb); | |
1077 | + return -1; | |
1077 | 1078 | } |
1078 | 1079 | |
1079 | 1080 | return 0; |
1080 | 1081 | |
1081 | -drop: | |
1082 | - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | |
1083 | - kfree_skb(skb); | |
1084 | - return -1; | |
1085 | 1082 | } |
1086 | 1083 | |
1087 | 1084 | /* returns: |
net/ipv6/raw.c
net/ipv6/udp.c
... | ... | @@ -385,13 +385,11 @@ |
385 | 385 | goto drop; |
386 | 386 | } |
387 | 387 | |
388 | - if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | |
388 | + if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | |
389 | 389 | /* Note that an ENOMEM error is charged twice */ |
390 | - if (rc == -ENOMEM) { | |
390 | + if (rc == -ENOMEM) | |
391 | 391 | UDP6_INC_STATS_BH(sock_net(sk), |
392 | 392 | UDP_MIB_RCVBUFERRORS, is_udplite); |
393 | - atomic_inc(&sk->sk_drops); | |
394 | - } | |
395 | 393 | goto drop; |
396 | 394 | } |
397 | 395 |
net/phonet/datagram.c
... | ... | @@ -159,11 +159,9 @@ |
159 | 159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
160 | 160 | { |
161 | 161 | int err = sock_queue_rcv_skb(sk, skb); |
162 | - if (err < 0) { | |
162 | + | |
163 | + if (err < 0) | |
163 | 164 | kfree_skb(skb); |
164 | - if (err == -ENOMEM) | |
165 | - atomic_inc(&sk->sk_drops); | |
166 | - } | |
167 | 165 | return err ? NET_RX_DROP : NET_RX_SUCCESS; |
168 | 166 | } |
169 | 167 |