Commit 766e9037cc139ee25ed93ee5ad11e1450c4b99f6

Authored by Eric Dumazet
Committed by David S. Miller
1 parent 48bccd25df

net: sk_drops consolidation

sock_queue_rcv_skb() can update sk_drops itself, removing the need for
callers to take care of it. This is more consistent, since
sock_queue_rcv_skb() also reads sk_drops when queueing an skb.

This adds sk_drops management to many protocols that did not care about it yet.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 9 changed files with 20 additions and 34 deletions

net/core/sock.c
... ... @@ -274,7 +274,7 @@
274 274  
275 275 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
276 276 {
277   - int err = 0;
  277 + int err;
278 278 int skb_len;
279 279 unsigned long flags;
280 280 struct sk_buff_head *list = &sk->sk_receive_queue;
281 281  
282 282  
... ... @@ -284,17 +284,17 @@
284 284 */
285 285 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
286 286 (unsigned)sk->sk_rcvbuf) {
287   - err = -ENOMEM;
288   - goto out;
  287 + atomic_inc(&sk->sk_drops);
  288 + return -ENOMEM;
289 289 }
290 290  
291 291 err = sk_filter(sk, skb);
292 292 if (err)
293   - goto out;
  293 + return err;
294 294  
295 295 if (!sk_rmem_schedule(sk, skb->truesize)) {
296   - err = -ENOBUFS;
297   - goto out;
  296 + atomic_inc(&sk->sk_drops);
  297 + return -ENOBUFS;
298 298 }
299 299  
300 300 skb->dev = NULL;
... ... @@ -314,8 +314,7 @@
314 314  
315 315 if (!sock_flag(sk, SOCK_DEAD))
316 316 sk->sk_data_ready(sk, skb_len);
317   -out:
318   - return err;
  317 + return 0;
319 318 }
320 319 EXPORT_SYMBOL(sock_queue_rcv_skb);
321 320  
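
With the hunk above, sock_queue_rcv_skb() itself charges sk->sk_drops when the receive buffer is full (-ENOMEM) or sk_rmem_schedule() fails (-ENOBUFS). Below is a minimal sketch, not part of this commit, of the caller convention that the per-protocol hunks which follow converge on; foo_rcv_skb() is a hypothetical handler:

static int foo_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* On failure, sock_queue_rcv_skb() has already updated sk->sk_drops
	 * where appropriate; the caller only frees the skb and reports
	 * the drop. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}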
net/ieee802154/dgram.c
... ... @@ -318,7 +318,6 @@
318 318 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
319 319 {
320 320 if (sock_queue_rcv_skb(sk, skb) < 0) {
321   - atomic_inc(&sk->sk_drops);
322 321 kfree_skb(skb);
323 322 return NET_RX_DROP;
324 323 }
net/ieee802154/raw.c
... ... @@ -206,7 +206,6 @@
206 206 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
207 207 {
208 208 if (sock_queue_rcv_skb(sk, skb) < 0) {
209   - atomic_inc(&sk->sk_drops);
210 209 kfree_skb(skb);
211 210 return NET_RX_DROP;
212 211 }
net/ipv4/raw.c
... ... @@ -292,7 +292,6 @@
292 292 /* Charge it to the socket. */
293 293  
294 294 if (sock_queue_rcv_skb(sk, skb) < 0) {
295   - atomic_inc(&sk->sk_drops);
296 295 kfree_skb(skb);
297 296 return NET_RX_DROP;
298 297 }
net/ipv4/udp.c
... ... @@ -1063,25 +1063,22 @@
1063 1063  
1064 1064 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1065 1065 {
1066   - int is_udplite = IS_UDPLITE(sk);
1067   - int rc;
  1066 + int rc = sock_queue_rcv_skb(sk, skb);
1068 1067  
1069   - if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
  1068 + if (rc < 0) {
  1069 + int is_udplite = IS_UDPLITE(sk);
  1070 +
1070 1071 /* Note that an ENOMEM error is charged twice */
1071   - if (rc == -ENOMEM) {
  1072 + if (rc == -ENOMEM)
1072 1073 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1073 1074 is_udplite);
1074   - atomic_inc(&sk->sk_drops);
1075   - }
1076   - goto drop;
  1075 + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  1076 + kfree_skb(skb);
  1077 + return -1;
1077 1078 }
1078 1079  
1079 1080 return 0;
1080 1081  
1081   -drop:
1082   - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1083   - kfree_skb(skb);
1084   - return -1;
1085 1082 }
1086 1083  
1087 1084 /* returns:
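
A hedged condensation, not the committed code, of what now happens in __udp_queue_rcv_skb() when the receive buffer overflows, showing why the comment says an ENOMEM error is charged twice now that the sk_drops increment itself lives in sock_queue_rcv_skb(); udp_rcvbuf_overflow_path() is a hypothetical helper:

static int udp_rcvbuf_overflow_path(struct sock *sk, struct sk_buff *skb)
{
	int is_udplite = IS_UDPLITE(sk);

	/* sock_queue_rcv_skb() returned -ENOMEM and already did
	 * atomic_inc(&sk->sk_drops) before returning. */
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite);
	/* ...and every error, -ENOMEM included, is also counted here,
	 * hence "charged twice". */
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}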
net/ipv6/raw.c
... ... @@ -381,8 +381,7 @@
381 381 }
382 382  
383 383 /* Charge it to the socket. */
384   - if (sock_queue_rcv_skb(sk,skb)<0) {
385   - atomic_inc(&sk->sk_drops);
  384 + if (sock_queue_rcv_skb(sk, skb) < 0) {
386 385 kfree_skb(skb);
387 386 return NET_RX_DROP;
388 387 }
net/ipv6/udp.c
... ... @@ -385,13 +385,11 @@
385 385 goto drop;
386 386 }
387 387  
388   - if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
  388 + if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
389 389 /* Note that an ENOMEM error is charged twice */
390   - if (rc == -ENOMEM) {
  390 + if (rc == -ENOMEM)
391 391 UDP6_INC_STATS_BH(sock_net(sk),
392 392 UDP_MIB_RCVBUFERRORS, is_udplite);
393   - atomic_inc(&sk->sk_drops);
394   - }
395 393 goto drop;
396 394 }
397 395  
net/phonet/datagram.c
... ... @@ -159,11 +159,9 @@
159 159 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
160 160 {
161 161 int err = sock_queue_rcv_skb(sk, skb);
162   - if (err < 0) {
  162 +
  163 + if (err < 0)
163 164 kfree_skb(skb);
164   - if (err == -ENOMEM)
165   - atomic_inc(&sk->sk_drops);
166   - }
167 165 return err ? NET_RX_DROP : NET_RX_SUCCESS;
168 166 }
169 167  
net/phonet/pep.c
... ... @@ -360,8 +360,6 @@
360 360 err = sock_queue_rcv_skb(sk, skb);
361 361 if (!err)
362 362 return 0;
363   - if (err == -ENOMEM)
364   - atomic_inc(&sk->sk_drops);
365 363 break;
366 364 }
367 365
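
The commit message's point that sock_queue_rcv_skb() also reads sk_drops refers to the per-skb drop count it records for the SO_RXQ_OVFL ancillary message added around the same time. A userspace sketch, assuming a kernel and libc that expose SO_RXQ_OVFL, of how the consolidated counter becomes visible to an application:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Return the sk_drops snapshot attached to the last datagram, or 0 if the
 * kernel attached none. Enable delivery of the count first with:
 *   int on = 1;
 *   setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &on, sizeof(on));
 */
static uint32_t recv_with_dropcount(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	uint32_t drops = 0;

	if (recvmsg(fd, &msg, 0) < 0)
		return 0;

	/* The kernel attaches the drop count as a SOL_SOCKET/SO_RXQ_OVFL cmsg. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_RXQ_OVFL)
			memcpy(&drops, CMSG_DATA(cmsg), sizeof(drops));

	return drops;
}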