Commit 14afee4b6092fde451ee17604e5f5c89da33e71e
Committed by: David S. Miller
Parent: 2638595afc
Exists in smarc_imx_lf-5.15.y and 20 other branches
net: convert sock.sk_wmem_alloc from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows us to avoid accidental reference-counter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
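The conversion below is largely mechanical: atomic_set/atomic_read/atomic_add/atomic_inc become their refcount_* counterparts, atomic_dec_and_test becomes refcount_dec_and_test, and bare atomic_sub() calls become WARN_ON(refcount_sub_and_test(...)) so that a drop to zero on a path that must never release the last reference is reported instead of silently wrapping negative. As a rough illustration of what the refcount API buys, here is a toy userspace model of the saturation behaviour; the real implementation lives in include/linux/refcount.h, and everything below is a simplified stand-in, not kernel code:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy model of refcount_t: instead of wrapping on overflow or
 * resurrecting from zero, the counter saturates at UINT_MAX, turning
 * a potential use-after-free into a bounded memory leak. */
typedef struct { atomic_uint refs; } refcount_t;

static void refcount_set(refcount_t *r, unsigned int n) { atomic_store(&r->refs, n); }
static unsigned int refcount_read(refcount_t *r) { return atomic_load(&r->refs); }

static void refcount_add(unsigned int i, refcount_t *r)
{
    unsigned int old = atomic_fetch_add(&r->refs, i);

    if (old == 0 || old + i < old)            /* add-from-zero or overflow */
        atomic_store(&r->refs, UINT_MAX);     /* saturate: leak, never free early */
}

/* Returns true when the count reaches zero, like refcount_sub_and_test(). */
static int refcount_sub_and_test(unsigned int i, refcount_t *r)
{
    return atomic_fetch_sub(&r->refs, i) == i;
}

int main(void)
{
    refcount_t wmem;

    refcount_set(&wmem, 1);       /* base reference, as sk_alloc() does below */
    refcount_add(512, &wmem);     /* charge one skb's truesize */

    /* Releasing the skb charge must not release the base reference. */
    if (refcount_sub_and_test(512, &wmem))
        printf("bug: base reference released early\n");
    printf("wmem_alloc now reads %u\n", refcount_read(&wmem));
    return 0;
}

In the hunks that follow, sites that previously did a bare atomic_sub() on a path that must never drop the last reference assert exactly this property via WARN_ON(refcount_sub_and_test(...)).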
Showing 37 changed files with 74 additions and 85 deletions
- drivers/atm/fore200e.c
- drivers/atm/he.c
- drivers/atm/idt77252.c
- include/linux/atmdev.h
- include/net/sock.h
- net/atm/br2684.c
- net/atm/clip.c
- net/atm/common.c
- net/atm/lec.c
- net/atm/mpc.c
- net/atm/pppoatm.c
- net/atm/raw.c
- net/atm/signaling.c
- net/caif/caif_socket.c
- net/core/datagram.c
- net/core/skbuff.c
- net/core/sock.c
- net/ipv4/af_inet.c
- net/ipv4/esp4.c
- net/ipv4/ip_output.c
- net/ipv4/tcp.c
- net/ipv4/tcp_offload.c
- net/ipv4/tcp_output.c
- net/ipv6/esp6.c
- net/ipv6/ip6_output.c
- net/kcm/kcmproc.c
- net/key/af_key.c
- net/netlink/af_netlink.c
- net/packet/af_packet.c
- net/phonet/socket.c
- net/rds/tcp_send.c
- net/rxrpc/af_rxrpc.c
- net/sched/sch_atm.c
- net/sctp/output.c
- net/sctp/proc.c
- net/sctp/socket.c
- net/unix/af_unix.c
drivers/atm/fore200e.c
@@ -924,12 +924,7 @@
     else {
         dev_kfree_skb_any(entry->skb);
     }
-#if 1
-    /* race fixed by the above incarnation mechanism, but... */
-    if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
-        atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
-    }
-#endif
+
     /* check error condition */
     if (*entry->status & STATUS_ERROR)
         atomic_inc(&vcc->stats->tx_err);
 
@@ -1130,13 +1125,9 @@
         return -ENOMEM;
     }
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     vcc->push(vcc, skb);
     atomic_inc(&vcc->stats->rx);
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     return 0;
 }
 
@@ -1572,7 +1563,6 @@
     unsigned long flags;
 
     ASSERT(vcc);
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
     ASSERT(fore200e);
     ASSERT(fore200e_vcc);
 
drivers/atm/he.c
@@ -2395,7 +2395,7 @@
      * TBRQ, the host issues the close command to the adapter.
      */
 
-    while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+    while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
            (retry < MAX_RETRY)) {
         msleep(sleep);
         if (sleep < 250)
drivers/atm/idt77252.c
@@ -724,7 +724,7 @@
     struct sock *sk = sk_atm(vcc);
 
     vc->estimator->cells += (skb->len + 47) / 48;
-    if (atomic_read(&sk->sk_wmem_alloc) >
+    if (refcount_read(&sk->sk_wmem_alloc) >
         (sk->sk_sndbuf >> 1)) {
         u32 cps = vc->estimator->maxcps;
 
@@ -2009,7 +2009,7 @@
         atomic_inc(&vcc->stats->tx_err);
         return -ENOMEM;
     }
-    atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 
     skb_put_data(skb, cell, 52);
 
include/linux/atmdev.h
@@ -254,7 +254,7 @@
 
 static inline int atm_may_send(struct atm_vcc *vcc, unsigned int size)
 {
-    return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+    return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
            sk_atm(vcc)->sk_sndbuf;
 }
 
include/net/sock.h
@@ -390,7 +390,7 @@
 
     /* ===== cache line for TX ===== */
     int                 sk_wmem_queued;
-    atomic_t            sk_wmem_alloc;
+    refcount_t          sk_wmem_alloc;
     unsigned long       sk_tsq_flags;
     struct sk_buff      *sk_send_head;
     struct sk_buff_head sk_write_queue;
 
@@ -1911,7 +1911,7 @@
  */
 static inline int sk_wmem_alloc_get(const struct sock *sk)
 {
-    return atomic_read(&sk->sk_wmem_alloc) - 1;
+    return refcount_read(&sk->sk_wmem_alloc) - 1;
 }
 
 /**
 
@@ -2055,7 +2055,7 @@
     int amt = 0;
 
     if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-        amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+        amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
         if (amt < 0)
             amt = 0;
     }
 
@@ -2136,7 +2136,7 @@
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-    return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+    return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
 
 static inline gfp_t gfp_any(void)
net/atm/br2684.c
@@ -252,7 +252,7 @@
 
     ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-    atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
     ATM_SKB(skb)->atm_options = atmvcc->atm_options;
     dev->stats.tx_packets++;
     dev->stats.tx_bytes += skb->len;
net/atm/clip.c
@@ -381,7 +381,7 @@
         memcpy(here, llc_oui, sizeof(llc_oui));
         ((__be16 *) here)[3] = skb->protocol;
     }
-    atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
     ATM_SKB(skb)->atm_options = vcc->atm_options;
     entry->vccs->last_use = jiffies;
     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
net/atm/common.c
@@ -80,9 +80,9 @@
         printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
                __func__, atomic_read(&sk->sk_rmem_alloc));
 
-    if (atomic_read(&sk->sk_wmem_alloc))
+    if (refcount_read(&sk->sk_wmem_alloc))
         printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
-               __func__, atomic_read(&sk->sk_wmem_alloc));
+               __func__, refcount_read(&sk->sk_wmem_alloc));
 }
 
 static void vcc_def_wakeup(struct sock *sk)
 
@@ -101,7 +101,7 @@
     struct atm_vcc *vcc = atm_sk(sk);
 
     return (vcc->qos.txtp.max_sdu +
-            atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
+            refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
 }
 
 static void vcc_write_space(struct sock *sk)
 
@@ -156,7 +156,7 @@
     memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
     memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
     vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-    atomic_set(&sk->sk_wmem_alloc, 1);
+    refcount_set(&sk->sk_wmem_alloc, 1);
     atomic_set(&sk->sk_rmem_alloc, 0);
     vcc->push = NULL;
     vcc->pop = NULL;
 
@@ -630,7 +630,7 @@
         goto out;
     }
     pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-    atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
     skb->dev = NULL; /* for paths shared with net_device interfaces */
     ATM_SKB(skb)->atm_options = vcc->atm_options;
net/atm/lec.c
@@ -181,7 +181,7 @@
     ATM_SKB(skb)->vcc = vcc;
     ATM_SKB(skb)->atm_options = vcc->atm_options;
 
-    atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
     if (vcc->send(vcc, skb) < 0) {
         dev->stats.tx_dropped++;
         return;
 
@@ -345,7 +345,7 @@
     int i;
     char *tmp; /* FIXME */
 
-    atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
     mesg = (struct atmlec_msg *)skb->data;
     tmp = skb->data;
     tmp += sizeof(struct atmlec_msg);
net/atm/mpc.c
@@ -555,7 +555,7 @@
                sizeof(struct llc_snap_hdr));
     }
 
-    atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
     ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
     entry->shortcut->send(entry->shortcut, skb);
     entry->packets_fwded++;
 
@@ -911,7 +911,7 @@
 
     struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
     struct k_message *mesg = (struct k_message *)skb->data;
-    atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 
     if (mpc == NULL) {
         pr_info("no mpc found\n");
net/atm/pppoatm.c
@@ -350,7 +350,7 @@
         return 1;
     }
 
-    atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
     ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
     pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
              skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
net/atm/raw.c
@@ -35,7 +35,7 @@
 
     pr_debug("(%d) %d -= %d\n",
              vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-    atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
     dev_kfree_skb_any(skb);
     sk->sk_write_space(sk);
 }
net/atm/signaling.c
@@ -67,7 +67,7 @@
     struct sock *sk;
 
     msg = (struct atmsvc_msg *) skb->data;
-    atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
     vcc = *(struct atm_vcc **) &msg->vcc;
     pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
     sk = sk_atm(vcc);
net/caif/caif_socket.c
@@ -1013,7 +1013,7 @@
 static void caif_sock_destructor(struct sock *sk)
 {
     struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-    caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+    caif_assert(!refcount_read(&sk->sk_wmem_alloc));
     caif_assert(sk_unhashed(sk));
     caif_assert(!sk->sk_socket);
     if (!sock_flag(sk, SOCK_DEAD)) {
net/core/datagram.c
@@ -614,7 +614,7 @@
     skb->data_len += copied;
     skb->len += copied;
     skb->truesize += truesize;
-    atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+    refcount_add(truesize, &skb->sk->sk_wmem_alloc);
     while (copied) {
         int size = min_t(int, copied, PAGE_SIZE - start);
         skb_fill_page_desc(skb, frag++, pages[n], start, size);
net/core/skbuff.c
net/core/sock.c
@@ -1528,7 +1528,7 @@
     if (likely(sk->sk_net_refcnt))
         get_net(net);
     sock_net_set(sk, net);
-    atomic_set(&sk->sk_wmem_alloc, 1);
+    refcount_set(&sk->sk_wmem_alloc, 1);
 
     mem_cgroup_sk_alloc(sk);
     cgroup_sk_alloc(&sk->sk_cgrp_data);
 
@@ -1552,7 +1552,7 @@
         sk->sk_destruct(sk);
 
     filter = rcu_dereference_check(sk->sk_filter,
-                                   atomic_read(&sk->sk_wmem_alloc) == 0);
+                                   refcount_read(&sk->sk_wmem_alloc) == 0);
     if (filter) {
         sk_filter_uncharge(sk, filter);
         RCU_INIT_POINTER(sk->sk_filter, NULL);
 
@@ -1602,7 +1602,7 @@
      * some packets are still in some tx queue.
      * If not null, sock_wfree() will call __sk_free(sk) later
      */
-    if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+    if (refcount_dec_and_test(&sk->sk_wmem_alloc))
         __sk_free(sk);
 }
 EXPORT_SYMBOL(sk_free);
 
@@ -1659,7 +1659,7 @@
     /*
      * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
      */
-    atomic_set(&newsk->sk_wmem_alloc, 1);
+    refcount_set(&newsk->sk_wmem_alloc, 1);
     atomic_set(&newsk->sk_omem_alloc, 0);
     sk_init_common(newsk);
 
@@ -1787,7 +1787,7 @@
         /* Keep a reference on sk_wmem_alloc, this will be released
          * after sk_write_space() call
         */
-        atomic_sub(len - 1, &sk->sk_wmem_alloc);
+        WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
         sk->sk_write_space(sk);
         len = 1;
     }
 
@@ -1795,7 +1795,7 @@
      * if sk_wmem_alloc reaches 0, we must finish what sk_free()
      * could not do because of in-flight packets
      */
-    if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
+    if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
         __sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
 
@@ -1807,7 +1807,7 @@
 {
     struct sock *sk = skb->sk;
 
-    if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+    if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
         __sk_free(sk);
 }
 
@@ -1829,7 +1829,7 @@
      * is enough to guarantee sk_free() wont free this sock until
      * all in-flight packets are completed
      */
-    atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
@@ -1852,7 +1852,7 @@
         struct sock *sk = skb->sk;
 
         if (atomic_inc_not_zero(&sk->sk_refcnt)) {
-            atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+            WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
             skb->destructor = sock_efree;
         }
     } else {
 
@@ -1912,7 +1912,7 @@
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                              gfp_t priority)
 {
-    if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+    if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
         struct sk_buff *skb = alloc_skb(size, priority);
         if (skb) {
             skb_set_owner_w(skb, sk);
 
@@ -1987,7 +1987,7 @@
             break;
         set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
         prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+        if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
             break;
         if (sk->sk_shutdown & SEND_SHUTDOWN)
             break;
 
@@ -2310,7 +2310,7 @@
     if (sk->sk_type == SOCK_STREAM) {
         if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
             return 1;
-    } else if (atomic_read(&sk->sk_wmem_alloc) <
+    } else if (refcount_read(&sk->sk_wmem_alloc) <
                prot->sysctl_wmem[0])
             return 1;
     }
 
@@ -2577,7 +2577,7 @@
     /* Do not wake up a writer until he can make "significant"
      * progress. --DaveM
      */
-    if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+    if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
         wq = rcu_dereference(sk->sk_wq);
         if (skwq_has_sleeper(wq))
             wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
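The net/core/sock.c hunks above spell out the lifecycle that makes sk_wmem_alloc a genuine reference count: it is biased to 1 at socket creation (which is also why sk_wmem_alloc_get() in include/net/sock.h subtracts one), every skb charged via skb_set_owner_w() adds its truesize, sock_wfree() subtracts it back and frees the socket if it held the last reference, and sk_free() merely drops the base reference. A condensed, hypothetical view of that choreography, reusing the toy refcount_t helpers sketched earlier; the toy_* names mirror functions in this diff, with bodies reduced to the refcounting steps only:

#include <stdio.h>
#include <stdlib.h>

struct sock { refcount_t sk_wmem_alloc; };

static void __sk_free(struct sock *sk)
{
    printf("socket destroyed\n");
    free(sk);
}

/* sk_alloc(): the counter starts at 1, a base reference held for the
 * socket's lifetime so in-flight packets can pin it. */
static struct sock *toy_sk_alloc(void)
{
    struct sock *sk = malloc(sizeof(*sk));

    refcount_set(&sk->sk_wmem_alloc, 1);
    return sk;
}

/* skb_set_owner_w(): each queued packet charges its truesize. */
static void toy_skb_set_owner_w(struct sock *sk, unsigned int truesize)
{
    refcount_add(truesize, &sk->sk_wmem_alloc);
}

/* sock_wfree(): the last TX completion may be what finally frees the sock. */
static void toy_sock_wfree(struct sock *sk, unsigned int truesize)
{
    if (refcount_sub_and_test(truesize, &sk->sk_wmem_alloc))
        __sk_free(sk);
}

/* sk_free(): drop the base reference; if packets are still queued,
 * destruction is deferred to the final sock_wfree(). */
static void toy_sk_free(struct sock *sk)
{
    if (refcount_sub_and_test(1, &sk->sk_wmem_alloc))
        __sk_free(sk);
}

int main(void)
{
    struct sock *sk = toy_sk_alloc();

    toy_skb_set_owner_w(sk, 512);   /* packet sits in a qdisc */
    toy_sk_free(sk);                /* close() with packet in flight: no free yet */
    toy_sock_wfree(sk, 512);        /* TX completion performs the real free */
    return 0;
}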
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/ip_output.c
@@ -1037,7 +1037,7 @@
                         (flags & MSG_DONTWAIT), &err);
         } else {
             skb = NULL;
-            if (atomic_read(&sk->sk_wmem_alloc) <=
+            if (refcount_read(&sk->sk_wmem_alloc) <=
                 2 * sk->sk_sndbuf)
                 skb = sock_wmalloc(sk,
                                    alloclen + hh_len + 15, 1,
 
@@ -1145,7 +1145,7 @@
             skb->len += copy;
             skb->data_len += copy;
             skb->truesize += copy;
-            atomic_add(copy, &sk->sk_wmem_alloc);
+            refcount_add(copy, &sk->sk_wmem_alloc);
         }
         offset += copy;
         length -= copy;
 
@@ -1369,7 +1369,7 @@
         skb->len += len;
         skb->data_len += len;
         skb->truesize += len;
-        atomic_add(len, &sk->sk_wmem_alloc);
+        refcount_add(len, &sk->sk_wmem_alloc);
         offset += len;
         size -= len;
     }
net/ipv4/tcp.c
@@ -664,7 +664,7 @@
     return skb->len < size_goal &&
            sysctl_tcp_autocorking &&
            skb != tcp_write_queue_head(sk) &&
-           atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+           refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
 static void tcp_push(struct sock *sk, int flags, int mss_now,
 
@@ -692,7 +692,7 @@
         /* It is possible TX completion already happened
          * before we set TSQ_THROTTLED.
          */
-        if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+        if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
             return;
     }
 
net/ipv4/tcp_offload.c
@@ -152,7 +152,7 @@
         swap(gso_skb->sk, skb->sk);
         swap(gso_skb->destructor, skb->destructor);
         sum_truesize += skb->truesize;
-        atomic_add(sum_truesize - gso_skb->truesize,
+        refcount_add(sum_truesize - gso_skb->truesize,
                    &skb->sk->sk_wmem_alloc);
     }
 
net/ipv4/tcp_output.c
@@ -861,12 +861,11 @@
     struct sock *sk = skb->sk;
     struct tcp_sock *tp = tcp_sk(sk);
     unsigned long flags, nval, oval;
-    int wmem;
 
     /* Keep one reference on sk_wmem_alloc.
      * Will be released by sk_free() from here or tcp_tasklet_func()
      */
-    wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
 
     /* If this softirq is serviced by ksoftirqd, we are likely under stress.
      * Wait until our queues (qdisc + devices) are drained.
 
@@ -875,7 +874,7 @@
      * - chance for incoming ACK (processed by another cpu maybe)
      *   to migrate this flow (skb->ooo_okay will be eventually set)
      */
-    if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+    if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
         goto out;
 
     for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 
@@ -925,7 +924,7 @@
         if (nval != oval)
             continue;
 
-        if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+        if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
             break;
         /* queue this socket to tasklet queue */
         tsq = this_cpu_ptr(&tsq_tasklet);
 
@@ -1045,7 +1044,7 @@
     skb->sk = sk;
     skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
     skb_set_hash_from_sk(skb, sk);
-    atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+    refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
     skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
 
@@ -2176,7 +2175,7 @@
     limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
     limit <<= factor;
 
-    if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+    if (refcount_read(&sk->sk_wmem_alloc) > limit) {
         /* Always send the 1st or 2nd skb in write queue.
          * No need to wait for TX completion to call us back,
          * after softirq/tasklet schedule.
 
@@ -2192,7 +2191,7 @@
          * test again the condition.
          */
         smp_mb__after_atomic();
-        if (atomic_read(&sk->sk_wmem_alloc) > limit)
+        if (refcount_read(&sk->sk_wmem_alloc) > limit)
             return true;
     }
     return false;
 
@@ -2812,7 +2811,7 @@
     /* Do not sent more than we queued. 1/4 is reserved for possible
      * copying overhead: fragmentation, tunneling, mangling etc.
      */
-    if (atomic_read(&sk->sk_wmem_alloc) >
+    if (refcount_read(&sk->sk_wmem_alloc) >
         min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
               sk->sk_sndbuf))
         return -EAGAIN;
net/ipv6/esp6.c
net/ipv6/ip6_output.c
@@ -1472,7 +1472,7 @@
                         (flags & MSG_DONTWAIT), &err);
         } else {
             skb = NULL;
-            if (atomic_read(&sk->sk_wmem_alloc) <=
+            if (refcount_read(&sk->sk_wmem_alloc) <=
                 2 * sk->sk_sndbuf)
                 skb = sock_wmalloc(sk,
                                    alloclen + hh_len, 1,
 
@@ -1581,7 +1581,7 @@
             skb->len += copy;
             skb->data_len += copy;
             skb->truesize += copy;
-            atomic_add(copy, &sk->sk_wmem_alloc);
+            refcount_add(copy, &sk->sk_wmem_alloc);
         }
         offset += copy;
         length -= copy;
net/kcm/kcmproc.c
@@ -162,7 +162,7 @@
                psock->sk->sk_receive_queue.qlen,
                atomic_read(&psock->sk->sk_rmem_alloc),
                psock->sk->sk_write_queue.qlen,
-               atomic_read(&psock->sk->sk_wmem_alloc));
+               refcount_read(&psock->sk->sk_wmem_alloc));
 
     if (psock->done)
         seq_puts(seq, "Done ");
net/key/af_key.c
net/netlink/af_netlink.c
net/packet/af_packet.c
@@ -1317,7 +1317,7 @@
     skb_queue_purge(&sk->sk_error_queue);
 
     WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-    WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+    WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
     if (!sock_flag(sk, SOCK_DEAD)) {
         pr_err("Attempt to release alive packet socket: %p\n", sk);
 
@@ -2523,7 +2523,7 @@
     skb->data_len = to_write;
     skb->len += to_write;
     skb->truesize += to_write;
-    atomic_add(to_write, &po->sk.sk_wmem_alloc);
+    refcount_add(to_write, &po->sk.sk_wmem_alloc);
 
     while (likely(to_write)) {
         nr_frags = skb_shinfo(skb)->nr_frags;
net/phonet/socket.c
@@ -360,7 +360,7 @@
         return POLLHUP;
 
     if (sk->sk_state == TCP_ESTABLISHED &&
-        atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+        refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
         atomic_read(&pn->tx_credits))
         mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
net/rds/tcp_send.c
@@ -202,7 +202,7 @@
     tc->t_last_seen_una = rds_tcp_snd_una(tc);
     rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-    if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+    if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
         queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 
 out:
net/rxrpc/af_rxrpc.c
@@ -53,7 +53,7 @@
  */
 static inline int rxrpc_writable(struct sock *sk)
 {
-    return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+    return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
 }
 
 /*
 
@@ -730,7 +730,7 @@
 
     rxrpc_purge_queue(&sk->sk_receive_queue);
 
-    WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+    WARN_ON(refcount_read(&sk->sk_wmem_alloc));
     WARN_ON(!sk_unhashed(sk));
     WARN_ON(sk->sk_socket);
 
net/sched/sch_atm.c
@@ -498,7 +498,7 @@
         ATM_SKB(skb)->vcc = flow->vcc;
         memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
                flow->hdr_len);
-        atomic_add(skb->truesize,
+        refcount_add(skb->truesize,
                    &sk_atm(flow->vcc)->sk_wmem_alloc);
         /* atm.atm_options are already set by atm_tc_enqueue */
         flow->vcc->send(flow->vcc, skb);
net/sctp/output.c
@@ -402,7 +402,7 @@
      * therefore only reserve a single byte to keep socket around until
      * the packet has been transmitted.
      */
-    atomic_inc(&sk->sk_wmem_alloc);
+    refcount_inc(&sk->sk_wmem_alloc);
 }
 
 static int sctp_packet_pack(struct sctp_packet *packet,
net/sctp/proc.c
@@ -363,7 +363,7 @@
                assoc->stream.outcnt, assoc->max_retrans,
                assoc->init_retries, assoc->shutdown_retries,
                assoc->rtx_data_chunks,
-               atomic_read(&sk->sk_wmem_alloc),
+               refcount_read(&sk->sk_wmem_alloc),
                sk->sk_wmem_queued,
                sk->sk_sndbuf,
                sk->sk_rcvbuf);
net/sctp/socket.c
@@ -164,7 +164,7 @@
                 sizeof(struct sk_buff) +
                 sizeof(struct sctp_chunk);
 
-    atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+    refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
     sk->sk_wmem_queued += chunk->skb->truesize;
     sk_mem_charge(sk, chunk->skb->truesize);
 }
 
@@ -7684,7 +7684,7 @@
                 sizeof(struct sk_buff) +
                 sizeof(struct sctp_chunk);
 
-    atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+    WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
 
     /*
      * This undoes what is done via sctp_set_owner_w and sk_mem_charge
net/unix/af_unix.c
@@ -442,7 +442,7 @@
 static int unix_writable(const struct sock *sk)
 {
     return sk->sk_state != TCP_LISTEN &&
-           (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+           (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }
 
 static void unix_write_space(struct sock *sk)
 
@@ -487,7 +487,7 @@
 
     skb_queue_purge(&sk->sk_receive_queue);
 
-    WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+    WARN_ON(refcount_read(&sk->sk_wmem_alloc));
     WARN_ON(!sk_unhashed(sk));
     WARN_ON(sk->sk_socket);
     if (!sock_flag(sk, SOCK_DEAD)) {
 
@@ -2033,7 +2033,7 @@
     skb->len += size;
     skb->data_len += size;
     skb->truesize += size;
-    atomic_add(size, &sk->sk_wmem_alloc);
+    refcount_add(size, &sk->sk_wmem_alloc);
 
     if (newskb) {
         err = unix_scm_to_skb(&scm, skb, false);