commit 288efe8606b62d0753ba6722b36ef241877251fd
Author:    Eric Dumazet <edumazet@google.com>
Committer: David S. Miller <davem@davemloft.net>
Parent:    7976a11b30

net: annotate lockless accesses to sk->sk_ack_backlog

sk->sk_ack_backlog can be read without any lock being held.
We need to use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing
and/or potential KCSAN warnings.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
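
The pattern is small enough to demonstrate outside the kernel. The sketch
below is a self-contained userspace program: its READ_ONCE()/WRITE_ONCE()
macros are simplified stand-ins for the kernel's real ones (defined via
<linux/compiler.h> at the time of this commit), and struct fake_sock,
acceptq_added() and acceptq_is_full() are hypothetical reductions of the
code changed in this patch. The volatile cast is what forces the compiler
to emit exactly one full-width load or store instead of tearing, refetching
or caching the value.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE().
 * The volatile cast makes the compiler perform exactly one load or
 * store of the whole word, which prevents load/store tearing on
 * naturally aligned scalars.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

/* Hypothetical cut-down socket with only the fields used here. */
struct fake_sock {
	unsigned int sk_ack_backlog;
	unsigned int sk_max_ack_backlog;
};

/* Writer side: mark the store so lockless readers never see a torn value. */
static void acceptq_added(struct fake_sock *sk)
{
	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}

/* Reader side: e.g. a diag/procfs path that takes no socket lock. */
static int acceptq_is_full(const struct fake_sock *sk)
{
	return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
}

int main(void)
{
	struct fake_sock sk = { .sk_ack_backlog = 0, .sk_max_ack_backlog = 1 };

	acceptq_added(&sk);
	acceptq_added(&sk);
	printf("backlog=%u full=%d\n", READ_ONCE(sk.sk_ack_backlog),
	       acceptq_is_full(&sk));
	return 0;
}

Note that even a store performed under a lock needs WRITE_ONCE() once
lockless readers exist, since the lock only serializes the writers; KCSAN
(CONFIG_KCSAN) flags the unmarked half of such a race either way.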

Showing 7 changed files with 9 additions and 9 deletions

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -859,17 +859,17 @@
 
 static inline void sk_acceptq_removed(struct sock *sk)
 {
-	sk->sk_ack_backlog--;
+	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
 }
 
 static inline void sk_acceptq_added(struct sock *sk)
 {
-	sk->sk_ack_backlog++;
+	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+	return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
 }
 
 /*
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3225,7 +3225,7 @@
 		 * tcpi_unacked -> Number of children ready for accept()
 		 * tcpi_sacked -> max backlog
 		 */
-		info->tcpi_unacked = sk->sk_ack_backlog;
+		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
 		info->tcpi_sacked = sk->sk_max_ack_backlog;
 		return;
 	}
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@
 	struct tcp_info *info = _info;
 
 	if (inet_sk_state_load(sk) == TCP_LISTEN) {
-		r->idiag_rqueue = sk->sk_ack_backlog;
+		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
 		const struct tcp_sock *tp = tcp_sk(sk);
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2451,7 +2451,7 @@
 
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
-		rx_queue = sk->sk_ack_backlog;
+		rx_queue = READ_ONCE(sk->sk_ack_backlog);
 	else
 		/* Because we don't lock the socket,
 		 * we might find a transient negative value.
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1891,7 +1891,7 @@
 
 	state = inet_sk_state_load(sp);
 	if (state == TCP_LISTEN)
-		rx_queue = sp->sk_ack_backlog;
+		rx_queue = READ_ONCE(sp->sk_ack_backlog);
 	else
 		/* Because we don't lock the socket,
 		 * we might find a transient negative value.
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -521,7 +521,7 @@
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_ack_backlog;
+	dst->value = READ_ONCE(sk->sk_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_max_ack_bl)
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -425,7 +425,7 @@
 		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
 		r->idiag_wqueue = infox->asoc->sndbuf_used;
 	} else {
-		r->idiag_rqueue = sk->sk_ack_backlog;
+		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	}
 	if (infox->sctpinfo)