net/rxrpc/peer_event.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
  
  #include <linux/module.h>
  #include <linux/net.h>
  #include <linux/skbuff.h>
  #include <linux/errqueue.h>
  #include <linux/udp.h>
  #include <linux/in.h>
  #include <linux/in6.h>
  #include <linux/icmp.h>
  #include <net/sock.h>
  #include <net/af_rxrpc.h>
  #include <net/ip.h>
  #include "ar-internal.h"
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 *
 * Builds a sockaddr_rxrpc in *srx from the error-queue skb's extended error
 * record and then looks the peer up by that address under RCU.  Returns the
 * peer or NULL; the caller must be in an RCU read-side section and take its
 * own reference if it wants to keep the peer (see rxrpc_error_report()).
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	/* Start from the local endpoint's transport parameters and then
	 * overwrite the address with the one named in the error report.
	 */
	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			/* addr_offset locates the offending IPv4 address
			 * within the returned network header.
			 */
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			/* +12 skips to the last 4 bytes of the 16-byte IPv6
			 * address - presumably the embedded IPv4 address of a
			 * v4-mapped address (NOTE(review): confirm).
			 */
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			/* No ICMP origin: fall back to the packet's source. */
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			/* ICMPv4 report on a v6 socket: rewrite the address
			 * as AF_INET so the lookup matches the v4 peer.
			 */
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			/* No ICMP origin: fall back to the packet's source. */
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		/* The local endpoint can only be v4 or v6. */
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}
  
  /*
1a70c05ba   David Howells   rxrpc: Break MTU ...
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
   * Handle an MTU/fragmentation problem.
   */
  static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
  {
  	u32 mtu = serr->ee.ee_info;
  
  	_net("Rx ICMP Fragmentation Needed (%d)", mtu);
  
  	/* wind down the local interface MTU */
  	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
  		peer->if_mtu = mtu;
  		_net("I/F MTU %u", mtu);
  	}
  
  	if (mtu == 0) {
  		/* they didn't give us a size, estimate one */
  		mtu = peer->if_mtu;
  		if (mtu > 1500) {
  			mtu >>= 1;
  			if (mtu < 1500)
  				mtu = 1500;
  		} else {
  			mtu -= 100;
  			if (mtu < peer->hdrsize)
  				mtu = peer->hdrsize + 4;
  		}
  	}
  
  	if (mtu < peer->mtu) {
  		spin_lock_bh(&peer->lock);
  		peer->mtu = mtu;
  		peer->maxdata = peer->mtu - peer->hdrsize;
  		spin_unlock_bh(&peer->lock);
  		_net("Net MTU %u (maxdata %u)",
  		     peer->mtu, peer->maxdata);
  	}
  }
  
/*
 * Handle an error received on the local endpoint.
 *
 * Installed as the transport socket's sk_error_report callback.  Dequeues
 * one skb from the socket error queue, maps it to a peer, and either adjusts
 * the peer's MTU (for ICMP Fragmentation Needed) or records and distributes
 * the error to the calls lodged against that peer.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	/* The local endpoint hangs off sk_user_data under RCU; it may
	 * already have been cleared if the socket is being torn down.
	 */
	rcu_read_lock();
	local = rcu_dereference_sk_user_data(sk);
	if (unlikely(!local)) {
		rcu_read_unlock();
		return;
	}
	_enter("%p{%d}", sk, local->debug_id);

	/* Clear the outstanding error value on the socket so that it doesn't
	 * cause kernel_sendmsg() to return it later.
	 */
	sock_error(sk);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		rcu_read_unlock();
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_received);
	serr = SKB_EXT_ERR(skb);
	/* Zero-length timestamping messages carry no error to process. */
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
	}

	/* Look up the peer under RCU and pin it with a reference before
	 * leaving the read-side section.
	 */
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	/* Fragmentation Needed is handled as an MTU adjustment rather than a
	 * call-terminating error.
	 */
	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	rxrpc_put_peer(peer);

	_leave("");
}
  
/*
 * Map an error report to error codes on the peer record.
 *
 * Classifies the extended error record as a network-level or local error
 * (RXRPC_CALL_NETWORK_ERROR vs RXRPC_CALL_LOCAL_ERROR), logs the specific
 * ICMP cause for debugging, then distributes ee_errno to all calls lodged
 * against the peer.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		/* The inner switches only select the debug message; the
		 * error delivered to calls is ee_errno in every case.
		 */
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		/* Errors raised locally rather than by the network. */
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		/* ICMP6 and unknown origins are still treated as network
		 * errors, just without a specific log message.
		 */
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}
  
  /*
f33443031   David Howells   rxrpc: Fix error ...
269
   * Distribute an error that occurred on a peer.
f66d74901   David Howells   rxrpc: Use the pe...
270
   */
f33443031   David Howells   rxrpc: Fix error ...
271
272
  static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
  				   enum rxrpc_call_completion compl)
f66d74901   David Howells   rxrpc: Use the pe...
273
  {
f66d74901   David Howells   rxrpc: Use the pe...
274
  	struct rxrpc_call *call;
17926a793   David Howells   [AF_RXRPC]: Provi...
275

f33443031   David Howells   rxrpc: Fix error ...
276
  	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
e34d4234b   David Howells   rxrpc: Trace rxrp...
277
  		rxrpc_see_call(call);
f33443031   David Howells   rxrpc: Fix error ...
278
279
  		if (call->state < RXRPC_CALL_COMPLETE &&
  		    rxrpc_set_call_completion(call, compl, 0, -error))
248f219cb   David Howells   rxrpc: Rewrite th...
280
  			rxrpc_notify_socket(call);
17926a793   David Howells   [AF_RXRPC]: Provi...
281
  	}
17926a793   David Howells   [AF_RXRPC]: Provi...
282
  }
cf1a6474f   David Howells   rxrpc: Add per-pe...
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
  
/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 *
 * Computes the RTT from the send/response timestamps, inserts it into the
 * peer's fixed-size ring of samples under rtt_input_lock, then publishes a
 * fresh average to peer->rtt outside the lock.  Negative RTTs (clock skew)
 * are discarded.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	/* Cursor wraps; RXRPC_RTT_CACHE_SIZE is assumed to be a power of
	 * two for the mask to be valid.
	 */
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		/* do_div() is required for 64-bit division on 32-bit
		 * architectures; usage is at least 1 here.
		 */
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}
ace45bec6   David Howells   rxrpc: Fix firewa...
324
325
  
/*
 * Perform keep-alive pings.
 *
 * Drains the collector list assembled by the keepalive worker: each peer is
 * either sent a keepalive (if its last transmission is stale) or simply
 * rescheduled, then re-filed into the keepalive bucket matching its next
 * due time.  The peer hash lock is dropped around the actual transmission
 * and retaken to re-link the peer.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		/* Skip peers that are already being destroyed. */
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		/* Only ping through a local endpoint that is still in use;
		 * otherwise the peer drops out of the keepalive cycle.
		 */
		if (__rxrpc_use_local(peer->local)) {
			spin_unlock_bh(&rxnet->peer_hash_lock);

			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

			/* Due now (or implausibly far out): send a ping and
			 * reschedule a full keepalive period ahead.
			 */
			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since last we
			 * examined it so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			rxrpc_unuse_local(peer->local);
		}
		/* Drop the ref taken above; _locked variant because the peer
		 * hash lock is held at this point on both paths.
		 */
		rxrpc_put_peer_locked(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}
ace45bec6   David Howells   rxrpc: Fix firewa...
377

330bdcfad   David Howells   rxrpc: Fix the ke...
378
379
380
381
382
383
384
385
386
387
388
/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 *
 * Workqueue handler: gathers all peers lodged in expired time buckets (plus
 * newly added peers) onto a temporary list, dispatches keepalives for them,
 * then re-arms the keepalive timer for the next occupied timeslot.  Buckets
 * are one-second wide and indexed by a wrapping 8-bit cursor.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	/* Nothing to do if the namespace is going away. */
	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	/* Sweep at most one full revolution of the bucket ring; the (s8)
	 * cast makes the wrapping cursor comparison well-defined.
	 */
	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	/* timer_reduce() only brings the expiry earlier, never later. */
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}