Blame view

net/ipv4/inet_fragment.c 10.3 KB
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
  /*
   * inet fragments management
   *
   *		This program is free software; you can redistribute it and/or
   *		modify it under the terms of the GNU General Public License
   *		as published by the Free Software Foundation; either version
   *		2 of the License, or (at your option) any later version.
   *
   * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
   *				Started as consolidation of ipv4/ip_fragment.c,
   *				ipv6/reassembly. and ipv6 nf conntrack reassembly
   */
  
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <linux/module.h>
  #include <linux/timer.h>
  #include <linux/mm.h>
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
19
  #include <linux/random.h>
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
20
21
  #include <linux/skbuff.h>
  #include <linux/rtnetlink.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
22
  #include <linux/slab.h>
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
23

5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
24
  #include <net/sock.h>
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
25
  #include <net/inet_frag.h>
be991971d   Hannes Frederic Sowa   inet: generalize ...
26
  #include <net/inet_ecn.h>
b13d3cbfb   Florian Westphal   inet: frag: move ...
27
28
  #define INETFRAGS_EVICT_BUCKETS   128
  #define INETFRAGS_EVICT_MAX	  512
e3a57d18b   Florian Westphal   inet: frag: remov...
29
30
  /* don't rebuild inetfrag table with new secret more often than this */
  #define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
be991971d   Hannes Frederic Sowa   inet: generalize ...
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
  /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
   * Value : 0xff if frame should be dropped.
   *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
   *
   * The table is indexed by the bitwise OR of the IPFRAG_ECN_* flags seen
   * across all fragments of one datagram (4 flag bits -> 16 entries).
   * Entries not listed below default to 0 (keep tos as-is).
   */
  const u8 ip_frag_ecn_table[16] = {
  	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
  	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
  	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
  	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
  
  	/* invalid combinations : drop frame */
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
  	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
  };
  EXPORT_SYMBOL(ip_frag_ecn_table);
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
51

fb3cfe6e7   Florian Westphal   inet: frag: remov...
52
53
54
55
56
  /* Map a fragment queue to its bucket index using the family-supplied
   * hash callback, folded into the table size.
   */
  static unsigned int
  inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
  {
  	unsigned int h = f->hashfn(q);
  
  	return h & (INETFRAGS_HASHSZ - 1);
  }
e3a57d18b   Florian Westphal   inet: frag: remov...
57
58
59
60
61
62
63
  /* Rate-limit secret rebuilds: allow one only if at least
   * INETFRAGS_MIN_REBUILD_INTERVAL jiffies passed since the last one.
   */
  static bool inet_frag_may_rebuild(struct inet_frags *f)
  {
  	unsigned long earliest;
  
  	earliest = f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL;
  	return time_after(jiffies, earliest);
  }
  
  static void inet_frag_secret_rebuild(struct inet_frags *f)
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
64
  {
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
65
  	int i;
ab1c724f6   Florian Westphal   inet: frag: use s...
66
  	write_seqlock_bh(&f->rnd_seqlock);
e3a57d18b   Florian Westphal   inet: frag: remov...
67
68
69
  
  	if (!inet_frag_may_rebuild(f))
  		goto out;
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
70

321a3a99e   Pavel Emelyanov   [INET]: Consolida...
71
  	get_random_bytes(&f->rnd, sizeof(u32));
e3a57d18b   Florian Westphal   inet: frag: remov...
72

321a3a99e   Pavel Emelyanov   [INET]: Consolida...
73
  	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
74
  		struct inet_frag_bucket *hb;
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
75
  		struct inet_frag_queue *q;
b67bfe0d4   Sasha Levin   hlist: drop the n...
76
  		struct hlist_node *n;
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
77

19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
78
  		hb = &f->hash[i];
ab1c724f6   Florian Westphal   inet: frag: use s...
79
  		spin_lock(&hb->chain_lock);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
80
  		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
fb3cfe6e7   Florian Westphal   inet: frag: remov...
81
  			unsigned int hval = inet_frag_hashfn(f, q);
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
82
83
  
  			if (hval != i) {
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
84
  				struct inet_frag_bucket *hb_dest;
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
85
86
87
  				hlist_del(&q->list);
  
  				/* Relink to new hash chain. */
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
88
  				hb_dest = &f->hash[hval];
ab1c724f6   Florian Westphal   inet: frag: use s...
89
90
91
92
93
94
95
96
97
98
99
  
  				/* This is the only place where we take
  				 * another chain_lock while already holding
  				 * one.  As this will not run concurrently,
  				 * we cannot deadlock on hb_dest lock below, if its
  				 * already locked it will be released soon since
  				 * other caller cannot be waiting for hb lock
  				 * that we've taken above.
  				 */
  				spin_lock_nested(&hb_dest->chain_lock,
  						 SINGLE_DEPTH_NESTING);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
100
  				hlist_add_head(&q->list, &hb_dest->chain);
ab1c724f6   Florian Westphal   inet: frag: use s...
101
  				spin_unlock(&hb_dest->chain_lock);
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
102
103
  			}
  		}
ab1c724f6   Florian Westphal   inet: frag: use s...
104
  		spin_unlock(&hb->chain_lock);
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
105
  	}
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
106

e3a57d18b   Florian Westphal   inet: frag: remov...
107
108
109
  	f->rebuild = false;
  	f->last_rebuild_jiffies = jiffies;
  out:
ab1c724f6   Florian Westphal   inet: frag: use s...
110
  	write_sequnlock_bh(&f->rnd_seqlock);
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
111
  }
b13d3cbfb   Florian Westphal   inet: frag: move ...
112
113
114
115
116
117
118
119
120
121
122
123
124
  static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
  {
  	return q->net->low_thresh == 0 ||
  	       frag_mem_limit(q->net) >= q->net->low_thresh;
  }
  
  static unsigned int
  inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
  {
  	struct inet_frag_queue *fq;
  	struct hlist_node *n;
  	unsigned int evicted = 0;
  	HLIST_HEAD(expired);
b13d3cbfb   Florian Westphal   inet: frag: move ...
125
126
127
128
129
  	spin_lock(&hb->chain_lock);
  
  	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
  		if (!inet_fragq_should_evict(fq))
  			continue;
5719b296f   Florian Westphal   inet: frag: don't...
130
131
  		if (!del_timer(&fq->timer))
  			continue;
b13d3cbfb   Florian Westphal   inet: frag: move ...
132

d1fe19444   Florian Westphal   inet: frag: don't...
133
  		hlist_add_head(&fq->list_evictor, &expired);
b13d3cbfb   Florian Westphal   inet: frag: move ...
134
135
136
137
  		++evicted;
  	}
  
  	spin_unlock(&hb->chain_lock);
d1fe19444   Florian Westphal   inet: frag: don't...
138
  	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
b13d3cbfb   Florian Westphal   inet: frag: move ...
139
140
141
142
143
144
145
146
147
148
149
150
151
152
  		f->frag_expire((unsigned long) fq);
  
  	return evicted;
  }
  
  /* Background eviction worker: sweeps a bounded number of buckets per run,
   * remembers where it stopped, and triggers a secret rebuild if requested.
   */
  static void inet_frag_worker(struct work_struct *work)
  {
  	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
  	unsigned int bucket, evicted = 0;
  	struct inet_frags *f = container_of(work, struct inet_frags,
  					    frags_work);
  
  	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
  
  	local_bh_disable();
  
  	bucket = ACCESS_ONCE(f->next_bucket);
  	while (budget) {
  		--budget;
  		evicted += inet_evict_bucket(f, &f->hash[bucket]);
  		bucket = (bucket + 1) & (INETFRAGS_HASHSZ - 1);
  		/* Cap the work done in one invocation. */
  		if (evicted > INETFRAGS_EVICT_MAX)
  			break;
  	}
  
  	/* Resume from here on the next run. */
  	f->next_bucket = bucket;
  
  	local_bh_enable();
  
  	if (f->rebuild && inet_frag_may_rebuild(f))
  		inet_frag_secret_rebuild(f);
  }
  
  /* Kick the eviction worker unless a run is already queued. */
  static void inet_frag_schedule_worker(struct inet_frags *f)
  {
  	if (likely(work_pending(&f->frags_work)))
  		return;
  
  	schedule_work(&f->frags_work);
  }
d4ad4d22e   Nikolay Aleksandrov   inet: frags: use ...
173
  int inet_frags_init(struct inet_frags *f)
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
174
175
  {
  	int i;
b13d3cbfb   Florian Westphal   inet: frag: move ...
176
  	INIT_WORK(&f->frags_work, inet_frag_worker);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
177
178
  	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
  		struct inet_frag_bucket *hb = &f->hash[i];
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
179

19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
180
181
182
  		spin_lock_init(&hb->chain_lock);
  		INIT_HLIST_HEAD(&hb->chain);
  	}
ab1c724f6   Florian Westphal   inet: frag: use s...
183
184
  
  	seqlock_init(&f->rnd_seqlock);
e3a57d18b   Florian Westphal   inet: frag: remov...
185
  	f->last_rebuild_jiffies = 0;
d4ad4d22e   Nikolay Aleksandrov   inet: frags: use ...
186
187
188
189
190
191
  	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
  					    NULL);
  	if (!f->frags_cachep)
  		return -ENOMEM;
  
  	return 0;
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
192
193
194
195
196
  }
  EXPORT_SYMBOL(inet_frags_init);
  
  void inet_frags_fini(struct inet_frags *f)
  {
b13d3cbfb   Florian Westphal   inet: frag: move ...
197
  	cancel_work_sync(&f->frags_work);
d4ad4d22e   Nikolay Aleksandrov   inet: frags: use ...
198
  	kmem_cache_destroy(f->frags_cachep);
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
199
200
  }
  EXPORT_SYMBOL(inet_frags_fini);
277e650dd   Pavel Emelyanov   [INET]: Consolida...
201

81566e832   Pavel Emelyanov   [NETNS][FRAGS]: M...
202
203
  void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
  {
ab1c724f6   Florian Westphal   inet: frag: use s...
204
  	unsigned int seq;
b13d3cbfb   Florian Westphal   inet: frag: move ...
205
  	int i;
81566e832   Pavel Emelyanov   [NETNS][FRAGS]: M...
206
  	nf->low_thresh = 0;
e8e16b706   David S. Miller   [INET]: inet_frag...
207

ab1c724f6   Florian Westphal   inet: frag: use s...
208
  evict_again:
5719b296f   Florian Westphal   inet: frag: don't...
209
  	local_bh_disable();
ab1c724f6   Florian Westphal   inet: frag: use s...
210
  	seq = read_seqbegin(&f->rnd_seqlock);
b13d3cbfb   Florian Westphal   inet: frag: move ...
211
212
213
  
  	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
  		inet_evict_bucket(f, &f->hash[i]);
ab1c724f6   Florian Westphal   inet: frag: use s...
214
  	local_bh_enable();
5719b296f   Florian Westphal   inet: frag: don't...
215
216
217
218
219
  	cond_resched();
  
  	if (read_seqretry(&f->rnd_seqlock, seq) ||
  	    percpu_counter_sum(&nf->mem))
  		goto evict_again;
6d7b857d5   Jesper Dangaard Brouer   net: use lib/perc...
220
221
  
  	percpu_counter_destroy(&nf->mem);
81566e832   Pavel Emelyanov   [NETNS][FRAGS]: M...
222
223
  }
  EXPORT_SYMBOL(inet_frags_exit_net);
ab1c724f6   Florian Westphal   inet: frag: use s...
224
225
226
  /* Return fq's bucket with its chain_lock held.  The seqlock retry loop
   * guarantees the hash secret did not change between computing the bucket
   * index and acquiring the lock.
   */
  static struct inet_frag_bucket *
  get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
  __acquires(hb->chain_lock)
  {
  	struct inet_frag_bucket *hb;
  	unsigned int seq, hash;
  
   restart:
  	seq = read_seqbegin(&f->rnd_seqlock);
  
  	hash = inet_frag_hashfn(f, fq);
  	hb = &f->hash[hash];
  
  	spin_lock(&hb->chain_lock);
  	if (read_seqretry(&f->rnd_seqlock, seq)) {
  		/* Secret changed meanwhile; bucket may be stale. */
  		spin_unlock(&hb->chain_lock);
  		goto restart;
  	}
  
  	return hb;
  }
  
  static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
  {
  	struct inet_frag_bucket *hb;
  
  	hb = get_frag_bucket_locked(fq, f);
d1fe19444   Florian Westphal   inet: frag: don't...
251
  	hlist_del(&fq->list);
5719b296f   Florian Westphal   inet: frag: don't...
252
  	fq->flags |= INET_FRAG_COMPLETE;
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
253
  	spin_unlock(&hb->chain_lock);
277e650dd   Pavel Emelyanov   [INET]: Consolida...
254
255
256
257
258
259
  }
  
  void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
  {
  	if (del_timer(&fq->timer))
  		atomic_dec(&fq->refcnt);
06aa8b8a0   Nikolay Aleksandrov   inet: frags: rena...
260
  	if (!(fq->flags & INET_FRAG_COMPLETE)) {
277e650dd   Pavel Emelyanov   [INET]: Consolida...
261
262
  		fq_unlink(fq, f);
  		atomic_dec(&fq->refcnt);
277e650dd   Pavel Emelyanov   [INET]: Consolida...
263
264
  	}
  }
277e650dd   Pavel Emelyanov   [INET]: Consolida...
265
  EXPORT_SYMBOL(inet_frag_kill);
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
266

3fd588eb9   Florian Westphal   inet: frag: remov...
267
  void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
268
269
  {
  	struct sk_buff *fp;
6ddc08222   Pavel Emelyanov   [NETNS][FRAGS]: M...
270
  	struct netns_frags *nf;
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
271
  	unsigned int sum, sum_truesize = 0;
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
272

06aa8b8a0   Nikolay Aleksandrov   inet: frags: rena...
273
  	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
547b792ca   Ilpo Järvinen   net: convert BUG_...
274
  	WARN_ON(del_timer(&q->timer) != 0);
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
275
276
277
  
  	/* Release all fragment data. */
  	fp = q->fragments;
6ddc08222   Pavel Emelyanov   [NETNS][FRAGS]: M...
278
  	nf = q->net;
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
279
280
  	while (fp) {
  		struct sk_buff *xp = fp->next;
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
281
  		sum_truesize += fp->truesize;
a72a5e2d3   Florian Westphal   inet: kill unused...
282
  		kfree_skb(fp);
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
283
284
  		fp = xp;
  	}
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
285
  	sum = sum_truesize + f->qsize;
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
286

c95477090   Pavel Emelyanov   [INET]: Consolida...
287
288
  	if (f->destructor)
  		f->destructor(q);
d4ad4d22e   Nikolay Aleksandrov   inet: frags: use ...
289
  	kmem_cache_free(f->frags_cachep, q);
5719b296f   Florian Westphal   inet: frag: don't...
290
291
  
  	sub_frag_mem_limit(nf, sum);
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
292
293
  }
  EXPORT_SYMBOL(inet_frag_destroy);
8e7999c44   Pavel Emelyanov   [INET]: Consolida...
294

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
295
  static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
f926e2366   Nikolay Aleksandrov   inet: frags: fix ...
296
297
298
  						struct inet_frag_queue *qp_in,
  						struct inet_frags *f,
  						void *arg)
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
299
  {
ab1c724f6   Florian Westphal   inet: frag: use s...
300
  	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
301
  	struct inet_frag_queue *qp;
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
302

2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
303
304
  #ifdef CONFIG_SMP
  	/* With SMP race we have to recheck hash table, because
ab1c724f6   Florian Westphal   inet: frag: use s...
305
306
  	 * such entry could have been created on other cpu before
  	 * we acquired hash bucket lock.
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
307
  	 */
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
308
  	hlist_for_each_entry(qp, &hb->chain, list) {
ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
309
  		if (qp->net == nf && f->match(qp, arg)) {
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
310
  			atomic_inc(&qp->refcnt);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
311
  			spin_unlock(&hb->chain_lock);
06aa8b8a0   Nikolay Aleksandrov   inet: frags: rena...
312
  			qp_in->flags |= INET_FRAG_COMPLETE;
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
313
314
315
316
317
318
  			inet_frag_put(qp_in, f);
  			return qp;
  		}
  	}
  #endif
  	qp = qp_in;
b2fd5321d   Pavel Emelyanov   [NETNS][FRAGS]: M...
319
  	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
320
321
322
  		atomic_inc(&qp->refcnt);
  
  	atomic_inc(&qp->refcnt);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
323
  	hlist_add_head(&qp->list, &hb->chain);
3fd588eb9   Florian Westphal   inet: frag: remov...
324

19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
325
  	spin_unlock(&hb->chain_lock);
24b9bf43e   Nikolay Aleksandrov   net: fix for a ra...
326

2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
327
328
  	return qp;
  }
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
329

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
330
  static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
f926e2366   Nikolay Aleksandrov   inet: frags: fix ...
331
332
  					       struct inet_frags *f,
  					       void *arg)
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
333
334
  {
  	struct inet_frag_queue *q;
30759219f   Michal Kubeček   net: disable frag...
335
  	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
b13d3cbfb   Florian Westphal   inet: frag: move ...
336
  		inet_frag_schedule_worker(f);
86e93e470   Florian Westphal   inet: frag: move ...
337
  		return NULL;
b13d3cbfb   Florian Westphal   inet: frag: move ...
338
  	}
86e93e470   Florian Westphal   inet: frag: move ...
339

d4ad4d22e   Nikolay Aleksandrov   inet: frags: use ...
340
  	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
51456b291   Ian Morris   ipv4: coding styl...
341
  	if (!q)
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
342
  		return NULL;
54db0cc2b   Gao feng   inetpeer: add par...
343
  	q->net = nf;
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
344
  	f->constructor(q, arg);
0e60d245a   Florian Westphal   inet: frag: chang...
345
  	add_frag_mem_limit(nf, f->qsize);
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
346

e521db9d7   Pavel Emelyanov   [INET]: Consolida...
347
348
349
350
351
352
  	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
  	spin_lock_init(&q->lock);
  	atomic_set(&q->refcnt, 1);
  
  	return q;
  }
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
353

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
354
  static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
f926e2366   Nikolay Aleksandrov   inet: frags: fix ...
355
356
  						struct inet_frags *f,
  						void *arg)
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
357
358
  {
  	struct inet_frag_queue *q;
ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
359
  	q = inet_frag_alloc(nf, f, arg);
51456b291   Ian Morris   ipv4: coding styl...
360
  	if (!q)
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
361
  		return NULL;
9a375803f   Pavel Emelyanov   inet fragments: f...
362
  	return inet_frag_intern(nf, q, f, arg);
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
363
  }
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
364

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
365
  struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
f926e2366   Nikolay Aleksandrov   inet: frags: fix ...
366
367
  				       struct inet_frags *f, void *key,
  				       unsigned int hash)
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
368
  {
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
369
  	struct inet_frag_bucket *hb;
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
370
  	struct inet_frag_queue *q;
5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
371
  	int depth = 0;
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
372

b13d3cbfb   Florian Westphal   inet: frag: move ...
373
374
  	if (frag_mem_limit(nf) > nf->low_thresh)
  		inet_frag_schedule_worker(f);
86e93e470   Florian Westphal   inet: frag: move ...
375

fb3cfe6e7   Florian Westphal   inet: frag: remov...
376
  	hash &= (INETFRAGS_HASHSZ - 1);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
377
378
379
380
  	hb = &f->hash[hash];
  
  	spin_lock(&hb->chain_lock);
  	hlist_for_each_entry(q, &hb->chain, list) {
ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
381
  		if (q->net == nf && f->match(q, key)) {
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
382
  			atomic_inc(&q->refcnt);
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
383
  			spin_unlock(&hb->chain_lock);
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
384
385
  			return q;
  		}
5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
386
  		depth++;
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
387
  	}
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
388
  	spin_unlock(&hb->chain_lock);
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
389

5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
390
391
  	if (depth <= INETFRAGS_MAXDEPTH)
  		return inet_frag_create(nf, f, key);
e3a57d18b   Florian Westphal   inet: frag: remov...
392
393
  
  	if (inet_frag_may_rebuild(f)) {
ab1c724f6   Florian Westphal   inet: frag: use s...
394
395
  		if (!f->rebuild)
  			f->rebuild = true;
e3a57d18b   Florian Westphal   inet: frag: remov...
396
397
398
399
  		inet_frag_schedule_worker(f);
  	}
  
  	return ERR_PTR(-ENOBUFS);
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
400
401
  }
  EXPORT_SYMBOL(inet_frag_find);
5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
402
403
404
405
406
407
408
409
410
411
  
  void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
  				   const char *prefix)
  {
  	static const char msg[] = "inet_frag_find: Fragment hash bucket"
  		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
  		". Dropping fragment.
  ";
  
  	if (PTR_ERR(q) == -ENOBUFS)
ba7a46f16   Joe Perches   net: Convert LIMI...
412
  		net_dbg_ratelimited("%s%s", prefix, msg);
5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
413
414
  }
  EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);