Blame view

net/ipv4/inet_fragment.c 8.62 KB
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
  /*
   * inet fragments management
   *
   *		This program is free software; you can redistribute it and/or
   *		modify it under the terms of the GNU General Public License
   *		as published by the Free Software Foundation; either version
   *		2 of the License, or (at your option) any later version.
   *
   * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
   *				Started as consolidation of ipv4/ip_fragment.c,
   *				ipv6/reassembly. and ipv6 nf conntrack reassembly
   */
  
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <linux/module.h>
  #include <linux/timer.h>
  #include <linux/mm.h>
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
19
  #include <linux/random.h>
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
20
21
  #include <linux/skbuff.h>
  #include <linux/rtnetlink.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
22
  #include <linux/slab.h>
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
23

5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
24
  #include <net/sock.h>
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
25
  #include <net/inet_frag.h>
be991971d   Hannes Frederic Sowa   inet: generalize ...
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
  #include <net/inet_ecn.h>
  
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 *
 * The table is indexed by the bitwise OR of the IPFRAG_ECN_* flags
 * accumulated over all fragments of a datagram; entries not listed
 * below are implicitly 0 (keep the frame, no CE to merge in).
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
48

321a3a99e   Pavel Emelyanov   [INET]: Consolida...
49
50
51
52
53
/* Timer callback: periodically re-seed the hash secret and move every
 * queued fragment queue to the bucket its new hash dictates.  Runs with
 * f->lock held for writing, which excludes all readers (lookup/intern),
 * so the per-bucket chain locks are not needed here.
 */
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	/* New seed first; f->hashfn() below picks it up for rehashing. */
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		/* _safe variant: q may be unlinked and moved mid-walk */
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	/* Re-arm for the next rebuild interval. */
	mod_timer(&f->secret_timer, now + f->secret_interval);
}
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
80
81
82
  void inet_frags_init(struct inet_frags *f)
  {
  	int i;
19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
83
84
  	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
  		struct inet_frag_bucket *hb = &f->hash[i];
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
85

19952cc4f   Jesper Dangaard Brouer   net: frag queue p...
86
87
88
  		spin_lock_init(&hb->chain_lock);
  		INIT_HLIST_HEAD(&hb->chain);
  	}
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
89
  	rwlock_init(&f->lock);
b24b8a247   Pavel Emelyanov   [NET]: Convert in...
90
91
  	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
  			(unsigned long)f);
3b4bc4a2b   Pavel Emelyanov   [NETNS][FRAGS]: I...
92
  	f->secret_timer.expires = jiffies + f->secret_interval;
321a3a99e   Pavel Emelyanov   [INET]: Consolida...
93
  	add_timer(&f->secret_timer);
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
94
95
  }
  EXPORT_SYMBOL(inet_frags_init);
e5a2bb842   Pavel Emelyanov   [NETNS][FRAGS]: M...
96
97
98
  void inet_frags_init_net(struct netns_frags *nf)
  {
  	nf->nqueues = 0;
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
99
  	init_frag_mem_limit(nf);
3140c25c8   Pavel Emelyanov   [NETNS][FRAGS]: M...
100
  	INIT_LIST_HEAD(&nf->lru_list);
3ef0eb0db   Jesper Dangaard Brouer   net: frag, move L...
101
  	spin_lock_init(&nf->lru_lock);
e5a2bb842   Pavel Emelyanov   [NETNS][FRAGS]: M...
102
103
  }
  EXPORT_SYMBOL(inet_frags_init_net);
7eb95156d   Pavel Emelyanov   [INET]: Collect f...
104
105
/* Tear down the per-protocol state set up by inet_frags_init():
 * stop the secret-rebuild timer.  The hash table itself is embedded
 * in *f and needs no further cleanup here.
 */
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
277e650dd   Pavel Emelyanov   [INET]: Consolida...
109

81566e832   Pavel Emelyanov   [NETNS][FRAGS]: M...
110
111
112
/* Per-netns teardown: force-evict every remaining fragment queue and
 * release the memory-limit counter.  Dropping low_thresh to 0 first
 * ensures the evictor's work target covers everything still queued.
 */
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	/* Evictor expects BH-disabled context, like the normal rx path. */
	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
277e650dd   Pavel Emelyanov   [INET]: Consolida...
121
122
/* Remove fq from its hash bucket and from the per-netns LRU list.
 * Lock order: f->lock (read, blocks secret rebuild and thus keeps the
 * hash stable while we compute it) -> bucket chain_lock.  The hash must
 * be computed under the read lock, since a rebuild changes f->rnd.
 */
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	/* LRU removal takes nf->lru_lock internally. */
	inet_frag_lru_del(fq);
}
  
/* Mark a fragment queue dead: stop its expiry timer, unhash it, and
 * drop the references the timer and the hash table held.  Idempotent
 * via the INET_FRAG_COMPLETE flag, so concurrent killers unlink only
 * once.  Caller must hold fq->lock (presumably - matches how the
 * evictor below calls it; confirm against other callers).
 */
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	/* del_timer() returns nonzero only if the timer was pending,
	 * i.e. only then did the timer still own a reference.
	 */
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);	/* hash table's reference */
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
1e4b82873   Pavel Emelyanov   [INET]: Consolida...
149

6ddc08222   Pavel Emelyanov   [NETNS][FRAGS]: M...
150
/* Free one fragment skb, giving the protocol a chance to release any
 * per-skb state first via its skb_free callback.  The nf parameter is
 * currently unused here; kept for signature symmetry with callers.
 */
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
  
/* Final teardown of a fragment queue whose refcount hit zero: free all
 * queued skbs, return the accounted memory (skb truesizes plus the
 * queue structure itself), run the protocol destructor, and free q.
 * The queue must already be killed (INET_FRAG_COMPLETE set, timer
 * stopped) - both are asserted with WARN_ON.
 *
 * If @work is non-NULL (evictor path), the freed byte total is also
 * subtracted from the evictor's remaining work budget.
 */
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	/* Account the queue struct itself on top of the skb memory. */
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
8e7999c44   Pavel Emelyanov   [INET]: Consolida...
188

6b102865e   Amerigo Wang   ipv6: unify fragm...
189
/* Evict fragment queues from the per-netns LRU list until memory usage
 * drops below low_thresh, or until the list is empty when @force is set
 * (netns teardown).  Returns the number of queues evicted.
 *
 * Each iteration: under lru_lock, take the oldest queue, grab a
 * reference, and remove it from the LRU so other CPUs cannot pick the
 * same queue; then kill it under its own lock and drop our reference,
 * destroying it if that was the last one.  inet_frag_destroy() credits
 * the freed bytes against @work via the pointer we pass.
 */
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);
		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		/* Skip queues another path already completed/killed. */
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
2588fe1d7   Pavel Emelyanov   [INET]: Consolida...
224

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
225
226
/* Insert a freshly allocated queue qp_in into the hash table, unless an
 * equivalent queue was raced in by another CPU meanwhile - in that case
 * return the existing queue (with a reference taken) and dispose of
 * qp_in.  Returns the queue to use, referenced for the caller.
 */
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed w/o the lock other CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunatelly the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			/* Lost the race: use the winner's queue and
			 * complete+release our unused qp_in.
			 */
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	/* Arm the expiry timer; if it was somehow already pending,
	 * mod_timer() returns nonzero and the timer's ref is in place.
	 */
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	/* Second reference is the hash table's; caller keeps the first. */
	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	inet_frag_lru_add(nf, qp);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
270

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
271
272
  static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
  		struct inet_frags *f, void *arg)
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
273
274
275
276
277
278
  {
  	struct inet_frag_queue *q;
  
  	q = kzalloc(f->qsize, GFP_ATOMIC);
  	if (q == NULL)
  		return NULL;
54db0cc2b   Gao feng   inetpeer: add par...
279
  	q->net = nf;
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
280
  	f->constructor(q, arg);
d433673e5   Jesper Dangaard Brouer   net: frag helper ...
281
  	add_frag_mem_limit(q, f->qsize);
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
282
283
284
  	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
  	spin_lock_init(&q->lock);
  	atomic_set(&q->refcnt, 1);
b56141ab3   Konstantin Khlebnikov   net: frag, fix ra...
285
  	INIT_LIST_HEAD(&q->lru_list);
e521db9d7   Pavel Emelyanov   [INET]: Consolida...
286
287
288
  
  	return q;
  }
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
289

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
290
  static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
9a375803f   Pavel Emelyanov   inet fragments: f...
291
  		struct inet_frags *f, void *arg)
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
292
293
  {
  	struct inet_frag_queue *q;
ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
294
  	q = inet_frag_alloc(nf, f, arg);
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
295
296
  	if (q == NULL)
  		return NULL;
9a375803f   Pavel Emelyanov   inet fragments: f...
297
  	return inet_frag_intern(nf, q, f, arg);
c6fda2822   Pavel Emelyanov   [INET]: Consolida...
298
  }
abd6523d1   Pavel Emelyanov   [INET]: Consolida...
299

ac18e7509   Pavel Emelyanov   [NETNS][FRAGS]: M...
300
301
/* Look up the fragment queue matching @key in bucket @hash, creating
 * one if absent.  Returns a referenced queue, or ERR_PTR(-ENOBUFS) if
 * the chain has grown past INETFRAGS_MAXDEPTH (DoS limit).
 *
 * Caller enters holding f->lock for reading (it computed @hash under
 * that lock); this function releases it on every path, as annotated.
 */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	/* Not found: create, unless the chain is suspiciously long. */
	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
5a3da1fe9   Hannes Frederic Sowa   inet: limit lengt...
328
329
330
331
332
333
334
335
336
337
338
339
340
  
  void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
  				   const char *prefix)
  {
  	static const char msg[] = "inet_frag_find: Fragment hash bucket"
  		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
  		". Dropping fragment.
  ";
  
  	if (PTR_ERR(q) == -ENOBUFS)
  		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
  }
  EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);