net/ipv4/inet_fragment.c

/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>
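
/* Timer callback: choose a fresh hash secret and relink every queue to
 * the chain it hashes to under the new secret.  Periodic rekeying keeps
 * remote senders from deliberately colliding hash chains. */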
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}
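
/* One-time setup for a protocol's inet_frags: empty hash chains, an
 * initial hash seed, and the periodic secret-rebuild timer.  The caller
 * fills in the callbacks and secret_interval before calling this. */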
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
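
/* Per-namespace state: queue count, memory accounting, and the LRU
 * list that the evictor walks. */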
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);
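
/* Counterpart of inet_frags_init(); only the secret timer needs
 * explicit teardown. */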
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);
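
/* Namespace teardown: drop low_thresh to zero so the forced evictor
 * pass below drains every remaining queue. */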
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);
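
/* Remove a queue from its hash chain and from the per-namespace LRU
 * list, under the table's write lock. */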
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

/* Take a queue out of circulation: stop its timer, unhash it and mark
 * it COMPLETE.  Drops the timer's and the hash table's references; the
 * final put then frees the queue via inet_frag_destroy(). */
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
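
/* Free one fragment skb, returning its truesize to the namespace's
 * memory accounting (and to the evictor's work budget when given). */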
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

/* Called once the last reference is gone.  The queue must already have
 * been killed (COMPLETE set, timer stopped); free its fragment chain,
 * return the memory accounting, and run the protocol destructor. */
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
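
/* Reclaim memory by killing queues in LRU order.  Without force, bail
 * out unless usage exceeds high_thresh, then drop queues until it falls
 * back under low_thresh.  Returns the number of queues evicted. */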
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (atomic_read(&nf->mem) <= nf->high_thresh)
			return 0;
	}

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
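
/* Insert a freshly allocated queue into the hash under the write lock.
 * The hash is recomputed here because the secret may have been rebuilt
 * since the caller hashed the key; on SMP we must also recheck the
 * chain in case another CPU interned an equal queue first, and yield
 * to it. */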
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed w/o the lock another CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on another cpu, while we
	 * promoted read lock to write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}
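
/* Allocate a queue with GFP_ATOMIC (f->qsize is the size of the whole
 * protocol-specific container); the constructor callback initializes
 * the protocol part from arg. */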
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}
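
/* Allocate and intern in one step: returns the new queue, or the
 * existing one that won the race inside inet_frag_intern(). */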
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}
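
/* Look up a queue by key on the given hash chain, taking a reference
 * on a hit; on a miss, create and intern a new one.  The caller holds
 * f->lock for reading, and it is released here on every path, as the
 * __releases annotation documents. */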
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);