Commit 0a8891a0a419d43ea06c8ded0849f0820c6a873b
Committed by: David S. Miller
1 parent: 4665079cbb
Exists in: master and 7 other branches
[IPv6]: use container_of() macro in fib6_clean_node()
In ip6_fib.c, fib6_clean_node() casts a fib6_walker_t pointer to a fib6_cleaner_t pointer, relying on the struct fib6_walker_t member ('w') being the first field of struct fib6_cleaner_t. To prevent any future problems if a field is ever inadvertently inserted before 'w' in struct fib6_cleaner_t (and to improve readability), this patch uses the container_of() macro instead.

Signed-off-by: Benjamin Thery <benjamin.thery@bull.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
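The one-line hunk itself is not reproduced in the excerpt below (the listing ends before fib6_clean_node()); the following is a sketch of the change implied by the description above, not the verbatim patch:

 static int fib6_clean_node(struct fib6_walker_t *w)
 {
-	/* relied on 'w' being the first member of struct fib6_cleaner_t */
-	struct fib6_cleaner_t *c = (struct fib6_cleaner_t *)w;
+	/* container_of() recovers the enclosing fib6_cleaner_t from its
+	 * 'w' member, so the conversion stays correct even if a field is
+	 * later added before 'w'.
+	 */
+	struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w);
 	...
 }

container_of(ptr, type, member) subtracts offsetof(type, member) from ptr, which is why the result no longer depends on 'w' being the first field.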
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
net/ipv6/ip6_fib.c
1 | /* | 1 | /* |
2 | * Linux INET6 implementation | 2 | * Linux INET6 implementation |
3 | * Forwarding Information Database | 3 | * Forwarding Information Database |
4 | * | 4 | * |
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $ | 8 | * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $ |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 12 | * as published by the Free Software Foundation; either version |
13 | * 2 of the License, or (at your option) any later version. | 13 | * 2 of the License, or (at your option) any later version. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * Changes: | 17 | * Changes: |
18 | * Yuji SEKIYA @USAGI: Support default route on router node; | 18 | * Yuji SEKIYA @USAGI: Support default route on router node; |
19 | * remove ip6_null_entry from the top of | 19 | * remove ip6_null_entry from the top of |
20 | * routing table. | 20 | * routing table. |
21 | * Ville Nuorvala: Fixed routing subtrees. | 21 | * Ville Nuorvala: Fixed routing subtrees. |
22 | */ | 22 | */ |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <linux/net.h> | 25 | #include <linux/net.h> |
26 | #include <linux/route.h> | 26 | #include <linux/route.h> |
27 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
28 | #include <linux/in6.h> | 28 | #include <linux/in6.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | 31 | ||
32 | #ifdef CONFIG_PROC_FS | 32 | #ifdef CONFIG_PROC_FS |
33 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #include <net/ipv6.h> | 36 | #include <net/ipv6.h> |
37 | #include <net/ndisc.h> | 37 | #include <net/ndisc.h> |
38 | #include <net/addrconf.h> | 38 | #include <net/addrconf.h> |
39 | 39 | ||
40 | #include <net/ip6_fib.h> | 40 | #include <net/ip6_fib.h> |
41 | #include <net/ip6_route.h> | 41 | #include <net/ip6_route.h> |
42 | 42 | ||
43 | #define RT6_DEBUG 2 | 43 | #define RT6_DEBUG 2 |
44 | 44 | ||
45 | #if RT6_DEBUG >= 3 | 45 | #if RT6_DEBUG >= 3 |
46 | #define RT6_TRACE(x...) printk(KERN_DEBUG x) | 46 | #define RT6_TRACE(x...) printk(KERN_DEBUG x) |
47 | #else | 47 | #else |
48 | #define RT6_TRACE(x...) do { ; } while (0) | 48 | #define RT6_TRACE(x...) do { ; } while (0) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | struct rt6_statistics rt6_stats; | 51 | struct rt6_statistics rt6_stats; |
52 | 52 | ||
53 | static struct kmem_cache * fib6_node_kmem __read_mostly; | 53 | static struct kmem_cache * fib6_node_kmem __read_mostly; |
54 | 54 | ||
55 | enum fib_walk_state_t | 55 | enum fib_walk_state_t |
56 | { | 56 | { |
57 | #ifdef CONFIG_IPV6_SUBTREES | 57 | #ifdef CONFIG_IPV6_SUBTREES |
58 | FWS_S, | 58 | FWS_S, |
59 | #endif | 59 | #endif |
60 | FWS_L, | 60 | FWS_L, |
61 | FWS_R, | 61 | FWS_R, |
62 | FWS_C, | 62 | FWS_C, |
63 | FWS_U | 63 | FWS_U |
64 | }; | 64 | }; |
65 | 65 | ||
66 | struct fib6_cleaner_t | 66 | struct fib6_cleaner_t |
67 | { | 67 | { |
68 | struct fib6_walker_t w; | 68 | struct fib6_walker_t w; |
69 | int (*func)(struct rt6_info *, void *arg); | 69 | int (*func)(struct rt6_info *, void *arg); |
70 | void *arg; | 70 | void *arg; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static DEFINE_RWLOCK(fib6_walker_lock); | 73 | static DEFINE_RWLOCK(fib6_walker_lock); |
74 | 74 | ||
75 | #ifdef CONFIG_IPV6_SUBTREES | 75 | #ifdef CONFIG_IPV6_SUBTREES |
76 | #define FWS_INIT FWS_S | 76 | #define FWS_INIT FWS_S |
77 | #else | 77 | #else |
78 | #define FWS_INIT FWS_L | 78 | #define FWS_INIT FWS_L |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt); | 81 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt); |
82 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn); | 82 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn); |
83 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn); | 83 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn); |
84 | static int fib6_walk(struct fib6_walker_t *w); | 84 | static int fib6_walk(struct fib6_walker_t *w); |
85 | static int fib6_walk_continue(struct fib6_walker_t *w); | 85 | static int fib6_walk_continue(struct fib6_walker_t *w); |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * A routing update causes an increase of the serial number on the | 88 | * A routing update causes an increase of the serial number on the |
89 | * affected subtree. This allows for cached routes to be asynchronously | 89 | * affected subtree. This allows for cached routes to be asynchronously |
90 | * tested when modifications are made to the destination cache as a | 90 | * tested when modifications are made to the destination cache as a |
91 | * result of redirects, path MTU changes, etc. | 91 | * result of redirects, path MTU changes, etc. |
92 | */ | 92 | */ |
93 | 93 | ||
94 | static __u32 rt_sernum; | 94 | static __u32 rt_sernum; |
95 | 95 | ||
96 | static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0); | 96 | static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0); |
97 | 97 | ||
98 | static struct fib6_walker_t fib6_walker_list = { | 98 | static struct fib6_walker_t fib6_walker_list = { |
99 | .prev = &fib6_walker_list, | 99 | .prev = &fib6_walker_list, |
100 | .next = &fib6_walker_list, | 100 | .next = &fib6_walker_list, |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next) | 103 | #define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next) |
104 | 104 | ||
105 | static inline void fib6_walker_link(struct fib6_walker_t *w) | 105 | static inline void fib6_walker_link(struct fib6_walker_t *w) |
106 | { | 106 | { |
107 | write_lock_bh(&fib6_walker_lock); | 107 | write_lock_bh(&fib6_walker_lock); |
108 | w->next = fib6_walker_list.next; | 108 | w->next = fib6_walker_list.next; |
109 | w->prev = &fib6_walker_list; | 109 | w->prev = &fib6_walker_list; |
110 | w->next->prev = w; | 110 | w->next->prev = w; |
111 | w->prev->next = w; | 111 | w->prev->next = w; |
112 | write_unlock_bh(&fib6_walker_lock); | 112 | write_unlock_bh(&fib6_walker_lock); |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline void fib6_walker_unlink(struct fib6_walker_t *w) | 115 | static inline void fib6_walker_unlink(struct fib6_walker_t *w) |
116 | { | 116 | { |
117 | write_lock_bh(&fib6_walker_lock); | 117 | write_lock_bh(&fib6_walker_lock); |
118 | w->next->prev = w->prev; | 118 | w->next->prev = w->prev; |
119 | w->prev->next = w->next; | 119 | w->prev->next = w->next; |
120 | w->prev = w->next = w; | 120 | w->prev = w->next = w; |
121 | write_unlock_bh(&fib6_walker_lock); | 121 | write_unlock_bh(&fib6_walker_lock); |
122 | } | 122 | } |
123 | static __inline__ u32 fib6_new_sernum(void) | 123 | static __inline__ u32 fib6_new_sernum(void) |
124 | { | 124 | { |
125 | u32 n = ++rt_sernum; | 125 | u32 n = ++rt_sernum; |
126 | if ((__s32)n <= 0) | 126 | if ((__s32)n <= 0) |
127 | rt_sernum = n = 1; | 127 | rt_sernum = n = 1; |
128 | return n; | 128 | return n; |
129 | } | 129 | } |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * Auxiliary address test functions for the radix tree. | 132 | * Auxiliary address test functions for the radix tree. |
133 | * | 133 | * |
134 | * These assume a 32bit processor (although it will work on | 134 | * These assume a 32bit processor (although it will work on |
135 | * 64bit processors) | 135 | * 64bit processors) |
136 | */ | 136 | */ |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * test bit | 139 | * test bit |
140 | */ | 140 | */ |
141 | 141 | ||
142 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) | 142 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) |
143 | { | 143 | { |
144 | __be32 *addr = token; | 144 | __be32 *addr = token; |
145 | 145 | ||
146 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; | 146 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; |
147 | } | 147 | } |
148 | 148 | ||
149 | static __inline__ struct fib6_node * node_alloc(void) | 149 | static __inline__ struct fib6_node * node_alloc(void) |
150 | { | 150 | { |
151 | struct fib6_node *fn; | 151 | struct fib6_node *fn; |
152 | 152 | ||
153 | fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC); | 153 | fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC); |
154 | 154 | ||
155 | return fn; | 155 | return fn; |
156 | } | 156 | } |
157 | 157 | ||
158 | static __inline__ void node_free(struct fib6_node * fn) | 158 | static __inline__ void node_free(struct fib6_node * fn) |
159 | { | 159 | { |
160 | kmem_cache_free(fib6_node_kmem, fn); | 160 | kmem_cache_free(fib6_node_kmem, fn); |
161 | } | 161 | } |
162 | 162 | ||
163 | static __inline__ void rt6_release(struct rt6_info *rt) | 163 | static __inline__ void rt6_release(struct rt6_info *rt) |
164 | { | 164 | { |
165 | if (atomic_dec_and_test(&rt->rt6i_ref)) | 165 | if (atomic_dec_and_test(&rt->rt6i_ref)) |
166 | dst_free(&rt->u.dst); | 166 | dst_free(&rt->u.dst); |
167 | } | 167 | } |
168 | 168 | ||
169 | static struct fib6_table fib6_main_tbl = { | 169 | static struct fib6_table fib6_main_tbl = { |
170 | .tb6_id = RT6_TABLE_MAIN, | 170 | .tb6_id = RT6_TABLE_MAIN, |
171 | .tb6_root = { | 171 | .tb6_root = { |
172 | .leaf = &ip6_null_entry, | 172 | .leaf = &ip6_null_entry, |
173 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, | 173 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, |
174 | }, | 174 | }, |
175 | }; | 175 | }; |
176 | 176 | ||
177 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 177 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
178 | #define FIB_TABLE_HASHSZ 256 | 178 | #define FIB_TABLE_HASHSZ 256 |
179 | #else | 179 | #else |
180 | #define FIB_TABLE_HASHSZ 1 | 180 | #define FIB_TABLE_HASHSZ 1 |
181 | #endif | 181 | #endif |
182 | static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; | 182 | static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; |
183 | 183 | ||
184 | static void fib6_link_table(struct fib6_table *tb) | 184 | static void fib6_link_table(struct fib6_table *tb) |
185 | { | 185 | { |
186 | unsigned int h; | 186 | unsigned int h; |
187 | 187 | ||
188 | /* | 188 | /* |
189 | * Initialize table lock at a single place to give lockdep a key, | 189 | * Initialize table lock at a single place to give lockdep a key, |
190 | * tables aren't visible prior to being linked to the list. | 190 | * tables aren't visible prior to being linked to the list. |
191 | */ | 191 | */ |
192 | rwlock_init(&tb->tb6_lock); | 192 | rwlock_init(&tb->tb6_lock); |
193 | 193 | ||
194 | h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1); | 194 | h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1); |
195 | 195 | ||
196 | /* | 196 | /* |
197 | * No protection necessary, this is the only list mutatation | 197 | * No protection necessary, this is the only list mutatation |
198 | * operation, tables never disappear once they exist. | 198 | * operation, tables never disappear once they exist. |
199 | */ | 199 | */ |
200 | hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]); | 200 | hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]); |
201 | } | 201 | } |
202 | 202 | ||
203 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 203 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
204 | static struct fib6_table fib6_local_tbl = { | 204 | static struct fib6_table fib6_local_tbl = { |
205 | .tb6_id = RT6_TABLE_LOCAL, | 205 | .tb6_id = RT6_TABLE_LOCAL, |
206 | .tb6_root = { | 206 | .tb6_root = { |
207 | .leaf = &ip6_null_entry, | 207 | .leaf = &ip6_null_entry, |
208 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, | 208 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, |
209 | }, | 209 | }, |
210 | }; | 210 | }; |
211 | 211 | ||
212 | static struct fib6_table *fib6_alloc_table(u32 id) | 212 | static struct fib6_table *fib6_alloc_table(u32 id) |
213 | { | 213 | { |
214 | struct fib6_table *table; | 214 | struct fib6_table *table; |
215 | 215 | ||
216 | table = kzalloc(sizeof(*table), GFP_ATOMIC); | 216 | table = kzalloc(sizeof(*table), GFP_ATOMIC); |
217 | if (table != NULL) { | 217 | if (table != NULL) { |
218 | table->tb6_id = id; | 218 | table->tb6_id = id; |
219 | table->tb6_root.leaf = &ip6_null_entry; | 219 | table->tb6_root.leaf = &ip6_null_entry; |
220 | table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; | 220 | table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; |
221 | } | 221 | } |
222 | 222 | ||
223 | return table; | 223 | return table; |
224 | } | 224 | } |
225 | 225 | ||
226 | struct fib6_table *fib6_new_table(u32 id) | 226 | struct fib6_table *fib6_new_table(u32 id) |
227 | { | 227 | { |
228 | struct fib6_table *tb; | 228 | struct fib6_table *tb; |
229 | 229 | ||
230 | if (id == 0) | 230 | if (id == 0) |
231 | id = RT6_TABLE_MAIN; | 231 | id = RT6_TABLE_MAIN; |
232 | tb = fib6_get_table(id); | 232 | tb = fib6_get_table(id); |
233 | if (tb) | 233 | if (tb) |
234 | return tb; | 234 | return tb; |
235 | 235 | ||
236 | tb = fib6_alloc_table(id); | 236 | tb = fib6_alloc_table(id); |
237 | if (tb != NULL) | 237 | if (tb != NULL) |
238 | fib6_link_table(tb); | 238 | fib6_link_table(tb); |
239 | 239 | ||
240 | return tb; | 240 | return tb; |
241 | } | 241 | } |
242 | 242 | ||
243 | struct fib6_table *fib6_get_table(u32 id) | 243 | struct fib6_table *fib6_get_table(u32 id) |
244 | { | 244 | { |
245 | struct fib6_table *tb; | 245 | struct fib6_table *tb; |
246 | struct hlist_node *node; | 246 | struct hlist_node *node; |
247 | unsigned int h; | 247 | unsigned int h; |
248 | 248 | ||
249 | if (id == 0) | 249 | if (id == 0) |
250 | id = RT6_TABLE_MAIN; | 250 | id = RT6_TABLE_MAIN; |
251 | h = id & (FIB_TABLE_HASHSZ - 1); | 251 | h = id & (FIB_TABLE_HASHSZ - 1); |
252 | rcu_read_lock(); | 252 | rcu_read_lock(); |
253 | hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) { | 253 | hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) { |
254 | if (tb->tb6_id == id) { | 254 | if (tb->tb6_id == id) { |
255 | rcu_read_unlock(); | 255 | rcu_read_unlock(); |
256 | return tb; | 256 | return tb; |
257 | } | 257 | } |
258 | } | 258 | } |
259 | rcu_read_unlock(); | 259 | rcu_read_unlock(); |
260 | 260 | ||
261 | return NULL; | 261 | return NULL; |
262 | } | 262 | } |
263 | 263 | ||
264 | static void __init fib6_tables_init(void) | 264 | static void __init fib6_tables_init(void) |
265 | { | 265 | { |
266 | fib6_link_table(&fib6_main_tbl); | 266 | fib6_link_table(&fib6_main_tbl); |
267 | fib6_link_table(&fib6_local_tbl); | 267 | fib6_link_table(&fib6_local_tbl); |
268 | } | 268 | } |
269 | 269 | ||
270 | #else | 270 | #else |
271 | 271 | ||
272 | struct fib6_table *fib6_new_table(u32 id) | 272 | struct fib6_table *fib6_new_table(u32 id) |
273 | { | 273 | { |
274 | return fib6_get_table(id); | 274 | return fib6_get_table(id); |
275 | } | 275 | } |
276 | 276 | ||
277 | struct fib6_table *fib6_get_table(u32 id) | 277 | struct fib6_table *fib6_get_table(u32 id) |
278 | { | 278 | { |
279 | return &fib6_main_tbl; | 279 | return &fib6_main_tbl; |
280 | } | 280 | } |
281 | 281 | ||
282 | struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, | 282 | struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, |
283 | pol_lookup_t lookup) | 283 | pol_lookup_t lookup) |
284 | { | 284 | { |
285 | return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags); | 285 | return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void __init fib6_tables_init(void) | 288 | static void __init fib6_tables_init(void) |
289 | { | 289 | { |
290 | fib6_link_table(&fib6_main_tbl); | 290 | fib6_link_table(&fib6_main_tbl); |
291 | } | 291 | } |
292 | 292 | ||
293 | #endif | 293 | #endif |
294 | 294 | ||
295 | static int fib6_dump_node(struct fib6_walker_t *w) | 295 | static int fib6_dump_node(struct fib6_walker_t *w) |
296 | { | 296 | { |
297 | int res; | 297 | int res; |
298 | struct rt6_info *rt; | 298 | struct rt6_info *rt; |
299 | 299 | ||
300 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { | 300 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { |
301 | res = rt6_dump_route(rt, w->args); | 301 | res = rt6_dump_route(rt, w->args); |
302 | if (res < 0) { | 302 | if (res < 0) { |
303 | /* Frame is full, suspend walking */ | 303 | /* Frame is full, suspend walking */ |
304 | w->leaf = rt; | 304 | w->leaf = rt; |
305 | return 1; | 305 | return 1; |
306 | } | 306 | } |
307 | BUG_TRAP(res!=0); | 307 | BUG_TRAP(res!=0); |
308 | } | 308 | } |
309 | w->leaf = NULL; | 309 | w->leaf = NULL; |
310 | return 0; | 310 | return 0; |
311 | } | 311 | } |
312 | 312 | ||
313 | static void fib6_dump_end(struct netlink_callback *cb) | 313 | static void fib6_dump_end(struct netlink_callback *cb) |
314 | { | 314 | { |
315 | struct fib6_walker_t *w = (void*)cb->args[2]; | 315 | struct fib6_walker_t *w = (void*)cb->args[2]; |
316 | 316 | ||
317 | if (w) { | 317 | if (w) { |
318 | cb->args[2] = 0; | 318 | cb->args[2] = 0; |
319 | kfree(w); | 319 | kfree(w); |
320 | } | 320 | } |
321 | cb->done = (void*)cb->args[3]; | 321 | cb->done = (void*)cb->args[3]; |
322 | cb->args[1] = 3; | 322 | cb->args[1] = 3; |
323 | } | 323 | } |
324 | 324 | ||
325 | static int fib6_dump_done(struct netlink_callback *cb) | 325 | static int fib6_dump_done(struct netlink_callback *cb) |
326 | { | 326 | { |
327 | fib6_dump_end(cb); | 327 | fib6_dump_end(cb); |
328 | return cb->done ? cb->done(cb) : 0; | 328 | return cb->done ? cb->done(cb) : 0; |
329 | } | 329 | } |
330 | 330 | ||
331 | static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, | 331 | static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, |
332 | struct netlink_callback *cb) | 332 | struct netlink_callback *cb) |
333 | { | 333 | { |
334 | struct fib6_walker_t *w; | 334 | struct fib6_walker_t *w; |
335 | int res; | 335 | int res; |
336 | 336 | ||
337 | w = (void *)cb->args[2]; | 337 | w = (void *)cb->args[2]; |
338 | w->root = &table->tb6_root; | 338 | w->root = &table->tb6_root; |
339 | 339 | ||
340 | if (cb->args[4] == 0) { | 340 | if (cb->args[4] == 0) { |
341 | read_lock_bh(&table->tb6_lock); | 341 | read_lock_bh(&table->tb6_lock); |
342 | res = fib6_walk(w); | 342 | res = fib6_walk(w); |
343 | read_unlock_bh(&table->tb6_lock); | 343 | read_unlock_bh(&table->tb6_lock); |
344 | if (res > 0) | 344 | if (res > 0) |
345 | cb->args[4] = 1; | 345 | cb->args[4] = 1; |
346 | } else { | 346 | } else { |
347 | read_lock_bh(&table->tb6_lock); | 347 | read_lock_bh(&table->tb6_lock); |
348 | res = fib6_walk_continue(w); | 348 | res = fib6_walk_continue(w); |
349 | read_unlock_bh(&table->tb6_lock); | 349 | read_unlock_bh(&table->tb6_lock); |
350 | if (res != 0) { | 350 | if (res != 0) { |
351 | if (res < 0) | 351 | if (res < 0) |
352 | fib6_walker_unlink(w); | 352 | fib6_walker_unlink(w); |
353 | goto end; | 353 | goto end; |
354 | } | 354 | } |
355 | fib6_walker_unlink(w); | 355 | fib6_walker_unlink(w); |
356 | cb->args[4] = 0; | 356 | cb->args[4] = 0; |
357 | } | 357 | } |
358 | end: | 358 | end: |
359 | return res; | 359 | return res; |
360 | } | 360 | } |
361 | 361 | ||
362 | static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | 362 | static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) |
363 | { | 363 | { |
364 | unsigned int h, s_h; | 364 | unsigned int h, s_h; |
365 | unsigned int e = 0, s_e; | 365 | unsigned int e = 0, s_e; |
366 | struct rt6_rtnl_dump_arg arg; | 366 | struct rt6_rtnl_dump_arg arg; |
367 | struct fib6_walker_t *w; | 367 | struct fib6_walker_t *w; |
368 | struct fib6_table *tb; | 368 | struct fib6_table *tb; |
369 | struct hlist_node *node; | 369 | struct hlist_node *node; |
370 | int res = 0; | 370 | int res = 0; |
371 | 371 | ||
372 | s_h = cb->args[0]; | 372 | s_h = cb->args[0]; |
373 | s_e = cb->args[1]; | 373 | s_e = cb->args[1]; |
374 | 374 | ||
375 | w = (void *)cb->args[2]; | 375 | w = (void *)cb->args[2]; |
376 | if (w == NULL) { | 376 | if (w == NULL) { |
377 | /* New dump: | 377 | /* New dump: |
378 | * | 378 | * |
379 | * 1. hook callback destructor. | 379 | * 1. hook callback destructor. |
380 | */ | 380 | */ |
381 | cb->args[3] = (long)cb->done; | 381 | cb->args[3] = (long)cb->done; |
382 | cb->done = fib6_dump_done; | 382 | cb->done = fib6_dump_done; |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * 2. allocate and initialize walker. | 385 | * 2. allocate and initialize walker. |
386 | */ | 386 | */ |
387 | w = kzalloc(sizeof(*w), GFP_ATOMIC); | 387 | w = kzalloc(sizeof(*w), GFP_ATOMIC); |
388 | if (w == NULL) | 388 | if (w == NULL) |
389 | return -ENOMEM; | 389 | return -ENOMEM; |
390 | w->func = fib6_dump_node; | 390 | w->func = fib6_dump_node; |
391 | cb->args[2] = (long)w; | 391 | cb->args[2] = (long)w; |
392 | } | 392 | } |
393 | 393 | ||
394 | arg.skb = skb; | 394 | arg.skb = skb; |
395 | arg.cb = cb; | 395 | arg.cb = cb; |
396 | w->args = &arg; | 396 | w->args = &arg; |
397 | 397 | ||
398 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { | 398 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { |
399 | e = 0; | 399 | e = 0; |
400 | hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) { | 400 | hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) { |
401 | if (e < s_e) | 401 | if (e < s_e) |
402 | goto next; | 402 | goto next; |
403 | res = fib6_dump_table(tb, skb, cb); | 403 | res = fib6_dump_table(tb, skb, cb); |
404 | if (res != 0) | 404 | if (res != 0) |
405 | goto out; | 405 | goto out; |
406 | next: | 406 | next: |
407 | e++; | 407 | e++; |
408 | } | 408 | } |
409 | } | 409 | } |
410 | out: | 410 | out: |
411 | cb->args[1] = e; | 411 | cb->args[1] = e; |
412 | cb->args[0] = h; | 412 | cb->args[0] = h; |
413 | 413 | ||
414 | res = res < 0 ? res : skb->len; | 414 | res = res < 0 ? res : skb->len; |
415 | if (res <= 0) | 415 | if (res <= 0) |
416 | fib6_dump_end(cb); | 416 | fib6_dump_end(cb); |
417 | return res; | 417 | return res; |
418 | } | 418 | } |
419 | 419 | ||
420 | /* | 420 | /* |
421 | * Routing Table | 421 | * Routing Table |
422 | * | 422 | * |
423 | * return the appropriate node for a routing tree "add" operation | 423 | * return the appropriate node for a routing tree "add" operation |
424 | * by either creating and inserting or by returning an existing | 424 | * by either creating and inserting or by returning an existing |
425 | * node. | 425 | * node. |
426 | */ | 426 | */ |
427 | 427 | ||
428 | static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, | 428 | static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, |
429 | int addrlen, int plen, | 429 | int addrlen, int plen, |
430 | int offset) | 430 | int offset) |
431 | { | 431 | { |
432 | struct fib6_node *fn, *in, *ln; | 432 | struct fib6_node *fn, *in, *ln; |
433 | struct fib6_node *pn = NULL; | 433 | struct fib6_node *pn = NULL; |
434 | struct rt6key *key; | 434 | struct rt6key *key; |
435 | int bit; | 435 | int bit; |
436 | __be32 dir = 0; | 436 | __be32 dir = 0; |
437 | __u32 sernum = fib6_new_sernum(); | 437 | __u32 sernum = fib6_new_sernum(); |
438 | 438 | ||
439 | RT6_TRACE("fib6_add_1\n"); | 439 | RT6_TRACE("fib6_add_1\n"); |
440 | 440 | ||
441 | /* insert node in tree */ | 441 | /* insert node in tree */ |
442 | 442 | ||
443 | fn = root; | 443 | fn = root; |
444 | 444 | ||
445 | do { | 445 | do { |
446 | key = (struct rt6key *)((u8 *)fn->leaf + offset); | 446 | key = (struct rt6key *)((u8 *)fn->leaf + offset); |
447 | 447 | ||
448 | /* | 448 | /* |
449 | * Prefix match | 449 | * Prefix match |
450 | */ | 450 | */ |
451 | if (plen < fn->fn_bit || | 451 | if (plen < fn->fn_bit || |
452 | !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) | 452 | !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) |
453 | goto insert_above; | 453 | goto insert_above; |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Exact match ? | 456 | * Exact match ? |
457 | */ | 457 | */ |
458 | 458 | ||
459 | if (plen == fn->fn_bit) { | 459 | if (plen == fn->fn_bit) { |
460 | /* clean up an intermediate node */ | 460 | /* clean up an intermediate node */ |
461 | if ((fn->fn_flags & RTN_RTINFO) == 0) { | 461 | if ((fn->fn_flags & RTN_RTINFO) == 0) { |
462 | rt6_release(fn->leaf); | 462 | rt6_release(fn->leaf); |
463 | fn->leaf = NULL; | 463 | fn->leaf = NULL; |
464 | } | 464 | } |
465 | 465 | ||
466 | fn->fn_sernum = sernum; | 466 | fn->fn_sernum = sernum; |
467 | 467 | ||
468 | return fn; | 468 | return fn; |
469 | } | 469 | } |
470 | 470 | ||
471 | /* | 471 | /* |
472 | * We have more bits to go | 472 | * We have more bits to go |
473 | */ | 473 | */ |
474 | 474 | ||
475 | /* Try to walk down on tree. */ | 475 | /* Try to walk down on tree. */ |
476 | fn->fn_sernum = sernum; | 476 | fn->fn_sernum = sernum; |
477 | dir = addr_bit_set(addr, fn->fn_bit); | 477 | dir = addr_bit_set(addr, fn->fn_bit); |
478 | pn = fn; | 478 | pn = fn; |
479 | fn = dir ? fn->right: fn->left; | 479 | fn = dir ? fn->right: fn->left; |
480 | } while (fn); | 480 | } while (fn); |
481 | 481 | ||
482 | /* | 482 | /* |
483 | * We walked to the bottom of tree. | 483 | * We walked to the bottom of tree. |
484 | * Create new leaf node without children. | 484 | * Create new leaf node without children. |
485 | */ | 485 | */ |
486 | 486 | ||
487 | ln = node_alloc(); | 487 | ln = node_alloc(); |
488 | 488 | ||
489 | if (ln == NULL) | 489 | if (ln == NULL) |
490 | return NULL; | 490 | return NULL; |
491 | ln->fn_bit = plen; | 491 | ln->fn_bit = plen; |
492 | 492 | ||
493 | ln->parent = pn; | 493 | ln->parent = pn; |
494 | ln->fn_sernum = sernum; | 494 | ln->fn_sernum = sernum; |
495 | 495 | ||
496 | if (dir) | 496 | if (dir) |
497 | pn->right = ln; | 497 | pn->right = ln; |
498 | else | 498 | else |
499 | pn->left = ln; | 499 | pn->left = ln; |
500 | 500 | ||
501 | return ln; | 501 | return ln; |
502 | 502 | ||
503 | 503 | ||
504 | insert_above: | 504 | insert_above: |
505 | /* | 505 | /* |
506 | * split since we don't have a common prefix anymore or | 506 | * split since we don't have a common prefix anymore or |
507 | * we have a less significant route. | 507 | * we have a less significant route. |
508 | * we've to insert an intermediate node on the list | 508 | * we've to insert an intermediate node on the list |
509 | * this new node will point to the one we need to create | 509 | * this new node will point to the one we need to create |
510 | * and the current | 510 | * and the current |
511 | */ | 511 | */ |
512 | 512 | ||
513 | pn = fn->parent; | 513 | pn = fn->parent; |
514 | 514 | ||
515 | /* find 1st bit in difference between the 2 addrs. | 515 | /* find 1st bit in difference between the 2 addrs. |
516 | 516 | ||
517 | See comment in __ipv6_addr_diff: bit may be an invalid value, | 517 | See comment in __ipv6_addr_diff: bit may be an invalid value, |
518 | but if it is >= plen, the value is ignored in any case. | 518 | but if it is >= plen, the value is ignored in any case. |
519 | */ | 519 | */ |
520 | 520 | ||
521 | bit = __ipv6_addr_diff(addr, &key->addr, addrlen); | 521 | bit = __ipv6_addr_diff(addr, &key->addr, addrlen); |
522 | 522 | ||
523 | /* | 523 | /* |
524 | * (intermediate)[in] | 524 | * (intermediate)[in] |
525 | * / \ | 525 | * / \ |
526 | * (new leaf node)[ln] (old node)[fn] | 526 | * (new leaf node)[ln] (old node)[fn] |
527 | */ | 527 | */ |
528 | if (plen > bit) { | 528 | if (plen > bit) { |
529 | in = node_alloc(); | 529 | in = node_alloc(); |
530 | ln = node_alloc(); | 530 | ln = node_alloc(); |
531 | 531 | ||
532 | if (in == NULL || ln == NULL) { | 532 | if (in == NULL || ln == NULL) { |
533 | if (in) | 533 | if (in) |
534 | node_free(in); | 534 | node_free(in); |
535 | if (ln) | 535 | if (ln) |
536 | node_free(ln); | 536 | node_free(ln); |
537 | return NULL; | 537 | return NULL; |
538 | } | 538 | } |
539 | 539 | ||
540 | /* | 540 | /* |
541 | * new intermediate node. | 541 | * new intermediate node. |
542 | * RTN_RTINFO will | 542 | * RTN_RTINFO will |
543 | * be off since that an address that chooses one of | 543 | * be off since that an address that chooses one of |
544 | * the branches would not match less specific routes | 544 | * the branches would not match less specific routes |
545 | * in the other branch | 545 | * in the other branch |
546 | */ | 546 | */ |
547 | 547 | ||
548 | in->fn_bit = bit; | 548 | in->fn_bit = bit; |
549 | 549 | ||
550 | in->parent = pn; | 550 | in->parent = pn; |
551 | in->leaf = fn->leaf; | 551 | in->leaf = fn->leaf; |
552 | atomic_inc(&in->leaf->rt6i_ref); | 552 | atomic_inc(&in->leaf->rt6i_ref); |
553 | 553 | ||
554 | in->fn_sernum = sernum; | 554 | in->fn_sernum = sernum; |
555 | 555 | ||
556 | /* update parent pointer */ | 556 | /* update parent pointer */ |
557 | if (dir) | 557 | if (dir) |
558 | pn->right = in; | 558 | pn->right = in; |
559 | else | 559 | else |
560 | pn->left = in; | 560 | pn->left = in; |
561 | 561 | ||
562 | ln->fn_bit = plen; | 562 | ln->fn_bit = plen; |
563 | 563 | ||
564 | ln->parent = in; | 564 | ln->parent = in; |
565 | fn->parent = in; | 565 | fn->parent = in; |
566 | 566 | ||
567 | ln->fn_sernum = sernum; | 567 | ln->fn_sernum = sernum; |
568 | 568 | ||
569 | if (addr_bit_set(addr, bit)) { | 569 | if (addr_bit_set(addr, bit)) { |
570 | in->right = ln; | 570 | in->right = ln; |
571 | in->left = fn; | 571 | in->left = fn; |
572 | } else { | 572 | } else { |
573 | in->left = ln; | 573 | in->left = ln; |
574 | in->right = fn; | 574 | in->right = fn; |
575 | } | 575 | } |
576 | } else { /* plen <= bit */ | 576 | } else { /* plen <= bit */ |
577 | 577 | ||
578 | /* | 578 | /* |
579 | * (new leaf node)[ln] | 579 | * (new leaf node)[ln] |
580 | * / \ | 580 | * / \ |
581 | * (old node)[fn] NULL | 581 | * (old node)[fn] NULL |
582 | */ | 582 | */ |
583 | 583 | ||
584 | ln = node_alloc(); | 584 | ln = node_alloc(); |
585 | 585 | ||
586 | if (ln == NULL) | 586 | if (ln == NULL) |
587 | return NULL; | 587 | return NULL; |
588 | 588 | ||
589 | ln->fn_bit = plen; | 589 | ln->fn_bit = plen; |
590 | 590 | ||
591 | ln->parent = pn; | 591 | ln->parent = pn; |
592 | 592 | ||
593 | ln->fn_sernum = sernum; | 593 | ln->fn_sernum = sernum; |
594 | 594 | ||
595 | if (dir) | 595 | if (dir) |
596 | pn->right = ln; | 596 | pn->right = ln; |
597 | else | 597 | else |
598 | pn->left = ln; | 598 | pn->left = ln; |
599 | 599 | ||
600 | if (addr_bit_set(&key->addr, plen)) | 600 | if (addr_bit_set(&key->addr, plen)) |
601 | ln->right = fn; | 601 | ln->right = fn; |
602 | else | 602 | else |
603 | ln->left = fn; | 603 | ln->left = fn; |
604 | 604 | ||
605 | fn->parent = ln; | 605 | fn->parent = ln; |
606 | } | 606 | } |
607 | return ln; | 607 | return ln; |
608 | } | 608 | } |
609 | 609 | ||
610 | /* | 610 | /* |
611 | * Insert routing information in a node. | 611 | * Insert routing information in a node. |
612 | */ | 612 | */ |
613 | 613 | ||
614 | static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | 614 | static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, |
615 | struct nl_info *info) | 615 | struct nl_info *info) |
616 | { | 616 | { |
617 | struct rt6_info *iter = NULL; | 617 | struct rt6_info *iter = NULL; |
618 | struct rt6_info **ins; | 618 | struct rt6_info **ins; |
619 | 619 | ||
620 | ins = &fn->leaf; | 620 | ins = &fn->leaf; |
621 | 621 | ||
622 | for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { | 622 | for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { |
623 | /* | 623 | /* |
624 | * Search for duplicates | 624 | * Search for duplicates |
625 | */ | 625 | */ |
626 | 626 | ||
627 | if (iter->rt6i_metric == rt->rt6i_metric) { | 627 | if (iter->rt6i_metric == rt->rt6i_metric) { |
628 | /* | 628 | /* |
629 | * Same priority level | 629 | * Same priority level |
630 | */ | 630 | */ |
631 | 631 | ||
632 | if (iter->rt6i_dev == rt->rt6i_dev && | 632 | if (iter->rt6i_dev == rt->rt6i_dev && |
633 | iter->rt6i_idev == rt->rt6i_idev && | 633 | iter->rt6i_idev == rt->rt6i_idev && |
634 | ipv6_addr_equal(&iter->rt6i_gateway, | 634 | ipv6_addr_equal(&iter->rt6i_gateway, |
635 | &rt->rt6i_gateway)) { | 635 | &rt->rt6i_gateway)) { |
636 | if (!(iter->rt6i_flags&RTF_EXPIRES)) | 636 | if (!(iter->rt6i_flags&RTF_EXPIRES)) |
637 | return -EEXIST; | 637 | return -EEXIST; |
638 | iter->rt6i_expires = rt->rt6i_expires; | 638 | iter->rt6i_expires = rt->rt6i_expires; |
639 | if (!(rt->rt6i_flags&RTF_EXPIRES)) { | 639 | if (!(rt->rt6i_flags&RTF_EXPIRES)) { |
640 | iter->rt6i_flags &= ~RTF_EXPIRES; | 640 | iter->rt6i_flags &= ~RTF_EXPIRES; |
641 | iter->rt6i_expires = 0; | 641 | iter->rt6i_expires = 0; |
642 | } | 642 | } |
643 | return -EEXIST; | 643 | return -EEXIST; |
644 | } | 644 | } |
645 | } | 645 | } |
646 | 646 | ||
647 | if (iter->rt6i_metric > rt->rt6i_metric) | 647 | if (iter->rt6i_metric > rt->rt6i_metric) |
648 | break; | 648 | break; |
649 | 649 | ||
650 | ins = &iter->u.dst.rt6_next; | 650 | ins = &iter->u.dst.rt6_next; |
651 | } | 651 | } |
652 | 652 | ||
653 | /* Reset round-robin state, if necessary */ | 653 | /* Reset round-robin state, if necessary */ |
654 | if (ins == &fn->leaf) | 654 | if (ins == &fn->leaf) |
655 | fn->rr_ptr = NULL; | 655 | fn->rr_ptr = NULL; |
656 | 656 | ||
657 | /* | 657 | /* |
658 | * insert node | 658 | * insert node |
659 | */ | 659 | */ |
660 | 660 | ||
661 | rt->u.dst.rt6_next = iter; | 661 | rt->u.dst.rt6_next = iter; |
662 | *ins = rt; | 662 | *ins = rt; |
663 | rt->rt6i_node = fn; | 663 | rt->rt6i_node = fn; |
664 | atomic_inc(&rt->rt6i_ref); | 664 | atomic_inc(&rt->rt6i_ref); |
665 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 665 | inet6_rt_notify(RTM_NEWROUTE, rt, info); |
666 | rt6_stats.fib_rt_entries++; | 666 | rt6_stats.fib_rt_entries++; |
667 | 667 | ||
668 | if ((fn->fn_flags & RTN_RTINFO) == 0) { | 668 | if ((fn->fn_flags & RTN_RTINFO) == 0) { |
669 | rt6_stats.fib_route_nodes++; | 669 | rt6_stats.fib_route_nodes++; |
670 | fn->fn_flags |= RTN_RTINFO; | 670 | fn->fn_flags |= RTN_RTINFO; |
671 | } | 671 | } |
672 | 672 | ||
673 | return 0; | 673 | return 0; |
674 | } | 674 | } |
675 | 675 | ||
676 | static __inline__ void fib6_start_gc(struct rt6_info *rt) | 676 | static __inline__ void fib6_start_gc(struct rt6_info *rt) |
677 | { | 677 | { |
678 | if (ip6_fib_timer.expires == 0 && | 678 | if (ip6_fib_timer.expires == 0 && |
679 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) | 679 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) |
680 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); | 680 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); |
681 | } | 681 | } |
682 | 682 | ||
683 | void fib6_force_start_gc(void) | 683 | void fib6_force_start_gc(void) |
684 | { | 684 | { |
685 | if (ip6_fib_timer.expires == 0) | 685 | if (ip6_fib_timer.expires == 0) |
686 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); | 686 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); |
687 | } | 687 | } |
688 | 688 | ||
689 | /* | 689 | /* |
690 | * Add routing information to the routing tree. | 690 | * Add routing information to the routing tree. |
691 | * <destination addr>/<source addr> | 691 | * <destination addr>/<source addr> |
692 | * with source addr info in sub-trees | 692 | * with source addr info in sub-trees |
693 | */ | 693 | */ |
694 | 694 | ||
695 | int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) | 695 | int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) |
696 | { | 696 | { |
697 | struct fib6_node *fn, *pn = NULL; | 697 | struct fib6_node *fn, *pn = NULL; |
698 | int err = -ENOMEM; | 698 | int err = -ENOMEM; |
699 | 699 | ||
700 | fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), | 700 | fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), |
701 | rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst)); | 701 | rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst)); |
702 | 702 | ||
703 | if (fn == NULL) | 703 | if (fn == NULL) |
704 | goto out; | 704 | goto out; |
705 | 705 | ||
706 | pn = fn; | 706 | pn = fn; |
707 | 707 | ||
708 | #ifdef CONFIG_IPV6_SUBTREES | 708 | #ifdef CONFIG_IPV6_SUBTREES |
709 | if (rt->rt6i_src.plen) { | 709 | if (rt->rt6i_src.plen) { |
710 | struct fib6_node *sn; | 710 | struct fib6_node *sn; |
711 | 711 | ||
712 | if (fn->subtree == NULL) { | 712 | if (fn->subtree == NULL) { |
713 | struct fib6_node *sfn; | 713 | struct fib6_node *sfn; |
714 | 714 | ||
715 | /* | 715 | /* |
716 | * Create subtree. | 716 | * Create subtree. |
717 | * | 717 | * |
718 | * fn[main tree] | 718 | * fn[main tree] |
719 | * | | 719 | * | |
720 | * sfn[subtree root] | 720 | * sfn[subtree root] |
721 | * \ | 721 | * \ |
722 | * sn[new leaf node] | 722 | * sn[new leaf node] |
723 | */ | 723 | */ |
724 | 724 | ||
725 | /* Create subtree root node */ | 725 | /* Create subtree root node */ |
726 | sfn = node_alloc(); | 726 | sfn = node_alloc(); |
727 | if (sfn == NULL) | 727 | if (sfn == NULL) |
728 | goto st_failure; | 728 | goto st_failure; |
729 | 729 | ||
730 | sfn->leaf = &ip6_null_entry; | 730 | sfn->leaf = &ip6_null_entry; |
731 | atomic_inc(&ip6_null_entry.rt6i_ref); | 731 | atomic_inc(&ip6_null_entry.rt6i_ref); |
732 | sfn->fn_flags = RTN_ROOT; | 732 | sfn->fn_flags = RTN_ROOT; |
733 | sfn->fn_sernum = fib6_new_sernum(); | 733 | sfn->fn_sernum = fib6_new_sernum(); |
734 | 734 | ||
735 | /* Now add the first leaf node to new subtree */ | 735 | /* Now add the first leaf node to new subtree */ |
736 | 736 | ||
737 | sn = fib6_add_1(sfn, &rt->rt6i_src.addr, | 737 | sn = fib6_add_1(sfn, &rt->rt6i_src.addr, |
738 | sizeof(struct in6_addr), rt->rt6i_src.plen, | 738 | sizeof(struct in6_addr), rt->rt6i_src.plen, |
739 | offsetof(struct rt6_info, rt6i_src)); | 739 | offsetof(struct rt6_info, rt6i_src)); |
740 | 740 | ||
741 | if (sn == NULL) { | 741 | if (sn == NULL) { |
742 | /* If it is failed, discard just allocated | 742 | /* If it is failed, discard just allocated |
743 | root, and then (in st_failure) stale node | 743 | root, and then (in st_failure) stale node |
744 | in main tree. | 744 | in main tree. |
745 | */ | 745 | */ |
746 | node_free(sfn); | 746 | node_free(sfn); |
747 | goto st_failure; | 747 | goto st_failure; |
748 | } | 748 | } |
749 | 749 | ||
750 | /* Now link new subtree to main tree */ | 750 | /* Now link new subtree to main tree */ |
751 | sfn->parent = fn; | 751 | sfn->parent = fn; |
752 | fn->subtree = sfn; | 752 | fn->subtree = sfn; |
753 | } else { | 753 | } else { |
754 | sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, | 754 | sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, |
755 | sizeof(struct in6_addr), rt->rt6i_src.plen, | 755 | sizeof(struct in6_addr), rt->rt6i_src.plen, |
756 | offsetof(struct rt6_info, rt6i_src)); | 756 | offsetof(struct rt6_info, rt6i_src)); |
757 | 757 | ||
758 | if (sn == NULL) | 758 | if (sn == NULL) |
759 | goto st_failure; | 759 | goto st_failure; |
760 | } | 760 | } |
761 | 761 | ||
762 | if (fn->leaf == NULL) { | 762 | if (fn->leaf == NULL) { |
763 | fn->leaf = rt; | 763 | fn->leaf = rt; |
764 | atomic_inc(&rt->rt6i_ref); | 764 | atomic_inc(&rt->rt6i_ref); |
765 | } | 765 | } |
766 | fn = sn; | 766 | fn = sn; |
767 | } | 767 | } |
768 | #endif | 768 | #endif |
769 | 769 | ||
770 | err = fib6_add_rt2node(fn, rt, info); | 770 | err = fib6_add_rt2node(fn, rt, info); |
771 | 771 | ||
772 | if (err == 0) { | 772 | if (err == 0) { |
773 | fib6_start_gc(rt); | 773 | fib6_start_gc(rt); |
774 | if (!(rt->rt6i_flags&RTF_CACHE)) | 774 | if (!(rt->rt6i_flags&RTF_CACHE)) |
775 | fib6_prune_clones(pn, rt); | 775 | fib6_prune_clones(pn, rt); |
776 | } | 776 | } |
777 | 777 | ||
778 | out: | 778 | out: |
779 | if (err) { | 779 | if (err) { |
780 | #ifdef CONFIG_IPV6_SUBTREES | 780 | #ifdef CONFIG_IPV6_SUBTREES |
781 | /* | 781 | /* |
782 | * If fib6_add_1 has cleared the old leaf pointer in the | 782 | * If fib6_add_1 has cleared the old leaf pointer in the |
783 | * super-tree leaf node we have to find a new one for it. | 783 | * super-tree leaf node we have to find a new one for it. |
784 | */ | 784 | */ |
785 | if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { | 785 | if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { |
786 | pn->leaf = fib6_find_prefix(pn); | 786 | pn->leaf = fib6_find_prefix(pn); |
787 | #if RT6_DEBUG >= 2 | 787 | #if RT6_DEBUG >= 2 |
788 | if (!pn->leaf) { | 788 | if (!pn->leaf) { |
789 | BUG_TRAP(pn->leaf != NULL); | 789 | BUG_TRAP(pn->leaf != NULL); |
790 | pn->leaf = &ip6_null_entry; | 790 | pn->leaf = &ip6_null_entry; |
791 | } | 791 | } |
792 | #endif | 792 | #endif |
793 | atomic_inc(&pn->leaf->rt6i_ref); | 793 | atomic_inc(&pn->leaf->rt6i_ref); |
794 | } | 794 | } |
795 | #endif | 795 | #endif |
796 | dst_free(&rt->u.dst); | 796 | dst_free(&rt->u.dst); |
797 | } | 797 | } |
798 | return err; | 798 | return err; |
799 | 799 | ||
800 | #ifdef CONFIG_IPV6_SUBTREES | 800 | #ifdef CONFIG_IPV6_SUBTREES |
801 | /* Subtree creation failed, probably main tree node | 801 | /* Subtree creation failed, probably main tree node |
802 | is orphan. If it is, shoot it. | 802 | is orphan. If it is, shoot it. |
803 | */ | 803 | */ |
804 | st_failure: | 804 | st_failure: |
805 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) | 805 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) |
806 | fib6_repair_tree(fn); | 806 | fib6_repair_tree(fn); |
807 | dst_free(&rt->u.dst); | 807 | dst_free(&rt->u.dst); |
808 | return err; | 808 | return err; |
809 | #endif | 809 | #endif |
810 | } | 810 | } |
811 | 811 | ||
812 | /* | 812 | /* |
813 | * Routing tree lookup | 813 | * Routing tree lookup |
814 | * | 814 | * |
815 | */ | 815 | */ |
816 | 816 | ||
817 | struct lookup_args { | 817 | struct lookup_args { |
818 | int offset; /* key offset on rt6_info */ | 818 | int offset; /* key offset on rt6_info */ |
819 | struct in6_addr *addr; /* search key */ | 819 | struct in6_addr *addr; /* search key */ |
820 | }; | 820 | }; |
821 | 821 | ||
822 | static struct fib6_node * fib6_lookup_1(struct fib6_node *root, | 822 | static struct fib6_node * fib6_lookup_1(struct fib6_node *root, |
823 | struct lookup_args *args) | 823 | struct lookup_args *args) |
824 | { | 824 | { |
825 | struct fib6_node *fn; | 825 | struct fib6_node *fn; |
826 | __be32 dir; | 826 | __be32 dir; |
827 | 827 | ||
828 | if (unlikely(args->offset == 0)) | 828 | if (unlikely(args->offset == 0)) |
829 | return NULL; | 829 | return NULL; |
830 | 830 | ||
831 | /* | 831 | /* |
832 | * Descend on a tree | 832 | * Descend on a tree |
833 | */ | 833 | */ |
834 | 834 | ||
835 | fn = root; | 835 | fn = root; |
836 | 836 | ||
837 | for (;;) { | 837 | for (;;) { |
838 | struct fib6_node *next; | 838 | struct fib6_node *next; |
839 | 839 | ||
840 | dir = addr_bit_set(args->addr, fn->fn_bit); | 840 | dir = addr_bit_set(args->addr, fn->fn_bit); |
841 | 841 | ||
842 | next = dir ? fn->right : fn->left; | 842 | next = dir ? fn->right : fn->left; |
843 | 843 | ||
844 | if (next) { | 844 | if (next) { |
845 | fn = next; | 845 | fn = next; |
846 | continue; | 846 | continue; |
847 | } | 847 | } |
848 | 848 | ||
849 | break; | 849 | break; |
850 | } | 850 | } |
851 | 851 | ||
852 | while(fn) { | 852 | while(fn) { |
853 | if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) { | 853 | if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) { |
854 | struct rt6key *key; | 854 | struct rt6key *key; |
855 | 855 | ||
856 | key = (struct rt6key *) ((u8 *) fn->leaf + | 856 | key = (struct rt6key *) ((u8 *) fn->leaf + |
857 | args->offset); | 857 | args->offset); |
858 | 858 | ||
859 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { | 859 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { |
860 | #ifdef CONFIG_IPV6_SUBTREES | 860 | #ifdef CONFIG_IPV6_SUBTREES |
861 | if (fn->subtree) | 861 | if (fn->subtree) |
862 | fn = fib6_lookup_1(fn->subtree, args + 1); | 862 | fn = fib6_lookup_1(fn->subtree, args + 1); |
863 | #endif | 863 | #endif |
864 | if (!fn || fn->fn_flags & RTN_RTINFO) | 864 | if (!fn || fn->fn_flags & RTN_RTINFO) |
865 | return fn; | 865 | return fn; |
866 | } | 866 | } |
867 | } | 867 | } |
868 | 868 | ||
869 | if (fn->fn_flags & RTN_ROOT) | 869 | if (fn->fn_flags & RTN_ROOT) |
870 | break; | 870 | break; |
871 | 871 | ||
872 | fn = fn->parent; | 872 | fn = fn->parent; |
873 | } | 873 | } |
874 | 874 | ||
875 | return NULL; | 875 | return NULL; |
876 | } | 876 | } |
877 | 877 | ||
878 | struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr, | 878 | struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr, |
879 | struct in6_addr *saddr) | 879 | struct in6_addr *saddr) |
880 | { | 880 | { |
881 | struct fib6_node *fn; | 881 | struct fib6_node *fn; |
882 | struct lookup_args args[] = { | 882 | struct lookup_args args[] = { |
883 | { | 883 | { |
884 | .offset = offsetof(struct rt6_info, rt6i_dst), | 884 | .offset = offsetof(struct rt6_info, rt6i_dst), |
885 | .addr = daddr, | 885 | .addr = daddr, |
886 | }, | 886 | }, |
887 | #ifdef CONFIG_IPV6_SUBTREES | 887 | #ifdef CONFIG_IPV6_SUBTREES |
888 | { | 888 | { |
889 | .offset = offsetof(struct rt6_info, rt6i_src), | 889 | .offset = offsetof(struct rt6_info, rt6i_src), |
890 | .addr = saddr, | 890 | .addr = saddr, |
891 | }, | 891 | }, |
892 | #endif | 892 | #endif |
893 | { | 893 | { |
894 | .offset = 0, /* sentinel */ | 894 | .offset = 0, /* sentinel */ |
895 | } | 895 | } |
896 | }; | 896 | }; |
897 | 897 | ||
898 | fn = fib6_lookup_1(root, daddr ? args : args + 1); | 898 | fn = fib6_lookup_1(root, daddr ? args : args + 1); |
899 | 899 | ||
900 | if (fn == NULL || fn->fn_flags & RTN_TL_ROOT) | 900 | if (fn == NULL || fn->fn_flags & RTN_TL_ROOT) |
901 | fn = root; | 901 | fn = root; |
902 | 902 | ||
903 | return fn; | 903 | return fn; |
904 | } | 904 | } |
905 | 905 | ||
906 | /* | 906 | /* |
907 | * Get node with specified destination prefix (and source prefix, | 907 | * Get node with specified destination prefix (and source prefix, |
908 | * if subtrees are used) | 908 | * if subtrees are used) |
909 | */ | 909 | */ |
910 | 910 | ||
911 | 911 | ||
912 | static struct fib6_node * fib6_locate_1(struct fib6_node *root, | 912 | static struct fib6_node * fib6_locate_1(struct fib6_node *root, |
913 | struct in6_addr *addr, | 913 | struct in6_addr *addr, |
914 | int plen, int offset) | 914 | int plen, int offset) |
915 | { | 915 | { |
916 | struct fib6_node *fn; | 916 | struct fib6_node *fn; |
917 | 917 | ||
918 | for (fn = root; fn ; ) { | 918 | for (fn = root; fn ; ) { |
919 | struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset); | 919 | struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset); |
920 | 920 | ||
921 | /* | 921 | /* |
922 | * Prefix match | 922 | * Prefix match |
923 | */ | 923 | */ |
924 | if (plen < fn->fn_bit || | 924 | if (plen < fn->fn_bit || |
925 | !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) | 925 | !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) |
926 | return NULL; | 926 | return NULL; |
927 | 927 | ||
928 | if (plen == fn->fn_bit) | 928 | if (plen == fn->fn_bit) |
929 | return fn; | 929 | return fn; |
930 | 930 | ||
931 | /* | 931 | /* |
932 | * We have more bits to go | 932 | * We have more bits to go |
933 | */ | 933 | */ |
934 | if (addr_bit_set(addr, fn->fn_bit)) | 934 | if (addr_bit_set(addr, fn->fn_bit)) |
935 | fn = fn->right; | 935 | fn = fn->right; |
936 | else | 936 | else |
937 | fn = fn->left; | 937 | fn = fn->left; |
938 | } | 938 | } |
939 | return NULL; | 939 | return NULL; |
940 | } | 940 | } |
941 | 941 | ||
942 | struct fib6_node * fib6_locate(struct fib6_node *root, | 942 | struct fib6_node * fib6_locate(struct fib6_node *root, |
943 | struct in6_addr *daddr, int dst_len, | 943 | struct in6_addr *daddr, int dst_len, |
944 | struct in6_addr *saddr, int src_len) | 944 | struct in6_addr *saddr, int src_len) |
945 | { | 945 | { |
946 | struct fib6_node *fn; | 946 | struct fib6_node *fn; |
947 | 947 | ||
948 | fn = fib6_locate_1(root, daddr, dst_len, | 948 | fn = fib6_locate_1(root, daddr, dst_len, |
949 | offsetof(struct rt6_info, rt6i_dst)); | 949 | offsetof(struct rt6_info, rt6i_dst)); |
950 | 950 | ||
951 | #ifdef CONFIG_IPV6_SUBTREES | 951 | #ifdef CONFIG_IPV6_SUBTREES |
952 | if (src_len) { | 952 | if (src_len) { |
953 | BUG_TRAP(saddr!=NULL); | 953 | BUG_TRAP(saddr!=NULL); |
954 | if (fn && fn->subtree) | 954 | if (fn && fn->subtree) |
955 | fn = fib6_locate_1(fn->subtree, saddr, src_len, | 955 | fn = fib6_locate_1(fn->subtree, saddr, src_len, |
956 | offsetof(struct rt6_info, rt6i_src)); | 956 | offsetof(struct rt6_info, rt6i_src)); |
957 | } | 957 | } |
958 | #endif | 958 | #endif |
959 | 959 | ||
960 | if (fn && fn->fn_flags&RTN_RTINFO) | 960 | if (fn && fn->fn_flags&RTN_RTINFO) |
961 | return fn; | 961 | return fn; |
962 | 962 | ||
963 | return NULL; | 963 | return NULL; |
964 | } | 964 | } |
965 | 965 | ||
966 | 966 | ||
967 | /* | 967 | /* |
968 | * Deletion | 968 | * Deletion |
969 | * | 969 | * |
970 | */ | 970 | */ |
971 | 971 | ||
972 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) | 972 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) |
973 | { | 973 | { |
974 | if (fn->fn_flags&RTN_ROOT) | 974 | if (fn->fn_flags&RTN_ROOT) |
975 | return &ip6_null_entry; | 975 | return &ip6_null_entry; |
976 | 976 | ||
977 | while(fn) { | 977 | while(fn) { |
978 | if(fn->left) | 978 | if(fn->left) |
979 | return fn->left->leaf; | 979 | return fn->left->leaf; |
980 | 980 | ||
981 | if(fn->right) | 981 | if(fn->right) |
982 | return fn->right->leaf; | 982 | return fn->right->leaf; |
983 | 983 | ||
984 | fn = FIB6_SUBTREE(fn); | 984 | fn = FIB6_SUBTREE(fn); |
985 | } | 985 | } |
986 | return NULL; | 986 | return NULL; |
987 | } | 987 | } |
988 | 988 | ||
989 | /* | 989 | /* |
990 | * Called to trim the tree of intermediate nodes when possible. "fn" | 990 | * Called to trim the tree of intermediate nodes when possible. "fn" |
991 | * is the node we want to try and remove. | 991 | * is the node we want to try and remove. |
992 | */ | 992 | */ |
993 | 993 | ||
994 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) | 994 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) |
995 | { | 995 | { |
996 | int children; | 996 | int children; |
997 | int nstate; | 997 | int nstate; |
998 | struct fib6_node *child, *pn; | 998 | struct fib6_node *child, *pn; |
999 | struct fib6_walker_t *w; | 999 | struct fib6_walker_t *w; |
1000 | int iter = 0; | 1000 | int iter = 0; |
1001 | 1001 | ||
1002 | for (;;) { | 1002 | for (;;) { |
1003 | RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); | 1003 | RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); |
1004 | iter++; | 1004 | iter++; |
1005 | 1005 | ||
1006 | BUG_TRAP(!(fn->fn_flags&RTN_RTINFO)); | 1006 | BUG_TRAP(!(fn->fn_flags&RTN_RTINFO)); |
1007 | BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT)); | 1007 | BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT)); |
1008 | BUG_TRAP(fn->leaf==NULL); | 1008 | BUG_TRAP(fn->leaf==NULL); |
1009 | 1009 | ||
1010 | children = 0; | 1010 | children = 0; |
1011 | child = NULL; | 1011 | child = NULL; |
1012 | if (fn->right) child = fn->right, children |= 1; | 1012 | if (fn->right) child = fn->right, children |= 1; |
1013 | if (fn->left) child = fn->left, children |= 2; | 1013 | if (fn->left) child = fn->left, children |= 2; |
1014 | 1014 | ||
1015 | if (children == 3 || FIB6_SUBTREE(fn) | 1015 | if (children == 3 || FIB6_SUBTREE(fn) |
1016 | #ifdef CONFIG_IPV6_SUBTREES | 1016 | #ifdef CONFIG_IPV6_SUBTREES |
1017 | /* Subtree root (i.e. fn) may have one child */ | 1017 | /* Subtree root (i.e. fn) may have one child */ |
1018 | || (children && fn->fn_flags&RTN_ROOT) | 1018 | || (children && fn->fn_flags&RTN_ROOT) |
1019 | #endif | 1019 | #endif |
1020 | ) { | 1020 | ) { |
1021 | fn->leaf = fib6_find_prefix(fn); | 1021 | fn->leaf = fib6_find_prefix(fn); |
1022 | #if RT6_DEBUG >= 2 | 1022 | #if RT6_DEBUG >= 2 |
1023 | if (fn->leaf==NULL) { | 1023 | if (fn->leaf==NULL) { |
1024 | BUG_TRAP(fn->leaf); | 1024 | BUG_TRAP(fn->leaf); |
1025 | fn->leaf = &ip6_null_entry; | 1025 | fn->leaf = &ip6_null_entry; |
1026 | } | 1026 | } |
1027 | #endif | 1027 | #endif |
1028 | atomic_inc(&fn->leaf->rt6i_ref); | 1028 | atomic_inc(&fn->leaf->rt6i_ref); |
1029 | return fn->parent; | 1029 | return fn->parent; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | pn = fn->parent; | 1032 | pn = fn->parent; |
1033 | #ifdef CONFIG_IPV6_SUBTREES | 1033 | #ifdef CONFIG_IPV6_SUBTREES |
1034 | if (FIB6_SUBTREE(pn) == fn) { | 1034 | if (FIB6_SUBTREE(pn) == fn) { |
1035 | BUG_TRAP(fn->fn_flags&RTN_ROOT); | 1035 | BUG_TRAP(fn->fn_flags&RTN_ROOT); |
1036 | FIB6_SUBTREE(pn) = NULL; | 1036 | FIB6_SUBTREE(pn) = NULL; |
1037 | nstate = FWS_L; | 1037 | nstate = FWS_L; |
1038 | } else { | 1038 | } else { |
1039 | BUG_TRAP(!(fn->fn_flags&RTN_ROOT)); | 1039 | BUG_TRAP(!(fn->fn_flags&RTN_ROOT)); |
1040 | #endif | 1040 | #endif |
1041 | if (pn->right == fn) pn->right = child; | 1041 | if (pn->right == fn) pn->right = child; |
1042 | else if (pn->left == fn) pn->left = child; | 1042 | else if (pn->left == fn) pn->left = child; |
1043 | #if RT6_DEBUG >= 2 | 1043 | #if RT6_DEBUG >= 2 |
1044 | else BUG_TRAP(0); | 1044 | else BUG_TRAP(0); |
1045 | #endif | 1045 | #endif |
1046 | if (child) | 1046 | if (child) |
1047 | child->parent = pn; | 1047 | child->parent = pn; |
1048 | nstate = FWS_R; | 1048 | nstate = FWS_R; |
1049 | #ifdef CONFIG_IPV6_SUBTREES | 1049 | #ifdef CONFIG_IPV6_SUBTREES |
1050 | } | 1050 | } |
1051 | #endif | 1051 | #endif |
1052 | 1052 | ||
1053 | read_lock(&fib6_walker_lock); | 1053 | read_lock(&fib6_walker_lock); |
1054 | FOR_WALKERS(w) { | 1054 | FOR_WALKERS(w) { |
1055 | if (child == NULL) { | 1055 | if (child == NULL) { |
1056 | if (w->root == fn) { | 1056 | if (w->root == fn) { |
1057 | w->root = w->node = NULL; | 1057 | w->root = w->node = NULL; |
1058 | RT6_TRACE("W %p adjusted by delroot 1\n", w); | 1058 | RT6_TRACE("W %p adjusted by delroot 1\n", w); |
1059 | } else if (w->node == fn) { | 1059 | } else if (w->node == fn) { |
1060 | RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate); | 1060 | RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate); |
1061 | w->node = pn; | 1061 | w->node = pn; |
1062 | w->state = nstate; | 1062 | w->state = nstate; |
1063 | } | 1063 | } |
1064 | } else { | 1064 | } else { |
1065 | if (w->root == fn) { | 1065 | if (w->root == fn) { |
1066 | w->root = child; | 1066 | w->root = child; |
1067 | RT6_TRACE("W %p adjusted by delroot 2\n", w); | 1067 | RT6_TRACE("W %p adjusted by delroot 2\n", w); |
1068 | } | 1068 | } |
1069 | if (w->node == fn) { | 1069 | if (w->node == fn) { |
1070 | w->node = child; | 1070 | w->node = child; |
1071 | if (children&2) { | 1071 | if (children&2) { |
1072 | RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); | 1072 | RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); |
1073 | w->state = w->state>=FWS_R ? FWS_U : FWS_INIT; | 1073 | w->state = w->state>=FWS_R ? FWS_U : FWS_INIT; |
1074 | } else { | 1074 | } else { |
1075 | RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); | 1075 | RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); |
1076 | w->state = w->state>=FWS_C ? FWS_U : FWS_INIT; | 1076 | w->state = w->state>=FWS_C ? FWS_U : FWS_INIT; |
1077 | } | 1077 | } |
1078 | } | 1078 | } |
1079 | } | 1079 | } |
1080 | } | 1080 | } |
1081 | read_unlock(&fib6_walker_lock); | 1081 | read_unlock(&fib6_walker_lock); |
1082 | 1082 | ||
1083 | node_free(fn); | 1083 | node_free(fn); |
1084 | if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn)) | 1084 | if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn)) |
1085 | return pn; | 1085 | return pn; |
1086 | 1086 | ||
1087 | rt6_release(pn->leaf); | 1087 | rt6_release(pn->leaf); |
1088 | pn->leaf = NULL; | 1088 | pn->leaf = NULL; |
1089 | fn = pn; | 1089 | fn = pn; |
1090 | } | 1090 | } |
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | 1093 | static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, |
1094 | struct nl_info *info) | 1094 | struct nl_info *info) |
1095 | { | 1095 | { |
1096 | struct fib6_walker_t *w; | 1096 | struct fib6_walker_t *w; |
1097 | struct rt6_info *rt = *rtp; | 1097 | struct rt6_info *rt = *rtp; |
1098 | 1098 | ||
1099 | RT6_TRACE("fib6_del_route\n"); | 1099 | RT6_TRACE("fib6_del_route\n"); |
1100 | 1100 | ||
1101 | /* Unlink it */ | 1101 | /* Unlink it */ |
1102 | *rtp = rt->u.dst.rt6_next; | 1102 | *rtp = rt->u.dst.rt6_next; |
1103 | rt->rt6i_node = NULL; | 1103 | rt->rt6i_node = NULL; |
1104 | rt6_stats.fib_rt_entries--; | 1104 | rt6_stats.fib_rt_entries--; |
1105 | rt6_stats.fib_discarded_routes++; | 1105 | rt6_stats.fib_discarded_routes++; |
1106 | 1106 | ||
1107 | /* Reset round-robin state, if necessary */ | 1107 | /* Reset round-robin state, if necessary */ |
1108 | if (fn->rr_ptr == rt) | 1108 | if (fn->rr_ptr == rt) |
1109 | fn->rr_ptr = NULL; | 1109 | fn->rr_ptr = NULL; |
1110 | 1110 | ||
1111 | /* Adjust walkers */ | 1111 | /* Adjust walkers */ |
1112 | read_lock(&fib6_walker_lock); | 1112 | read_lock(&fib6_walker_lock); |
1113 | FOR_WALKERS(w) { | 1113 | FOR_WALKERS(w) { |
1114 | if (w->state == FWS_C && w->leaf == rt) { | 1114 | if (w->state == FWS_C && w->leaf == rt) { |
1115 | RT6_TRACE("walker %p adjusted by delroute\n", w); | 1115 | RT6_TRACE("walker %p adjusted by delroute\n", w); |
1116 | w->leaf = rt->u.dst.rt6_next; | 1116 | w->leaf = rt->u.dst.rt6_next; |
1117 | if (w->leaf == NULL) | 1117 | if (w->leaf == NULL) |
1118 | w->state = FWS_U; | 1118 | w->state = FWS_U; |
1119 | } | 1119 | } |
1120 | } | 1120 | } |
1121 | read_unlock(&fib6_walker_lock); | 1121 | read_unlock(&fib6_walker_lock); |
1122 | 1122 | ||
1123 | rt->u.dst.rt6_next = NULL; | 1123 | rt->u.dst.rt6_next = NULL; |
1124 | 1124 | ||
1125 | if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT) | 1125 | if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT) |
1126 | fn->leaf = &ip6_null_entry; | 1126 | fn->leaf = &ip6_null_entry; |
1127 | 1127 | ||
1128 | /* If it was last route, expunge its radix tree node */ | 1128 | /* If it was last route, expunge its radix tree node */ |
1129 | if (fn->leaf == NULL) { | 1129 | if (fn->leaf == NULL) { |
1130 | fn->fn_flags &= ~RTN_RTINFO; | 1130 | fn->fn_flags &= ~RTN_RTINFO; |
1131 | rt6_stats.fib_route_nodes--; | 1131 | rt6_stats.fib_route_nodes--; |
1132 | fn = fib6_repair_tree(fn); | 1132 | fn = fib6_repair_tree(fn); |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | if (atomic_read(&rt->rt6i_ref) != 1) { | 1135 | if (atomic_read(&rt->rt6i_ref) != 1) { |
1136 | /* This route is used as dummy address holder in some split | 1136 | /* This route is used as dummy address holder in some split |
1137 | * nodes. It is not leaked, but it still holds other resources, | 1137 | * nodes. It is not leaked, but it still holds other resources, |
1138 | * which must be released in time. So, scan ascendant nodes | 1138 | * which must be released in time. So, scan ascendant nodes |
1139 | * and replace dummy references to this route with references | 1139 | * and replace dummy references to this route with references |
1140 | * to still alive ones. | 1140 | * to still alive ones. |
1141 | */ | 1141 | */ |
1142 | while (fn) { | 1142 | while (fn) { |
1143 | if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { | 1143 | if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { |
1144 | fn->leaf = fib6_find_prefix(fn); | 1144 | fn->leaf = fib6_find_prefix(fn); |
1145 | atomic_inc(&fn->leaf->rt6i_ref); | 1145 | atomic_inc(&fn->leaf->rt6i_ref); |
1146 | rt6_release(rt); | 1146 | rt6_release(rt); |
1147 | } | 1147 | } |
1148 | fn = fn->parent; | 1148 | fn = fn->parent; |
1149 | } | 1149 | } |
1150 | /* No more references are possible at this point. */ | 1150 | /* No more references are possible at this point. */ |
1151 | if (atomic_read(&rt->rt6i_ref) != 1) BUG(); | 1151 | if (atomic_read(&rt->rt6i_ref) != 1) BUG(); |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | inet6_rt_notify(RTM_DELROUTE, rt, info); | 1154 | inet6_rt_notify(RTM_DELROUTE, rt, info); |
1155 | rt6_release(rt); | 1155 | rt6_release(rt); |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | int fib6_del(struct rt6_info *rt, struct nl_info *info) | 1158 | int fib6_del(struct rt6_info *rt, struct nl_info *info) |
1159 | { | 1159 | { |
1160 | struct fib6_node *fn = rt->rt6i_node; | 1160 | struct fib6_node *fn = rt->rt6i_node; |
1161 | struct rt6_info **rtp; | 1161 | struct rt6_info **rtp; |
1162 | 1162 | ||
1163 | #if RT6_DEBUG >= 2 | 1163 | #if RT6_DEBUG >= 2 |
1164 | if (rt->u.dst.obsolete>0) { | 1164 | if (rt->u.dst.obsolete>0) { |
1165 | BUG_TRAP(fn==NULL); | 1165 | BUG_TRAP(fn==NULL); |
1166 | return -ENOENT; | 1166 | return -ENOENT; |
1167 | } | 1167 | } |
1168 | #endif | 1168 | #endif |
1169 | if (fn == NULL || rt == &ip6_null_entry) | 1169 | if (fn == NULL || rt == &ip6_null_entry) |
1170 | return -ENOENT; | 1170 | return -ENOENT; |
1171 | 1171 | ||
1172 | BUG_TRAP(fn->fn_flags&RTN_RTINFO); | 1172 | BUG_TRAP(fn->fn_flags&RTN_RTINFO); |
1173 | 1173 | ||
1174 | if (!(rt->rt6i_flags&RTF_CACHE)) { | 1174 | if (!(rt->rt6i_flags&RTF_CACHE)) { |
1175 | struct fib6_node *pn = fn; | 1175 | struct fib6_node *pn = fn; |
1176 | #ifdef CONFIG_IPV6_SUBTREES | 1176 | #ifdef CONFIG_IPV6_SUBTREES |
1177 | /* clones of this route might be in another subtree */ | 1177 | /* clones of this route might be in another subtree */ |
1178 | if (rt->rt6i_src.plen) { | 1178 | if (rt->rt6i_src.plen) { |
1179 | while (!(pn->fn_flags&RTN_ROOT)) | 1179 | while (!(pn->fn_flags&RTN_ROOT)) |
1180 | pn = pn->parent; | 1180 | pn = pn->parent; |
1181 | pn = pn->parent; | 1181 | pn = pn->parent; |
1182 | } | 1182 | } |
1183 | #endif | 1183 | #endif |
1184 | fib6_prune_clones(pn, rt); | 1184 | fib6_prune_clones(pn, rt); |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* | 1187 | /* |
1188 | * Walk the leaf entries looking for ourself | 1188 | * Walk the leaf entries looking for ourself |
1189 | */ | 1189 | */ |
1190 | 1190 | ||
1191 | for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) { | 1191 | for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) { |
1192 | if (*rtp == rt) { | 1192 | if (*rtp == rt) { |
1193 | fib6_del_route(fn, rtp, info); | 1193 | fib6_del_route(fn, rtp, info); |
1194 | return 0; | 1194 | return 0; |
1195 | } | 1195 | } |
1196 | } | 1196 | } |
1197 | return -ENOENT; | 1197 | return -ENOENT; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | /* | 1200 | /* |
1201 | * Tree traversal function. | 1201 | * Tree traversal function. |
1202 | * | 1202 | * |
1203 | * Certainly, it is not interrupt safe. | 1203 | * Certainly, it is not interrupt safe. |
1204 | * However, it is internally reenterable wrt itself and fib6_add/fib6_del. | 1204 | * However, it is internally reenterable wrt itself and fib6_add/fib6_del. |
1205 | * It means, that we can modify tree during walking | 1205 | * It means, that we can modify tree during walking |
1206 | * and use this function for garbage collection, clone pruning, | 1206 | * and use this function for garbage collection, clone pruning, |
1207 | * cleaning tree when a device goes down etc. etc. | 1207 | * cleaning tree when a device goes down etc. etc. |
1208 | * | 1208 | * |
1209 | * It guarantees that every node will be traversed, | 1209 | * It guarantees that every node will be traversed, |
1210 | * and that it will be traversed only once. | 1210 | * and that it will be traversed only once. |
1211 | * | 1211 | * |
1212 | * Callback function w->func may return: | 1212 | * Callback function w->func may return: |
1213 | * 0 -> continue walking. | 1213 | * 0 -> continue walking. |
1214 | * positive value -> walking is suspended (used by tree dumps, | 1214 | * positive value -> walking is suspended (used by tree dumps, |
1215 | * and probably by gc, if it will be split to several slices) | 1215 | * and probably by gc, if it will be split to several slices) |
1216 | * negative value -> terminate walking. | 1216 | * negative value -> terminate walking. |
1217 | * | 1217 | * |
1218 | * The function itself returns: | 1218 | * The function itself returns: |
1219 | * 0 -> walk is complete. | 1219 | * 0 -> walk is complete. |
1220 | * >0 -> walk is incomplete (i.e. suspended) | 1220 | * >0 -> walk is incomplete (i.e. suspended) |
1221 | * <0 -> walk is terminated by an error. | 1221 | * <0 -> walk is terminated by an error. |
1222 | */ | 1222 | */ |
1223 | 1223 | ||
1224 | static int fib6_walk_continue(struct fib6_walker_t *w) | 1224 | static int fib6_walk_continue(struct fib6_walker_t *w) |
1225 | { | 1225 | { |
1226 | struct fib6_node *fn, *pn; | 1226 | struct fib6_node *fn, *pn; |
1227 | 1227 | ||
1228 | for (;;) { | 1228 | for (;;) { |
1229 | fn = w->node; | 1229 | fn = w->node; |
1230 | if (fn == NULL) | 1230 | if (fn == NULL) |
1231 | return 0; | 1231 | return 0; |
1232 | 1232 | ||
1233 | if (w->prune && fn != w->root && | 1233 | if (w->prune && fn != w->root && |
1234 | fn->fn_flags&RTN_RTINFO && w->state < FWS_C) { | 1234 | fn->fn_flags&RTN_RTINFO && w->state < FWS_C) { |
1235 | w->state = FWS_C; | 1235 | w->state = FWS_C; |
1236 | w->leaf = fn->leaf; | 1236 | w->leaf = fn->leaf; |
1237 | } | 1237 | } |
1238 | switch (w->state) { | 1238 | switch (w->state) { |
1239 | #ifdef CONFIG_IPV6_SUBTREES | 1239 | #ifdef CONFIG_IPV6_SUBTREES |
1240 | case FWS_S: | 1240 | case FWS_S: |
1241 | if (FIB6_SUBTREE(fn)) { | 1241 | if (FIB6_SUBTREE(fn)) { |
1242 | w->node = FIB6_SUBTREE(fn); | 1242 | w->node = FIB6_SUBTREE(fn); |
1243 | continue; | 1243 | continue; |
1244 | } | 1244 | } |
1245 | w->state = FWS_L; | 1245 | w->state = FWS_L; |
1246 | #endif | 1246 | #endif |
1247 | case FWS_L: | 1247 | case FWS_L: |
1248 | if (fn->left) { | 1248 | if (fn->left) { |
1249 | w->node = fn->left; | 1249 | w->node = fn->left; |
1250 | w->state = FWS_INIT; | 1250 | w->state = FWS_INIT; |
1251 | continue; | 1251 | continue; |
1252 | } | 1252 | } |
1253 | w->state = FWS_R; | 1253 | w->state = FWS_R; |
1254 | case FWS_R: | 1254 | case FWS_R: |
1255 | if (fn->right) { | 1255 | if (fn->right) { |
1256 | w->node = fn->right; | 1256 | w->node = fn->right; |
1257 | w->state = FWS_INIT; | 1257 | w->state = FWS_INIT; |
1258 | continue; | 1258 | continue; |
1259 | } | 1259 | } |
1260 | w->state = FWS_C; | 1260 | w->state = FWS_C; |
1261 | w->leaf = fn->leaf; | 1261 | w->leaf = fn->leaf; |
1262 | case FWS_C: | 1262 | case FWS_C: |
1263 | if (w->leaf && fn->fn_flags&RTN_RTINFO) { | 1263 | if (w->leaf && fn->fn_flags&RTN_RTINFO) { |
1264 | int err = w->func(w); | 1264 | int err = w->func(w); |
1265 | if (err) | 1265 | if (err) |
1266 | return err; | 1266 | return err; |
1267 | continue; | 1267 | continue; |
1268 | } | 1268 | } |
1269 | w->state = FWS_U; | 1269 | w->state = FWS_U; |
1270 | case FWS_U: | 1270 | case FWS_U: |
1271 | if (fn == w->root) | 1271 | if (fn == w->root) |
1272 | return 0; | 1272 | return 0; |
1273 | pn = fn->parent; | 1273 | pn = fn->parent; |
1274 | w->node = pn; | 1274 | w->node = pn; |
1275 | #ifdef CONFIG_IPV6_SUBTREES | 1275 | #ifdef CONFIG_IPV6_SUBTREES |
1276 | if (FIB6_SUBTREE(pn) == fn) { | 1276 | if (FIB6_SUBTREE(pn) == fn) { |
1277 | BUG_TRAP(fn->fn_flags&RTN_ROOT); | 1277 | BUG_TRAP(fn->fn_flags&RTN_ROOT); |
1278 | w->state = FWS_L; | 1278 | w->state = FWS_L; |
1279 | continue; | 1279 | continue; |
1280 | } | 1280 | } |
1281 | #endif | 1281 | #endif |
1282 | if (pn->left == fn) { | 1282 | if (pn->left == fn) { |
1283 | w->state = FWS_R; | 1283 | w->state = FWS_R; |
1284 | continue; | 1284 | continue; |
1285 | } | 1285 | } |
1286 | if (pn->right == fn) { | 1286 | if (pn->right == fn) { |
1287 | w->state = FWS_C; | 1287 | w->state = FWS_C; |
1288 | w->leaf = w->node->leaf; | 1288 | w->leaf = w->node->leaf; |
1289 | continue; | 1289 | continue; |
1290 | } | 1290 | } |
1291 | #if RT6_DEBUG >= 2 | 1291 | #if RT6_DEBUG >= 2 |
1292 | BUG_TRAP(0); | 1292 | BUG_TRAP(0); |
1293 | #endif | 1293 | #endif |
1294 | } | 1294 | } |
1295 | } | 1295 | } |
1296 | } | 1296 | } |
1297 | 1297 | ||
1298 | static int fib6_walk(struct fib6_walker_t *w) | 1298 | static int fib6_walk(struct fib6_walker_t *w) |
1299 | { | 1299 | { |
1300 | int res; | 1300 | int res; |
1301 | 1301 | ||
1302 | w->state = FWS_INIT; | 1302 | w->state = FWS_INIT; |
1303 | w->node = w->root; | 1303 | w->node = w->root; |
1304 | 1304 | ||
1305 | fib6_walker_link(w); | 1305 | fib6_walker_link(w); |
1306 | res = fib6_walk_continue(w); | 1306 | res = fib6_walk_continue(w); |
1307 | if (res <= 0) | 1307 | if (res <= 0) |
1308 | fib6_walker_unlink(w); | 1308 | fib6_walker_unlink(w); |
1309 | return res; | 1309 | return res; |
1310 | } | 1310 | } |
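As an illustration of the callback contract described in the comment above fib6_walk_continue(), here is a hypothetical walker that visits a bounded number of leaves per slice and suspends with a positive return value. The struct and function names below are examples only (not part of this file); they simply mirror the embedded-walker pattern that fib6_cleaner_t uses further down.

	/* Hypothetical example: process at most 'budget' leaves per slice,
	 * then suspend the walk with a positive return value. */
	struct fib6_example_walker {
		struct fib6_walker_t w;		/* embedded walker, recovered via container_of() */
		int budget;
	};

	static int fib6_example_func(struct fib6_walker_t *w)
	{
		struct fib6_example_walker *ew =
			container_of(w, struct fib6_example_walker, w);
		struct rt6_info *rt;

		for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
			if (ew->budget-- <= 0) {
				w->leaf = rt;	/* remember where to resume */
				return 1;	/* > 0: suspend; walker stays linked */
			}
		}
		return 0;			/* 0: continue with the next node */
	}

A caller would start the walk with fib6_walk(&ew->w) while holding the table's tb6_lock and, on a positive return, resume it later with fib6_walk_continue(&ew->w).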
1311 | 1311 | ||
1312 | static int fib6_clean_node(struct fib6_walker_t *w) | 1312 | static int fib6_clean_node(struct fib6_walker_t *w) |
1313 | { | 1313 | { |
1314 | int res; | 1314 | int res; |
1315 | struct rt6_info *rt; | 1315 | struct rt6_info *rt; |
1316 | struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w; | 1316 | struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); |
1317 | 1317 | ||
1318 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { | 1318 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { |
1319 | res = c->func(rt, c->arg); | 1319 | res = c->func(rt, c->arg); |
1320 | if (res < 0) { | 1320 | if (res < 0) { |
1321 | w->leaf = rt; | 1321 | w->leaf = rt; |
1322 | res = fib6_del(rt, NULL); | 1322 | res = fib6_del(rt, NULL); |
1323 | if (res) { | 1323 | if (res) { |
1324 | #if RT6_DEBUG >= 2 | 1324 | #if RT6_DEBUG >= 2 |
1325 | printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res); | 1325 | printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res); |
1326 | #endif | 1326 | #endif |
1327 | continue; | 1327 | continue; |
1328 | } | 1328 | } |
1329 | return 0; | 1329 | return 0; |
1330 | } | 1330 | } |
1331 | BUG_TRAP(res==0); | 1331 | BUG_TRAP(res==0); |
1332 | } | 1332 | } |
1333 | w->leaf = rt; | 1333 | w->leaf = rt; |
1334 | return 0; | 1334 | return 0; |
1335 | } | 1335 | } |
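fib6_clean_node() above recovers its enclosing fib6_cleaner_t from the embedded walker with container_of(). A minimal, self-contained userspace sketch of that pattern follows; the struct and function names are simplified stand-ins, not the kernel definitions.

	/* Standalone sketch of the container_of() pattern (compiles in userspace). */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct walker {
		int state;
	};

	struct cleaner {
		int id;			/* container_of() subtracts offsetof(), so
					 * the embedded member need not come first */
		struct walker w;
		int arg;
	};

	static int clean_node(struct walker *w)
	{
		/* Recover the enclosing cleaner from its embedded walker. */
		struct cleaner *c = container_of(w, struct cleaner, w);

		return c->arg;
	}

	int main(void)
	{
		struct cleaner c = { .id = 1, .w = { .state = 0 }, .arg = 42 };

		printf("%d\n", clean_node(&c.w));	/* prints 42 */
		return 0;
	}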
1336 | 1336 | ||
1337 | /* | 1337 | /* |
1338 | * Convenient frontend to tree walker. | 1338 | * Convenient frontend to tree walker. |
1339 | * | 1339 | * |
1340 | * func is called on each route. | 1340 | * func is called on each route. |
1341 | * It may return -1 -> delete this route. | 1341 | * It may return -1 -> delete this route. |
1342 | * 0 -> continue walking | 1342 | * 0 -> continue walking |
1343 | * | 1343 | * |
1344 | * prune==1 -> only immediate children of node (certainly, | 1344 | * prune==1 -> only immediate children of node (certainly, |
1345 | * ignoring pure split nodes) will be scanned. | 1345 | * ignoring pure split nodes) will be scanned. |
1346 | */ | 1346 | */ |
1347 | 1347 | ||
1348 | static void fib6_clean_tree(struct fib6_node *root, | 1348 | static void fib6_clean_tree(struct fib6_node *root, |
1349 | int (*func)(struct rt6_info *, void *arg), | 1349 | int (*func)(struct rt6_info *, void *arg), |
1350 | int prune, void *arg) | 1350 | int prune, void *arg) |
1351 | { | 1351 | { |
1352 | struct fib6_cleaner_t c; | 1352 | struct fib6_cleaner_t c; |
1353 | 1353 | ||
1354 | c.w.root = root; | 1354 | c.w.root = root; |
1355 | c.w.func = fib6_clean_node; | 1355 | c.w.func = fib6_clean_node; |
1356 | c.w.prune = prune; | 1356 | c.w.prune = prune; |
1357 | c.func = func; | 1357 | c.func = func; |
1358 | c.arg = arg; | 1358 | c.arg = arg; |
1359 | 1359 | ||
1360 | fib6_walk(&c.w); | 1360 | fib6_walk(&c.w); |
1361 | } | 1361 | } |
1362 | 1362 | ||
1363 | void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), | 1363 | void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), |
1364 | int prune, void *arg) | 1364 | int prune, void *arg) |
1365 | { | 1365 | { |
1366 | struct fib6_table *table; | 1366 | struct fib6_table *table; |
1367 | struct hlist_node *node; | 1367 | struct hlist_node *node; |
1368 | unsigned int h; | 1368 | unsigned int h; |
1369 | 1369 | ||
1370 | rcu_read_lock(); | 1370 | rcu_read_lock(); |
1371 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 1371 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
1372 | hlist_for_each_entry_rcu(table, node, &fib_table_hash[h], | 1372 | hlist_for_each_entry_rcu(table, node, &fib_table_hash[h], |
1373 | tb6_hlist) { | 1373 | tb6_hlist) { |
1374 | write_lock_bh(&table->tb6_lock); | 1374 | write_lock_bh(&table->tb6_lock); |
1375 | fib6_clean_tree(&table->tb6_root, func, prune, arg); | 1375 | fib6_clean_tree(&table->tb6_root, func, prune, arg); |
1376 | write_unlock_bh(&table->tb6_lock); | 1376 | write_unlock_bh(&table->tb6_lock); |
1377 | } | 1377 | } |
1378 | } | 1378 | } |
1379 | rcu_read_unlock(); | 1379 | rcu_read_unlock(); |
1380 | } | 1380 | } |
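As a usage illustration of this frontend, a hypothetical callback (not part of this file) could count every route in every table by passing a counter through the opaque arg pointer; per the comment above fib6_clean_tree(), returning 0 keeps walking and -1 would delete the route.

	/* Hypothetical example: count routes via the opaque 'arg' pointer. */
	static int fib6_count_route(struct rt6_info *rt, void *arg)
	{
		(*(int *)arg)++;
		return 0;		/* keep walking; -1 would delete 'rt' */
	}

	static int fib6_count_all_routes(void)
	{
		int count = 0;

		/* prune == 0: visit the whole tree of every table */
		fib6_clean_all(fib6_count_route, 0, &count);
		return count;
	}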
1381 | 1381 | ||
1382 | static int fib6_prune_clone(struct rt6_info *rt, void *arg) | 1382 | static int fib6_prune_clone(struct rt6_info *rt, void *arg) |
1383 | { | 1383 | { |
1384 | if (rt->rt6i_flags & RTF_CACHE) { | 1384 | if (rt->rt6i_flags & RTF_CACHE) { |
1385 | RT6_TRACE("pruning clone %p\n", rt); | 1385 | RT6_TRACE("pruning clone %p\n", rt); |
1386 | return -1; | 1386 | return -1; |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | return 0; | 1389 | return 0; |
1390 | } | 1390 | } |
1391 | 1391 | ||
1392 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt) | 1392 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt) |
1393 | { | 1393 | { |
1394 | fib6_clean_tree(fn, fib6_prune_clone, 1, rt); | 1394 | fib6_clean_tree(fn, fib6_prune_clone, 1, rt); |
1395 | } | 1395 | } |
1396 | 1396 | ||
1397 | /* | 1397 | /* |
1398 | * Garbage collection | 1398 | * Garbage collection |
1399 | */ | 1399 | */ |
1400 | 1400 | ||
1401 | static struct fib6_gc_args | 1401 | static struct fib6_gc_args |
1402 | { | 1402 | { |
1403 | int timeout; | 1403 | int timeout; |
1404 | int more; | 1404 | int more; |
1405 | } gc_args; | 1405 | } gc_args; |
1406 | 1406 | ||
1407 | static int fib6_age(struct rt6_info *rt, void *arg) | 1407 | static int fib6_age(struct rt6_info *rt, void *arg) |
1408 | { | 1408 | { |
1409 | unsigned long now = jiffies; | 1409 | unsigned long now = jiffies; |
1410 | 1410 | ||
1411 | /* | 1411 | /* |
1412 | * check addrconf expiration here. | 1412 | * check addrconf expiration here. |
1413 | * Routes are expired even if they are in use. | 1413 | * Routes are expired even if they are in use. |
1414 | * | 1414 | * |
1415 | * Also age clones. Note, that clones are aged out | 1415 | * Also age clones. Note, that clones are aged out |
1416 | * only if they are not in use now. | 1416 | * only if they are not in use now. |
1417 | */ | 1417 | */ |
1418 | 1418 | ||
1419 | if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) { | 1419 | if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) { |
1420 | if (time_after(now, rt->rt6i_expires)) { | 1420 | if (time_after(now, rt->rt6i_expires)) { |
1421 | RT6_TRACE("expiring %p\n", rt); | 1421 | RT6_TRACE("expiring %p\n", rt); |
1422 | return -1; | 1422 | return -1; |
1423 | } | 1423 | } |
1424 | gc_args.more++; | 1424 | gc_args.more++; |
1425 | } else if (rt->rt6i_flags & RTF_CACHE) { | 1425 | } else if (rt->rt6i_flags & RTF_CACHE) { |
1426 | if (atomic_read(&rt->u.dst.__refcnt) == 0 && | 1426 | if (atomic_read(&rt->u.dst.__refcnt) == 0 && |
1427 | time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) { | 1427 | time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) { |
1428 | RT6_TRACE("aging clone %p\n", rt); | 1428 | RT6_TRACE("aging clone %p\n", rt); |
1429 | return -1; | 1429 | return -1; |
1430 | } else if ((rt->rt6i_flags & RTF_GATEWAY) && | 1430 | } else if ((rt->rt6i_flags & RTF_GATEWAY) && |
1431 | (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) { | 1431 | (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) { |
1432 | RT6_TRACE("purging route %p via non-router but gateway\n", | 1432 | RT6_TRACE("purging route %p via non-router but gateway\n", |
1433 | rt); | 1433 | rt); |
1434 | return -1; | 1434 | return -1; |
1435 | } | 1435 | } |
1436 | gc_args.more++; | 1436 | gc_args.more++; |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | return 0; | 1439 | return 0; |
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | static DEFINE_SPINLOCK(fib6_gc_lock); | 1442 | static DEFINE_SPINLOCK(fib6_gc_lock); |
1443 | 1443 | ||
1444 | void fib6_run_gc(unsigned long dummy) | 1444 | void fib6_run_gc(unsigned long dummy) |
1445 | { | 1445 | { |
1446 | if (dummy != ~0UL) { | 1446 | if (dummy != ~0UL) { |
1447 | spin_lock_bh(&fib6_gc_lock); | 1447 | spin_lock_bh(&fib6_gc_lock); |
1448 | gc_args.timeout = dummy ? (int)dummy : ip6_rt_gc_interval; | 1448 | gc_args.timeout = dummy ? (int)dummy : ip6_rt_gc_interval; |
1449 | } else { | 1449 | } else { |
1450 | local_bh_disable(); | 1450 | local_bh_disable(); |
1451 | if (!spin_trylock(&fib6_gc_lock)) { | 1451 | if (!spin_trylock(&fib6_gc_lock)) { |
1452 | mod_timer(&ip6_fib_timer, jiffies + HZ); | 1452 | mod_timer(&ip6_fib_timer, jiffies + HZ); |
1453 | local_bh_enable(); | 1453 | local_bh_enable(); |
1454 | return; | 1454 | return; |
1455 | } | 1455 | } |
1456 | gc_args.timeout = ip6_rt_gc_interval; | 1456 | gc_args.timeout = ip6_rt_gc_interval; |
1457 | } | 1457 | } |
1458 | gc_args.more = 0; | 1458 | gc_args.more = 0; |
1459 | 1459 | ||
1460 | ndisc_dst_gc(&gc_args.more); | 1460 | ndisc_dst_gc(&gc_args.more); |
1461 | fib6_clean_all(fib6_age, 0, NULL); | 1461 | fib6_clean_all(fib6_age, 0, NULL); |
1462 | 1462 | ||
1463 | if (gc_args.more) | 1463 | if (gc_args.more) |
1464 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); | 1464 | mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval); |
1465 | else { | 1465 | else { |
1466 | del_timer(&ip6_fib_timer); | 1466 | del_timer(&ip6_fib_timer); |
1467 | ip6_fib_timer.expires = 0; | 1467 | ip6_fib_timer.expires = 0; |
1468 | } | 1468 | } |
1469 | spin_unlock_bh(&fib6_gc_lock); | 1469 | spin_unlock_bh(&fib6_gc_lock); |
1470 | } | 1470 | } |
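The conventions for fib6_run_gc()'s 'dummy' argument can be read off the branches above; a small, hypothetical caller (the function name below is illustrative only) summarizes them.

	/* Hypothetical caller, illustrating the argument conventions visible in
	 * fib6_run_gc() above:
	 *   0     -> take fib6_gc_lock and use the default ip6_rt_gc_interval
	 *   ~0UL  -> best effort: trylock, reschedule the timer if the lock is busy
	 *   other -> take the lock and use the value as the gc timeout
	 */
	static void example_flush_expired_routes(void)
	{
		fib6_run_gc(0);		/* synchronous run with the default interval */
	}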
1471 | 1471 | ||
1472 | void __init fib6_init(void) | 1472 | void __init fib6_init(void) |
1473 | { | 1473 | { |
1474 | fib6_node_kmem = kmem_cache_create("fib6_nodes", | 1474 | fib6_node_kmem = kmem_cache_create("fib6_nodes", |
1475 | sizeof(struct fib6_node), | 1475 | sizeof(struct fib6_node), |
1476 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, | 1476 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
1477 | NULL); | 1477 | NULL); |
1478 | 1478 | ||
1479 | fib6_tables_init(); | 1479 | fib6_tables_init(); |
1480 | 1480 | ||
1481 | __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); | 1481 | __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); |
1482 | } | 1482 | } |
1483 | 1483 | ||
1484 | void fib6_gc_cleanup(void) | 1484 | void fib6_gc_cleanup(void) |
1485 | { | 1485 | { |
1486 | del_timer(&ip6_fib_timer); | 1486 | del_timer(&ip6_fib_timer); |
1487 | kmem_cache_destroy(fib6_node_kmem); | 1487 | kmem_cache_destroy(fib6_node_kmem); |
1488 | } | 1488 | } |
1489 | 1489 |