Commit 4670994d150a86ebd53ab353a2af517c5465bfaf
Committed by Paul E. McKenney
1 parent 3acb458c32
Exists in master and in 7 other branches
net,rcu: convert call_rcu(fc_rport_free_rcu) to kfree_rcu()
The RCU callback fc_rport_free_rcu() just calls kfree(), so we can use kfree_rcu() instead of call_rcu(fc_rport_free_rcu).

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
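For readers unfamiliar with this conversion pattern, here is a minimal sketch (a hypothetical struct foo, not code from this commit): when an RCU callback's only job is to kfree() the structure that embeds the rcu_head, the callback can be deleted and call_rcu() replaced by kfree_rcu(), which takes the object pointer and the name of its rcu_head member.

    /* Hypothetical example of the call_rcu() -> kfree_rcu() conversion;
     * struct foo and its fields are invented for illustration only.
     */
    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct foo {
            int data;
            struct rcu_head rcu;
    };

    /* Before: a dedicated callback that does nothing but kfree() the object. */
    static void foo_free_rcu(struct rcu_head *head)
    {
            struct foo *f = container_of(head, struct foo, rcu);

            kfree(f);
    }

    static void foo_release_old(struct foo *f)
    {
            call_rcu(&f->rcu, foo_free_rcu);
    }

    /* After: kfree_rcu() takes the object and the name of its rcu_head
     * member, so the trivial callback above can be removed entirely.
     */
    static void foo_release_new(struct foo *f)
    {
            kfree_rcu(f, rcu);
    }

Note that kfree_rcu() frees exactly one allocation (the one containing the rcu_head), so this shortcut applies only when a single kfree() is all the callback does.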
Showing 1 changed file with 1 addition and 11 deletions
net/ipv4/fib_semantics.c
1 | /* | 1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | 2 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
3 | * operating system. INET is implemented using the BSD Socket | 3 | * operating system. INET is implemented using the BSD Socket |
4 | * interface as the means of communication with the user level. | 4 | * interface as the means of communication with the user level. |
5 | * | 5 | * |
6 | * IPv4 Forwarding Information Base: semantics. | 6 | * IPv4 Forwarding Information Base: semantics. |
7 | * | 7 | * |
8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 12 | * as published by the Free Software Foundation; either version |
13 | * 2 of the License, or (at your option) any later version. | 13 | * 2 of the License, or (at your option) any later version. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
17 | #include <asm/system.h> | 17 | #include <asm/system.h> |
18 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/socket.h> | 24 | #include <linux/socket.h> |
25 | #include <linux/sockios.h> | 25 | #include <linux/sockios.h> |
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/in.h> | 27 | #include <linux/in.h> |
28 | #include <linux/inet.h> | 28 | #include <linux/inet.h> |
29 | #include <linux/inetdevice.h> | 29 | #include <linux/inetdevice.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/if_arp.h> | 31 | #include <linux/if_arp.h> |
32 | #include <linux/proc_fs.h> | 32 | #include <linux/proc_fs.h> |
33 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | 36 | ||
37 | #include <net/arp.h> | 37 | #include <net/arp.h> |
38 | #include <net/ip.h> | 38 | #include <net/ip.h> |
39 | #include <net/protocol.h> | 39 | #include <net/protocol.h> |
40 | #include <net/route.h> | 40 | #include <net/route.h> |
41 | #include <net/tcp.h> | 41 | #include <net/tcp.h> |
42 | #include <net/sock.h> | 42 | #include <net/sock.h> |
43 | #include <net/ip_fib.h> | 43 | #include <net/ip_fib.h> |
44 | #include <net/netlink.h> | 44 | #include <net/netlink.h> |
45 | #include <net/nexthop.h> | 45 | #include <net/nexthop.h> |
46 | 46 | ||
47 | #include "fib_lookup.h" | 47 | #include "fib_lookup.h" |
48 | 48 | ||
49 | static DEFINE_SPINLOCK(fib_info_lock); | 49 | static DEFINE_SPINLOCK(fib_info_lock); |
50 | static struct hlist_head *fib_info_hash; | 50 | static struct hlist_head *fib_info_hash; |
51 | static struct hlist_head *fib_info_laddrhash; | 51 | static struct hlist_head *fib_info_laddrhash; |
52 | static unsigned int fib_info_hash_size; | 52 | static unsigned int fib_info_hash_size; |
53 | static unsigned int fib_info_cnt; | 53 | static unsigned int fib_info_cnt; |
54 | 54 | ||
55 | #define DEVINDEX_HASHBITS 8 | 55 | #define DEVINDEX_HASHBITS 8 |
56 | #define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS) | 56 | #define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS) |
57 | static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; | 57 | static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; |
58 | 58 | ||
59 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 59 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
60 | 60 | ||
61 | static DEFINE_SPINLOCK(fib_multipath_lock); | 61 | static DEFINE_SPINLOCK(fib_multipath_lock); |
62 | 62 | ||
63 | #define for_nexthops(fi) { \ | 63 | #define for_nexthops(fi) { \ |
64 | int nhsel; const struct fib_nh *nh; \ | 64 | int nhsel; const struct fib_nh *nh; \ |
65 | for (nhsel = 0, nh = (fi)->fib_nh; \ | 65 | for (nhsel = 0, nh = (fi)->fib_nh; \ |
66 | nhsel < (fi)->fib_nhs; \ | 66 | nhsel < (fi)->fib_nhs; \ |
67 | nh++, nhsel++) | 67 | nh++, nhsel++) |
68 | 68 | ||
69 | #define change_nexthops(fi) { \ | 69 | #define change_nexthops(fi) { \ |
70 | int nhsel; struct fib_nh *nexthop_nh; \ | 70 | int nhsel; struct fib_nh *nexthop_nh; \ |
71 | for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ | 71 | for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ |
72 | nhsel < (fi)->fib_nhs; \ | 72 | nhsel < (fi)->fib_nhs; \ |
73 | nexthop_nh++, nhsel++) | 73 | nexthop_nh++, nhsel++) |
74 | 74 | ||
75 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ | 75 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ |
76 | 76 | ||
77 | /* Hope, that gcc will optimize it to get rid of dummy loop */ | 77 | /* Hope, that gcc will optimize it to get rid of dummy loop */ |
78 | 78 | ||
79 | #define for_nexthops(fi) { \ | 79 | #define for_nexthops(fi) { \ |
80 | int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \ | 80 | int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \ |
81 | for (nhsel = 0; nhsel < 1; nhsel++) | 81 | for (nhsel = 0; nhsel < 1; nhsel++) |
82 | 82 | ||
83 | #define change_nexthops(fi) { \ | 83 | #define change_nexthops(fi) { \ |
84 | int nhsel; \ | 84 | int nhsel; \ |
85 | struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ | 85 | struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ |
86 | for (nhsel = 0; nhsel < 1; nhsel++) | 86 | for (nhsel = 0; nhsel < 1; nhsel++) |
87 | 87 | ||
88 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | 88 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ |
89 | 89 | ||
90 | #define endfor_nexthops(fi) } | 90 | #define endfor_nexthops(fi) } |
91 | 91 | ||
92 | 92 | ||
93 | const struct fib_prop fib_props[RTN_MAX + 1] = { | 93 | const struct fib_prop fib_props[RTN_MAX + 1] = { |
94 | [RTN_UNSPEC] = { | 94 | [RTN_UNSPEC] = { |
95 | .error = 0, | 95 | .error = 0, |
96 | .scope = RT_SCOPE_NOWHERE, | 96 | .scope = RT_SCOPE_NOWHERE, |
97 | }, | 97 | }, |
98 | [RTN_UNICAST] = { | 98 | [RTN_UNICAST] = { |
99 | .error = 0, | 99 | .error = 0, |
100 | .scope = RT_SCOPE_UNIVERSE, | 100 | .scope = RT_SCOPE_UNIVERSE, |
101 | }, | 101 | }, |
102 | [RTN_LOCAL] = { | 102 | [RTN_LOCAL] = { |
103 | .error = 0, | 103 | .error = 0, |
104 | .scope = RT_SCOPE_HOST, | 104 | .scope = RT_SCOPE_HOST, |
105 | }, | 105 | }, |
106 | [RTN_BROADCAST] = { | 106 | [RTN_BROADCAST] = { |
107 | .error = 0, | 107 | .error = 0, |
108 | .scope = RT_SCOPE_LINK, | 108 | .scope = RT_SCOPE_LINK, |
109 | }, | 109 | }, |
110 | [RTN_ANYCAST] = { | 110 | [RTN_ANYCAST] = { |
111 | .error = 0, | 111 | .error = 0, |
112 | .scope = RT_SCOPE_LINK, | 112 | .scope = RT_SCOPE_LINK, |
113 | }, | 113 | }, |
114 | [RTN_MULTICAST] = { | 114 | [RTN_MULTICAST] = { |
115 | .error = 0, | 115 | .error = 0, |
116 | .scope = RT_SCOPE_UNIVERSE, | 116 | .scope = RT_SCOPE_UNIVERSE, |
117 | }, | 117 | }, |
118 | [RTN_BLACKHOLE] = { | 118 | [RTN_BLACKHOLE] = { |
119 | .error = -EINVAL, | 119 | .error = -EINVAL, |
120 | .scope = RT_SCOPE_UNIVERSE, | 120 | .scope = RT_SCOPE_UNIVERSE, |
121 | }, | 121 | }, |
122 | [RTN_UNREACHABLE] = { | 122 | [RTN_UNREACHABLE] = { |
123 | .error = -EHOSTUNREACH, | 123 | .error = -EHOSTUNREACH, |
124 | .scope = RT_SCOPE_UNIVERSE, | 124 | .scope = RT_SCOPE_UNIVERSE, |
125 | }, | 125 | }, |
126 | [RTN_PROHIBIT] = { | 126 | [RTN_PROHIBIT] = { |
127 | .error = -EACCES, | 127 | .error = -EACCES, |
128 | .scope = RT_SCOPE_UNIVERSE, | 128 | .scope = RT_SCOPE_UNIVERSE, |
129 | }, | 129 | }, |
130 | [RTN_THROW] = { | 130 | [RTN_THROW] = { |
131 | .error = -EAGAIN, | 131 | .error = -EAGAIN, |
132 | .scope = RT_SCOPE_UNIVERSE, | 132 | .scope = RT_SCOPE_UNIVERSE, |
133 | }, | 133 | }, |
134 | [RTN_NAT] = { | 134 | [RTN_NAT] = { |
135 | .error = -EINVAL, | 135 | .error = -EINVAL, |
136 | .scope = RT_SCOPE_NOWHERE, | 136 | .scope = RT_SCOPE_NOWHERE, |
137 | }, | 137 | }, |
138 | [RTN_XRESOLVE] = { | 138 | [RTN_XRESOLVE] = { |
139 | .error = -EINVAL, | 139 | .error = -EINVAL, |
140 | .scope = RT_SCOPE_NOWHERE, | 140 | .scope = RT_SCOPE_NOWHERE, |
141 | }, | 141 | }, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | |||
145 | /* Release a nexthop info record */ | 144 | /* Release a nexthop info record */ |
146 | 145 | ||
147 | static void free_fib_info_rcu(struct rcu_head *head) | ||
148 | { | ||
149 | struct fib_info *fi = container_of(head, struct fib_info, rcu); | ||
150 | |||
151 | if (fi->fib_metrics != (u32 *) dst_default_metrics) | ||
152 | kfree(fi->fib_metrics); | ||
153 | kfree(fi); | ||
154 | } | ||
155 | |||
156 | void free_fib_info(struct fib_info *fi) | 146 | void free_fib_info(struct fib_info *fi) |
157 | { | 147 | { |
158 | if (fi->fib_dead == 0) { | 148 | if (fi->fib_dead == 0) { |
159 | pr_warning("Freeing alive fib_info %p\n", fi); | 149 | pr_warning("Freeing alive fib_info %p\n", fi); |
160 | return; | 150 | return; |
161 | } | 151 | } |
162 | change_nexthops(fi) { | 152 | change_nexthops(fi) { |
163 | if (nexthop_nh->nh_dev) | 153 | if (nexthop_nh->nh_dev) |
164 | dev_put(nexthop_nh->nh_dev); | 154 | dev_put(nexthop_nh->nh_dev); |
165 | nexthop_nh->nh_dev = NULL; | 155 | nexthop_nh->nh_dev = NULL; |
166 | } endfor_nexthops(fi); | 156 | } endfor_nexthops(fi); |
167 | fib_info_cnt--; | 157 | fib_info_cnt--; |
168 | release_net(fi->fib_net); | 158 | release_net(fi->fib_net); |
169 | call_rcu(&fi->rcu, free_fib_info_rcu); | 159 | kfree_rcu(fi, rcu); |
170 | } | 160 | } |
171 | 161 | ||
172 | void fib_release_info(struct fib_info *fi) | 162 | void fib_release_info(struct fib_info *fi) |
173 | { | 163 | { |
174 | spin_lock_bh(&fib_info_lock); | 164 | spin_lock_bh(&fib_info_lock); |
175 | if (fi && --fi->fib_treeref == 0) { | 165 | if (fi && --fi->fib_treeref == 0) { |
176 | hlist_del(&fi->fib_hash); | 166 | hlist_del(&fi->fib_hash); |
177 | if (fi->fib_prefsrc) | 167 | if (fi->fib_prefsrc) |
178 | hlist_del(&fi->fib_lhash); | 168 | hlist_del(&fi->fib_lhash); |
179 | change_nexthops(fi) { | 169 | change_nexthops(fi) { |
180 | if (!nexthop_nh->nh_dev) | 170 | if (!nexthop_nh->nh_dev) |
181 | continue; | 171 | continue; |
182 | hlist_del(&nexthop_nh->nh_hash); | 172 | hlist_del(&nexthop_nh->nh_hash); |
183 | } endfor_nexthops(fi) | 173 | } endfor_nexthops(fi) |
184 | fi->fib_dead = 1; | 174 | fi->fib_dead = 1; |
185 | fib_info_put(fi); | 175 | fib_info_put(fi); |
186 | } | 176 | } |
187 | spin_unlock_bh(&fib_info_lock); | 177 | spin_unlock_bh(&fib_info_lock); |
188 | } | 178 | } |
189 | 179 | ||
190 | static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) | 180 | static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) |
191 | { | 181 | { |
192 | const struct fib_nh *onh = ofi->fib_nh; | 182 | const struct fib_nh *onh = ofi->fib_nh; |
193 | 183 | ||
194 | for_nexthops(fi) { | 184 | for_nexthops(fi) { |
195 | if (nh->nh_oif != onh->nh_oif || | 185 | if (nh->nh_oif != onh->nh_oif || |
196 | nh->nh_gw != onh->nh_gw || | 186 | nh->nh_gw != onh->nh_gw || |
197 | nh->nh_scope != onh->nh_scope || | 187 | nh->nh_scope != onh->nh_scope || |
198 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 188 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
199 | nh->nh_weight != onh->nh_weight || | 189 | nh->nh_weight != onh->nh_weight || |
200 | #endif | 190 | #endif |
201 | #ifdef CONFIG_IP_ROUTE_CLASSID | 191 | #ifdef CONFIG_IP_ROUTE_CLASSID |
202 | nh->nh_tclassid != onh->nh_tclassid || | 192 | nh->nh_tclassid != onh->nh_tclassid || |
203 | #endif | 193 | #endif |
204 | ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD)) | 194 | ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD)) |
205 | return -1; | 195 | return -1; |
206 | onh++; | 196 | onh++; |
207 | } endfor_nexthops(fi); | 197 | } endfor_nexthops(fi); |
208 | return 0; | 198 | return 0; |
209 | } | 199 | } |
210 | 200 | ||
211 | static inline unsigned int fib_devindex_hashfn(unsigned int val) | 201 | static inline unsigned int fib_devindex_hashfn(unsigned int val) |
212 | { | 202 | { |
213 | unsigned int mask = DEVINDEX_HASHSIZE - 1; | 203 | unsigned int mask = DEVINDEX_HASHSIZE - 1; |
214 | 204 | ||
215 | return (val ^ | 205 | return (val ^ |
216 | (val >> DEVINDEX_HASHBITS) ^ | 206 | (val >> DEVINDEX_HASHBITS) ^ |
217 | (val >> (DEVINDEX_HASHBITS * 2))) & mask; | 207 | (val >> (DEVINDEX_HASHBITS * 2))) & mask; |
218 | } | 208 | } |
219 | 209 | ||
220 | static inline unsigned int fib_info_hashfn(const struct fib_info *fi) | 210 | static inline unsigned int fib_info_hashfn(const struct fib_info *fi) |
221 | { | 211 | { |
222 | unsigned int mask = (fib_info_hash_size - 1); | 212 | unsigned int mask = (fib_info_hash_size - 1); |
223 | unsigned int val = fi->fib_nhs; | 213 | unsigned int val = fi->fib_nhs; |
224 | 214 | ||
225 | val ^= (fi->fib_protocol << 8) | fi->fib_scope; | 215 | val ^= (fi->fib_protocol << 8) | fi->fib_scope; |
226 | val ^= (__force u32)fi->fib_prefsrc; | 216 | val ^= (__force u32)fi->fib_prefsrc; |
227 | val ^= fi->fib_priority; | 217 | val ^= fi->fib_priority; |
228 | for_nexthops(fi) { | 218 | for_nexthops(fi) { |
229 | val ^= fib_devindex_hashfn(nh->nh_oif); | 219 | val ^= fib_devindex_hashfn(nh->nh_oif); |
230 | } endfor_nexthops(fi) | 220 | } endfor_nexthops(fi) |
231 | 221 | ||
232 | return (val ^ (val >> 7) ^ (val >> 12)) & mask; | 222 | return (val ^ (val >> 7) ^ (val >> 12)) & mask; |
233 | } | 223 | } |
234 | 224 | ||
235 | static struct fib_info *fib_find_info(const struct fib_info *nfi) | 225 | static struct fib_info *fib_find_info(const struct fib_info *nfi) |
236 | { | 226 | { |
237 | struct hlist_head *head; | 227 | struct hlist_head *head; |
238 | struct hlist_node *node; | 228 | struct hlist_node *node; |
239 | struct fib_info *fi; | 229 | struct fib_info *fi; |
240 | unsigned int hash; | 230 | unsigned int hash; |
241 | 231 | ||
242 | hash = fib_info_hashfn(nfi); | 232 | hash = fib_info_hashfn(nfi); |
243 | head = &fib_info_hash[hash]; | 233 | head = &fib_info_hash[hash]; |
244 | 234 | ||
245 | hlist_for_each_entry(fi, node, head, fib_hash) { | 235 | hlist_for_each_entry(fi, node, head, fib_hash) { |
246 | if (!net_eq(fi->fib_net, nfi->fib_net)) | 236 | if (!net_eq(fi->fib_net, nfi->fib_net)) |
247 | continue; | 237 | continue; |
248 | if (fi->fib_nhs != nfi->fib_nhs) | 238 | if (fi->fib_nhs != nfi->fib_nhs) |
249 | continue; | 239 | continue; |
250 | if (nfi->fib_protocol == fi->fib_protocol && | 240 | if (nfi->fib_protocol == fi->fib_protocol && |
251 | nfi->fib_scope == fi->fib_scope && | 241 | nfi->fib_scope == fi->fib_scope && |
252 | nfi->fib_prefsrc == fi->fib_prefsrc && | 242 | nfi->fib_prefsrc == fi->fib_prefsrc && |
253 | nfi->fib_priority == fi->fib_priority && | 243 | nfi->fib_priority == fi->fib_priority && |
254 | memcmp(nfi->fib_metrics, fi->fib_metrics, | 244 | memcmp(nfi->fib_metrics, fi->fib_metrics, |
255 | sizeof(u32) * RTAX_MAX) == 0 && | 245 | sizeof(u32) * RTAX_MAX) == 0 && |
256 | ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && | 246 | ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && |
257 | (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) | 247 | (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) |
258 | return fi; | 248 | return fi; |
259 | } | 249 | } |
260 | 250 | ||
261 | return NULL; | 251 | return NULL; |
262 | } | 252 | } |
263 | 253 | ||
264 | /* Check, that the gateway is already configured. | 254 | /* Check, that the gateway is already configured. |
265 | * Used only by redirect accept routine. | 255 | * Used only by redirect accept routine. |
266 | */ | 256 | */ |
267 | int ip_fib_check_default(__be32 gw, struct net_device *dev) | 257 | int ip_fib_check_default(__be32 gw, struct net_device *dev) |
268 | { | 258 | { |
269 | struct hlist_head *head; | 259 | struct hlist_head *head; |
270 | struct hlist_node *node; | 260 | struct hlist_node *node; |
271 | struct fib_nh *nh; | 261 | struct fib_nh *nh; |
272 | unsigned int hash; | 262 | unsigned int hash; |
273 | 263 | ||
274 | spin_lock(&fib_info_lock); | 264 | spin_lock(&fib_info_lock); |
275 | 265 | ||
276 | hash = fib_devindex_hashfn(dev->ifindex); | 266 | hash = fib_devindex_hashfn(dev->ifindex); |
277 | head = &fib_info_devhash[hash]; | 267 | head = &fib_info_devhash[hash]; |
278 | hlist_for_each_entry(nh, node, head, nh_hash) { | 268 | hlist_for_each_entry(nh, node, head, nh_hash) { |
279 | if (nh->nh_dev == dev && | 269 | if (nh->nh_dev == dev && |
280 | nh->nh_gw == gw && | 270 | nh->nh_gw == gw && |
281 | !(nh->nh_flags & RTNH_F_DEAD)) { | 271 | !(nh->nh_flags & RTNH_F_DEAD)) { |
282 | spin_unlock(&fib_info_lock); | 272 | spin_unlock(&fib_info_lock); |
283 | return 0; | 273 | return 0; |
284 | } | 274 | } |
285 | } | 275 | } |
286 | 276 | ||
287 | spin_unlock(&fib_info_lock); | 277 | spin_unlock(&fib_info_lock); |
288 | 278 | ||
289 | return -1; | 279 | return -1; |
290 | } | 280 | } |
291 | 281 | ||
292 | static inline size_t fib_nlmsg_size(struct fib_info *fi) | 282 | static inline size_t fib_nlmsg_size(struct fib_info *fi) |
293 | { | 283 | { |
294 | size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) | 284 | size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) |
295 | + nla_total_size(4) /* RTA_TABLE */ | 285 | + nla_total_size(4) /* RTA_TABLE */ |
296 | + nla_total_size(4) /* RTA_DST */ | 286 | + nla_total_size(4) /* RTA_DST */ |
297 | + nla_total_size(4) /* RTA_PRIORITY */ | 287 | + nla_total_size(4) /* RTA_PRIORITY */ |
298 | + nla_total_size(4); /* RTA_PREFSRC */ | 288 | + nla_total_size(4); /* RTA_PREFSRC */ |
299 | 289 | ||
300 | /* space for nested metrics */ | 290 | /* space for nested metrics */ |
301 | payload += nla_total_size((RTAX_MAX * nla_total_size(4))); | 291 | payload += nla_total_size((RTAX_MAX * nla_total_size(4))); |
302 | 292 | ||
303 | if (fi->fib_nhs) { | 293 | if (fi->fib_nhs) { |
304 | /* Also handles the special case fib_nhs == 1 */ | 294 | /* Also handles the special case fib_nhs == 1 */ |
305 | 295 | ||
306 | /* each nexthop is packed in an attribute */ | 296 | /* each nexthop is packed in an attribute */ |
307 | size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); | 297 | size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); |
308 | 298 | ||
309 | /* may contain flow and gateway attribute */ | 299 | /* may contain flow and gateway attribute */ |
310 | nhsize += 2 * nla_total_size(4); | 300 | nhsize += 2 * nla_total_size(4); |
311 | 301 | ||
312 | /* all nexthops are packed in a nested attribute */ | 302 | /* all nexthops are packed in a nested attribute */ |
313 | payload += nla_total_size(fi->fib_nhs * nhsize); | 303 | payload += nla_total_size(fi->fib_nhs * nhsize); |
314 | } | 304 | } |
315 | 305 | ||
316 | return payload; | 306 | return payload; |
317 | } | 307 | } |
318 | 308 | ||
319 | void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, | 309 | void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, |
320 | int dst_len, u32 tb_id, struct nl_info *info, | 310 | int dst_len, u32 tb_id, struct nl_info *info, |
321 | unsigned int nlm_flags) | 311 | unsigned int nlm_flags) |
322 | { | 312 | { |
323 | struct sk_buff *skb; | 313 | struct sk_buff *skb; |
324 | u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; | 314 | u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; |
325 | int err = -ENOBUFS; | 315 | int err = -ENOBUFS; |
326 | 316 | ||
327 | skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); | 317 | skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); |
328 | if (skb == NULL) | 318 | if (skb == NULL) |
329 | goto errout; | 319 | goto errout; |
330 | 320 | ||
331 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, | 321 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, |
332 | fa->fa_type, key, dst_len, | 322 | fa->fa_type, key, dst_len, |
333 | fa->fa_tos, fa->fa_info, nlm_flags); | 323 | fa->fa_tos, fa->fa_info, nlm_flags); |
334 | if (err < 0) { | 324 | if (err < 0) { |
335 | /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ | 325 | /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ |
336 | WARN_ON(err == -EMSGSIZE); | 326 | WARN_ON(err == -EMSGSIZE); |
337 | kfree_skb(skb); | 327 | kfree_skb(skb); |
338 | goto errout; | 328 | goto errout; |
339 | } | 329 | } |
340 | rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE, | 330 | rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE, |
341 | info->nlh, GFP_KERNEL); | 331 | info->nlh, GFP_KERNEL); |
342 | return; | 332 | return; |
343 | errout: | 333 | errout: |
344 | if (err < 0) | 334 | if (err < 0) |
345 | rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err); | 335 | rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err); |
346 | } | 336 | } |
347 | 337 | ||
348 | /* Return the first fib alias matching TOS with | 338 | /* Return the first fib alias matching TOS with |
349 | * priority less than or equal to PRIO. | 339 | * priority less than or equal to PRIO. |
350 | */ | 340 | */ |
351 | struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio) | 341 | struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio) |
352 | { | 342 | { |
353 | if (fah) { | 343 | if (fah) { |
354 | struct fib_alias *fa; | 344 | struct fib_alias *fa; |
355 | list_for_each_entry(fa, fah, fa_list) { | 345 | list_for_each_entry(fa, fah, fa_list) { |
356 | if (fa->fa_tos > tos) | 346 | if (fa->fa_tos > tos) |
357 | continue; | 347 | continue; |
358 | if (fa->fa_info->fib_priority >= prio || | 348 | if (fa->fa_info->fib_priority >= prio || |
359 | fa->fa_tos < tos) | 349 | fa->fa_tos < tos) |
360 | return fa; | 350 | return fa; |
361 | } | 351 | } |
362 | } | 352 | } |
363 | return NULL; | 353 | return NULL; |
364 | } | 354 | } |
365 | 355 | ||
366 | int fib_detect_death(struct fib_info *fi, int order, | 356 | int fib_detect_death(struct fib_info *fi, int order, |
367 | struct fib_info **last_resort, int *last_idx, int dflt) | 357 | struct fib_info **last_resort, int *last_idx, int dflt) |
368 | { | 358 | { |
369 | struct neighbour *n; | 359 | struct neighbour *n; |
370 | int state = NUD_NONE; | 360 | int state = NUD_NONE; |
371 | 361 | ||
372 | n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev); | 362 | n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev); |
373 | if (n) { | 363 | if (n) { |
374 | state = n->nud_state; | 364 | state = n->nud_state; |
375 | neigh_release(n); | 365 | neigh_release(n); |
376 | } | 366 | } |
377 | if (state == NUD_REACHABLE) | 367 | if (state == NUD_REACHABLE) |
378 | return 0; | 368 | return 0; |
379 | if ((state & NUD_VALID) && order != dflt) | 369 | if ((state & NUD_VALID) && order != dflt) |
380 | return 0; | 370 | return 0; |
381 | if ((state & NUD_VALID) || | 371 | if ((state & NUD_VALID) || |
382 | (*last_idx < 0 && order > dflt)) { | 372 | (*last_idx < 0 && order > dflt)) { |
383 | *last_resort = fi; | 373 | *last_resort = fi; |
384 | *last_idx = order; | 374 | *last_idx = order; |
385 | } | 375 | } |
386 | return 1; | 376 | return 1; |
387 | } | 377 | } |
388 | 378 | ||
389 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 379 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
390 | 380 | ||
391 | static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining) | 381 | static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining) |
392 | { | 382 | { |
393 | int nhs = 0; | 383 | int nhs = 0; |
394 | 384 | ||
395 | while (rtnh_ok(rtnh, remaining)) { | 385 | while (rtnh_ok(rtnh, remaining)) { |
396 | nhs++; | 386 | nhs++; |
397 | rtnh = rtnh_next(rtnh, &remaining); | 387 | rtnh = rtnh_next(rtnh, &remaining); |
398 | } | 388 | } |
399 | 389 | ||
400 | /* leftover implies invalid nexthop configuration, discard it */ | 390 | /* leftover implies invalid nexthop configuration, discard it */ |
401 | return remaining > 0 ? 0 : nhs; | 391 | return remaining > 0 ? 0 : nhs; |
402 | } | 392 | } |
403 | 393 | ||
404 | static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, | 394 | static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, |
405 | int remaining, struct fib_config *cfg) | 395 | int remaining, struct fib_config *cfg) |
406 | { | 396 | { |
407 | change_nexthops(fi) { | 397 | change_nexthops(fi) { |
408 | int attrlen; | 398 | int attrlen; |
409 | 399 | ||
410 | if (!rtnh_ok(rtnh, remaining)) | 400 | if (!rtnh_ok(rtnh, remaining)) |
411 | return -EINVAL; | 401 | return -EINVAL; |
412 | 402 | ||
413 | nexthop_nh->nh_flags = | 403 | nexthop_nh->nh_flags = |
414 | (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; | 404 | (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; |
415 | nexthop_nh->nh_oif = rtnh->rtnh_ifindex; | 405 | nexthop_nh->nh_oif = rtnh->rtnh_ifindex; |
416 | nexthop_nh->nh_weight = rtnh->rtnh_hops + 1; | 406 | nexthop_nh->nh_weight = rtnh->rtnh_hops + 1; |
417 | 407 | ||
418 | attrlen = rtnh_attrlen(rtnh); | 408 | attrlen = rtnh_attrlen(rtnh); |
419 | if (attrlen > 0) { | 409 | if (attrlen > 0) { |
420 | struct nlattr *nla, *attrs = rtnh_attrs(rtnh); | 410 | struct nlattr *nla, *attrs = rtnh_attrs(rtnh); |
421 | 411 | ||
422 | nla = nla_find(attrs, attrlen, RTA_GATEWAY); | 412 | nla = nla_find(attrs, attrlen, RTA_GATEWAY); |
423 | nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0; | 413 | nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0; |
424 | #ifdef CONFIG_IP_ROUTE_CLASSID | 414 | #ifdef CONFIG_IP_ROUTE_CLASSID |
425 | nla = nla_find(attrs, attrlen, RTA_FLOW); | 415 | nla = nla_find(attrs, attrlen, RTA_FLOW); |
426 | nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; | 416 | nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; |
427 | #endif | 417 | #endif |
428 | } | 418 | } |
429 | 419 | ||
430 | rtnh = rtnh_next(rtnh, &remaining); | 420 | rtnh = rtnh_next(rtnh, &remaining); |
431 | } endfor_nexthops(fi); | 421 | } endfor_nexthops(fi); |
432 | 422 | ||
433 | return 0; | 423 | return 0; |
434 | } | 424 | } |
435 | 425 | ||
436 | #endif | 426 | #endif |
437 | 427 | ||
438 | int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) | 428 | int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) |
439 | { | 429 | { |
440 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 430 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
441 | struct rtnexthop *rtnh; | 431 | struct rtnexthop *rtnh; |
442 | int remaining; | 432 | int remaining; |
443 | #endif | 433 | #endif |
444 | 434 | ||
445 | if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority) | 435 | if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority) |
446 | return 1; | 436 | return 1; |
447 | 437 | ||
448 | if (cfg->fc_oif || cfg->fc_gw) { | 438 | if (cfg->fc_oif || cfg->fc_gw) { |
449 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && | 439 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && |
450 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) | 440 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) |
451 | return 0; | 441 | return 0; |
452 | return 1; | 442 | return 1; |
453 | } | 443 | } |
454 | 444 | ||
455 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 445 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
456 | if (cfg->fc_mp == NULL) | 446 | if (cfg->fc_mp == NULL) |
457 | return 0; | 447 | return 0; |
458 | 448 | ||
459 | rtnh = cfg->fc_mp; | 449 | rtnh = cfg->fc_mp; |
460 | remaining = cfg->fc_mp_len; | 450 | remaining = cfg->fc_mp_len; |
461 | 451 | ||
462 | for_nexthops(fi) { | 452 | for_nexthops(fi) { |
463 | int attrlen; | 453 | int attrlen; |
464 | 454 | ||
465 | if (!rtnh_ok(rtnh, remaining)) | 455 | if (!rtnh_ok(rtnh, remaining)) |
466 | return -EINVAL; | 456 | return -EINVAL; |
467 | 457 | ||
468 | if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif) | 458 | if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif) |
469 | return 1; | 459 | return 1; |
470 | 460 | ||
471 | attrlen = rtnh_attrlen(rtnh); | 461 | attrlen = rtnh_attrlen(rtnh); |
472 | if (attrlen < 0) { | 462 | if (attrlen < 0) { |
473 | struct nlattr *nla, *attrs = rtnh_attrs(rtnh); | 463 | struct nlattr *nla, *attrs = rtnh_attrs(rtnh); |
474 | 464 | ||
475 | nla = nla_find(attrs, attrlen, RTA_GATEWAY); | 465 | nla = nla_find(attrs, attrlen, RTA_GATEWAY); |
476 | if (nla && nla_get_be32(nla) != nh->nh_gw) | 466 | if (nla && nla_get_be32(nla) != nh->nh_gw) |
477 | return 1; | 467 | return 1; |
478 | #ifdef CONFIG_IP_ROUTE_CLASSID | 468 | #ifdef CONFIG_IP_ROUTE_CLASSID |
479 | nla = nla_find(attrs, attrlen, RTA_FLOW); | 469 | nla = nla_find(attrs, attrlen, RTA_FLOW); |
480 | if (nla && nla_get_u32(nla) != nh->nh_tclassid) | 470 | if (nla && nla_get_u32(nla) != nh->nh_tclassid) |
481 | return 1; | 471 | return 1; |
482 | #endif | 472 | #endif |
483 | } | 473 | } |
484 | 474 | ||
485 | rtnh = rtnh_next(rtnh, &remaining); | 475 | rtnh = rtnh_next(rtnh, &remaining); |
486 | } endfor_nexthops(fi); | 476 | } endfor_nexthops(fi); |
487 | #endif | 477 | #endif |
488 | return 0; | 478 | return 0; |
489 | } | 479 | } |
490 | 480 | ||
491 | 481 | ||
492 | /* | 482 | /* |
493 | * Picture | 483 | * Picture |
494 | * ------- | 484 | * ------- |
495 | * | 485 | * |
496 | * Semantics of nexthop is very messy by historical reasons. | 486 | * Semantics of nexthop is very messy by historical reasons. |
497 | * We have to take into account, that: | 487 | * We have to take into account, that: |
498 | * a) gateway can be actually local interface address, | 488 | * a) gateway can be actually local interface address, |
499 | * so that gatewayed route is direct. | 489 | * so that gatewayed route is direct. |
500 | * b) gateway must be on-link address, possibly | 490 | * b) gateway must be on-link address, possibly |
501 | * described not by an ifaddr, but also by a direct route. | 491 | * described not by an ifaddr, but also by a direct route. |
502 | * c) If both gateway and interface are specified, they should not | 492 | * c) If both gateway and interface are specified, they should not |
503 | * contradict. | 493 | * contradict. |
504 | * d) If we use tunnel routes, gateway could be not on-link. | 494 | * d) If we use tunnel routes, gateway could be not on-link. |
505 | * | 495 | * |
506 | * Attempt to reconcile all of these (alas, self-contradictory) conditions | 496 | * Attempt to reconcile all of these (alas, self-contradictory) conditions |
507 | * results in pretty ugly and hairy code with obscure logic. | 497 | * results in pretty ugly and hairy code with obscure logic. |
508 | * | 498 | * |
509 | * I chose to generalized it instead, so that the size | 499 | * I chose to generalized it instead, so that the size |
510 | * of code does not increase practically, but it becomes | 500 | * of code does not increase practically, but it becomes |
511 | * much more general. | 501 | * much more general. |
512 | * Every prefix is assigned a "scope" value: "host" is local address, | 502 | * Every prefix is assigned a "scope" value: "host" is local address, |
513 | * "link" is direct route, | 503 | * "link" is direct route, |
514 | * [ ... "site" ... "interior" ... ] | 504 | * [ ... "site" ... "interior" ... ] |
515 | * and "universe" is true gateway route with global meaning. | 505 | * and "universe" is true gateway route with global meaning. |
516 | * | 506 | * |
517 | * Every prefix refers to a set of "nexthop"s (gw, oif), | 507 | * Every prefix refers to a set of "nexthop"s (gw, oif), |
518 | * where gw must have narrower scope. This recursion stops | 508 | * where gw must have narrower scope. This recursion stops |
519 | * when gw has LOCAL scope or if "nexthop" is declared ONLINK, | 509 | * when gw has LOCAL scope or if "nexthop" is declared ONLINK, |
520 | * which means that gw is forced to be on link. | 510 | * which means that gw is forced to be on link. |
521 | * | 511 | * |
522 | * Code is still hairy, but now it is apparently logically | 512 | * Code is still hairy, but now it is apparently logically |
523 | * consistent and very flexible. F.e. as by-product it allows | 513 | * consistent and very flexible. F.e. as by-product it allows |
524 | * to co-exists in peace independent exterior and interior | 514 | * to co-exists in peace independent exterior and interior |
525 | * routing processes. | 515 | * routing processes. |
526 | * | 516 | * |
527 | * Normally it looks as following. | 517 | * Normally it looks as following. |
528 | * | 518 | * |
529 | * {universe prefix} -> (gw, oif) [scope link] | 519 | * {universe prefix} -> (gw, oif) [scope link] |
530 | * | | 520 | * | |
531 | * |-> {link prefix} -> (gw, oif) [scope local] | 521 | * |-> {link prefix} -> (gw, oif) [scope local] |
532 | * | | 522 | * | |
533 | * |-> {local prefix} (terminal node) | 523 | * |-> {local prefix} (terminal node) |
534 | */ | 524 | */ |
535 | static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, | 525 | static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, |
536 | struct fib_nh *nh) | 526 | struct fib_nh *nh) |
537 | { | 527 | { |
538 | int err; | 528 | int err; |
539 | struct net *net; | 529 | struct net *net; |
540 | struct net_device *dev; | 530 | struct net_device *dev; |
541 | 531 | ||
542 | net = cfg->fc_nlinfo.nl_net; | 532 | net = cfg->fc_nlinfo.nl_net; |
543 | if (nh->nh_gw) { | 533 | if (nh->nh_gw) { |
544 | struct fib_result res; | 534 | struct fib_result res; |
545 | 535 | ||
546 | if (nh->nh_flags & RTNH_F_ONLINK) { | 536 | if (nh->nh_flags & RTNH_F_ONLINK) { |
547 | 537 | ||
548 | if (cfg->fc_scope >= RT_SCOPE_LINK) | 538 | if (cfg->fc_scope >= RT_SCOPE_LINK) |
549 | return -EINVAL; | 539 | return -EINVAL; |
550 | if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST) | 540 | if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST) |
551 | return -EINVAL; | 541 | return -EINVAL; |
552 | dev = __dev_get_by_index(net, nh->nh_oif); | 542 | dev = __dev_get_by_index(net, nh->nh_oif); |
553 | if (!dev) | 543 | if (!dev) |
554 | return -ENODEV; | 544 | return -ENODEV; |
555 | if (!(dev->flags & IFF_UP)) | 545 | if (!(dev->flags & IFF_UP)) |
556 | return -ENETDOWN; | 546 | return -ENETDOWN; |
557 | nh->nh_dev = dev; | 547 | nh->nh_dev = dev; |
558 | dev_hold(dev); | 548 | dev_hold(dev); |
559 | nh->nh_scope = RT_SCOPE_LINK; | 549 | nh->nh_scope = RT_SCOPE_LINK; |
560 | return 0; | 550 | return 0; |
561 | } | 551 | } |
562 | rcu_read_lock(); | 552 | rcu_read_lock(); |
563 | { | 553 | { |
564 | struct flowi4 fl4 = { | 554 | struct flowi4 fl4 = { |
565 | .daddr = nh->nh_gw, | 555 | .daddr = nh->nh_gw, |
566 | .flowi4_scope = cfg->fc_scope + 1, | 556 | .flowi4_scope = cfg->fc_scope + 1, |
567 | .flowi4_oif = nh->nh_oif, | 557 | .flowi4_oif = nh->nh_oif, |
568 | }; | 558 | }; |
569 | 559 | ||
570 | /* It is not necessary, but requires a bit of thinking */ | 560 | /* It is not necessary, but requires a bit of thinking */ |
571 | if (fl4.flowi4_scope < RT_SCOPE_LINK) | 561 | if (fl4.flowi4_scope < RT_SCOPE_LINK) |
572 | fl4.flowi4_scope = RT_SCOPE_LINK; | 562 | fl4.flowi4_scope = RT_SCOPE_LINK; |
573 | err = fib_lookup(net, &fl4, &res); | 563 | err = fib_lookup(net, &fl4, &res); |
574 | if (err) { | 564 | if (err) { |
575 | rcu_read_unlock(); | 565 | rcu_read_unlock(); |
576 | return err; | 566 | return err; |
577 | } | 567 | } |
578 | } | 568 | } |
579 | err = -EINVAL; | 569 | err = -EINVAL; |
580 | if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) | 570 | if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) |
581 | goto out; | 571 | goto out; |
582 | nh->nh_scope = res.scope; | 572 | nh->nh_scope = res.scope; |
583 | nh->nh_oif = FIB_RES_OIF(res); | 573 | nh->nh_oif = FIB_RES_OIF(res); |
584 | nh->nh_dev = dev = FIB_RES_DEV(res); | 574 | nh->nh_dev = dev = FIB_RES_DEV(res); |
585 | if (!dev) | 575 | if (!dev) |
586 | goto out; | 576 | goto out; |
587 | dev_hold(dev); | 577 | dev_hold(dev); |
588 | err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN; | 578 | err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN; |
589 | } else { | 579 | } else { |
590 | struct in_device *in_dev; | 580 | struct in_device *in_dev; |
591 | 581 | ||
592 | if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) | 582 | if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) |
593 | return -EINVAL; | 583 | return -EINVAL; |
594 | 584 | ||
595 | rcu_read_lock(); | 585 | rcu_read_lock(); |
596 | err = -ENODEV; | 586 | err = -ENODEV; |
597 | in_dev = inetdev_by_index(net, nh->nh_oif); | 587 | in_dev = inetdev_by_index(net, nh->nh_oif); |
598 | if (in_dev == NULL) | 588 | if (in_dev == NULL) |
599 | goto out; | 589 | goto out; |
600 | err = -ENETDOWN; | 590 | err = -ENETDOWN; |
601 | if (!(in_dev->dev->flags & IFF_UP)) | 591 | if (!(in_dev->dev->flags & IFF_UP)) |
602 | goto out; | 592 | goto out; |
603 | nh->nh_dev = in_dev->dev; | 593 | nh->nh_dev = in_dev->dev; |
604 | dev_hold(nh->nh_dev); | 594 | dev_hold(nh->nh_dev); |
605 | nh->nh_scope = RT_SCOPE_HOST; | 595 | nh->nh_scope = RT_SCOPE_HOST; |
606 | err = 0; | 596 | err = 0; |
607 | } | 597 | } |
608 | out: | 598 | out: |
609 | rcu_read_unlock(); | 599 | rcu_read_unlock(); |
610 | return err; | 600 | return err; |
611 | } | 601 | } |
612 | 602 | ||
613 | static inline unsigned int fib_laddr_hashfn(__be32 val) | 603 | static inline unsigned int fib_laddr_hashfn(__be32 val) |
614 | { | 604 | { |
615 | unsigned int mask = (fib_info_hash_size - 1); | 605 | unsigned int mask = (fib_info_hash_size - 1); |
616 | 606 | ||
617 | return ((__force u32)val ^ | 607 | return ((__force u32)val ^ |
618 | ((__force u32)val >> 7) ^ | 608 | ((__force u32)val >> 7) ^ |
619 | ((__force u32)val >> 14)) & mask; | 609 | ((__force u32)val >> 14)) & mask; |
620 | } | 610 | } |
621 | 611 | ||
622 | static struct hlist_head *fib_info_hash_alloc(int bytes) | 612 | static struct hlist_head *fib_info_hash_alloc(int bytes) |
623 | { | 613 | { |
624 | if (bytes <= PAGE_SIZE) | 614 | if (bytes <= PAGE_SIZE) |
625 | return kzalloc(bytes, GFP_KERNEL); | 615 | return kzalloc(bytes, GFP_KERNEL); |
626 | else | 616 | else |
627 | return (struct hlist_head *) | 617 | return (struct hlist_head *) |
628 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, | 618 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, |
629 | get_order(bytes)); | 619 | get_order(bytes)); |
630 | } | 620 | } |
631 | 621 | ||
632 | static void fib_info_hash_free(struct hlist_head *hash, int bytes) | 622 | static void fib_info_hash_free(struct hlist_head *hash, int bytes) |
633 | { | 623 | { |
634 | if (!hash) | 624 | if (!hash) |
635 | return; | 625 | return; |
636 | 626 | ||
637 | if (bytes <= PAGE_SIZE) | 627 | if (bytes <= PAGE_SIZE) |
638 | kfree(hash); | 628 | kfree(hash); |
639 | else | 629 | else |
640 | free_pages((unsigned long) hash, get_order(bytes)); | 630 | free_pages((unsigned long) hash, get_order(bytes)); |
641 | } | 631 | } |
642 | 632 | ||
643 | static void fib_info_hash_move(struct hlist_head *new_info_hash, | 633 | static void fib_info_hash_move(struct hlist_head *new_info_hash, |
644 | struct hlist_head *new_laddrhash, | 634 | struct hlist_head *new_laddrhash, |
645 | unsigned int new_size) | 635 | unsigned int new_size) |
646 | { | 636 | { |
647 | struct hlist_head *old_info_hash, *old_laddrhash; | 637 | struct hlist_head *old_info_hash, *old_laddrhash; |
648 | unsigned int old_size = fib_info_hash_size; | 638 | unsigned int old_size = fib_info_hash_size; |
649 | unsigned int i, bytes; | 639 | unsigned int i, bytes; |
650 | 640 | ||
651 | spin_lock_bh(&fib_info_lock); | 641 | spin_lock_bh(&fib_info_lock); |
652 | old_info_hash = fib_info_hash; | 642 | old_info_hash = fib_info_hash; |
653 | old_laddrhash = fib_info_laddrhash; | 643 | old_laddrhash = fib_info_laddrhash; |
654 | fib_info_hash_size = new_size; | 644 | fib_info_hash_size = new_size; |
655 | 645 | ||
656 | for (i = 0; i < old_size; i++) { | 646 | for (i = 0; i < old_size; i++) { |
657 | struct hlist_head *head = &fib_info_hash[i]; | 647 | struct hlist_head *head = &fib_info_hash[i]; |
658 | struct hlist_node *node, *n; | 648 | struct hlist_node *node, *n; |
659 | struct fib_info *fi; | 649 | struct fib_info *fi; |
660 | 650 | ||
661 | hlist_for_each_entry_safe(fi, node, n, head, fib_hash) { | 651 | hlist_for_each_entry_safe(fi, node, n, head, fib_hash) { |
662 | struct hlist_head *dest; | 652 | struct hlist_head *dest; |
663 | unsigned int new_hash; | 653 | unsigned int new_hash; |
664 | 654 | ||
665 | hlist_del(&fi->fib_hash); | 655 | hlist_del(&fi->fib_hash); |
666 | 656 | ||
667 | new_hash = fib_info_hashfn(fi); | 657 | new_hash = fib_info_hashfn(fi); |
668 | dest = &new_info_hash[new_hash]; | 658 | dest = &new_info_hash[new_hash]; |
669 | hlist_add_head(&fi->fib_hash, dest); | 659 | hlist_add_head(&fi->fib_hash, dest); |
670 | } | 660 | } |
671 | } | 661 | } |
672 | fib_info_hash = new_info_hash; | 662 | fib_info_hash = new_info_hash; |
673 | 663 | ||
674 | for (i = 0; i < old_size; i++) { | 664 | for (i = 0; i < old_size; i++) { |
675 | struct hlist_head *lhead = &fib_info_laddrhash[i]; | 665 | struct hlist_head *lhead = &fib_info_laddrhash[i]; |
676 | struct hlist_node *node, *n; | 666 | struct hlist_node *node, *n; |
677 | struct fib_info *fi; | 667 | struct fib_info *fi; |
678 | 668 | ||
679 | hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) { | 669 | hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) { |
680 | struct hlist_head *ldest; | 670 | struct hlist_head *ldest; |
681 | unsigned int new_hash; | 671 | unsigned int new_hash; |
682 | 672 | ||
683 | hlist_del(&fi->fib_lhash); | 673 | hlist_del(&fi->fib_lhash); |
684 | 674 | ||
685 | new_hash = fib_laddr_hashfn(fi->fib_prefsrc); | 675 | new_hash = fib_laddr_hashfn(fi->fib_prefsrc); |
686 | ldest = &new_laddrhash[new_hash]; | 676 | ldest = &new_laddrhash[new_hash]; |
687 | hlist_add_head(&fi->fib_lhash, ldest); | 677 | hlist_add_head(&fi->fib_lhash, ldest); |
688 | } | 678 | } |
689 | } | 679 | } |
690 | fib_info_laddrhash = new_laddrhash; | 680 | fib_info_laddrhash = new_laddrhash; |
691 | 681 | ||
692 | spin_unlock_bh(&fib_info_lock); | 682 | spin_unlock_bh(&fib_info_lock); |
693 | 683 | ||
694 | bytes = old_size * sizeof(struct hlist_head *); | 684 | bytes = old_size * sizeof(struct hlist_head *); |
695 | fib_info_hash_free(old_info_hash, bytes); | 685 | fib_info_hash_free(old_info_hash, bytes); |
696 | fib_info_hash_free(old_laddrhash, bytes); | 686 | fib_info_hash_free(old_laddrhash, bytes); |
697 | } | 687 | } |
698 | 688 | ||
699 | __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) | 689 | __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) |
700 | { | 690 | { |
701 | nh->nh_saddr = inet_select_addr(nh->nh_dev, | 691 | nh->nh_saddr = inet_select_addr(nh->nh_dev, |
702 | nh->nh_gw, | 692 | nh->nh_gw, |
703 | nh->nh_parent->fib_scope); | 693 | nh->nh_parent->fib_scope); |
704 | nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); | 694 | nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); |
705 | 695 | ||
706 | return nh->nh_saddr; | 696 | return nh->nh_saddr; |
707 | } | 697 | } |
708 | 698 | ||
709 | struct fib_info *fib_create_info(struct fib_config *cfg) | 699 | struct fib_info *fib_create_info(struct fib_config *cfg) |
710 | { | 700 | { |
711 | int err; | 701 | int err; |
712 | struct fib_info *fi = NULL; | 702 | struct fib_info *fi = NULL; |
713 | struct fib_info *ofi; | 703 | struct fib_info *ofi; |
714 | int nhs = 1; | 704 | int nhs = 1; |
715 | struct net *net = cfg->fc_nlinfo.nl_net; | 705 | struct net *net = cfg->fc_nlinfo.nl_net; |
716 | 706 | ||
717 | if (cfg->fc_type > RTN_MAX) | 707 | if (cfg->fc_type > RTN_MAX) |
718 | goto err_inval; | 708 | goto err_inval; |
719 | 709 | ||
720 | /* Fast check to catch the most weird cases */ | 710 | /* Fast check to catch the most weird cases */ |
721 | if (fib_props[cfg->fc_type].scope > cfg->fc_scope) | 711 | if (fib_props[cfg->fc_type].scope > cfg->fc_scope) |
722 | goto err_inval; | 712 | goto err_inval; |
723 | 713 | ||
724 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 714 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
725 | if (cfg->fc_mp) { | 715 | if (cfg->fc_mp) { |
726 | nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len); | 716 | nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len); |
727 | if (nhs == 0) | 717 | if (nhs == 0) |
728 | goto err_inval; | 718 | goto err_inval; |
729 | } | 719 | } |
730 | #endif | 720 | #endif |
731 | 721 | ||
732 | err = -ENOBUFS; | 722 | err = -ENOBUFS; |
733 | if (fib_info_cnt >= fib_info_hash_size) { | 723 | if (fib_info_cnt >= fib_info_hash_size) { |
734 | unsigned int new_size = fib_info_hash_size << 1; | 724 | unsigned int new_size = fib_info_hash_size << 1; |
735 | struct hlist_head *new_info_hash; | 725 | struct hlist_head *new_info_hash; |
736 | struct hlist_head *new_laddrhash; | 726 | struct hlist_head *new_laddrhash; |
737 | unsigned int bytes; | 727 | unsigned int bytes; |
738 | 728 | ||
739 | if (!new_size) | 729 | if (!new_size) |
740 | new_size = 1; | 730 | new_size = 1; |
741 | bytes = new_size * sizeof(struct hlist_head *); | 731 | bytes = new_size * sizeof(struct hlist_head *); |
742 | new_info_hash = fib_info_hash_alloc(bytes); | 732 | new_info_hash = fib_info_hash_alloc(bytes); |
743 | new_laddrhash = fib_info_hash_alloc(bytes); | 733 | new_laddrhash = fib_info_hash_alloc(bytes); |
744 | if (!new_info_hash || !new_laddrhash) { | 734 | if (!new_info_hash || !new_laddrhash) { |
745 | fib_info_hash_free(new_info_hash, bytes); | 735 | fib_info_hash_free(new_info_hash, bytes); |
746 | fib_info_hash_free(new_laddrhash, bytes); | 736 | fib_info_hash_free(new_laddrhash, bytes); |
747 | } else | 737 | } else |
748 | fib_info_hash_move(new_info_hash, new_laddrhash, new_size); | 738 | fib_info_hash_move(new_info_hash, new_laddrhash, new_size); |
749 | 739 | ||
750 | if (!fib_info_hash_size) | 740 | if (!fib_info_hash_size) |
751 | goto failure; | 741 | goto failure; |
752 | } | 742 | } |
753 | 743 | ||
754 | fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); | 744 | fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); |
755 | if (fi == NULL) | 745 | if (fi == NULL) |
756 | goto failure; | 746 | goto failure; |
757 | if (cfg->fc_mx) { | 747 | if (cfg->fc_mx) { |
758 | fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); | 748 | fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); |
759 | if (!fi->fib_metrics) | 749 | if (!fi->fib_metrics) |
760 | goto failure; | 750 | goto failure; |
761 | } else | 751 | } else |
762 | fi->fib_metrics = (u32 *) dst_default_metrics; | 752 | fi->fib_metrics = (u32 *) dst_default_metrics; |
763 | fib_info_cnt++; | 753 | fib_info_cnt++; |
764 | 754 | ||
765 | fi->fib_net = hold_net(net); | 755 | fi->fib_net = hold_net(net); |
766 | fi->fib_protocol = cfg->fc_protocol; | 756 | fi->fib_protocol = cfg->fc_protocol; |
767 | fi->fib_scope = cfg->fc_scope; | 757 | fi->fib_scope = cfg->fc_scope; |
768 | fi->fib_flags = cfg->fc_flags; | 758 | fi->fib_flags = cfg->fc_flags; |
769 | fi->fib_priority = cfg->fc_priority; | 759 | fi->fib_priority = cfg->fc_priority; |
770 | fi->fib_prefsrc = cfg->fc_prefsrc; | 760 | fi->fib_prefsrc = cfg->fc_prefsrc; |
771 | 761 | ||
772 | fi->fib_nhs = nhs; | 762 | fi->fib_nhs = nhs; |
773 | change_nexthops(fi) { | 763 | change_nexthops(fi) { |
774 | nexthop_nh->nh_parent = fi; | 764 | nexthop_nh->nh_parent = fi; |
775 | } endfor_nexthops(fi) | 765 | } endfor_nexthops(fi) |
776 | 766 | ||
777 | if (cfg->fc_mx) { | 767 | if (cfg->fc_mx) { |
778 | struct nlattr *nla; | 768 | struct nlattr *nla; |
779 | int remaining; | 769 | int remaining; |
780 | 770 | ||
781 | nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { | 771 | nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { |
782 | int type = nla_type(nla); | 772 | int type = nla_type(nla); |
783 | 773 | ||
784 | if (type) { | 774 | if (type) { |
785 | if (type > RTAX_MAX) | 775 | if (type > RTAX_MAX) |
786 | goto err_inval; | 776 | goto err_inval; |
787 | fi->fib_metrics[type - 1] = nla_get_u32(nla); | 777 | fi->fib_metrics[type - 1] = nla_get_u32(nla); |
788 | } | 778 | } |
789 | } | 779 | } |
790 | } | 780 | } |
791 | 781 | ||
792 | if (cfg->fc_mp) { | 782 | if (cfg->fc_mp) { |
793 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 783 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
794 | err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg); | 784 | err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg); |
795 | if (err != 0) | 785 | if (err != 0) |
796 | goto failure; | 786 | goto failure; |
797 | if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) | 787 | if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) |
798 | goto err_inval; | 788 | goto err_inval; |
799 | if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) | 789 | if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) |
800 | goto err_inval; | 790 | goto err_inval; |
801 | #ifdef CONFIG_IP_ROUTE_CLASSID | 791 | #ifdef CONFIG_IP_ROUTE_CLASSID |
802 | if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) | 792 | if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) |
803 | goto err_inval; | 793 | goto err_inval; |
804 | #endif | 794 | #endif |
805 | #else | 795 | #else |
806 | goto err_inval; | 796 | goto err_inval; |
807 | #endif | 797 | #endif |
808 | } else { | 798 | } else { |
809 | struct fib_nh *nh = fi->fib_nh; | 799 | struct fib_nh *nh = fi->fib_nh; |
810 | 800 | ||
811 | nh->nh_oif = cfg->fc_oif; | 801 | nh->nh_oif = cfg->fc_oif; |
812 | nh->nh_gw = cfg->fc_gw; | 802 | nh->nh_gw = cfg->fc_gw; |
813 | nh->nh_flags = cfg->fc_flags; | 803 | nh->nh_flags = cfg->fc_flags; |
814 | #ifdef CONFIG_IP_ROUTE_CLASSID | 804 | #ifdef CONFIG_IP_ROUTE_CLASSID |
815 | nh->nh_tclassid = cfg->fc_flow; | 805 | nh->nh_tclassid = cfg->fc_flow; |
816 | #endif | 806 | #endif |
817 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 807 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
818 | nh->nh_weight = 1; | 808 | nh->nh_weight = 1; |
819 | #endif | 809 | #endif |
820 | } | 810 | } |
821 | 811 | ||
822 | if (fib_props[cfg->fc_type].error) { | 812 | if (fib_props[cfg->fc_type].error) { |
823 | if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) | 813 | if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) |
824 | goto err_inval; | 814 | goto err_inval; |
825 | goto link_it; | 815 | goto link_it; |
826 | } else { | 816 | } else { |
827 | switch (cfg->fc_type) { | 817 | switch (cfg->fc_type) { |
828 | case RTN_UNICAST: | 818 | case RTN_UNICAST: |
829 | case RTN_LOCAL: | 819 | case RTN_LOCAL: |
830 | case RTN_BROADCAST: | 820 | case RTN_BROADCAST: |
831 | case RTN_ANYCAST: | 821 | case RTN_ANYCAST: |
832 | case RTN_MULTICAST: | 822 | case RTN_MULTICAST: |
833 | break; | 823 | break; |
834 | default: | 824 | default: |
835 | goto err_inval; | 825 | goto err_inval; |
836 | } | 826 | } |
837 | } | 827 | } |
838 | 828 | ||
839 | if (cfg->fc_scope > RT_SCOPE_HOST) | 829 | if (cfg->fc_scope > RT_SCOPE_HOST) |
840 | goto err_inval; | 830 | goto err_inval; |
841 | 831 | ||
842 | if (cfg->fc_scope == RT_SCOPE_HOST) { | 832 | if (cfg->fc_scope == RT_SCOPE_HOST) { |
843 | struct fib_nh *nh = fi->fib_nh; | 833 | struct fib_nh *nh = fi->fib_nh; |
844 | 834 | ||
845 | /* Local address is added. */ | 835 | /* Local address is added. */ |
846 | if (nhs != 1 || nh->nh_gw) | 836 | if (nhs != 1 || nh->nh_gw) |
847 | goto err_inval; | 837 | goto err_inval; |
848 | nh->nh_scope = RT_SCOPE_NOWHERE; | 838 | nh->nh_scope = RT_SCOPE_NOWHERE; |
849 | nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); | 839 | nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); |
850 | err = -ENODEV; | 840 | err = -ENODEV; |
851 | if (nh->nh_dev == NULL) | 841 | if (nh->nh_dev == NULL) |
852 | goto failure; | 842 | goto failure; |
853 | } else { | 843 | } else { |
854 | change_nexthops(fi) { | 844 | change_nexthops(fi) { |
855 | err = fib_check_nh(cfg, fi, nexthop_nh); | 845 | err = fib_check_nh(cfg, fi, nexthop_nh); |
856 | if (err != 0) | 846 | if (err != 0) |
857 | goto failure; | 847 | goto failure; |
858 | } endfor_nexthops(fi) | 848 | } endfor_nexthops(fi) |
859 | } | 849 | } |
860 | 850 | ||
861 | if (fi->fib_prefsrc) { | 851 | if (fi->fib_prefsrc) { |
862 | if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || | 852 | if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || |
863 | fi->fib_prefsrc != cfg->fc_dst) | 853 | fi->fib_prefsrc != cfg->fc_dst) |
864 | if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL) | 854 | if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL) |
865 | goto err_inval; | 855 | goto err_inval; |
866 | } | 856 | } |
867 | 857 | ||
868 | change_nexthops(fi) { | 858 | change_nexthops(fi) { |
869 | fib_info_update_nh_saddr(net, nexthop_nh); | 859 | fib_info_update_nh_saddr(net, nexthop_nh); |
870 | } endfor_nexthops(fi) | 860 | } endfor_nexthops(fi) |
871 | 861 | ||
872 | link_it: | 862 | link_it: |
873 | ofi = fib_find_info(fi); | 863 | ofi = fib_find_info(fi); |
874 | if (ofi) { | 864 | if (ofi) { |
875 | fi->fib_dead = 1; | 865 | fi->fib_dead = 1; |
876 | free_fib_info(fi); | 866 | free_fib_info(fi); |
877 | ofi->fib_treeref++; | 867 | ofi->fib_treeref++; |
878 | return ofi; | 868 | return ofi; |
879 | } | 869 | } |
880 | 870 | ||
881 | fi->fib_treeref++; | 871 | fi->fib_treeref++; |
882 | atomic_inc(&fi->fib_clntref); | 872 | atomic_inc(&fi->fib_clntref); |
883 | spin_lock_bh(&fib_info_lock); | 873 | spin_lock_bh(&fib_info_lock); |
884 | hlist_add_head(&fi->fib_hash, | 874 | hlist_add_head(&fi->fib_hash, |
885 | &fib_info_hash[fib_info_hashfn(fi)]); | 875 | &fib_info_hash[fib_info_hashfn(fi)]); |
886 | if (fi->fib_prefsrc) { | 876 | if (fi->fib_prefsrc) { |
887 | struct hlist_head *head; | 877 | struct hlist_head *head; |
888 | 878 | ||
889 | head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)]; | 879 | head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)]; |
890 | hlist_add_head(&fi->fib_lhash, head); | 880 | hlist_add_head(&fi->fib_lhash, head); |
891 | } | 881 | } |
892 | change_nexthops(fi) { | 882 | change_nexthops(fi) { |
893 | struct hlist_head *head; | 883 | struct hlist_head *head; |
894 | unsigned int hash; | 884 | unsigned int hash; |
895 | 885 | ||
896 | if (!nexthop_nh->nh_dev) | 886 | if (!nexthop_nh->nh_dev) |
897 | continue; | 887 | continue; |
898 | hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex); | 888 | hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex); |
899 | head = &fib_info_devhash[hash]; | 889 | head = &fib_info_devhash[hash]; |
900 | hlist_add_head(&nexthop_nh->nh_hash, head); | 890 | hlist_add_head(&nexthop_nh->nh_hash, head); |
901 | } endfor_nexthops(fi) | 891 | } endfor_nexthops(fi) |
902 | spin_unlock_bh(&fib_info_lock); | 892 | spin_unlock_bh(&fib_info_lock); |
903 | return fi; | 893 | return fi; |
904 | 894 | ||
905 | err_inval: | 895 | err_inval: |
906 | err = -EINVAL; | 896 | err = -EINVAL; |
907 | 897 | ||
908 | failure: | 898 | failure: |
909 | if (fi) { | 899 | if (fi) { |
910 | fi->fib_dead = 1; | 900 | fi->fib_dead = 1; |
911 | free_fib_info(fi); | 901 | free_fib_info(fi); |
912 | } | 902 | } |
913 | 903 | ||
914 | return ERR_PTR(err); | 904 | return ERR_PTR(err); |
915 | } | 905 | } |
916 | 906 | ||
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
{
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        rtm = nlmsg_data(nlh);
        rtm->rtm_family = AF_INET;
        rtm->rtm_dst_len = dst_len;
        rtm->rtm_src_len = 0;
        rtm->rtm_tos = tos;
        if (tb_id < 256)
                rtm->rtm_table = tb_id;
        else
                rtm->rtm_table = RT_TABLE_COMPAT;
        NLA_PUT_U32(skb, RTA_TABLE, tb_id);
        rtm->rtm_type = type;
        rtm->rtm_flags = fi->fib_flags;
        rtm->rtm_scope = fi->fib_scope;
        rtm->rtm_protocol = fi->fib_protocol;

        if (rtm->rtm_dst_len)
                NLA_PUT_BE32(skb, RTA_DST, dst);

        if (fi->fib_priority)
                NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

        if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
                goto nla_put_failure;

        if (fi->fib_prefsrc)
                NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

        if (fi->fib_nhs == 1) {
                if (fi->fib_nh->nh_gw)
                        NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

                if (fi->fib_nh->nh_oif)
                        NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid)
                        NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
                struct rtnexthop *rtnh;
                struct nlattr *mp;

                mp = nla_nest_start(skb, RTA_MULTIPATH);
                if (mp == NULL)
                        goto nla_put_failure;

                for_nexthops(fi) {
                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
                        if (rtnh == NULL)
                                goto nla_put_failure;

                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
                        rtnh->rtnh_hops = nh->nh_weight - 1;
                        rtnh->rtnh_ifindex = nh->nh_oif;

                        if (nh->nh_gw)
                                NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid)
                                NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);

                nla_nest_end(skb, mp);
        }
#endif
        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

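The NLA_PUT_U32()/NLA_PUT_BE32() lines above come from the netlink helper macros of this era (include/net/netlink.h); when the skb runs out of tailroom they jump to the function-local nla_put_failure label, which is why fib_dump_info() can finish with a single nlmsg_cancel(). Roughly paraphrased, not the verbatim kernel definition:

/* Rough paraphrase of the NLA_PUT helpers -- illustration only, see
 * include/net/netlink.h of this kernel for the authoritative text. */
#define NLA_PUT(skb, attrtype, attrlen, data)                            \
        do {                                                             \
                if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
                        goto nla_put_failure;                            \
        } while (0)

#define NLA_PUT_TYPE(skb, type, attrtype, value)                         \
        do {                                                             \
                type __tmp = value;                                      \
                NLA_PUT(skb, attrtype, sizeof(type), &__tmp);            \
        } while (0)

#define NLA_PUT_U32(skb, attrtype, value)                                \
        NLA_PUT_TYPE(skb, u32, attrtype, value)
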
/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
        struct hlist_node *node;
        struct fib_info *fi;

        if (fib_info_laddrhash == NULL || local == 0)
                return 0;

        hlist_for_each_entry(fi, node, head, fib_lhash) {
                if (!net_eq(fi->fib_net, net))
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
                        ret++;
                }
        }
        return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
        int ret = 0;
        int scope = RT_SCOPE_NOWHERE;
        struct fib_info *prev_fi = NULL;
        unsigned int hash = fib_devindex_hashfn(dev->ifindex);
        struct hlist_head *head = &fib_info_devhash[hash];
        struct hlist_node *node;
        struct fib_nh *nh;

        if (force)
                scope = -1;

        hlist_for_each_entry(nh, node, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int dead;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;
                prev_fi = fi;
                dead = 0;
                change_nexthops(fi) {
                        if (nexthop_nh->nh_flags & RTNH_F_DEAD)
                                dead++;
                        else if (nexthop_nh->nh_dev == dev &&
                                 nexthop_nh->nh_scope != scope) {
                                nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                                spin_lock_bh(&fib_multipath_lock);
                                fi->fib_power -= nexthop_nh->nh_power;
                                nexthop_nh->nh_power = 0;
                                spin_unlock_bh(&fib_multipath_lock);
#endif
                                dead++;
                        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                        if (force > 1 && nexthop_nh->nh_dev == dev) {
                                dead = fi->fib_nhs;
                                break;
                        }
#endif
                } endfor_nexthops(fi)
                if (dead == fi->fib_nhs) {
                        fi->fib_flags |= RTNH_F_DEAD;
                        ret++;
                }
        }

        return ret;
}

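In fib_sync_down_dev() the route as a whole is only flagged RTNH_F_DEAD once every nexthop is dead (dead == fi->fib_nhs); force > 1 short-circuits that by counting all nexthops as dead as soon as one of them uses the departing device. A toy model of the counting, with hypothetical simplified data rather than the kernel structures:

/* Toy model of the dead-counting above: a route dies only when all of
 * its nexthops are dead. Illustration only, not kernel code. */
#include <stdio.h>

struct toy_nh { int dev; int dead; };

static int route_goes_down(struct toy_nh *nh, int nhs, int downed_dev)
{
        int i, dead = 0;

        for (i = 0; i < nhs; i++) {
                if (nh[i].dead)
                        dead++;
                else if (nh[i].dev == downed_dev) {
                        nh[i].dead = 1;
                        dead++;
                }
        }
        return dead == nhs;     /* mirrors: dead == fi->fib_nhs */
}

int main(void)
{
        struct toy_nh nh[3] = { { .dev = 1 }, { .dev = 1 }, { .dev = 2 } };

        /* Two of three nexthops use dev 1: the route survives its loss. */
        printf("dev 1 down -> route dead? %d\n", route_goes_down(nh, 3, 1));
        /* Now dev 2 goes too and the last nexthop dies with it. */
        printf("dev 2 down -> route dead? %d\n", route_goes_down(nh, 3, 2));
        return 0;
}
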
/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
{
        struct fib_info *fi = NULL, *last_resort = NULL;
        struct list_head *fa_head = res->fa_head;
        struct fib_table *tb = res->table;
        int order = -1, last_idx = -1;
        struct fib_alias *fa;

        list_for_each_entry_rcu(fa, fa_head, fa_list) {
                struct fib_info *next_fi = fa->fa_info;

                if (next_fi->fib_scope != res->scope ||
                    fa->fa_type != RTN_UNICAST)
                        continue;

                if (next_fi->fib_priority > res->fi->fib_priority)
                        break;
                if (!next_fi->fib_nh[0].nh_gw ||
                    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
                        continue;

                fib_alias_accessed(fa);

                if (fi == NULL) {
                        if (next_fi != res->fi)
                                break;
                } else if (!fib_detect_death(fi, order, &last_resort,
                                             &last_idx, tb->tb_default)) {
                        fib_result_assign(res, fi);
                        tb->tb_default = order;
                        goto out;
                }
                fi = next_fi;
                order++;
        }

        if (order <= 0 || fi == NULL) {
                tb->tb_default = -1;
                goto out;
        }

        if (!fib_detect_death(fi, order, &last_resort, &last_idx,
                              tb->tb_default)) {
                fib_result_assign(res, fi);
                tb->tb_default = order;
                goto out;
        }

        if (last_idx >= 0)
                fib_result_assign(res, last_resort);
        tb->tb_default = last_idx;
out:
        return;
}

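Per the comment on fib_select_default(), the alias-list walk relies on RCU, so a caller is expected to hold rcu_read_lock() across the call. A hypothetical caller sketch; the helper name and the elided setup of fl4/res are invented for illustration and are not taken from fib_semantics.c:

/* Hypothetical caller sketch -- illustration only. */
#include <linux/rcupdate.h>
#include <net/ip_fib.h>

static void pick_default_route(struct net *net, struct flowi4 *fl4)
{
        struct fib_result res;

        rcu_read_lock();
        if (fib_lookup(net, fl4, &res) == 0)
                fib_select_default(&res);  /* must stay inside the RCU section */
        rcu_read_unlock();
}
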
#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * Dead device goes up. We wake up dead nexthops.
 * It takes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
        struct fib_info *prev_fi;
        unsigned int hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct fib_nh *nh;
        int ret;

        if (!(dev->flags & IFF_UP))
                return 0;

        prev_fi = NULL;
        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
        ret = 0;

        hlist_for_each_entry(nh, node, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int alive;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;

                prev_fi = fi;
                alive = 0;
                change_nexthops(fi) {
                        if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
                                alive++;
                                continue;
                        }
                        if (nexthop_nh->nh_dev == NULL ||
                            !(nexthop_nh->nh_dev->flags & IFF_UP))
                                continue;
                        if (nexthop_nh->nh_dev != dev ||
                            !__in_dev_get_rtnl(dev))
                                continue;
                        alive++;
                        spin_lock_bh(&fib_multipath_lock);
                        nexthop_nh->nh_power = 0;
                        nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
                        spin_unlock_bh(&fib_multipath_lock);
                } endfor_nexthops(fi)

                if (alive > 0) {
                        fi->fib_flags &= ~RTNH_F_DEAD;
                        ret++;
                }
        }

        return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
        struct fib_info *fi = res->fi;
        int w;

        spin_lock_bh(&fib_multipath_lock);
        if (fi->fib_power <= 0) {
                int power = 0;
                change_nexthops(fi) {
                        if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
                                power += nexthop_nh->nh_weight;
                                nexthop_nh->nh_power = nexthop_nh->nh_weight;
                        }
                } endfor_nexthops(fi);
                fi->fib_power = power;
                if (power <= 0) {
                        spin_unlock_bh(&fib_multipath_lock);
                        /* Race condition: route has just become dead. */
                        res->nh_sel = 0;
                        return;
                }
        }


        /* w should be random number [0..fi->fib_power-1],
         * it is pretty bad approximation.
         */

        w = jiffies % fi->fib_power;

        change_nexthops(fi) {
                if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
                    nexthop_nh->nh_power) {
                        w -= nexthop_nh->nh_power;
                        if (w <= 0) {
                                nexthop_nh->nh_power--;
                                fi->fib_power--;
                                res->nh_sel = nhsel;
                                spin_unlock_bh(&fib_multipath_lock);
                                return;
                        }
                }
        } endfor_nexthops(fi);

        /* Race condition: route has just become dead. */
        res->nh_sel = 0;
        spin_unlock_bh(&fib_multipath_lock);
}
#endif
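The comments in fib_select_multipath() describe the scheme: whenever the credit pool (fib_power) is exhausted, every live nexthop is refilled with nh_power = nh_weight; a pseudo-random w in [0, fib_power) then walks the nexthops subtracting their remaining power, and the chosen nexthop and the pool each lose one credit, so across one refill cycle every nexthop is selected exactly nh_weight times. A standalone sketch of the same draw in plain C, with hypothetical data and rand() standing in for jiffies:

/* Standalone sketch of the weighted draw above -- illustration only,
 * hypothetical data, rand() standing in for jiffies. */
#include <stdio.h>
#include <stdlib.h>

#define NHS 2

static int weight[NHS] = { 3, 1 };      /* nexthop 0 should carry 3x the load */
static int power[NHS];                  /* remaining credit per nexthop */
static int total;                       /* like fi->fib_power */

static int select_nexthop(void)
{
        int i, w;

        if (total <= 0) {               /* refill the credit pool */
                for (i = 0; i < NHS; i++) {
                        power[i] = weight[i];
                        total += weight[i];
                }
        }

        w = rand() % total;             /* kernel uses jiffies % fib_power */
        for (i = 0; i < NHS; i++) {
                if (!power[i])
                        continue;
                w -= power[i];
                if (w < 0) {            /* kernel tests w <= 0 after subtracting */
                        power[i]--;
                        total--;
                        return i;
                }
        }
        return 0;                       /* pool raced to empty */
}

int main(void)
{
        int hits[NHS] = { 0 }, i;

        for (i = 0; i < 4000; i++)
                hits[select_nexthop()]++;
        /* Expect roughly a 3:1 split between the two nexthops. */
        printf("nh0=%d nh1=%d\n", hits[0], hits[1]);
        return 0;
}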