Commit 83723d60717f8da0f53f91cf42a845ed56c09662
Committed by
Pablo Neira Ayuso
1 parent
45b9f509b7
Exists in
master
and in
7 other branches
netfilter: x_tables: don't block BH while reading counters
Using "iptables -L" with a lot of rules has too large a BH latency. Jesper mentioned ~6 ms and worried about frame drops. Switch to a per_cpu seqlock scheme, so that taking a snapshot of counters doesn't need to block BH (for this cpu, but also other cpus). This adds two increments on seqlock sequence per ipt_do_table() call, it's a reasonable cost for allowing "iptables -L" not to block BH processing. Reported-by: Jesper Dangaard Brouer <hawk@comx.dk> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> CC: Patrick McHardy <kaber@trash.net> Acked-by: Stephen Hemminger <shemminger@vyatta.com> Acked-by: Jesper Dangaard Brouer <hawk@comx.dk> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Showing 5 changed files with 49 additions and 99 deletions Inline Diff
include/linux/netfilter/x_tables.h
1 | #ifndef _X_TABLES_H | 1 | #ifndef _X_TABLES_H |
2 | #define _X_TABLES_H | 2 | #define _X_TABLES_H |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | #define XT_FUNCTION_MAXNAMELEN 30 | 6 | #define XT_FUNCTION_MAXNAMELEN 30 |
7 | #define XT_EXTENSION_MAXNAMELEN 29 | 7 | #define XT_EXTENSION_MAXNAMELEN 29 |
8 | #define XT_TABLE_MAXNAMELEN 32 | 8 | #define XT_TABLE_MAXNAMELEN 32 |
9 | 9 | ||
10 | struct xt_entry_match { | 10 | struct xt_entry_match { |
11 | union { | 11 | union { |
12 | struct { | 12 | struct { |
13 | __u16 match_size; | 13 | __u16 match_size; |
14 | 14 | ||
15 | /* Used by userspace */ | 15 | /* Used by userspace */ |
16 | char name[XT_EXTENSION_MAXNAMELEN]; | 16 | char name[XT_EXTENSION_MAXNAMELEN]; |
17 | __u8 revision; | 17 | __u8 revision; |
18 | } user; | 18 | } user; |
19 | struct { | 19 | struct { |
20 | __u16 match_size; | 20 | __u16 match_size; |
21 | 21 | ||
22 | /* Used inside the kernel */ | 22 | /* Used inside the kernel */ |
23 | struct xt_match *match; | 23 | struct xt_match *match; |
24 | } kernel; | 24 | } kernel; |
25 | 25 | ||
26 | /* Total length */ | 26 | /* Total length */ |
27 | __u16 match_size; | 27 | __u16 match_size; |
28 | } u; | 28 | } u; |
29 | 29 | ||
30 | unsigned char data[0]; | 30 | unsigned char data[0]; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct xt_entry_target { | 33 | struct xt_entry_target { |
34 | union { | 34 | union { |
35 | struct { | 35 | struct { |
36 | __u16 target_size; | 36 | __u16 target_size; |
37 | 37 | ||
38 | /* Used by userspace */ | 38 | /* Used by userspace */ |
39 | char name[XT_EXTENSION_MAXNAMELEN]; | 39 | char name[XT_EXTENSION_MAXNAMELEN]; |
40 | __u8 revision; | 40 | __u8 revision; |
41 | } user; | 41 | } user; |
42 | struct { | 42 | struct { |
43 | __u16 target_size; | 43 | __u16 target_size; |
44 | 44 | ||
45 | /* Used inside the kernel */ | 45 | /* Used inside the kernel */ |
46 | struct xt_target *target; | 46 | struct xt_target *target; |
47 | } kernel; | 47 | } kernel; |
48 | 48 | ||
49 | /* Total length */ | 49 | /* Total length */ |
50 | __u16 target_size; | 50 | __u16 target_size; |
51 | } u; | 51 | } u; |
52 | 52 | ||
53 | unsigned char data[0]; | 53 | unsigned char data[0]; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define XT_TARGET_INIT(__name, __size) \ | 56 | #define XT_TARGET_INIT(__name, __size) \ |
57 | { \ | 57 | { \ |
58 | .target.u.user = { \ | 58 | .target.u.user = { \ |
59 | .target_size = XT_ALIGN(__size), \ | 59 | .target_size = XT_ALIGN(__size), \ |
60 | .name = __name, \ | 60 | .name = __name, \ |
61 | }, \ | 61 | }, \ |
62 | } | 62 | } |
63 | 63 | ||
64 | struct xt_standard_target { | 64 | struct xt_standard_target { |
65 | struct xt_entry_target target; | 65 | struct xt_entry_target target; |
66 | int verdict; | 66 | int verdict; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct xt_error_target { | 69 | struct xt_error_target { |
70 | struct xt_entry_target target; | 70 | struct xt_entry_target target; |
71 | char errorname[XT_FUNCTION_MAXNAMELEN]; | 71 | char errorname[XT_FUNCTION_MAXNAMELEN]; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision | 74 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision |
75 | * kernel supports, if >= revision. */ | 75 | * kernel supports, if >= revision. */ |
76 | struct xt_get_revision { | 76 | struct xt_get_revision { |
77 | char name[XT_EXTENSION_MAXNAMELEN]; | 77 | char name[XT_EXTENSION_MAXNAMELEN]; |
78 | __u8 revision; | 78 | __u8 revision; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* CONTINUE verdict for targets */ | 81 | /* CONTINUE verdict for targets */ |
82 | #define XT_CONTINUE 0xFFFFFFFF | 82 | #define XT_CONTINUE 0xFFFFFFFF |
83 | 83 | ||
84 | /* For standard target */ | 84 | /* For standard target */ |
85 | #define XT_RETURN (-NF_REPEAT - 1) | 85 | #define XT_RETURN (-NF_REPEAT - 1) |
86 | 86 | ||
87 | /* this is a dummy structure to find out the alignment requirement for a struct | 87 | /* this is a dummy structure to find out the alignment requirement for a struct |
88 | * containing all the fundamental data types that are used in ipt_entry, | 88 | * containing all the fundamental data types that are used in ipt_entry, |
89 | * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my | 89 | * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my |
90 | * personal pleasure to remove it -HW | 90 | * personal pleasure to remove it -HW |
91 | */ | 91 | */ |
92 | struct _xt_align { | 92 | struct _xt_align { |
93 | __u8 u8; | 93 | __u8 u8; |
94 | __u16 u16; | 94 | __u16 u16; |
95 | __u32 u32; | 95 | __u32 u32; |
96 | __u64 u64; | 96 | __u64 u64; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | #define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align)) | 99 | #define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align)) |
100 | 100 | ||
101 | /* Standard return verdict, or do jump. */ | 101 | /* Standard return verdict, or do jump. */ |
102 | #define XT_STANDARD_TARGET "" | 102 | #define XT_STANDARD_TARGET "" |
103 | /* Error verdict. */ | 103 | /* Error verdict. */ |
104 | #define XT_ERROR_TARGET "ERROR" | 104 | #define XT_ERROR_TARGET "ERROR" |
105 | 105 | ||
106 | #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0) | 106 | #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0) |
107 | #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0) | 107 | #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0) |
108 | 108 | ||
109 | struct xt_counters { | 109 | struct xt_counters { |
110 | __u64 pcnt, bcnt; /* Packet and byte counters */ | 110 | __u64 pcnt, bcnt; /* Packet and byte counters */ |
111 | }; | 111 | }; |
112 | 112 | ||
113 | /* The argument to IPT_SO_ADD_COUNTERS. */ | 113 | /* The argument to IPT_SO_ADD_COUNTERS. */ |
114 | struct xt_counters_info { | 114 | struct xt_counters_info { |
115 | /* Which table. */ | 115 | /* Which table. */ |
116 | char name[XT_TABLE_MAXNAMELEN]; | 116 | char name[XT_TABLE_MAXNAMELEN]; |
117 | 117 | ||
118 | unsigned int num_counters; | 118 | unsigned int num_counters; |
119 | 119 | ||
120 | /* The counters (actually `number' of these). */ | 120 | /* The counters (actually `number' of these). */ |
121 | struct xt_counters counters[0]; | 121 | struct xt_counters counters[0]; |
122 | }; | 122 | }; |
123 | 123 | ||
124 | #define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ | 124 | #define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ |
125 | 125 | ||
126 | #ifndef __KERNEL__ | 126 | #ifndef __KERNEL__ |
127 | /* fn returns 0 to continue iteration */ | 127 | /* fn returns 0 to continue iteration */ |
128 | #define XT_MATCH_ITERATE(type, e, fn, args...) \ | 128 | #define XT_MATCH_ITERATE(type, e, fn, args...) \ |
129 | ({ \ | 129 | ({ \ |
130 | unsigned int __i; \ | 130 | unsigned int __i; \ |
131 | int __ret = 0; \ | 131 | int __ret = 0; \ |
132 | struct xt_entry_match *__m; \ | 132 | struct xt_entry_match *__m; \ |
133 | \ | 133 | \ |
134 | for (__i = sizeof(type); \ | 134 | for (__i = sizeof(type); \ |
135 | __i < (e)->target_offset; \ | 135 | __i < (e)->target_offset; \ |
136 | __i += __m->u.match_size) { \ | 136 | __i += __m->u.match_size) { \ |
137 | __m = (void *)e + __i; \ | 137 | __m = (void *)e + __i; \ |
138 | \ | 138 | \ |
139 | __ret = fn(__m , ## args); \ | 139 | __ret = fn(__m , ## args); \ |
140 | if (__ret != 0) \ | 140 | if (__ret != 0) \ |
141 | break; \ | 141 | break; \ |
142 | } \ | 142 | } \ |
143 | __ret; \ | 143 | __ret; \ |
144 | }) | 144 | }) |
145 | 145 | ||
146 | /* fn returns 0 to continue iteration */ | 146 | /* fn returns 0 to continue iteration */ |
147 | #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ | 147 | #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ |
148 | ({ \ | 148 | ({ \ |
149 | unsigned int __i, __n; \ | 149 | unsigned int __i, __n; \ |
150 | int __ret = 0; \ | 150 | int __ret = 0; \ |
151 | type *__entry; \ | 151 | type *__entry; \ |
152 | \ | 152 | \ |
153 | for (__i = 0, __n = 0; __i < (size); \ | 153 | for (__i = 0, __n = 0; __i < (size); \ |
154 | __i += __entry->next_offset, __n++) { \ | 154 | __i += __entry->next_offset, __n++) { \ |
155 | __entry = (void *)(entries) + __i; \ | 155 | __entry = (void *)(entries) + __i; \ |
156 | if (__n < n) \ | 156 | if (__n < n) \ |
157 | continue; \ | 157 | continue; \ |
158 | \ | 158 | \ |
159 | __ret = fn(__entry , ## args); \ | 159 | __ret = fn(__entry , ## args); \ |
160 | if (__ret != 0) \ | 160 | if (__ret != 0) \ |
161 | break; \ | 161 | break; \ |
162 | } \ | 162 | } \ |
163 | __ret; \ | 163 | __ret; \ |
164 | }) | 164 | }) |
165 | 165 | ||
166 | /* fn returns 0 to continue iteration */ | 166 | /* fn returns 0 to continue iteration */ |
167 | #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ | 167 | #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ |
168 | XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) | 168 | XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) |
169 | 169 | ||
170 | #endif /* !__KERNEL__ */ | 170 | #endif /* !__KERNEL__ */ |
171 | 171 | ||
172 | /* pos is normally a struct ipt_entry/ip6t_entry/etc. */ | 172 | /* pos is normally a struct ipt_entry/ip6t_entry/etc. */ |
173 | #define xt_entry_foreach(pos, ehead, esize) \ | 173 | #define xt_entry_foreach(pos, ehead, esize) \ |
174 | for ((pos) = (typeof(pos))(ehead); \ | 174 | for ((pos) = (typeof(pos))(ehead); \ |
175 | (pos) < (typeof(pos))((char *)(ehead) + (esize)); \ | 175 | (pos) < (typeof(pos))((char *)(ehead) + (esize)); \ |
176 | (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset)) | 176 | (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset)) |
177 | 177 | ||
178 | /* can only be xt_entry_match, so no use of typeof here */ | 178 | /* can only be xt_entry_match, so no use of typeof here */ |
179 | #define xt_ematch_foreach(pos, entry) \ | 179 | #define xt_ematch_foreach(pos, entry) \ |
180 | for ((pos) = (struct xt_entry_match *)entry->elems; \ | 180 | for ((pos) = (struct xt_entry_match *)entry->elems; \ |
181 | (pos) < (struct xt_entry_match *)((char *)(entry) + \ | 181 | (pos) < (struct xt_entry_match *)((char *)(entry) + \ |
182 | (entry)->target_offset); \ | 182 | (entry)->target_offset); \ |
183 | (pos) = (struct xt_entry_match *)((char *)(pos) + \ | 183 | (pos) = (struct xt_entry_match *)((char *)(pos) + \ |
184 | (pos)->u.match_size)) | 184 | (pos)->u.match_size)) |
185 | 185 | ||
186 | #ifdef __KERNEL__ | 186 | #ifdef __KERNEL__ |
187 | 187 | ||
188 | #include <linux/netdevice.h> | 188 | #include <linux/netdevice.h> |
189 | 189 | ||
190 | /** | 190 | /** |
191 | * struct xt_action_param - parameters for matches/targets | 191 | * struct xt_action_param - parameters for matches/targets |
192 | * | 192 | * |
193 | * @match: the match extension | 193 | * @match: the match extension |
194 | * @target: the target extension | 194 | * @target: the target extension |
195 | * @matchinfo: per-match data | 195 | * @matchinfo: per-match data |
196 | * @targetinfo: per-target data | 196 | * @targetinfo: per-target data |
197 | * @in: input netdevice | 197 | * @in: input netdevice |
198 | * @out: output netdevice | 198 | * @out: output netdevice |
199 | * @fragoff: packet is a fragment, this is the data offset | 199 | * @fragoff: packet is a fragment, this is the data offset |
200 | * @thoff: position of transport header relative to skb->data | 200 | * @thoff: position of transport header relative to skb->data |
201 | * @hook: hook number given packet came from | 201 | * @hook: hook number given packet came from |
202 | * @family: Actual NFPROTO_* through which the function is invoked | 202 | * @family: Actual NFPROTO_* through which the function is invoked |
203 | * (helpful when match->family == NFPROTO_UNSPEC) | 203 | * (helpful when match->family == NFPROTO_UNSPEC) |
204 | * | 204 | * |
205 | * Fields written to by extensions: | 205 | * Fields written to by extensions: |
206 | * | 206 | * |
207 | * @hotdrop: drop packet if we had inspection problems | 207 | * @hotdrop: drop packet if we had inspection problems |
208 | * Network namespace obtainable using dev_net(in/out) | 208 | * Network namespace obtainable using dev_net(in/out) |
209 | */ | 209 | */ |
210 | struct xt_action_param { | 210 | struct xt_action_param { |
211 | union { | 211 | union { |
212 | const struct xt_match *match; | 212 | const struct xt_match *match; |
213 | const struct xt_target *target; | 213 | const struct xt_target *target; |
214 | }; | 214 | }; |
215 | union { | 215 | union { |
216 | const void *matchinfo, *targinfo; | 216 | const void *matchinfo, *targinfo; |
217 | }; | 217 | }; |
218 | const struct net_device *in, *out; | 218 | const struct net_device *in, *out; |
219 | int fragoff; | 219 | int fragoff; |
220 | unsigned int thoff; | 220 | unsigned int thoff; |
221 | unsigned int hooknum; | 221 | unsigned int hooknum; |
222 | u_int8_t family; | 222 | u_int8_t family; |
223 | bool hotdrop; | 223 | bool hotdrop; |
224 | }; | 224 | }; |
225 | 225 | ||
226 | /** | 226 | /** |
227 | * struct xt_mtchk_param - parameters for match extensions' | 227 | * struct xt_mtchk_param - parameters for match extensions' |
228 | * checkentry functions | 228 | * checkentry functions |
229 | * | 229 | * |
230 | * @net: network namespace through which the check was invoked | 230 | * @net: network namespace through which the check was invoked |
231 | * @table: table the rule is tried to be inserted into | 231 | * @table: table the rule is tried to be inserted into |
232 | * @entryinfo: the family-specific rule data | 232 | * @entryinfo: the family-specific rule data |
233 | * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry) | 233 | * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry) |
234 | * @match: struct xt_match through which this function was invoked | 234 | * @match: struct xt_match through which this function was invoked |
235 | * @matchinfo: per-match data | 235 | * @matchinfo: per-match data |
236 | * @hook_mask: via which hooks the new rule is reachable | 236 | * @hook_mask: via which hooks the new rule is reachable |
237 | * Other fields as above. | 237 | * Other fields as above. |
238 | */ | 238 | */ |
239 | struct xt_mtchk_param { | 239 | struct xt_mtchk_param { |
240 | struct net *net; | 240 | struct net *net; |
241 | const char *table; | 241 | const char *table; |
242 | const void *entryinfo; | 242 | const void *entryinfo; |
243 | const struct xt_match *match; | 243 | const struct xt_match *match; |
244 | void *matchinfo; | 244 | void *matchinfo; |
245 | unsigned int hook_mask; | 245 | unsigned int hook_mask; |
246 | u_int8_t family; | 246 | u_int8_t family; |
247 | }; | 247 | }; |
248 | 248 | ||
249 | /** | 249 | /** |
250 | * struct xt_mdtor_param - match destructor parameters | 250 | * struct xt_mdtor_param - match destructor parameters |
251 | * Fields as above. | 251 | * Fields as above. |
252 | */ | 252 | */ |
253 | struct xt_mtdtor_param { | 253 | struct xt_mtdtor_param { |
254 | struct net *net; | 254 | struct net *net; |
255 | const struct xt_match *match; | 255 | const struct xt_match *match; |
256 | void *matchinfo; | 256 | void *matchinfo; |
257 | u_int8_t family; | 257 | u_int8_t family; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | /** | 260 | /** |
261 | * struct xt_tgchk_param - parameters for target extensions' | 261 | * struct xt_tgchk_param - parameters for target extensions' |
262 | * checkentry functions | 262 | * checkentry functions |
263 | * | 263 | * |
264 | * @entryinfo: the family-specific rule data | 264 | * @entryinfo: the family-specific rule data |
265 | * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) | 265 | * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) |
266 | * | 266 | * |
267 | * Other fields see above. | 267 | * Other fields see above. |
268 | */ | 268 | */ |
269 | struct xt_tgchk_param { | 269 | struct xt_tgchk_param { |
270 | struct net *net; | 270 | struct net *net; |
271 | const char *table; | 271 | const char *table; |
272 | const void *entryinfo; | 272 | const void *entryinfo; |
273 | const struct xt_target *target; | 273 | const struct xt_target *target; |
274 | void *targinfo; | 274 | void *targinfo; |
275 | unsigned int hook_mask; | 275 | unsigned int hook_mask; |
276 | u_int8_t family; | 276 | u_int8_t family; |
277 | }; | 277 | }; |
278 | 278 | ||
279 | /* Target destructor parameters */ | 279 | /* Target destructor parameters */ |
280 | struct xt_tgdtor_param { | 280 | struct xt_tgdtor_param { |
281 | struct net *net; | 281 | struct net *net; |
282 | const struct xt_target *target; | 282 | const struct xt_target *target; |
283 | void *targinfo; | 283 | void *targinfo; |
284 | u_int8_t family; | 284 | u_int8_t family; |
285 | }; | 285 | }; |
286 | 286 | ||
287 | struct xt_match { | 287 | struct xt_match { |
288 | struct list_head list; | 288 | struct list_head list; |
289 | 289 | ||
290 | const char name[XT_EXTENSION_MAXNAMELEN]; | 290 | const char name[XT_EXTENSION_MAXNAMELEN]; |
291 | u_int8_t revision; | 291 | u_int8_t revision; |
292 | 292 | ||
293 | /* Return true or false: return FALSE and set *hotdrop = 1 to | 293 | /* Return true or false: return FALSE and set *hotdrop = 1 to |
294 | force immediate packet drop. */ | 294 | force immediate packet drop. */ |
295 | /* Arguments changed since 2.6.9, as this must now handle | 295 | /* Arguments changed since 2.6.9, as this must now handle |
296 | non-linear skb, using skb_header_pointer and | 296 | non-linear skb, using skb_header_pointer and |
297 | skb_ip_make_writable. */ | 297 | skb_ip_make_writable. */ |
298 | bool (*match)(const struct sk_buff *skb, | 298 | bool (*match)(const struct sk_buff *skb, |
299 | struct xt_action_param *); | 299 | struct xt_action_param *); |
300 | 300 | ||
301 | /* Called when user tries to insert an entry of this type. */ | 301 | /* Called when user tries to insert an entry of this type. */ |
302 | int (*checkentry)(const struct xt_mtchk_param *); | 302 | int (*checkentry)(const struct xt_mtchk_param *); |
303 | 303 | ||
304 | /* Called when entry of this type deleted. */ | 304 | /* Called when entry of this type deleted. */ |
305 | void (*destroy)(const struct xt_mtdtor_param *); | 305 | void (*destroy)(const struct xt_mtdtor_param *); |
306 | #ifdef CONFIG_COMPAT | 306 | #ifdef CONFIG_COMPAT |
307 | /* Called when userspace align differs from kernel space one */ | 307 | /* Called when userspace align differs from kernel space one */ |
308 | void (*compat_from_user)(void *dst, const void *src); | 308 | void (*compat_from_user)(void *dst, const void *src); |
309 | int (*compat_to_user)(void __user *dst, const void *src); | 309 | int (*compat_to_user)(void __user *dst, const void *src); |
310 | #endif | 310 | #endif |
311 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ | 311 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ |
312 | struct module *me; | 312 | struct module *me; |
313 | 313 | ||
314 | const char *table; | 314 | const char *table; |
315 | unsigned int matchsize; | 315 | unsigned int matchsize; |
316 | #ifdef CONFIG_COMPAT | 316 | #ifdef CONFIG_COMPAT |
317 | unsigned int compatsize; | 317 | unsigned int compatsize; |
318 | #endif | 318 | #endif |
319 | unsigned int hooks; | 319 | unsigned int hooks; |
320 | unsigned short proto; | 320 | unsigned short proto; |
321 | 321 | ||
322 | unsigned short family; | 322 | unsigned short family; |
323 | }; | 323 | }; |
324 | 324 | ||
325 | /* Registration hooks for targets. */ | 325 | /* Registration hooks for targets. */ |
326 | struct xt_target { | 326 | struct xt_target { |
327 | struct list_head list; | 327 | struct list_head list; |
328 | 328 | ||
329 | const char name[XT_EXTENSION_MAXNAMELEN]; | 329 | const char name[XT_EXTENSION_MAXNAMELEN]; |
330 | u_int8_t revision; | 330 | u_int8_t revision; |
331 | 331 | ||
332 | /* Returns verdict. Argument order changed since 2.6.9, as this | 332 | /* Returns verdict. Argument order changed since 2.6.9, as this |
333 | must now handle non-linear skbs, using skb_copy_bits and | 333 | must now handle non-linear skbs, using skb_copy_bits and |
334 | skb_ip_make_writable. */ | 334 | skb_ip_make_writable. */ |
335 | unsigned int (*target)(struct sk_buff *skb, | 335 | unsigned int (*target)(struct sk_buff *skb, |
336 | const struct xt_action_param *); | 336 | const struct xt_action_param *); |
337 | 337 | ||
338 | /* Called when user tries to insert an entry of this type: | 338 | /* Called when user tries to insert an entry of this type: |
339 | hook_mask is a bitmask of hooks from which it can be | 339 | hook_mask is a bitmask of hooks from which it can be |
340 | called. */ | 340 | called. */ |
341 | /* Should return 0 on success or an error code otherwise (-Exxxx). */ | 341 | /* Should return 0 on success or an error code otherwise (-Exxxx). */ |
342 | int (*checkentry)(const struct xt_tgchk_param *); | 342 | int (*checkentry)(const struct xt_tgchk_param *); |
343 | 343 | ||
344 | /* Called when entry of this type deleted. */ | 344 | /* Called when entry of this type deleted. */ |
345 | void (*destroy)(const struct xt_tgdtor_param *); | 345 | void (*destroy)(const struct xt_tgdtor_param *); |
346 | #ifdef CONFIG_COMPAT | 346 | #ifdef CONFIG_COMPAT |
347 | /* Called when userspace align differs from kernel space one */ | 347 | /* Called when userspace align differs from kernel space one */ |
348 | void (*compat_from_user)(void *dst, const void *src); | 348 | void (*compat_from_user)(void *dst, const void *src); |
349 | int (*compat_to_user)(void __user *dst, const void *src); | 349 | int (*compat_to_user)(void __user *dst, const void *src); |
350 | #endif | 350 | #endif |
351 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ | 351 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ |
352 | struct module *me; | 352 | struct module *me; |
353 | 353 | ||
354 | const char *table; | 354 | const char *table; |
355 | unsigned int targetsize; | 355 | unsigned int targetsize; |
356 | #ifdef CONFIG_COMPAT | 356 | #ifdef CONFIG_COMPAT |
357 | unsigned int compatsize; | 357 | unsigned int compatsize; |
358 | #endif | 358 | #endif |
359 | unsigned int hooks; | 359 | unsigned int hooks; |
360 | unsigned short proto; | 360 | unsigned short proto; |
361 | 361 | ||
362 | unsigned short family; | 362 | unsigned short family; |
363 | }; | 363 | }; |
364 | 364 | ||
365 | /* Furniture shopping... */ | 365 | /* Furniture shopping... */ |
366 | struct xt_table { | 366 | struct xt_table { |
367 | struct list_head list; | 367 | struct list_head list; |
368 | 368 | ||
369 | /* What hooks you will enter on */ | 369 | /* What hooks you will enter on */ |
370 | unsigned int valid_hooks; | 370 | unsigned int valid_hooks; |
371 | 371 | ||
372 | /* Man behind the curtain... */ | 372 | /* Man behind the curtain... */ |
373 | struct xt_table_info *private; | 373 | struct xt_table_info *private; |
374 | 374 | ||
375 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ | 375 | /* Set this to THIS_MODULE if you are a module, otherwise NULL */ |
376 | struct module *me; | 376 | struct module *me; |
377 | 377 | ||
378 | u_int8_t af; /* address/protocol family */ | 378 | u_int8_t af; /* address/protocol family */ |
379 | int priority; /* hook order */ | 379 | int priority; /* hook order */ |
380 | 380 | ||
381 | /* A unique name... */ | 381 | /* A unique name... */ |
382 | const char name[XT_TABLE_MAXNAMELEN]; | 382 | const char name[XT_TABLE_MAXNAMELEN]; |
383 | }; | 383 | }; |
384 | 384 | ||
385 | #include <linux/netfilter_ipv4.h> | 385 | #include <linux/netfilter_ipv4.h> |
386 | 386 | ||
387 | /* The table itself */ | 387 | /* The table itself */ |
388 | struct xt_table_info { | 388 | struct xt_table_info { |
389 | /* Size per table */ | 389 | /* Size per table */ |
390 | unsigned int size; | 390 | unsigned int size; |
391 | /* Number of entries: FIXME. --RR */ | 391 | /* Number of entries: FIXME. --RR */ |
392 | unsigned int number; | 392 | unsigned int number; |
393 | /* Initial number of entries. Needed for module usage count */ | 393 | /* Initial number of entries. Needed for module usage count */ |
394 | unsigned int initial_entries; | 394 | unsigned int initial_entries; |
395 | 395 | ||
396 | /* Entry points and underflows */ | 396 | /* Entry points and underflows */ |
397 | unsigned int hook_entry[NF_INET_NUMHOOKS]; | 397 | unsigned int hook_entry[NF_INET_NUMHOOKS]; |
398 | unsigned int underflow[NF_INET_NUMHOOKS]; | 398 | unsigned int underflow[NF_INET_NUMHOOKS]; |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * Number of user chains. Since tables cannot have loops, at most | 401 | * Number of user chains. Since tables cannot have loops, at most |
402 | * @stacksize jumps (number of user chains) can possibly be made. | 402 | * @stacksize jumps (number of user chains) can possibly be made. |
403 | */ | 403 | */ |
404 | unsigned int stacksize; | 404 | unsigned int stacksize; |
405 | unsigned int __percpu *stackptr; | 405 | unsigned int __percpu *stackptr; |
406 | void ***jumpstack; | 406 | void ***jumpstack; |
407 | /* ipt_entry tables: one per CPU */ | 407 | /* ipt_entry tables: one per CPU */ |
408 | /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ | 408 | /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ |
409 | void *entries[1]; | 409 | void *entries[1]; |
410 | }; | 410 | }; |
411 | 411 | ||
412 | #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ | 412 | #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ |
413 | + nr_cpu_ids * sizeof(char *)) | 413 | + nr_cpu_ids * sizeof(char *)) |
414 | extern int xt_register_target(struct xt_target *target); | 414 | extern int xt_register_target(struct xt_target *target); |
415 | extern void xt_unregister_target(struct xt_target *target); | 415 | extern void xt_unregister_target(struct xt_target *target); |
416 | extern int xt_register_targets(struct xt_target *target, unsigned int n); | 416 | extern int xt_register_targets(struct xt_target *target, unsigned int n); |
417 | extern void xt_unregister_targets(struct xt_target *target, unsigned int n); | 417 | extern void xt_unregister_targets(struct xt_target *target, unsigned int n); |
418 | 418 | ||
419 | extern int xt_register_match(struct xt_match *target); | 419 | extern int xt_register_match(struct xt_match *target); |
420 | extern void xt_unregister_match(struct xt_match *target); | 420 | extern void xt_unregister_match(struct xt_match *target); |
421 | extern int xt_register_matches(struct xt_match *match, unsigned int n); | 421 | extern int xt_register_matches(struct xt_match *match, unsigned int n); |
422 | extern void xt_unregister_matches(struct xt_match *match, unsigned int n); | 422 | extern void xt_unregister_matches(struct xt_match *match, unsigned int n); |
423 | 423 | ||
424 | extern int xt_check_match(struct xt_mtchk_param *, | 424 | extern int xt_check_match(struct xt_mtchk_param *, |
425 | unsigned int size, u_int8_t proto, bool inv_proto); | 425 | unsigned int size, u_int8_t proto, bool inv_proto); |
426 | extern int xt_check_target(struct xt_tgchk_param *, | 426 | extern int xt_check_target(struct xt_tgchk_param *, |
427 | unsigned int size, u_int8_t proto, bool inv_proto); | 427 | unsigned int size, u_int8_t proto, bool inv_proto); |
428 | 428 | ||
429 | extern struct xt_table *xt_register_table(struct net *net, | 429 | extern struct xt_table *xt_register_table(struct net *net, |
430 | const struct xt_table *table, | 430 | const struct xt_table *table, |
431 | struct xt_table_info *bootstrap, | 431 | struct xt_table_info *bootstrap, |
432 | struct xt_table_info *newinfo); | 432 | struct xt_table_info *newinfo); |
433 | extern void *xt_unregister_table(struct xt_table *table); | 433 | extern void *xt_unregister_table(struct xt_table *table); |
434 | 434 | ||
435 | extern struct xt_table_info *xt_replace_table(struct xt_table *table, | 435 | extern struct xt_table_info *xt_replace_table(struct xt_table *table, |
436 | unsigned int num_counters, | 436 | unsigned int num_counters, |
437 | struct xt_table_info *newinfo, | 437 | struct xt_table_info *newinfo, |
438 | int *error); | 438 | int *error); |
439 | 439 | ||
440 | extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); | 440 | extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); |
441 | extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); | 441 | extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); |
442 | extern struct xt_match *xt_request_find_match(u8 af, const char *name, | 442 | extern struct xt_match *xt_request_find_match(u8 af, const char *name, |
443 | u8 revision); | 443 | u8 revision); |
444 | extern struct xt_target *xt_request_find_target(u8 af, const char *name, | 444 | extern struct xt_target *xt_request_find_target(u8 af, const char *name, |
445 | u8 revision); | 445 | u8 revision); |
446 | extern int xt_find_revision(u8 af, const char *name, u8 revision, | 446 | extern int xt_find_revision(u8 af, const char *name, u8 revision, |
447 | int target, int *err); | 447 | int target, int *err); |
448 | 448 | ||
449 | extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, | 449 | extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
450 | const char *name); | 450 | const char *name); |
451 | extern void xt_table_unlock(struct xt_table *t); | 451 | extern void xt_table_unlock(struct xt_table *t); |
452 | 452 | ||
453 | extern int xt_proto_init(struct net *net, u_int8_t af); | 453 | extern int xt_proto_init(struct net *net, u_int8_t af); |
454 | extern void xt_proto_fini(struct net *net, u_int8_t af); | 454 | extern void xt_proto_fini(struct net *net, u_int8_t af); |
455 | 455 | ||
456 | extern struct xt_table_info *xt_alloc_table_info(unsigned int size); | 456 | extern struct xt_table_info *xt_alloc_table_info(unsigned int size); |
457 | extern void xt_free_table_info(struct xt_table_info *info); | 457 | extern void xt_free_table_info(struct xt_table_info *info); |
458 | 458 | ||
459 | /* | 459 | /* |
460 | * Per-CPU spinlock associated with per-cpu table entries, and | 460 | * Per-CPU spinlock associated with per-cpu table entries, and |
461 | * with a counter for the "reading" side that allows a recursive | 461 | * with a counter for the "reading" side that allows a recursive |
462 | * reader to avoid taking the lock and deadlocking. | 462 | * reader to avoid taking the lock and deadlocking. |
463 | * | 463 | * |
464 | * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu. | 464 | * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu. |
465 | * It needs to ensure that the rules are not being changed while the packet | 465 | * It needs to ensure that the rules are not being changed while the packet |
466 | * is being processed. In some cases, the read lock will be acquired | 466 | * is being processed. In some cases, the read lock will be acquired |
467 | * twice on the same CPU; this is okay because of the count. | 467 | * twice on the same CPU; this is okay because of the count. |
468 | * | 468 | * |
469 | * "writing" is used when reading counters. | 469 | * "writing" is used when reading counters. |
470 | * During replace any readers that are using the old tables have to complete | 470 | * During replace any readers that are using the old tables have to complete |
471 | * before freeing the old table. This is handled by the write locking | 471 | * before freeing the old table. This is handled by the write locking |
472 | * necessary for reading the counters. | 472 | * necessary for reading the counters. |
473 | */ | 473 | */ |
474 | struct xt_info_lock { | 474 | struct xt_info_lock { |
475 | spinlock_t lock; | 475 | seqlock_t lock; |
476 | unsigned char readers; | 476 | unsigned char readers; |
477 | }; | 477 | }; |
478 | DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); | 478 | DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); |
479 | 479 | ||
480 | /* | 480 | /* |
481 | * Note: we need to ensure that preemption is disabled before acquiring | 481 | * Note: we need to ensure that preemption is disabled before acquiring |
482 | * the per-cpu-variable, so we do it as a two step process rather than | 482 | * the per-cpu-variable, so we do it as a two step process rather than |
483 | * using "spin_lock_bh()". | 483 | * using "spin_lock_bh()". |
484 | * | 484 | * |
485 | * We _also_ need to disable bottom half processing before updating our | 485 | * We _also_ need to disable bottom half processing before updating our |
486 | * nesting count, to make sure that the only kind of re-entrancy is this | 486 | * nesting count, to make sure that the only kind of re-entrancy is this |
487 | * code being called by itself: since the count+lock is not an atomic | 487 | * code being called by itself: since the count+lock is not an atomic |
488 | * operation, we can allow no races. | 488 | * operation, we can allow no races. |
489 | * | 489 | * |
490 | * _Only_ that special combination of being per-cpu and never getting | 490 | * _Only_ that special combination of being per-cpu and never getting |
491 | * re-entered asynchronously means that the count is safe. | 491 | * re-entered asynchronously means that the count is safe. |
492 | */ | 492 | */ |
493 | static inline void xt_info_rdlock_bh(void) | 493 | static inline void xt_info_rdlock_bh(void) |
494 | { | 494 | { |
495 | struct xt_info_lock *lock; | 495 | struct xt_info_lock *lock; |
496 | 496 | ||
497 | local_bh_disable(); | 497 | local_bh_disable(); |
498 | lock = &__get_cpu_var(xt_info_locks); | 498 | lock = &__get_cpu_var(xt_info_locks); |
499 | if (likely(!lock->readers++)) | 499 | if (likely(!lock->readers++)) |
500 | spin_lock(&lock->lock); | 500 | write_seqlock(&lock->lock); |
501 | } | 501 | } |
502 | 502 | ||
503 | static inline void xt_info_rdunlock_bh(void) | 503 | static inline void xt_info_rdunlock_bh(void) |
504 | { | 504 | { |
505 | struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); | 505 | struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); |
506 | 506 | ||
507 | if (likely(!--lock->readers)) | 507 | if (likely(!--lock->readers)) |
508 | spin_unlock(&lock->lock); | 508 | write_sequnlock(&lock->lock); |
509 | local_bh_enable(); | 509 | local_bh_enable(); |
510 | } | 510 | } |
511 | 511 | ||
512 | /* | 512 | /* |
513 | * The "writer" side needs to get exclusive access to the lock, | 513 | * The "writer" side needs to get exclusive access to the lock, |
514 | * regardless of readers. This must be called with bottom half | 514 | * regardless of readers. This must be called with bottom half |
515 | * processing (and thus also preemption) disabled. | 515 | * processing (and thus also preemption) disabled. |
516 | */ | 516 | */ |
517 | static inline void xt_info_wrlock(unsigned int cpu) | 517 | static inline void xt_info_wrlock(unsigned int cpu) |
518 | { | 518 | { |
519 | spin_lock(&per_cpu(xt_info_locks, cpu).lock); | 519 | write_seqlock(&per_cpu(xt_info_locks, cpu).lock); |
520 | } | 520 | } |
521 | 521 | ||
522 | static inline void xt_info_wrunlock(unsigned int cpu) | 522 | static inline void xt_info_wrunlock(unsigned int cpu) |
523 | { | 523 | { |
524 | spin_unlock(&per_cpu(xt_info_locks, cpu).lock); | 524 | write_sequnlock(&per_cpu(xt_info_locks, cpu).lock); |
525 | } | 525 | } |
526 | 526 | ||
527 | /* | 527 | /* |
528 | * This helper is performance critical and must be inlined | 528 | * This helper is performance critical and must be inlined |
529 | */ | 529 | */ |
530 | static inline unsigned long ifname_compare_aligned(const char *_a, | 530 | static inline unsigned long ifname_compare_aligned(const char *_a, |
531 | const char *_b, | 531 | const char *_b, |
532 | const char *_mask) | 532 | const char *_mask) |
533 | { | 533 | { |
534 | const unsigned long *a = (const unsigned long *)_a; | 534 | const unsigned long *a = (const unsigned long *)_a; |
535 | const unsigned long *b = (const unsigned long *)_b; | 535 | const unsigned long *b = (const unsigned long *)_b; |
536 | const unsigned long *mask = (const unsigned long *)_mask; | 536 | const unsigned long *mask = (const unsigned long *)_mask; |
537 | unsigned long ret; | 537 | unsigned long ret; |
538 | 538 | ||
539 | ret = (a[0] ^ b[0]) & mask[0]; | 539 | ret = (a[0] ^ b[0]) & mask[0]; |
540 | if (IFNAMSIZ > sizeof(unsigned long)) | 540 | if (IFNAMSIZ > sizeof(unsigned long)) |
541 | ret |= (a[1] ^ b[1]) & mask[1]; | 541 | ret |= (a[1] ^ b[1]) & mask[1]; |
542 | if (IFNAMSIZ > 2 * sizeof(unsigned long)) | 542 | if (IFNAMSIZ > 2 * sizeof(unsigned long)) |
543 | ret |= (a[2] ^ b[2]) & mask[2]; | 543 | ret |= (a[2] ^ b[2]) & mask[2]; |
544 | if (IFNAMSIZ > 3 * sizeof(unsigned long)) | 544 | if (IFNAMSIZ > 3 * sizeof(unsigned long)) |
545 | ret |= (a[3] ^ b[3]) & mask[3]; | 545 | ret |= (a[3] ^ b[3]) & mask[3]; |
546 | BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); | 546 | BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); |
547 | return ret; | 547 | return ret; |
548 | } | 548 | } |
549 | 549 | ||
550 | extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); | 550 | extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); |
551 | extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); | 551 | extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); |
552 | 552 | ||
553 | #ifdef CONFIG_COMPAT | 553 | #ifdef CONFIG_COMPAT |
554 | #include <net/compat.h> | 554 | #include <net/compat.h> |
555 | 555 | ||
556 | struct compat_xt_entry_match { | 556 | struct compat_xt_entry_match { |
557 | union { | 557 | union { |
558 | struct { | 558 | struct { |
559 | u_int16_t match_size; | 559 | u_int16_t match_size; |
560 | char name[XT_FUNCTION_MAXNAMELEN - 1]; | 560 | char name[XT_FUNCTION_MAXNAMELEN - 1]; |
561 | u_int8_t revision; | 561 | u_int8_t revision; |
562 | } user; | 562 | } user; |
563 | struct { | 563 | struct { |
564 | u_int16_t match_size; | 564 | u_int16_t match_size; |
565 | compat_uptr_t match; | 565 | compat_uptr_t match; |
566 | } kernel; | 566 | } kernel; |
567 | u_int16_t match_size; | 567 | u_int16_t match_size; |
568 | } u; | 568 | } u; |
569 | unsigned char data[0]; | 569 | unsigned char data[0]; |
570 | }; | 570 | }; |
571 | 571 | ||
572 | struct compat_xt_entry_target { | 572 | struct compat_xt_entry_target { |
573 | union { | 573 | union { |
574 | struct { | 574 | struct { |
575 | u_int16_t target_size; | 575 | u_int16_t target_size; |
576 | char name[XT_FUNCTION_MAXNAMELEN - 1]; | 576 | char name[XT_FUNCTION_MAXNAMELEN - 1]; |
577 | u_int8_t revision; | 577 | u_int8_t revision; |
578 | } user; | 578 | } user; |
579 | struct { | 579 | struct { |
580 | u_int16_t target_size; | 580 | u_int16_t target_size; |
581 | compat_uptr_t target; | 581 | compat_uptr_t target; |
582 | } kernel; | 582 | } kernel; |
583 | u_int16_t target_size; | 583 | u_int16_t target_size; |
584 | } u; | 584 | } u; |
585 | unsigned char data[0]; | 585 | unsigned char data[0]; |
586 | }; | 586 | }; |
587 | 587 | ||
588 | /* FIXME: this works only on 32 bit tasks | 588 | /* FIXME: this works only on 32 bit tasks |
589 | * need to change whole approach in order to calculate align as function of | 589 | * need to change whole approach in order to calculate align as function of |
590 | * current task alignment */ | 590 | * current task alignment */ |
591 | 591 | ||
592 | struct compat_xt_counters { | 592 | struct compat_xt_counters { |
593 | compat_u64 pcnt, bcnt; /* Packet and byte counters */ | 593 | compat_u64 pcnt, bcnt; /* Packet and byte counters */ |
594 | }; | 594 | }; |
595 | 595 | ||
596 | struct compat_xt_counters_info { | 596 | struct compat_xt_counters_info { |
597 | char name[XT_TABLE_MAXNAMELEN]; | 597 | char name[XT_TABLE_MAXNAMELEN]; |
598 | compat_uint_t num_counters; | 598 | compat_uint_t num_counters; |
599 | struct compat_xt_counters counters[0]; | 599 | struct compat_xt_counters counters[0]; |
600 | }; | 600 | }; |
601 | 601 | ||
602 | struct _compat_xt_align { | 602 | struct _compat_xt_align { |
603 | __u8 u8; | 603 | __u8 u8; |
604 | __u16 u16; | 604 | __u16 u16; |
605 | __u32 u32; | 605 | __u32 u32; |
606 | compat_u64 u64; | 606 | compat_u64 u64; |
607 | }; | 607 | }; |
608 | 608 | ||
609 | #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align)) | 609 | #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align)) |
610 | 610 | ||
611 | extern void xt_compat_lock(u_int8_t af); | 611 | extern void xt_compat_lock(u_int8_t af); |
612 | extern void xt_compat_unlock(u_int8_t af); | 612 | extern void xt_compat_unlock(u_int8_t af); |
613 | 613 | ||
614 | extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta); | 614 | extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta); |
615 | extern void xt_compat_flush_offsets(u_int8_t af); | 615 | extern void xt_compat_flush_offsets(u_int8_t af); |
616 | extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset); | 616 | extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset); |
617 | 617 | ||
618 | extern int xt_compat_match_offset(const struct xt_match *match); | 618 | extern int xt_compat_match_offset(const struct xt_match *match); |
619 | extern int xt_compat_match_from_user(struct xt_entry_match *m, | 619 | extern int xt_compat_match_from_user(struct xt_entry_match *m, |
620 | void **dstptr, unsigned int *size); | 620 | void **dstptr, unsigned int *size); |
621 | extern int xt_compat_match_to_user(const struct xt_entry_match *m, | 621 | extern int xt_compat_match_to_user(const struct xt_entry_match *m, |
622 | void __user **dstptr, unsigned int *size); | 622 | void __user **dstptr, unsigned int *size); |
623 | 623 | ||
624 | extern int xt_compat_target_offset(const struct xt_target *target); | 624 | extern int xt_compat_target_offset(const struct xt_target *target); |
625 | extern void xt_compat_target_from_user(struct xt_entry_target *t, | 625 | extern void xt_compat_target_from_user(struct xt_entry_target *t, |
626 | void **dstptr, unsigned int *size); | 626 | void **dstptr, unsigned int *size); |
627 | extern int xt_compat_target_to_user(const struct xt_entry_target *t, | 627 | extern int xt_compat_target_to_user(const struct xt_entry_target *t, |
628 | void __user **dstptr, unsigned int *size); | 628 | void __user **dstptr, unsigned int *size); |
629 | 629 | ||
630 | #endif /* CONFIG_COMPAT */ | 630 | #endif /* CONFIG_COMPAT */ |
631 | #endif /* __KERNEL__ */ | 631 | #endif /* __KERNEL__ */ |
632 | 632 | ||
633 | #endif /* _X_TABLES_H */ | 633 | #endif /* _X_TABLES_H */ |
634 | 634 |
net/ipv4/netfilter/arp_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code for ARP packets. | 2 | * Packet matching code for ARP packets. |
3 | * | 3 | * |
4 | * Based heavily, if not almost entirely, upon ip_tables.c framework. | 4 | * Based heavily, if not almost entirely, upon ip_tables.c framework. |
5 | * | 5 | * |
6 | * Some ARP specific bits are: | 6 | * Some ARP specific bits are: |
7 | * | 7 | * |
8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) | 8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
16 | #include <linux/if_arp.h> | 16 | #include <linux/if_arp.h> |
17 | #include <linux/kmod.h> | 17 | #include <linux/kmod.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <net/compat.h> | 24 | #include <net/compat.h> |
25 | #include <net/sock.h> | 25 | #include <net/sock.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | #include <linux/netfilter_arp/arp_tables.h> | 29 | #include <linux/netfilter_arp/arp_tables.h> |
30 | #include "../../netfilter/xt_repldata.h" | 30 | #include "../../netfilter/xt_repldata.h" |
31 | 31 | ||
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); | 33 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); |
34 | MODULE_DESCRIPTION("arptables core"); | 34 | MODULE_DESCRIPTION("arptables core"); |
35 | 35 | ||
36 | /*#define DEBUG_ARP_TABLES*/ | 36 | /*#define DEBUG_ARP_TABLES*/ |
37 | /*#define DEBUG_ARP_TABLES_USER*/ | 37 | /*#define DEBUG_ARP_TABLES_USER*/ |
38 | 38 | ||
39 | #ifdef DEBUG_ARP_TABLES | 39 | #ifdef DEBUG_ARP_TABLES |
40 | #define dprintf(format, args...) printk(format , ## args) | 40 | #define dprintf(format, args...) printk(format , ## args) |
41 | #else | 41 | #else |
42 | #define dprintf(format, args...) | 42 | #define dprintf(format, args...) |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #ifdef DEBUG_ARP_TABLES_USER | 45 | #ifdef DEBUG_ARP_TABLES_USER |
46 | #define duprintf(format, args...) printk(format , ## args) | 46 | #define duprintf(format, args...) printk(format , ## args) |
47 | #else | 47 | #else |
48 | #define duprintf(format, args...) | 48 | #define duprintf(format, args...) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #ifdef CONFIG_NETFILTER_DEBUG | 51 | #ifdef CONFIG_NETFILTER_DEBUG |
52 | #define ARP_NF_ASSERT(x) WARN_ON(!(x)) | 52 | #define ARP_NF_ASSERT(x) WARN_ON(!(x)) |
53 | #else | 53 | #else |
54 | #define ARP_NF_ASSERT(x) | 54 | #define ARP_NF_ASSERT(x) |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | void *arpt_alloc_initial_table(const struct xt_table *info) | 57 | void *arpt_alloc_initial_table(const struct xt_table *info) |
58 | { | 58 | { |
59 | return xt_alloc_initial_table(arpt, ARPT); | 59 | return xt_alloc_initial_table(arpt, ARPT); |
60 | } | 60 | } |
61 | EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); | 61 | EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); |
62 | 62 | ||
63 | static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, | 63 | static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, |
64 | const char *hdr_addr, int len) | 64 | const char *hdr_addr, int len) |
65 | { | 65 | { |
66 | int i, ret; | 66 | int i, ret; |
67 | 67 | ||
68 | if (len > ARPT_DEV_ADDR_LEN_MAX) | 68 | if (len > ARPT_DEV_ADDR_LEN_MAX) |
69 | len = ARPT_DEV_ADDR_LEN_MAX; | 69 | len = ARPT_DEV_ADDR_LEN_MAX; |
70 | 70 | ||
71 | ret = 0; | 71 | ret = 0; |
72 | for (i = 0; i < len; i++) | 72 | for (i = 0; i < len; i++) |
73 | ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; | 73 | ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; |
74 | 74 | ||
75 | return ret != 0; | 75 | return ret != 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * Unfortunatly, _b and _mask are not aligned to an int (or long int) | 79 | * Unfortunatly, _b and _mask are not aligned to an int (or long int) |
80 | * Some arches dont care, unrolling the loop is a win on them. | 80 | * Some arches dont care, unrolling the loop is a win on them. |
81 | * For other arches, we only have a 16bit alignement. | 81 | * For other arches, we only have a 16bit alignement. |
82 | */ | 82 | */ |
83 | static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) | 83 | static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) |
84 | { | 84 | { |
85 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 85 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
86 | unsigned long ret = ifname_compare_aligned(_a, _b, _mask); | 86 | unsigned long ret = ifname_compare_aligned(_a, _b, _mask); |
87 | #else | 87 | #else |
88 | unsigned long ret = 0; | 88 | unsigned long ret = 0; |
89 | const u16 *a = (const u16 *)_a; | 89 | const u16 *a = (const u16 *)_a; |
90 | const u16 *b = (const u16 *)_b; | 90 | const u16 *b = (const u16 *)_b; |
91 | const u16 *mask = (const u16 *)_mask; | 91 | const u16 *mask = (const u16 *)_mask; |
92 | int i; | 92 | int i; |
93 | 93 | ||
94 | for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) | 94 | for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) |
95 | ret |= (a[i] ^ b[i]) & mask[i]; | 95 | ret |= (a[i] ^ b[i]) & mask[i]; |
96 | #endif | 96 | #endif |
97 | return ret; | 97 | return ret; |
98 | } | 98 | } |
99 | 99 | ||
100 | /* Returns whether packet matches rule or not. */ | 100 | /* Returns whether packet matches rule or not. */ |
101 | static inline int arp_packet_match(const struct arphdr *arphdr, | 101 | static inline int arp_packet_match(const struct arphdr *arphdr, |
102 | struct net_device *dev, | 102 | struct net_device *dev, |
103 | const char *indev, | 103 | const char *indev, |
104 | const char *outdev, | 104 | const char *outdev, |
105 | const struct arpt_arp *arpinfo) | 105 | const struct arpt_arp *arpinfo) |
106 | { | 106 | { |
107 | const char *arpptr = (char *)(arphdr + 1); | 107 | const char *arpptr = (char *)(arphdr + 1); |
108 | const char *src_devaddr, *tgt_devaddr; | 108 | const char *src_devaddr, *tgt_devaddr; |
109 | __be32 src_ipaddr, tgt_ipaddr; | 109 | __be32 src_ipaddr, tgt_ipaddr; |
110 | long ret; | 110 | long ret; |
111 | 111 | ||
112 | #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) | 112 | #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) |
113 | 113 | ||
114 | if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, | 114 | if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, |
115 | ARPT_INV_ARPOP)) { | 115 | ARPT_INV_ARPOP)) { |
116 | dprintf("ARP operation field mismatch.\n"); | 116 | dprintf("ARP operation field mismatch.\n"); |
117 | dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", | 117 | dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", |
118 | arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); | 118 | arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); |
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, | 122 | if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, |
123 | ARPT_INV_ARPHRD)) { | 123 | ARPT_INV_ARPHRD)) { |
124 | dprintf("ARP hardware address format mismatch.\n"); | 124 | dprintf("ARP hardware address format mismatch.\n"); |
125 | dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", | 125 | dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", |
126 | arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); | 126 | arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); |
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, | 130 | if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, |
131 | ARPT_INV_ARPPRO)) { | 131 | ARPT_INV_ARPPRO)) { |
132 | dprintf("ARP protocol address format mismatch.\n"); | 132 | dprintf("ARP protocol address format mismatch.\n"); |
133 | dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", | 133 | dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", |
134 | arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); | 134 | arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, | 138 | if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, |
139 | ARPT_INV_ARPHLN)) { | 139 | ARPT_INV_ARPHLN)) { |
140 | dprintf("ARP hardware address length mismatch.\n"); | 140 | dprintf("ARP hardware address length mismatch.\n"); |
141 | dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", | 141 | dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", |
142 | arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); | 142 | arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); |
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | src_devaddr = arpptr; | 146 | src_devaddr = arpptr; |
147 | arpptr += dev->addr_len; | 147 | arpptr += dev->addr_len; |
148 | memcpy(&src_ipaddr, arpptr, sizeof(u32)); | 148 | memcpy(&src_ipaddr, arpptr, sizeof(u32)); |
149 | arpptr += sizeof(u32); | 149 | arpptr += sizeof(u32); |
150 | tgt_devaddr = arpptr; | 150 | tgt_devaddr = arpptr; |
151 | arpptr += dev->addr_len; | 151 | arpptr += dev->addr_len; |
152 | memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); | 152 | memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); |
153 | 153 | ||
154 | if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), | 154 | if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), |
155 | ARPT_INV_SRCDEVADDR) || | 155 | ARPT_INV_SRCDEVADDR) || |
156 | FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), | 156 | FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), |
157 | ARPT_INV_TGTDEVADDR)) { | 157 | ARPT_INV_TGTDEVADDR)) { |
158 | dprintf("Source or target device address mismatch.\n"); | 158 | dprintf("Source or target device address mismatch.\n"); |
159 | 159 | ||
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
162 | 162 | ||
163 | if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, | 163 | if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, |
164 | ARPT_INV_SRCIP) || | 164 | ARPT_INV_SRCIP) || |
165 | FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), | 165 | FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), |
166 | ARPT_INV_TGTIP)) { | 166 | ARPT_INV_TGTIP)) { |
167 | dprintf("Source or target IP address mismatch.\n"); | 167 | dprintf("Source or target IP address mismatch.\n"); |
168 | 168 | ||
169 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", | 169 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", |
170 | &src_ipaddr, | 170 | &src_ipaddr, |
171 | &arpinfo->smsk.s_addr, | 171 | &arpinfo->smsk.s_addr, |
172 | &arpinfo->src.s_addr, | 172 | &arpinfo->src.s_addr, |
173 | arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); | 173 | arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); |
174 | dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", | 174 | dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", |
175 | &tgt_ipaddr, | 175 | &tgt_ipaddr, |
176 | &arpinfo->tmsk.s_addr, | 176 | &arpinfo->tmsk.s_addr, |
177 | &arpinfo->tgt.s_addr, | 177 | &arpinfo->tgt.s_addr, |
178 | arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); | 178 | arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); |
179 | return 0; | 179 | return 0; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* Look for ifname matches. */ | 182 | /* Look for ifname matches. */ |
183 | ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); | 183 | ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); |
184 | 184 | ||
185 | if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { | 185 | if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { |
186 | dprintf("VIA in mismatch (%s vs %s).%s\n", | 186 | dprintf("VIA in mismatch (%s vs %s).%s\n", |
187 | indev, arpinfo->iniface, | 187 | indev, arpinfo->iniface, |
188 | arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":""); | 188 | arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":""); |
189 | return 0; | 189 | return 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); | 192 | ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); |
193 | 193 | ||
194 | if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { | 194 | if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { |
195 | dprintf("VIA out mismatch (%s vs %s).%s\n", | 195 | dprintf("VIA out mismatch (%s vs %s).%s\n", |
196 | outdev, arpinfo->outiface, | 196 | outdev, arpinfo->outiface, |
197 | arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":""); | 197 | arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":""); |
198 | return 0; | 198 | return 0; |
199 | } | 199 | } |
200 | 200 | ||
201 | return 1; | 201 | return 1; |
202 | #undef FWINV | 202 | #undef FWINV |
203 | } | 203 | } |
204 | 204 | ||
205 | static inline int arp_checkentry(const struct arpt_arp *arp) | 205 | static inline int arp_checkentry(const struct arpt_arp *arp) |
206 | { | 206 | { |
207 | if (arp->flags & ~ARPT_F_MASK) { | 207 | if (arp->flags & ~ARPT_F_MASK) { |
208 | duprintf("Unknown flag bits set: %08X\n", | 208 | duprintf("Unknown flag bits set: %08X\n", |
209 | arp->flags & ~ARPT_F_MASK); | 209 | arp->flags & ~ARPT_F_MASK); |
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
212 | if (arp->invflags & ~ARPT_INV_MASK) { | 212 | if (arp->invflags & ~ARPT_INV_MASK) { |
213 | duprintf("Unknown invflag bits set: %08X\n", | 213 | duprintf("Unknown invflag bits set: %08X\n", |
214 | arp->invflags & ~ARPT_INV_MASK); | 214 | arp->invflags & ~ARPT_INV_MASK); |
215 | return 0; | 215 | return 0; |
216 | } | 216 | } |
217 | 217 | ||
218 | return 1; | 218 | return 1; |
219 | } | 219 | } |
220 | 220 | ||
221 | static unsigned int | 221 | static unsigned int |
222 | arpt_error(struct sk_buff *skb, const struct xt_action_param *par) | 222 | arpt_error(struct sk_buff *skb, const struct xt_action_param *par) |
223 | { | 223 | { |
224 | if (net_ratelimit()) | 224 | if (net_ratelimit()) |
225 | pr_err("arp_tables: error: '%s'\n", | 225 | pr_err("arp_tables: error: '%s'\n", |
226 | (const char *)par->targinfo); | 226 | (const char *)par->targinfo); |
227 | 227 | ||
228 | return NF_DROP; | 228 | return NF_DROP; |
229 | } | 229 | } |
230 | 230 | ||
231 | static inline const struct xt_entry_target * | 231 | static inline const struct xt_entry_target * |
232 | arpt_get_target_c(const struct arpt_entry *e) | 232 | arpt_get_target_c(const struct arpt_entry *e) |
233 | { | 233 | { |
234 | return arpt_get_target((struct arpt_entry *)e); | 234 | return arpt_get_target((struct arpt_entry *)e); |
235 | } | 235 | } |
236 | 236 | ||
237 | static inline struct arpt_entry * | 237 | static inline struct arpt_entry * |
238 | get_entry(const void *base, unsigned int offset) | 238 | get_entry(const void *base, unsigned int offset) |
239 | { | 239 | { |
240 | return (struct arpt_entry *)(base + offset); | 240 | return (struct arpt_entry *)(base + offset); |
241 | } | 241 | } |
242 | 242 | ||
243 | static inline __pure | 243 | static inline __pure |
244 | struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) | 244 | struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) |
245 | { | 245 | { |
246 | return (void *)entry + entry->next_offset; | 246 | return (void *)entry + entry->next_offset; |
247 | } | 247 | } |
248 | 248 | ||
249 | unsigned int arpt_do_table(struct sk_buff *skb, | 249 | unsigned int arpt_do_table(struct sk_buff *skb, |
250 | unsigned int hook, | 250 | unsigned int hook, |
251 | const struct net_device *in, | 251 | const struct net_device *in, |
252 | const struct net_device *out, | 252 | const struct net_device *out, |
253 | struct xt_table *table) | 253 | struct xt_table *table) |
254 | { | 254 | { |
255 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 255 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
256 | unsigned int verdict = NF_DROP; | 256 | unsigned int verdict = NF_DROP; |
257 | const struct arphdr *arp; | 257 | const struct arphdr *arp; |
258 | struct arpt_entry *e, *back; | 258 | struct arpt_entry *e, *back; |
259 | const char *indev, *outdev; | 259 | const char *indev, *outdev; |
260 | void *table_base; | 260 | void *table_base; |
261 | const struct xt_table_info *private; | 261 | const struct xt_table_info *private; |
262 | struct xt_action_param acpar; | 262 | struct xt_action_param acpar; |
263 | 263 | ||
264 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) | 264 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
265 | return NF_DROP; | 265 | return NF_DROP; |
266 | 266 | ||
267 | indev = in ? in->name : nulldevname; | 267 | indev = in ? in->name : nulldevname; |
268 | outdev = out ? out->name : nulldevname; | 268 | outdev = out ? out->name : nulldevname; |
269 | 269 | ||
270 | xt_info_rdlock_bh(); | 270 | xt_info_rdlock_bh(); |
271 | private = table->private; | 271 | private = table->private; |
272 | table_base = private->entries[smp_processor_id()]; | 272 | table_base = private->entries[smp_processor_id()]; |
273 | 273 | ||
274 | e = get_entry(table_base, private->hook_entry[hook]); | 274 | e = get_entry(table_base, private->hook_entry[hook]); |
275 | back = get_entry(table_base, private->underflow[hook]); | 275 | back = get_entry(table_base, private->underflow[hook]); |
276 | 276 | ||
277 | acpar.in = in; | 277 | acpar.in = in; |
278 | acpar.out = out; | 278 | acpar.out = out; |
279 | acpar.hooknum = hook; | 279 | acpar.hooknum = hook; |
280 | acpar.family = NFPROTO_ARP; | 280 | acpar.family = NFPROTO_ARP; |
281 | acpar.hotdrop = false; | 281 | acpar.hotdrop = false; |
282 | 282 | ||
283 | arp = arp_hdr(skb); | 283 | arp = arp_hdr(skb); |
284 | do { | 284 | do { |
285 | const struct xt_entry_target *t; | 285 | const struct xt_entry_target *t; |
286 | 286 | ||
287 | if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { | 287 | if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { |
288 | e = arpt_next_entry(e); | 288 | e = arpt_next_entry(e); |
289 | continue; | 289 | continue; |
290 | } | 290 | } |
291 | 291 | ||
292 | ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1); | 292 | ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1); |
293 | 293 | ||
294 | t = arpt_get_target_c(e); | 294 | t = arpt_get_target_c(e); |
295 | 295 | ||
296 | /* Standard target? */ | 296 | /* Standard target? */ |
297 | if (!t->u.kernel.target->target) { | 297 | if (!t->u.kernel.target->target) { |
298 | int v; | 298 | int v; |
299 | 299 | ||
300 | v = ((struct xt_standard_target *)t)->verdict; | 300 | v = ((struct xt_standard_target *)t)->verdict; |
301 | if (v < 0) { | 301 | if (v < 0) { |
302 | /* Pop from stack? */ | 302 | /* Pop from stack? */ |
303 | if (v != XT_RETURN) { | 303 | if (v != XT_RETURN) { |
304 | verdict = (unsigned)(-v) - 1; | 304 | verdict = (unsigned)(-v) - 1; |
305 | break; | 305 | break; |
306 | } | 306 | } |
307 | e = back; | 307 | e = back; |
308 | back = get_entry(table_base, back->comefrom); | 308 | back = get_entry(table_base, back->comefrom); |
309 | continue; | 309 | continue; |
310 | } | 310 | } |
311 | if (table_base + v | 311 | if (table_base + v |
312 | != arpt_next_entry(e)) { | 312 | != arpt_next_entry(e)) { |
313 | /* Save old back ptr in next entry */ | 313 | /* Save old back ptr in next entry */ |
314 | struct arpt_entry *next = arpt_next_entry(e); | 314 | struct arpt_entry *next = arpt_next_entry(e); |
315 | next->comefrom = (void *)back - table_base; | 315 | next->comefrom = (void *)back - table_base; |
316 | 316 | ||
317 | /* set back pointer to next entry */ | 317 | /* set back pointer to next entry */ |
318 | back = next; | 318 | back = next; |
319 | } | 319 | } |
320 | 320 | ||
321 | e = get_entry(table_base, v); | 321 | e = get_entry(table_base, v); |
322 | continue; | 322 | continue; |
323 | } | 323 | } |
324 | 324 | ||
325 | /* Targets which reenter must return | 325 | /* Targets which reenter must return |
326 | * abs. verdicts | 326 | * abs. verdicts |
327 | */ | 327 | */ |
328 | acpar.target = t->u.kernel.target; | 328 | acpar.target = t->u.kernel.target; |
329 | acpar.targinfo = t->data; | 329 | acpar.targinfo = t->data; |
330 | verdict = t->u.kernel.target->target(skb, &acpar); | 330 | verdict = t->u.kernel.target->target(skb, &acpar); |
331 | 331 | ||
332 | /* Target might have changed stuff. */ | 332 | /* Target might have changed stuff. */ |
333 | arp = arp_hdr(skb); | 333 | arp = arp_hdr(skb); |
334 | 334 | ||
335 | if (verdict == XT_CONTINUE) | 335 | if (verdict == XT_CONTINUE) |
336 | e = arpt_next_entry(e); | 336 | e = arpt_next_entry(e); |
337 | else | 337 | else |
338 | /* Verdict */ | 338 | /* Verdict */ |
339 | break; | 339 | break; |
340 | } while (!acpar.hotdrop); | 340 | } while (!acpar.hotdrop); |
341 | xt_info_rdunlock_bh(); | 341 | xt_info_rdunlock_bh(); |
342 | 342 | ||
343 | if (acpar.hotdrop) | 343 | if (acpar.hotdrop) |
344 | return NF_DROP; | 344 | return NF_DROP; |
345 | else | 345 | else |
346 | return verdict; | 346 | return verdict; |
347 | } | 347 | } |
348 | 348 | ||
349 | /* All zeroes == unconditional rule. */ | 349 | /* All zeroes == unconditional rule. */ |
350 | static inline bool unconditional(const struct arpt_arp *arp) | 350 | static inline bool unconditional(const struct arpt_arp *arp) |
351 | { | 351 | { |
352 | static const struct arpt_arp uncond; | 352 | static const struct arpt_arp uncond; |
353 | 353 | ||
354 | return memcmp(arp, &uncond, sizeof(uncond)) == 0; | 354 | return memcmp(arp, &uncond, sizeof(uncond)) == 0; |
355 | } | 355 | } |
356 | 356 | ||
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops. Puts hook bitmask in comefrom.
 *
 * Walks every chain reachable from each valid hook entry point,
 * OR-ing (1 << hook) into each visited rule's ->comefrom.  The walk
 * is iterative: jump targets are chased depth-first, and the return
 * path is recorded in ->counters.pcnt (reset to 0 on the way back),
 * so no recursion and no extra memory are needed.  Bit
 * (1 << NF_ARP_NUMHOOKS) in ->comefrom marks "currently on the walk
 * stack"; meeting it again means a rule loop.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* "On stack" bit still set => we came back to a rule
			 * that is on the current walk path: a loop.
			 */
			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				pr_notice("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct arpt_entry) &&
			    (strcmp(t->target.u.user.name,
				    XT_STANDARD_TARGET) == 0) &&
			    t->verdict < 0 && unconditional(&e->arp)) ||
			    visited) {
				unsigned int oldpos, size;

				/* Verdicts below -NF_MAX_VERDICT-1 are neither
				 * absolute verdicts nor RETURN: malformed.
				 */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must still leave room for
					 * at least one entry before the end.
					 */
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
464 | 464 | ||
465 | static inline int check_entry(const struct arpt_entry *e, const char *name) | 465 | static inline int check_entry(const struct arpt_entry *e, const char *name) |
466 | { | 466 | { |
467 | const struct xt_entry_target *t; | 467 | const struct xt_entry_target *t; |
468 | 468 | ||
469 | if (!arp_checkentry(&e->arp)) { | 469 | if (!arp_checkentry(&e->arp)) { |
470 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); | 470 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); |
471 | return -EINVAL; | 471 | return -EINVAL; |
472 | } | 472 | } |
473 | 473 | ||
474 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) | 474 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) |
475 | return -EINVAL; | 475 | return -EINVAL; |
476 | 476 | ||
477 | t = arpt_get_target_c(e); | 477 | t = arpt_get_target_c(e); |
478 | if (e->target_offset + t->u.target_size > e->next_offset) | 478 | if (e->target_offset + t->u.target_size > e->next_offset) |
479 | return -EINVAL; | 479 | return -EINVAL; |
480 | 480 | ||
481 | return 0; | 481 | return 0; |
482 | } | 482 | } |
483 | 483 | ||
484 | static inline int check_target(struct arpt_entry *e, const char *name) | 484 | static inline int check_target(struct arpt_entry *e, const char *name) |
485 | { | 485 | { |
486 | struct xt_entry_target *t = arpt_get_target(e); | 486 | struct xt_entry_target *t = arpt_get_target(e); |
487 | int ret; | 487 | int ret; |
488 | struct xt_tgchk_param par = { | 488 | struct xt_tgchk_param par = { |
489 | .table = name, | 489 | .table = name, |
490 | .entryinfo = e, | 490 | .entryinfo = e, |
491 | .target = t->u.kernel.target, | 491 | .target = t->u.kernel.target, |
492 | .targinfo = t->data, | 492 | .targinfo = t->data, |
493 | .hook_mask = e->comefrom, | 493 | .hook_mask = e->comefrom, |
494 | .family = NFPROTO_ARP, | 494 | .family = NFPROTO_ARP, |
495 | }; | 495 | }; |
496 | 496 | ||
497 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); | 497 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); |
498 | if (ret < 0) { | 498 | if (ret < 0) { |
499 | duprintf("arp_tables: check failed for `%s'.\n", | 499 | duprintf("arp_tables: check failed for `%s'.\n", |
500 | t->u.kernel.target->name); | 500 | t->u.kernel.target->name); |
501 | return ret; | 501 | return ret; |
502 | } | 502 | } |
503 | return 0; | 503 | return 0; |
504 | } | 504 | } |
505 | 505 | ||
506 | static inline int | 506 | static inline int |
507 | find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | 507 | find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) |
508 | { | 508 | { |
509 | struct xt_entry_target *t; | 509 | struct xt_entry_target *t; |
510 | struct xt_target *target; | 510 | struct xt_target *target; |
511 | int ret; | 511 | int ret; |
512 | 512 | ||
513 | ret = check_entry(e, name); | 513 | ret = check_entry(e, name); |
514 | if (ret) | 514 | if (ret) |
515 | return ret; | 515 | return ret; |
516 | 516 | ||
517 | t = arpt_get_target(e); | 517 | t = arpt_get_target(e); |
518 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, | 518 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
519 | t->u.user.revision); | 519 | t->u.user.revision); |
520 | if (IS_ERR(target)) { | 520 | if (IS_ERR(target)) { |
521 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 521 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
522 | ret = PTR_ERR(target); | 522 | ret = PTR_ERR(target); |
523 | goto out; | 523 | goto out; |
524 | } | 524 | } |
525 | t->u.kernel.target = target; | 525 | t->u.kernel.target = target; |
526 | 526 | ||
527 | ret = check_target(e, name); | 527 | ret = check_target(e, name); |
528 | if (ret) | 528 | if (ret) |
529 | goto err; | 529 | goto err; |
530 | return 0; | 530 | return 0; |
531 | err: | 531 | err: |
532 | module_put(t->u.kernel.target->me); | 532 | module_put(t->u.kernel.target->me); |
533 | out: | 533 | out: |
534 | return ret; | 534 | return ret; |
535 | } | 535 | } |
536 | 536 | ||
537 | static bool check_underflow(const struct arpt_entry *e) | 537 | static bool check_underflow(const struct arpt_entry *e) |
538 | { | 538 | { |
539 | const struct xt_entry_target *t; | 539 | const struct xt_entry_target *t; |
540 | unsigned int verdict; | 540 | unsigned int verdict; |
541 | 541 | ||
542 | if (!unconditional(&e->arp)) | 542 | if (!unconditional(&e->arp)) |
543 | return false; | 543 | return false; |
544 | t = arpt_get_target_c(e); | 544 | t = arpt_get_target_c(e); |
545 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 545 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
546 | return false; | 546 | return false; |
547 | verdict = ((struct xt_standard_target *)t)->verdict; | 547 | verdict = ((struct xt_standard_target *)t)->verdict; |
548 | verdict = -verdict - 1; | 548 | verdict = -verdict - 1; |
549 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 549 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
550 | } | 550 | } |
551 | 551 | ||
/* Structural validation of one entry during table translation:
 * alignment, minimum size, and whether this entry's offset matches a
 * declared hook entry point or underflow.  Also resets the counters
 * and comefrom fields for the later mark_source_chains() pass.
 */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;

	/* Entry must be properly aligned and leave room for at least
	 * one full arpt_entry before the end of the blob.
	 */
	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover the entry header plus a minimal target. */
	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/* Underflow = chain policy: must be an unconditional
			 * STANDARD ACCEPT/DROP (see check_underflow()).
			 */
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
597 | 597 | ||
598 | static inline void cleanup_entry(struct arpt_entry *e) | 598 | static inline void cleanup_entry(struct arpt_entry *e) |
599 | { | 599 | { |
600 | struct xt_tgdtor_param par; | 600 | struct xt_tgdtor_param par; |
601 | struct xt_entry_target *t; | 601 | struct xt_entry_target *t; |
602 | 602 | ||
603 | t = arpt_get_target(e); | 603 | t = arpt_get_target(e); |
604 | par.target = t->u.kernel.target; | 604 | par.target = t->u.kernel.target; |
605 | par.targinfo = t->data; | 605 | par.targinfo = t->data; |
606 | par.family = NFPROTO_ARP; | 606 | par.family = NFPROTO_ARP; |
607 | if (par.target->destroy != NULL) | 607 | if (par.target->destroy != NULL) |
608 | par.target->destroy(&par); | 608 | par.target->destroy(&par); |
609 | module_put(par.target->me); | 609 | module_put(par.target->me); |
610 | } | 610 | } |
611 | 611 | ||
/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 *
 * Phases: (1) structural walk of every entry (sizes, hooks,
 * underflows), (2) verify all valid hooks were actually assigned,
 * (3) loop detection via mark_source_chains(), (4) per-entry target
 * resolution and check, with rollback of already-checked entries on
 * failure, (5) replicate the translated blob to every other CPU's
 * copy.  Returns 0 on success or a negative errno.
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		/* Each ERROR target marks a user-defined chain; stacksize
		 * sizes the per-cpu jump stack accordingly.
		 */
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	/* Entry count seen must match what userspace declared. */
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back: clean up only the i entries that passed. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
706 | 706 | ||
/* Sum the per-cpu packet/byte counters of every rule into counters[].
 *
 * This is the lockless-reader side of the per-cpu seqlock scheme
 * introduced by this commit: instead of disabling BH and taking the
 * per-cpu write lock (which stalled packet processing for the whole
 * walk), each 64-bit counter pair is snapshotted under
 * read_seqbegin()/read_seqretry() and retried only if the packet path
 * bumped the sequence meanwhile.  counters[] must be zeroed by the
 * caller (alloc_counters() uses vzalloc) since we only ADD here.
 */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until we read a consistent bcnt/pcnt pair
			 * (the writer is ipt_do_table on that cpu).
			 */
			do {
				start = read_seqbegin(lock);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqretry(lock, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
750 | 733 | ||
751 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 734 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
752 | { | 735 | { |
753 | unsigned int countersize; | 736 | unsigned int countersize; |
754 | struct xt_counters *counters; | 737 | struct xt_counters *counters; |
755 | const struct xt_table_info *private = table->private; | 738 | const struct xt_table_info *private = table->private; |
756 | 739 | ||
757 | /* We need atomic snapshot of counters: rest doesn't change | 740 | /* We need atomic snapshot of counters: rest doesn't change |
758 | * (other than comefrom, which userspace doesn't care | 741 | * (other than comefrom, which userspace doesn't care |
759 | * about). | 742 | * about). |
760 | */ | 743 | */ |
761 | countersize = sizeof(struct xt_counters) * private->number; | 744 | countersize = sizeof(struct xt_counters) * private->number; |
762 | counters = vmalloc(countersize); | 745 | counters = vzalloc(countersize); |
763 | 746 | ||
764 | if (counters == NULL) | 747 | if (counters == NULL) |
765 | return ERR_PTR(-ENOMEM); | 748 | return ERR_PTR(-ENOMEM); |
766 | 749 | ||
767 | get_counters(private, counters); | 750 | get_counters(private, counters); |
768 | 751 | ||
769 | return counters; | 752 | return counters; |
770 | } | 753 | } |
771 | 754 | ||
/* Copy the table's entries back to userspace: first the raw blob from
 * this cpu's copy, then patch each entry's counters (with the summed
 * snapshot from alloc_counters()) and each target's user-visible name
 * (the kernel copy holds a pointer, not the name) in place.
 */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		/* Overwrite the stale per-cpu counters with the summed ones. */
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		/* Replace the kernel target pointer with the target's name. */
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
823 | 806 | ||
824 | #ifdef CONFIG_COMPAT | 807 | #ifdef CONFIG_COMPAT |
825 | static void compat_standard_from_user(void *dst, const void *src) | 808 | static void compat_standard_from_user(void *dst, const void *src) |
826 | { | 809 | { |
827 | int v = *(compat_int_t *)src; | 810 | int v = *(compat_int_t *)src; |
828 | 811 | ||
829 | if (v > 0) | 812 | if (v > 0) |
830 | v += xt_compat_calc_jump(NFPROTO_ARP, v); | 813 | v += xt_compat_calc_jump(NFPROTO_ARP, v); |
831 | memcpy(dst, &v, sizeof(v)); | 814 | memcpy(dst, &v, sizeof(v)); |
832 | } | 815 | } |
833 | 816 | ||
834 | static int compat_standard_to_user(void __user *dst, const void *src) | 817 | static int compat_standard_to_user(void __user *dst, const void *src) |
835 | { | 818 | { |
836 | compat_int_t cv = *(int *)src; | 819 | compat_int_t cv = *(int *)src; |
837 | 820 | ||
838 | if (cv > 0) | 821 | if (cv > 0) |
839 | cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); | 822 | cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); |
840 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 823 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
841 | } | 824 | } |
842 | 825 | ||
/* Compute how much smaller one entry becomes in compat (32-bit)
 * layout, record the cumulative offset delta for later jump fixups,
 * and shrink newinfo's size and any hook/underflow offsets that lie
 * past this entry.
 */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Delta of the fixed header plus the target's compat delta. */
	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	/* Remember this entry's delta so jumps can be translated later. */
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Hooks/underflows located after this entry shift down. */
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
871 | 854 | ||
872 | static int compat_table_info(const struct xt_table_info *info, | 855 | static int compat_table_info(const struct xt_table_info *info, |
873 | struct xt_table_info *newinfo) | 856 | struct xt_table_info *newinfo) |
874 | { | 857 | { |
875 | struct arpt_entry *iter; | 858 | struct arpt_entry *iter; |
876 | void *loc_cpu_entry; | 859 | void *loc_cpu_entry; |
877 | int ret; | 860 | int ret; |
878 | 861 | ||
879 | if (!newinfo || !info) | 862 | if (!newinfo || !info) |
880 | return -EINVAL; | 863 | return -EINVAL; |
881 | 864 | ||
882 | /* we dont care about newinfo->entries[] */ | 865 | /* we dont care about newinfo->entries[] */ |
883 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | 866 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); |
884 | newinfo->initial_entries = 0; | 867 | newinfo->initial_entries = 0; |
885 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; | 868 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; |
886 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { | 869 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { |
887 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); | 870 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); |
888 | if (ret != 0) | 871 | if (ret != 0) |
889 | return ret; | 872 | return ret; |
890 | } | 873 | } |
891 | return 0; | 874 | return 0; |
892 | } | 875 | } |
893 | #endif | 876 | #endif |
894 | 877 | ||
895 | static int get_info(struct net *net, void __user *user, | 878 | static int get_info(struct net *net, void __user *user, |
896 | const int *len, int compat) | 879 | const int *len, int compat) |
897 | { | 880 | { |
898 | char name[XT_TABLE_MAXNAMELEN]; | 881 | char name[XT_TABLE_MAXNAMELEN]; |
899 | struct xt_table *t; | 882 | struct xt_table *t; |
900 | int ret; | 883 | int ret; |
901 | 884 | ||
902 | if (*len != sizeof(struct arpt_getinfo)) { | 885 | if (*len != sizeof(struct arpt_getinfo)) { |
903 | duprintf("length %u != %Zu\n", *len, | 886 | duprintf("length %u != %Zu\n", *len, |
904 | sizeof(struct arpt_getinfo)); | 887 | sizeof(struct arpt_getinfo)); |
905 | return -EINVAL; | 888 | return -EINVAL; |
906 | } | 889 | } |
907 | 890 | ||
908 | if (copy_from_user(name, user, sizeof(name)) != 0) | 891 | if (copy_from_user(name, user, sizeof(name)) != 0) |
909 | return -EFAULT; | 892 | return -EFAULT; |
910 | 893 | ||
911 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; | 894 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; |
912 | #ifdef CONFIG_COMPAT | 895 | #ifdef CONFIG_COMPAT |
913 | if (compat) | 896 | if (compat) |
914 | xt_compat_lock(NFPROTO_ARP); | 897 | xt_compat_lock(NFPROTO_ARP); |
915 | #endif | 898 | #endif |
916 | t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), | 899 | t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), |
917 | "arptable_%s", name); | 900 | "arptable_%s", name); |
918 | if (t && !IS_ERR(t)) { | 901 | if (t && !IS_ERR(t)) { |
919 | struct arpt_getinfo info; | 902 | struct arpt_getinfo info; |
920 | const struct xt_table_info *private = t->private; | 903 | const struct xt_table_info *private = t->private; |
921 | #ifdef CONFIG_COMPAT | 904 | #ifdef CONFIG_COMPAT |
922 | struct xt_table_info tmp; | 905 | struct xt_table_info tmp; |
923 | 906 | ||
924 | if (compat) { | 907 | if (compat) { |
925 | ret = compat_table_info(private, &tmp); | 908 | ret = compat_table_info(private, &tmp); |
926 | xt_compat_flush_offsets(NFPROTO_ARP); | 909 | xt_compat_flush_offsets(NFPROTO_ARP); |
927 | private = &tmp; | 910 | private = &tmp; |
928 | } | 911 | } |
929 | #endif | 912 | #endif |
930 | memset(&info, 0, sizeof(info)); | 913 | memset(&info, 0, sizeof(info)); |
931 | info.valid_hooks = t->valid_hooks; | 914 | info.valid_hooks = t->valid_hooks; |
932 | memcpy(info.hook_entry, private->hook_entry, | 915 | memcpy(info.hook_entry, private->hook_entry, |
933 | sizeof(info.hook_entry)); | 916 | sizeof(info.hook_entry)); |
934 | memcpy(info.underflow, private->underflow, | 917 | memcpy(info.underflow, private->underflow, |
935 | sizeof(info.underflow)); | 918 | sizeof(info.underflow)); |
936 | info.num_entries = private->number; | 919 | info.num_entries = private->number; |
937 | info.size = private->size; | 920 | info.size = private->size; |
938 | strcpy(info.name, name); | 921 | strcpy(info.name, name); |
939 | 922 | ||
940 | if (copy_to_user(user, &info, *len) != 0) | 923 | if (copy_to_user(user, &info, *len) != 0) |
941 | ret = -EFAULT; | 924 | ret = -EFAULT; |
942 | else | 925 | else |
943 | ret = 0; | 926 | ret = 0; |
944 | xt_table_unlock(t); | 927 | xt_table_unlock(t); |
945 | module_put(t->me); | 928 | module_put(t->me); |
946 | } else | 929 | } else |
947 | ret = t ? PTR_ERR(t) : -ENOENT; | 930 | ret = t ? PTR_ERR(t) : -ENOENT; |
948 | #ifdef CONFIG_COMPAT | 931 | #ifdef CONFIG_COMPAT |
949 | if (compat) | 932 | if (compat) |
950 | xt_compat_unlock(NFPROTO_ARP); | 933 | xt_compat_unlock(NFPROTO_ARP); |
951 | #endif | 934 | #endif |
952 | return ret; | 935 | return ret; |
953 | } | 936 | } |
954 | 937 | ||
955 | static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, | 938 | static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, |
956 | const int *len) | 939 | const int *len) |
957 | { | 940 | { |
958 | int ret; | 941 | int ret; |
959 | struct arpt_get_entries get; | 942 | struct arpt_get_entries get; |
960 | struct xt_table *t; | 943 | struct xt_table *t; |
961 | 944 | ||
962 | if (*len < sizeof(get)) { | 945 | if (*len < sizeof(get)) { |
963 | duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); | 946 | duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); |
964 | return -EINVAL; | 947 | return -EINVAL; |
965 | } | 948 | } |
966 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 949 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
967 | return -EFAULT; | 950 | return -EFAULT; |
968 | if (*len != sizeof(struct arpt_get_entries) + get.size) { | 951 | if (*len != sizeof(struct arpt_get_entries) + get.size) { |
969 | duprintf("get_entries: %u != %Zu\n", *len, | 952 | duprintf("get_entries: %u != %Zu\n", *len, |
970 | sizeof(struct arpt_get_entries) + get.size); | 953 | sizeof(struct arpt_get_entries) + get.size); |
971 | return -EINVAL; | 954 | return -EINVAL; |
972 | } | 955 | } |
973 | 956 | ||
974 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 957 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
975 | if (t && !IS_ERR(t)) { | 958 | if (t && !IS_ERR(t)) { |
976 | const struct xt_table_info *private = t->private; | 959 | const struct xt_table_info *private = t->private; |
977 | 960 | ||
978 | duprintf("t->private->number = %u\n", | 961 | duprintf("t->private->number = %u\n", |
979 | private->number); | 962 | private->number); |
980 | if (get.size == private->size) | 963 | if (get.size == private->size) |
981 | ret = copy_entries_to_user(private->size, | 964 | ret = copy_entries_to_user(private->size, |
982 | t, uptr->entrytable); | 965 | t, uptr->entrytable); |
983 | else { | 966 | else { |
984 | duprintf("get_entries: I've got %u not %u!\n", | 967 | duprintf("get_entries: I've got %u not %u!\n", |
985 | private->size, get.size); | 968 | private->size, get.size); |
986 | ret = -EAGAIN; | 969 | ret = -EAGAIN; |
987 | } | 970 | } |
988 | module_put(t->me); | 971 | module_put(t->me); |
989 | xt_table_unlock(t); | 972 | xt_table_unlock(t); |
990 | } else | 973 | } else |
991 | ret = t ? PTR_ERR(t) : -ENOENT; | 974 | ret = t ? PTR_ERR(t) : -ENOENT; |
992 | 975 | ||
993 | return ret; | 976 | return ret; |
994 | } | 977 | } |
995 | 978 | ||
996 | static int __do_replace(struct net *net, const char *name, | 979 | static int __do_replace(struct net *net, const char *name, |
997 | unsigned int valid_hooks, | 980 | unsigned int valid_hooks, |
998 | struct xt_table_info *newinfo, | 981 | struct xt_table_info *newinfo, |
999 | unsigned int num_counters, | 982 | unsigned int num_counters, |
1000 | void __user *counters_ptr) | 983 | void __user *counters_ptr) |
1001 | { | 984 | { |
1002 | int ret; | 985 | int ret; |
1003 | struct xt_table *t; | 986 | struct xt_table *t; |
1004 | struct xt_table_info *oldinfo; | 987 | struct xt_table_info *oldinfo; |
1005 | struct xt_counters *counters; | 988 | struct xt_counters *counters; |
1006 | void *loc_cpu_old_entry; | 989 | void *loc_cpu_old_entry; |
1007 | struct arpt_entry *iter; | 990 | struct arpt_entry *iter; |
1008 | 991 | ||
1009 | ret = 0; | 992 | ret = 0; |
1010 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 993 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1011 | if (!counters) { | 994 | if (!counters) { |
1012 | ret = -ENOMEM; | 995 | ret = -ENOMEM; |
1013 | goto out; | 996 | goto out; |
1014 | } | 997 | } |
1015 | 998 | ||
1016 | t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), | 999 | t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), |
1017 | "arptable_%s", name); | 1000 | "arptable_%s", name); |
1018 | if (!t || IS_ERR(t)) { | 1001 | if (!t || IS_ERR(t)) { |
1019 | ret = t ? PTR_ERR(t) : -ENOENT; | 1002 | ret = t ? PTR_ERR(t) : -ENOENT; |
1020 | goto free_newinfo_counters_untrans; | 1003 | goto free_newinfo_counters_untrans; |
1021 | } | 1004 | } |
1022 | 1005 | ||
1023 | /* You lied! */ | 1006 | /* You lied! */ |
1024 | if (valid_hooks != t->valid_hooks) { | 1007 | if (valid_hooks != t->valid_hooks) { |
1025 | duprintf("Valid hook crap: %08X vs %08X\n", | 1008 | duprintf("Valid hook crap: %08X vs %08X\n", |
1026 | valid_hooks, t->valid_hooks); | 1009 | valid_hooks, t->valid_hooks); |
1027 | ret = -EINVAL; | 1010 | ret = -EINVAL; |
1028 | goto put_module; | 1011 | goto put_module; |
1029 | } | 1012 | } |
1030 | 1013 | ||
1031 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); | 1014 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); |
1032 | if (!oldinfo) | 1015 | if (!oldinfo) |
1033 | goto put_module; | 1016 | goto put_module; |
1034 | 1017 | ||
1035 | /* Update module usage count based on number of rules */ | 1018 | /* Update module usage count based on number of rules */ |
1036 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", | 1019 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", |
1037 | oldinfo->number, oldinfo->initial_entries, newinfo->number); | 1020 | oldinfo->number, oldinfo->initial_entries, newinfo->number); |
1038 | if ((oldinfo->number > oldinfo->initial_entries) || | 1021 | if ((oldinfo->number > oldinfo->initial_entries) || |
1039 | (newinfo->number <= oldinfo->initial_entries)) | 1022 | (newinfo->number <= oldinfo->initial_entries)) |
1040 | module_put(t->me); | 1023 | module_put(t->me); |
1041 | if ((oldinfo->number > oldinfo->initial_entries) && | 1024 | if ((oldinfo->number > oldinfo->initial_entries) && |
1042 | (newinfo->number <= oldinfo->initial_entries)) | 1025 | (newinfo->number <= oldinfo->initial_entries)) |
1043 | module_put(t->me); | 1026 | module_put(t->me); |
1044 | 1027 | ||
1045 | /* Get the old counters, and synchronize with replace */ | 1028 | /* Get the old counters, and synchronize with replace */ |
1046 | get_counters(oldinfo, counters); | 1029 | get_counters(oldinfo, counters); |
1047 | 1030 | ||
1048 | /* Decrease module usage counts and free resource */ | 1031 | /* Decrease module usage counts and free resource */ |
1049 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1032 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1050 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) | 1033 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) |
1051 | cleanup_entry(iter); | 1034 | cleanup_entry(iter); |
1052 | 1035 | ||
1053 | xt_free_table_info(oldinfo); | 1036 | xt_free_table_info(oldinfo); |
1054 | if (copy_to_user(counters_ptr, counters, | 1037 | if (copy_to_user(counters_ptr, counters, |
1055 | sizeof(struct xt_counters) * num_counters) != 0) | 1038 | sizeof(struct xt_counters) * num_counters) != 0) |
1056 | ret = -EFAULT; | 1039 | ret = -EFAULT; |
1057 | vfree(counters); | 1040 | vfree(counters); |
1058 | xt_table_unlock(t); | 1041 | xt_table_unlock(t); |
1059 | return ret; | 1042 | return ret; |
1060 | 1043 | ||
1061 | put_module: | 1044 | put_module: |
1062 | module_put(t->me); | 1045 | module_put(t->me); |
1063 | xt_table_unlock(t); | 1046 | xt_table_unlock(t); |
1064 | free_newinfo_counters_untrans: | 1047 | free_newinfo_counters_untrans: |
1065 | vfree(counters); | 1048 | vfree(counters); |
1066 | out: | 1049 | out: |
1067 | return ret; | 1050 | return ret; |
1068 | } | 1051 | } |
1069 | 1052 | ||
1070 | static int do_replace(struct net *net, const void __user *user, | 1053 | static int do_replace(struct net *net, const void __user *user, |
1071 | unsigned int len) | 1054 | unsigned int len) |
1072 | { | 1055 | { |
1073 | int ret; | 1056 | int ret; |
1074 | struct arpt_replace tmp; | 1057 | struct arpt_replace tmp; |
1075 | struct xt_table_info *newinfo; | 1058 | struct xt_table_info *newinfo; |
1076 | void *loc_cpu_entry; | 1059 | void *loc_cpu_entry; |
1077 | struct arpt_entry *iter; | 1060 | struct arpt_entry *iter; |
1078 | 1061 | ||
1079 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1062 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1080 | return -EFAULT; | 1063 | return -EFAULT; |
1081 | 1064 | ||
1082 | /* overflow check */ | 1065 | /* overflow check */ |
1083 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1066 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1084 | return -ENOMEM; | 1067 | return -ENOMEM; |
1085 | 1068 | ||
1086 | newinfo = xt_alloc_table_info(tmp.size); | 1069 | newinfo = xt_alloc_table_info(tmp.size); |
1087 | if (!newinfo) | 1070 | if (!newinfo) |
1088 | return -ENOMEM; | 1071 | return -ENOMEM; |
1089 | 1072 | ||
1090 | /* choose the copy that is on our node/cpu */ | 1073 | /* choose the copy that is on our node/cpu */ |
1091 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1074 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1092 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1075 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1093 | tmp.size) != 0) { | 1076 | tmp.size) != 0) { |
1094 | ret = -EFAULT; | 1077 | ret = -EFAULT; |
1095 | goto free_newinfo; | 1078 | goto free_newinfo; |
1096 | } | 1079 | } |
1097 | 1080 | ||
1098 | ret = translate_table(newinfo, loc_cpu_entry, &tmp); | 1081 | ret = translate_table(newinfo, loc_cpu_entry, &tmp); |
1099 | if (ret != 0) | 1082 | if (ret != 0) |
1100 | goto free_newinfo; | 1083 | goto free_newinfo; |
1101 | 1084 | ||
1102 | duprintf("arp_tables: Translated table\n"); | 1085 | duprintf("arp_tables: Translated table\n"); |
1103 | 1086 | ||
1104 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1087 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1105 | tmp.num_counters, tmp.counters); | 1088 | tmp.num_counters, tmp.counters); |
1106 | if (ret) | 1089 | if (ret) |
1107 | goto free_newinfo_untrans; | 1090 | goto free_newinfo_untrans; |
1108 | return 0; | 1091 | return 0; |
1109 | 1092 | ||
1110 | free_newinfo_untrans: | 1093 | free_newinfo_untrans: |
1111 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1094 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1112 | cleanup_entry(iter); | 1095 | cleanup_entry(iter); |
1113 | free_newinfo: | 1096 | free_newinfo: |
1114 | xt_free_table_info(newinfo); | 1097 | xt_free_table_info(newinfo); |
1115 | return ret; | 1098 | return ret; |
1116 | } | 1099 | } |
1117 | 1100 | ||
1118 | static int do_add_counters(struct net *net, const void __user *user, | 1101 | static int do_add_counters(struct net *net, const void __user *user, |
1119 | unsigned int len, int compat) | 1102 | unsigned int len, int compat) |
1120 | { | 1103 | { |
1121 | unsigned int i, curcpu; | 1104 | unsigned int i, curcpu; |
1122 | struct xt_counters_info tmp; | 1105 | struct xt_counters_info tmp; |
1123 | struct xt_counters *paddc; | 1106 | struct xt_counters *paddc; |
1124 | unsigned int num_counters; | 1107 | unsigned int num_counters; |
1125 | const char *name; | 1108 | const char *name; |
1126 | int size; | 1109 | int size; |
1127 | void *ptmp; | 1110 | void *ptmp; |
1128 | struct xt_table *t; | 1111 | struct xt_table *t; |
1129 | const struct xt_table_info *private; | 1112 | const struct xt_table_info *private; |
1130 | int ret = 0; | 1113 | int ret = 0; |
1131 | void *loc_cpu_entry; | 1114 | void *loc_cpu_entry; |
1132 | struct arpt_entry *iter; | 1115 | struct arpt_entry *iter; |
1133 | #ifdef CONFIG_COMPAT | 1116 | #ifdef CONFIG_COMPAT |
1134 | struct compat_xt_counters_info compat_tmp; | 1117 | struct compat_xt_counters_info compat_tmp; |
1135 | 1118 | ||
1136 | if (compat) { | 1119 | if (compat) { |
1137 | ptmp = &compat_tmp; | 1120 | ptmp = &compat_tmp; |
1138 | size = sizeof(struct compat_xt_counters_info); | 1121 | size = sizeof(struct compat_xt_counters_info); |
1139 | } else | 1122 | } else |
1140 | #endif | 1123 | #endif |
1141 | { | 1124 | { |
1142 | ptmp = &tmp; | 1125 | ptmp = &tmp; |
1143 | size = sizeof(struct xt_counters_info); | 1126 | size = sizeof(struct xt_counters_info); |
1144 | } | 1127 | } |
1145 | 1128 | ||
1146 | if (copy_from_user(ptmp, user, size) != 0) | 1129 | if (copy_from_user(ptmp, user, size) != 0) |
1147 | return -EFAULT; | 1130 | return -EFAULT; |
1148 | 1131 | ||
1149 | #ifdef CONFIG_COMPAT | 1132 | #ifdef CONFIG_COMPAT |
1150 | if (compat) { | 1133 | if (compat) { |
1151 | num_counters = compat_tmp.num_counters; | 1134 | num_counters = compat_tmp.num_counters; |
1152 | name = compat_tmp.name; | 1135 | name = compat_tmp.name; |
1153 | } else | 1136 | } else |
1154 | #endif | 1137 | #endif |
1155 | { | 1138 | { |
1156 | num_counters = tmp.num_counters; | 1139 | num_counters = tmp.num_counters; |
1157 | name = tmp.name; | 1140 | name = tmp.name; |
1158 | } | 1141 | } |
1159 | 1142 | ||
1160 | if (len != size + num_counters * sizeof(struct xt_counters)) | 1143 | if (len != size + num_counters * sizeof(struct xt_counters)) |
1161 | return -EINVAL; | 1144 | return -EINVAL; |
1162 | 1145 | ||
1163 | paddc = vmalloc(len - size); | 1146 | paddc = vmalloc(len - size); |
1164 | if (!paddc) | 1147 | if (!paddc) |
1165 | return -ENOMEM; | 1148 | return -ENOMEM; |
1166 | 1149 | ||
1167 | if (copy_from_user(paddc, user + size, len - size) != 0) { | 1150 | if (copy_from_user(paddc, user + size, len - size) != 0) { |
1168 | ret = -EFAULT; | 1151 | ret = -EFAULT; |
1169 | goto free; | 1152 | goto free; |
1170 | } | 1153 | } |
1171 | 1154 | ||
1172 | t = xt_find_table_lock(net, NFPROTO_ARP, name); | 1155 | t = xt_find_table_lock(net, NFPROTO_ARP, name); |
1173 | if (!t || IS_ERR(t)) { | 1156 | if (!t || IS_ERR(t)) { |
1174 | ret = t ? PTR_ERR(t) : -ENOENT; | 1157 | ret = t ? PTR_ERR(t) : -ENOENT; |
1175 | goto free; | 1158 | goto free; |
1176 | } | 1159 | } |
1177 | 1160 | ||
1178 | local_bh_disable(); | 1161 | local_bh_disable(); |
1179 | private = t->private; | 1162 | private = t->private; |
1180 | if (private->number != num_counters) { | 1163 | if (private->number != num_counters) { |
1181 | ret = -EINVAL; | 1164 | ret = -EINVAL; |
1182 | goto unlock_up_free; | 1165 | goto unlock_up_free; |
1183 | } | 1166 | } |
1184 | 1167 | ||
1185 | i = 0; | 1168 | i = 0; |
1186 | /* Choose the copy that is on our node */ | 1169 | /* Choose the copy that is on our node */ |
1187 | curcpu = smp_processor_id(); | 1170 | curcpu = smp_processor_id(); |
1188 | loc_cpu_entry = private->entries[curcpu]; | 1171 | loc_cpu_entry = private->entries[curcpu]; |
1189 | xt_info_wrlock(curcpu); | 1172 | xt_info_wrlock(curcpu); |
1190 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { | 1173 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { |
1191 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); | 1174 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); |
1192 | ++i; | 1175 | ++i; |
1193 | } | 1176 | } |
1194 | xt_info_wrunlock(curcpu); | 1177 | xt_info_wrunlock(curcpu); |
1195 | unlock_up_free: | 1178 | unlock_up_free: |
1196 | local_bh_enable(); | 1179 | local_bh_enable(); |
1197 | xt_table_unlock(t); | 1180 | xt_table_unlock(t); |
1198 | module_put(t->me); | 1181 | module_put(t->me); |
1199 | free: | 1182 | free: |
1200 | vfree(paddc); | 1183 | vfree(paddc); |
1201 | 1184 | ||
1202 | return ret; | 1185 | return ret; |
1203 | } | 1186 | } |
1204 | 1187 | ||
1205 | #ifdef CONFIG_COMPAT | 1188 | #ifdef CONFIG_COMPAT |
1206 | static inline void compat_release_entry(struct compat_arpt_entry *e) | 1189 | static inline void compat_release_entry(struct compat_arpt_entry *e) |
1207 | { | 1190 | { |
1208 | struct xt_entry_target *t; | 1191 | struct xt_entry_target *t; |
1209 | 1192 | ||
1210 | t = compat_arpt_get_target(e); | 1193 | t = compat_arpt_get_target(e); |
1211 | module_put(t->u.kernel.target->me); | 1194 | module_put(t->u.kernel.target->me); |
1212 | } | 1195 | } |
1213 | 1196 | ||
1214 | static inline int | 1197 | static inline int |
1215 | check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | 1198 | check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, |
1216 | struct xt_table_info *newinfo, | 1199 | struct xt_table_info *newinfo, |
1217 | unsigned int *size, | 1200 | unsigned int *size, |
1218 | const unsigned char *base, | 1201 | const unsigned char *base, |
1219 | const unsigned char *limit, | 1202 | const unsigned char *limit, |
1220 | const unsigned int *hook_entries, | 1203 | const unsigned int *hook_entries, |
1221 | const unsigned int *underflows, | 1204 | const unsigned int *underflows, |
1222 | const char *name) | 1205 | const char *name) |
1223 | { | 1206 | { |
1224 | struct xt_entry_target *t; | 1207 | struct xt_entry_target *t; |
1225 | struct xt_target *target; | 1208 | struct xt_target *target; |
1226 | unsigned int entry_offset; | 1209 | unsigned int entry_offset; |
1227 | int ret, off, h; | 1210 | int ret, off, h; |
1228 | 1211 | ||
1229 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1212 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1230 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || | 1213 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || |
1231 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { | 1214 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { |
1232 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1215 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1233 | return -EINVAL; | 1216 | return -EINVAL; |
1234 | } | 1217 | } |
1235 | 1218 | ||
1236 | if (e->next_offset < sizeof(struct compat_arpt_entry) + | 1219 | if (e->next_offset < sizeof(struct compat_arpt_entry) + |
1237 | sizeof(struct compat_xt_entry_target)) { | 1220 | sizeof(struct compat_xt_entry_target)) { |
1238 | duprintf("checking: element %p size %u\n", | 1221 | duprintf("checking: element %p size %u\n", |
1239 | e, e->next_offset); | 1222 | e, e->next_offset); |
1240 | return -EINVAL; | 1223 | return -EINVAL; |
1241 | } | 1224 | } |
1242 | 1225 | ||
1243 | /* For purposes of check_entry casting the compat entry is fine */ | 1226 | /* For purposes of check_entry casting the compat entry is fine */ |
1244 | ret = check_entry((struct arpt_entry *)e, name); | 1227 | ret = check_entry((struct arpt_entry *)e, name); |
1245 | if (ret) | 1228 | if (ret) |
1246 | return ret; | 1229 | return ret; |
1247 | 1230 | ||
1248 | off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); | 1231 | off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); |
1249 | entry_offset = (void *)e - (void *)base; | 1232 | entry_offset = (void *)e - (void *)base; |
1250 | 1233 | ||
1251 | t = compat_arpt_get_target(e); | 1234 | t = compat_arpt_get_target(e); |
1252 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, | 1235 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
1253 | t->u.user.revision); | 1236 | t->u.user.revision); |
1254 | if (IS_ERR(target)) { | 1237 | if (IS_ERR(target)) { |
1255 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", | 1238 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
1256 | t->u.user.name); | 1239 | t->u.user.name); |
1257 | ret = PTR_ERR(target); | 1240 | ret = PTR_ERR(target); |
1258 | goto out; | 1241 | goto out; |
1259 | } | 1242 | } |
1260 | t->u.kernel.target = target; | 1243 | t->u.kernel.target = target; |
1261 | 1244 | ||
1262 | off += xt_compat_target_offset(target); | 1245 | off += xt_compat_target_offset(target); |
1263 | *size += off; | 1246 | *size += off; |
1264 | ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); | 1247 | ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); |
1265 | if (ret) | 1248 | if (ret) |
1266 | goto release_target; | 1249 | goto release_target; |
1267 | 1250 | ||
1268 | /* Check hooks & underflows */ | 1251 | /* Check hooks & underflows */ |
1269 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 1252 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
1270 | if ((unsigned char *)e - base == hook_entries[h]) | 1253 | if ((unsigned char *)e - base == hook_entries[h]) |
1271 | newinfo->hook_entry[h] = hook_entries[h]; | 1254 | newinfo->hook_entry[h] = hook_entries[h]; |
1272 | if ((unsigned char *)e - base == underflows[h]) | 1255 | if ((unsigned char *)e - base == underflows[h]) |
1273 | newinfo->underflow[h] = underflows[h]; | 1256 | newinfo->underflow[h] = underflows[h]; |
1274 | } | 1257 | } |
1275 | 1258 | ||
1276 | /* Clear counters and comefrom */ | 1259 | /* Clear counters and comefrom */ |
1277 | memset(&e->counters, 0, sizeof(e->counters)); | 1260 | memset(&e->counters, 0, sizeof(e->counters)); |
1278 | e->comefrom = 0; | 1261 | e->comefrom = 0; |
1279 | return 0; | 1262 | return 0; |
1280 | 1263 | ||
1281 | release_target: | 1264 | release_target: |
1282 | module_put(t->u.kernel.target->me); | 1265 | module_put(t->u.kernel.target->me); |
1283 | out: | 1266 | out: |
1284 | return ret; | 1267 | return ret; |
1285 | } | 1268 | } |
1286 | 1269 | ||
1287 | static int | 1270 | static int |
1288 | compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, | 1271 | compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, |
1289 | unsigned int *size, const char *name, | 1272 | unsigned int *size, const char *name, |
1290 | struct xt_table_info *newinfo, unsigned char *base) | 1273 | struct xt_table_info *newinfo, unsigned char *base) |
1291 | { | 1274 | { |
1292 | struct xt_entry_target *t; | 1275 | struct xt_entry_target *t; |
1293 | struct xt_target *target; | 1276 | struct xt_target *target; |
1294 | struct arpt_entry *de; | 1277 | struct arpt_entry *de; |
1295 | unsigned int origsize; | 1278 | unsigned int origsize; |
1296 | int ret, h; | 1279 | int ret, h; |
1297 | 1280 | ||
1298 | ret = 0; | 1281 | ret = 0; |
1299 | origsize = *size; | 1282 | origsize = *size; |
1300 | de = (struct arpt_entry *)*dstptr; | 1283 | de = (struct arpt_entry *)*dstptr; |
1301 | memcpy(de, e, sizeof(struct arpt_entry)); | 1284 | memcpy(de, e, sizeof(struct arpt_entry)); |
1302 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); | 1285 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); |
1303 | 1286 | ||
1304 | *dstptr += sizeof(struct arpt_entry); | 1287 | *dstptr += sizeof(struct arpt_entry); |
1305 | *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); | 1288 | *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); |
1306 | 1289 | ||
1307 | de->target_offset = e->target_offset - (origsize - *size); | 1290 | de->target_offset = e->target_offset - (origsize - *size); |
1308 | t = compat_arpt_get_target(e); | 1291 | t = compat_arpt_get_target(e); |
1309 | target = t->u.kernel.target; | 1292 | target = t->u.kernel.target; |
1310 | xt_compat_target_from_user(t, dstptr, size); | 1293 | xt_compat_target_from_user(t, dstptr, size); |
1311 | 1294 | ||
1312 | de->next_offset = e->next_offset - (origsize - *size); | 1295 | de->next_offset = e->next_offset - (origsize - *size); |
1313 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 1296 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
1314 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) | 1297 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) |
1315 | newinfo->hook_entry[h] -= origsize - *size; | 1298 | newinfo->hook_entry[h] -= origsize - *size; |
1316 | if ((unsigned char *)de - base < newinfo->underflow[h]) | 1299 | if ((unsigned char *)de - base < newinfo->underflow[h]) |
1317 | newinfo->underflow[h] -= origsize - *size; | 1300 | newinfo->underflow[h] -= origsize - *size; |
1318 | } | 1301 | } |
1319 | return ret; | 1302 | return ret; |
1320 | } | 1303 | } |
1321 | 1304 | ||
/*
 * Translate a table image supplied by 32-bit userland (compat layout)
 * into the kernel's native arpt layout.
 *
 * On success, *pinfo/*pentry0 are replaced by a freshly allocated
 * native-layout table (the old compat info/blob is freed) and 0 is
 * returned.  On failure a negative errno is returned and the caller's
 * compat blob has had its matches/targets released.
 *
 * @name:         table name (for target checking / diagnostics)
 * @valid_hooks:  bitmask of hooks this table may attach to
 * @pinfo:        in: compat xt_table_info; out: native one on success
 * @pentry0:      in: compat entry blob; out: native entry blob on success
 * @total_size:   byte size of the compat entry blob
 * @number:       expected number of entries
 * @hook_entries: per-hook entry offsets from userspace
 * @underflows:   per-hook underflow offsets from userspace
 */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	/* Serialize against other compat translations: the offset map kept
	 * by xt_compat_* is per-protocol global state. */
	xt_compat_lock(NFPROTO_ARP);
	/* Walk through entries, checking offsets.  j counts entries whose
	 * matches/targets were successfully looked up (and must later be
	 * released on error). */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* 'size' now holds the native-layout size computed during the
	 * first pass (entries grow when converted from compat). */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* Second pass: convert each compat entry into this CPU's copy,
	 * fixing up hook/underflow offsets inside newinfo as we go. */
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = check_target(iter1, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(arpt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		/* NOTE(review): this walks the compat blob entry0 bounded by
		 * newinfo->size (the native-layout size), not total_size —
		 * the two sizes differ between layouts; verify intent. */
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	/* Hand the native table back and free the compat one. */
	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release the j entries whose extensions were looked up in the
	 * first pass but never checked. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	goto out;
}
1476 | 1459 | ||
/*
 * 32-bit userland's view of struct arpt_replace (ARPT_SO_SET_REPLACE
 * payload).  Pointer-sized members use compat types so a 64-bit kernel
 * can parse requests from 32-bit binaries.
 */
struct compat_arpt_replace {
	char name[XT_TABLE_MAXNAMELEN];		/* table to replace */
	u32 valid_hooks;			/* hook bitmask */
	u32 num_entries;			/* number of rules */
	u32 size;				/* byte size of entries[] */
	u32 hook_entry[NF_ARP_NUMHOOKS];	/* per-hook entry offsets */
	u32 underflow[NF_ARP_NUMHOOKS];		/* per-hook underflow offsets */
	u32 num_counters;			/* slots at 'counters' */
	compat_uptr_t counters;			/* 32-bit user pointer for old counters */
	struct compat_arpt_entry entries[0];	/* rule blob follows */
};
1488 | 1471 | ||
1489 | static int compat_do_replace(struct net *net, void __user *user, | 1472 | static int compat_do_replace(struct net *net, void __user *user, |
1490 | unsigned int len) | 1473 | unsigned int len) |
1491 | { | 1474 | { |
1492 | int ret; | 1475 | int ret; |
1493 | struct compat_arpt_replace tmp; | 1476 | struct compat_arpt_replace tmp; |
1494 | struct xt_table_info *newinfo; | 1477 | struct xt_table_info *newinfo; |
1495 | void *loc_cpu_entry; | 1478 | void *loc_cpu_entry; |
1496 | struct arpt_entry *iter; | 1479 | struct arpt_entry *iter; |
1497 | 1480 | ||
1498 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1481 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1499 | return -EFAULT; | 1482 | return -EFAULT; |
1500 | 1483 | ||
1501 | /* overflow check */ | 1484 | /* overflow check */ |
1502 | if (tmp.size >= INT_MAX / num_possible_cpus()) | 1485 | if (tmp.size >= INT_MAX / num_possible_cpus()) |
1503 | return -ENOMEM; | 1486 | return -ENOMEM; |
1504 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1487 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1505 | return -ENOMEM; | 1488 | return -ENOMEM; |
1506 | 1489 | ||
1507 | newinfo = xt_alloc_table_info(tmp.size); | 1490 | newinfo = xt_alloc_table_info(tmp.size); |
1508 | if (!newinfo) | 1491 | if (!newinfo) |
1509 | return -ENOMEM; | 1492 | return -ENOMEM; |
1510 | 1493 | ||
1511 | /* choose the copy that is on our node/cpu */ | 1494 | /* choose the copy that is on our node/cpu */ |
1512 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1495 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1513 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { | 1496 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { |
1514 | ret = -EFAULT; | 1497 | ret = -EFAULT; |
1515 | goto free_newinfo; | 1498 | goto free_newinfo; |
1516 | } | 1499 | } |
1517 | 1500 | ||
1518 | ret = translate_compat_table(tmp.name, tmp.valid_hooks, | 1501 | ret = translate_compat_table(tmp.name, tmp.valid_hooks, |
1519 | &newinfo, &loc_cpu_entry, tmp.size, | 1502 | &newinfo, &loc_cpu_entry, tmp.size, |
1520 | tmp.num_entries, tmp.hook_entry, | 1503 | tmp.num_entries, tmp.hook_entry, |
1521 | tmp.underflow); | 1504 | tmp.underflow); |
1522 | if (ret != 0) | 1505 | if (ret != 0) |
1523 | goto free_newinfo; | 1506 | goto free_newinfo; |
1524 | 1507 | ||
1525 | duprintf("compat_do_replace: Translated table\n"); | 1508 | duprintf("compat_do_replace: Translated table\n"); |
1526 | 1509 | ||
1527 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1510 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1528 | tmp.num_counters, compat_ptr(tmp.counters)); | 1511 | tmp.num_counters, compat_ptr(tmp.counters)); |
1529 | if (ret) | 1512 | if (ret) |
1530 | goto free_newinfo_untrans; | 1513 | goto free_newinfo_untrans; |
1531 | return 0; | 1514 | return 0; |
1532 | 1515 | ||
1533 | free_newinfo_untrans: | 1516 | free_newinfo_untrans: |
1534 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1517 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1535 | cleanup_entry(iter); | 1518 | cleanup_entry(iter); |
1536 | free_newinfo: | 1519 | free_newinfo: |
1537 | xt_free_table_info(newinfo); | 1520 | xt_free_table_info(newinfo); |
1538 | return ret; | 1521 | return ret; |
1539 | } | 1522 | } |
1540 | 1523 | ||
1541 | static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, | 1524 | static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, |
1542 | unsigned int len) | 1525 | unsigned int len) |
1543 | { | 1526 | { |
1544 | int ret; | 1527 | int ret; |
1545 | 1528 | ||
1546 | if (!capable(CAP_NET_ADMIN)) | 1529 | if (!capable(CAP_NET_ADMIN)) |
1547 | return -EPERM; | 1530 | return -EPERM; |
1548 | 1531 | ||
1549 | switch (cmd) { | 1532 | switch (cmd) { |
1550 | case ARPT_SO_SET_REPLACE: | 1533 | case ARPT_SO_SET_REPLACE: |
1551 | ret = compat_do_replace(sock_net(sk), user, len); | 1534 | ret = compat_do_replace(sock_net(sk), user, len); |
1552 | break; | 1535 | break; |
1553 | 1536 | ||
1554 | case ARPT_SO_SET_ADD_COUNTERS: | 1537 | case ARPT_SO_SET_ADD_COUNTERS: |
1555 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1538 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1556 | break; | 1539 | break; |
1557 | 1540 | ||
1558 | default: | 1541 | default: |
1559 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); | 1542 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); |
1560 | ret = -EINVAL; | 1543 | ret = -EINVAL; |
1561 | } | 1544 | } |
1562 | 1545 | ||
1563 | return ret; | 1546 | return ret; |
1564 | } | 1547 | } |
1565 | 1548 | ||
/*
 * Convert one native arpt_entry to the 32-bit compat layout and copy it
 * to userspace, attaching the counter snapshot for slot @i.
 *
 * @dstptr and @size track the userspace write cursor and remaining
 * space; both are advanced/shrunk as the (smaller) compat entry and its
 * converted target are written out.  Returns 0 or -EFAULT / the error
 * from the target conversion.
 */
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	origsize = *size;
	ce = (struct compat_arpt_entry __user *)*dstptr;
	/* Copy the fixed header (compat header is a prefix of the native
	 * one), then overwrite the counters field with the snapshot. */
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	/* Offsets shrink by however much the layout has contracted so far
	 * (origsize - *size). */
	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the offsets in the already-copied header. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1599 | 1582 | ||
/*
 * Dump all entries of @table to 32-bit userspace at @userptr in compat
 * layout, together with a counter snapshot taken by alloc_counters().
 *
 * @total_size is the native-layout size of the rule blob.  Returns 0 on
 * success or the first error from per-entry conversion; the counter
 * snapshot is always freed before returning.
 */
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;
	struct arpt_entry *iter;

	/* Snapshot counters summed over all CPUs (vmalloc'ed). */
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy on our node/cpu */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}
	vfree(counters);
	return ret;
}
1630 | 1613 | ||
/*
 * 32-bit userland's view of struct arpt_get_entries
 * (ARPT_SO_GET_ENTRIES request/reply header).
 */
struct compat_arpt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];		/* table to dump */
	compat_uint_t size;			/* byte size of entrytable[] */
	struct compat_arpt_entry entrytable[0];	/* rules follow */
};
1636 | 1619 | ||
1637 | static int compat_get_entries(struct net *net, | 1620 | static int compat_get_entries(struct net *net, |
1638 | struct compat_arpt_get_entries __user *uptr, | 1621 | struct compat_arpt_get_entries __user *uptr, |
1639 | int *len) | 1622 | int *len) |
1640 | { | 1623 | { |
1641 | int ret; | 1624 | int ret; |
1642 | struct compat_arpt_get_entries get; | 1625 | struct compat_arpt_get_entries get; |
1643 | struct xt_table *t; | 1626 | struct xt_table *t; |
1644 | 1627 | ||
1645 | if (*len < sizeof(get)) { | 1628 | if (*len < sizeof(get)) { |
1646 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); | 1629 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); |
1647 | return -EINVAL; | 1630 | return -EINVAL; |
1648 | } | 1631 | } |
1649 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1632 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1650 | return -EFAULT; | 1633 | return -EFAULT; |
1651 | if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { | 1634 | if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { |
1652 | duprintf("compat_get_entries: %u != %zu\n", | 1635 | duprintf("compat_get_entries: %u != %zu\n", |
1653 | *len, sizeof(get) + get.size); | 1636 | *len, sizeof(get) + get.size); |
1654 | return -EINVAL; | 1637 | return -EINVAL; |
1655 | } | 1638 | } |
1656 | 1639 | ||
1657 | xt_compat_lock(NFPROTO_ARP); | 1640 | xt_compat_lock(NFPROTO_ARP); |
1658 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 1641 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
1659 | if (t && !IS_ERR(t)) { | 1642 | if (t && !IS_ERR(t)) { |
1660 | const struct xt_table_info *private = t->private; | 1643 | const struct xt_table_info *private = t->private; |
1661 | struct xt_table_info info; | 1644 | struct xt_table_info info; |
1662 | 1645 | ||
1663 | duprintf("t->private->number = %u\n", private->number); | 1646 | duprintf("t->private->number = %u\n", private->number); |
1664 | ret = compat_table_info(private, &info); | 1647 | ret = compat_table_info(private, &info); |
1665 | if (!ret && get.size == info.size) { | 1648 | if (!ret && get.size == info.size) { |
1666 | ret = compat_copy_entries_to_user(private->size, | 1649 | ret = compat_copy_entries_to_user(private->size, |
1667 | t, uptr->entrytable); | 1650 | t, uptr->entrytable); |
1668 | } else if (!ret) { | 1651 | } else if (!ret) { |
1669 | duprintf("compat_get_entries: I've got %u not %u!\n", | 1652 | duprintf("compat_get_entries: I've got %u not %u!\n", |
1670 | private->size, get.size); | 1653 | private->size, get.size); |
1671 | ret = -EAGAIN; | 1654 | ret = -EAGAIN; |
1672 | } | 1655 | } |
1673 | xt_compat_flush_offsets(NFPROTO_ARP); | 1656 | xt_compat_flush_offsets(NFPROTO_ARP); |
1674 | module_put(t->me); | 1657 | module_put(t->me); |
1675 | xt_table_unlock(t); | 1658 | xt_table_unlock(t); |
1676 | } else | 1659 | } else |
1677 | ret = t ? PTR_ERR(t) : -ENOENT; | 1660 | ret = t ? PTR_ERR(t) : -ENOENT; |
1678 | 1661 | ||
1679 | xt_compat_unlock(NFPROTO_ARP); | 1662 | xt_compat_unlock(NFPROTO_ARP); |
1680 | return ret; | 1663 | return ret; |
1681 | } | 1664 | } |
1682 | 1665 | ||
/* Forward declaration: the compat get handler falls back to this for
 * commands that need no 32-bit translation. */
static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);
1684 | 1667 | ||
1685 | static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, | 1668 | static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, |
1686 | int *len) | 1669 | int *len) |
1687 | { | 1670 | { |
1688 | int ret; | 1671 | int ret; |
1689 | 1672 | ||
1690 | if (!capable(CAP_NET_ADMIN)) | 1673 | if (!capable(CAP_NET_ADMIN)) |
1691 | return -EPERM; | 1674 | return -EPERM; |
1692 | 1675 | ||
1693 | switch (cmd) { | 1676 | switch (cmd) { |
1694 | case ARPT_SO_GET_INFO: | 1677 | case ARPT_SO_GET_INFO: |
1695 | ret = get_info(sock_net(sk), user, len, 1); | 1678 | ret = get_info(sock_net(sk), user, len, 1); |
1696 | break; | 1679 | break; |
1697 | case ARPT_SO_GET_ENTRIES: | 1680 | case ARPT_SO_GET_ENTRIES: |
1698 | ret = compat_get_entries(sock_net(sk), user, len); | 1681 | ret = compat_get_entries(sock_net(sk), user, len); |
1699 | break; | 1682 | break; |
1700 | default: | 1683 | default: |
1701 | ret = do_arpt_get_ctl(sk, cmd, user, len); | 1684 | ret = do_arpt_get_ctl(sk, cmd, user, len); |
1702 | } | 1685 | } |
1703 | return ret; | 1686 | return ret; |
1704 | } | 1687 | } |
1705 | #endif | 1688 | #endif |
1706 | 1689 | ||
1707 | static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 1690 | static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
1708 | { | 1691 | { |
1709 | int ret; | 1692 | int ret; |
1710 | 1693 | ||
1711 | if (!capable(CAP_NET_ADMIN)) | 1694 | if (!capable(CAP_NET_ADMIN)) |
1712 | return -EPERM; | 1695 | return -EPERM; |
1713 | 1696 | ||
1714 | switch (cmd) { | 1697 | switch (cmd) { |
1715 | case ARPT_SO_SET_REPLACE: | 1698 | case ARPT_SO_SET_REPLACE: |
1716 | ret = do_replace(sock_net(sk), user, len); | 1699 | ret = do_replace(sock_net(sk), user, len); |
1717 | break; | 1700 | break; |
1718 | 1701 | ||
1719 | case ARPT_SO_SET_ADD_COUNTERS: | 1702 | case ARPT_SO_SET_ADD_COUNTERS: |
1720 | ret = do_add_counters(sock_net(sk), user, len, 0); | 1703 | ret = do_add_counters(sock_net(sk), user, len, 0); |
1721 | break; | 1704 | break; |
1722 | 1705 | ||
1723 | default: | 1706 | default: |
1724 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); | 1707 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); |
1725 | ret = -EINVAL; | 1708 | ret = -EINVAL; |
1726 | } | 1709 | } |
1727 | 1710 | ||
1728 | return ret; | 1711 | return ret; |
1729 | } | 1712 | } |
1730 | 1713 | ||
1731 | static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1714 | static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1732 | { | 1715 | { |
1733 | int ret; | 1716 | int ret; |
1734 | 1717 | ||
1735 | if (!capable(CAP_NET_ADMIN)) | 1718 | if (!capable(CAP_NET_ADMIN)) |
1736 | return -EPERM; | 1719 | return -EPERM; |
1737 | 1720 | ||
1738 | switch (cmd) { | 1721 | switch (cmd) { |
1739 | case ARPT_SO_GET_INFO: | 1722 | case ARPT_SO_GET_INFO: |
1740 | ret = get_info(sock_net(sk), user, len, 0); | 1723 | ret = get_info(sock_net(sk), user, len, 0); |
1741 | break; | 1724 | break; |
1742 | 1725 | ||
1743 | case ARPT_SO_GET_ENTRIES: | 1726 | case ARPT_SO_GET_ENTRIES: |
1744 | ret = get_entries(sock_net(sk), user, len); | 1727 | ret = get_entries(sock_net(sk), user, len); |
1745 | break; | 1728 | break; |
1746 | 1729 | ||
1747 | case ARPT_SO_GET_REVISION_TARGET: { | 1730 | case ARPT_SO_GET_REVISION_TARGET: { |
1748 | struct xt_get_revision rev; | 1731 | struct xt_get_revision rev; |
1749 | 1732 | ||
1750 | if (*len != sizeof(rev)) { | 1733 | if (*len != sizeof(rev)) { |
1751 | ret = -EINVAL; | 1734 | ret = -EINVAL; |
1752 | break; | 1735 | break; |
1753 | } | 1736 | } |
1754 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 1737 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
1755 | ret = -EFAULT; | 1738 | ret = -EFAULT; |
1756 | break; | 1739 | break; |
1757 | } | 1740 | } |
1758 | 1741 | ||
1759 | try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, | 1742 | try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, |
1760 | rev.revision, 1, &ret), | 1743 | rev.revision, 1, &ret), |
1761 | "arpt_%s", rev.name); | 1744 | "arpt_%s", rev.name); |
1762 | break; | 1745 | break; |
1763 | } | 1746 | } |
1764 | 1747 | ||
1765 | default: | 1748 | default: |
1766 | duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); | 1749 | duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); |
1767 | ret = -EINVAL; | 1750 | ret = -EINVAL; |
1768 | } | 1751 | } |
1769 | 1752 | ||
1770 | return ret; | 1753 | return ret; |
1771 | } | 1754 | } |
1772 | 1755 | ||
/*
 * arpt_register_table - create and register an ARP table in @net.
 *
 * Allocates a per-CPU table image, seeds it from the template rules in
 * @repl, translates/validates them, and registers the result with the
 * xtables core.  Returns the live xt_table or an ERR_PTR() on failure
 * (all allocations are released on the error paths).
 */
struct xt_table *arpt_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct arpt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* dummy initial private; xt_register_table swaps in newinfo */
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* validate rules and replicate to every other CPU's copy */
	ret = translate_table(newinfo, loc_cpu_entry, repl);
	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
1810 | 1793 | ||
/*
 * arpt_unregister_table - tear down a table created by
 * arpt_register_table(): unregister it, destroy every rule's
 * matches/targets, drop the module reference taken for user-added
 * rules, and free the table memory.
 */
void arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter);
	/* extra module ref was taken when user rules were loaded */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
1828 | 1811 | ||
/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		/* standard verdict target: payload is just the verdict int */
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),
		.family = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
		/* verdict values need translation for 32-bit userland */
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		/* error target: fires arpt_error if ever hit */
		.name = XT_ERROR_TARGET,
		.target = arpt_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_ARP,
	},
};
1848 | 1831 | ||
/* sockopt registration: routes ARPT_SO_{SET,GET}_* on PF_INET sockets
 * to the dispatchers above (with compat variants for 32-bit callers). */
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
1865 | 1848 | ||
1866 | static int __net_init arp_tables_net_init(struct net *net) | 1849 | static int __net_init arp_tables_net_init(struct net *net) |
1867 | { | 1850 | { |
1868 | return xt_proto_init(net, NFPROTO_ARP); | 1851 | return xt_proto_init(net, NFPROTO_ARP); |
1869 | } | 1852 | } |
1870 | 1853 | ||
1871 | static void __net_exit arp_tables_net_exit(struct net *net) | 1854 | static void __net_exit arp_tables_net_exit(struct net *net) |
1872 | { | 1855 | { |
1873 | xt_proto_fini(net, NFPROTO_ARP); | 1856 | xt_proto_fini(net, NFPROTO_ARP); |
1874 | } | 1857 | } |
1875 | 1858 | ||
1876 | static struct pernet_operations arp_tables_net_ops = { | 1859 | static struct pernet_operations arp_tables_net_ops = { |
1877 | .init = arp_tables_net_init, | 1860 | .init = arp_tables_net_init, |
1878 | .exit = arp_tables_net_exit, | 1861 | .exit = arp_tables_net_exit, |
1879 | }; | 1862 | }; |
1880 | 1863 | ||
1881 | static int __init arp_tables_init(void) | 1864 | static int __init arp_tables_init(void) |
1882 | { | 1865 | { |
1883 | int ret; | 1866 | int ret; |
1884 | 1867 | ||
1885 | ret = register_pernet_subsys(&arp_tables_net_ops); | 1868 | ret = register_pernet_subsys(&arp_tables_net_ops); |
1886 | if (ret < 0) | 1869 | if (ret < 0) |
1887 | goto err1; | 1870 | goto err1; |
1888 | 1871 | ||
1889 | /* Noone else will be downing sem now, so we won't sleep */ | 1872 | /* Noone else will be downing sem now, so we won't sleep */ |
1890 | ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1873 | ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1891 | if (ret < 0) | 1874 | if (ret < 0) |
1892 | goto err2; | 1875 | goto err2; |
1893 | 1876 | ||
1894 | /* Register setsockopt */ | 1877 | /* Register setsockopt */ |
1895 | ret = nf_register_sockopt(&arpt_sockopts); | 1878 | ret = nf_register_sockopt(&arpt_sockopts); |
1896 | if (ret < 0) | 1879 | if (ret < 0) |
1897 | goto err4; | 1880 | goto err4; |
1898 | 1881 | ||
1899 | printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); | 1882 | printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); |
1900 | return 0; | 1883 | return 0; |
1901 | 1884 | ||
1902 | err4: | 1885 | err4: |
1903 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1886 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1904 | err2: | 1887 | err2: |
1905 | unregister_pernet_subsys(&arp_tables_net_ops); | 1888 | unregister_pernet_subsys(&arp_tables_net_ops); |
1906 | err1: | 1889 | err1: |
1907 | return ret; | 1890 | return ret; |
1908 | } | 1891 | } |
1909 | 1892 | ||
1910 | static void __exit arp_tables_fini(void) | 1893 | static void __exit arp_tables_fini(void) |
1911 | { | 1894 | { |
1912 | nf_unregister_sockopt(&arpt_sockopts); | 1895 | nf_unregister_sockopt(&arpt_sockopts); |
1913 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1896 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1914 | unregister_pernet_subsys(&arp_tables_net_ops); | 1897 | unregister_pernet_subsys(&arp_tables_net_ops); |
1915 | } | 1898 | } |
net/ipv4/netfilter/ip_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code. | 2 | * Packet matching code. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/cache.h> | 12 | #include <linux/cache.h> |
13 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/icmp.h> | 19 | #include <linux/icmp.h> |
20 | #include <net/ip.h> | 20 | #include <net/ip.h> |
21 | #include <net/compat.h> | 21 | #include <net/compat.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/cpumask.h> | 26 | #include <linux/cpumask.h> |
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | #include <linux/netfilter_ipv4/ip_tables.h> | 29 | #include <linux/netfilter_ipv4/ip_tables.h> |
30 | #include <net/netfilter/nf_log.h> | 30 | #include <net/netfilter/nf_log.h> |
31 | #include "../../netfilter/xt_repldata.h" | 31 | #include "../../netfilter/xt_repldata.h" |
32 | 32 | ||
33 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
34 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 34 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
35 | MODULE_DESCRIPTION("IPv4 packet filter"); | 35 | MODULE_DESCRIPTION("IPv4 packet filter"); |
36 | 36 | ||
37 | /*#define DEBUG_IP_FIREWALL*/ | 37 | /*#define DEBUG_IP_FIREWALL*/ |
38 | /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ | 38 | /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ |
39 | /*#define DEBUG_IP_FIREWALL_USER*/ | 39 | /*#define DEBUG_IP_FIREWALL_USER*/ |
40 | 40 | ||
41 | #ifdef DEBUG_IP_FIREWALL | 41 | #ifdef DEBUG_IP_FIREWALL |
42 | #define dprintf(format, args...) pr_info(format , ## args) | 42 | #define dprintf(format, args...) pr_info(format , ## args) |
43 | #else | 43 | #else |
44 | #define dprintf(format, args...) | 44 | #define dprintf(format, args...) |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #ifdef DEBUG_IP_FIREWALL_USER | 47 | #ifdef DEBUG_IP_FIREWALL_USER |
48 | #define duprintf(format, args...) pr_info(format , ## args) | 48 | #define duprintf(format, args...) pr_info(format , ## args) |
49 | #else | 49 | #else |
50 | #define duprintf(format, args...) | 50 | #define duprintf(format, args...) |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef CONFIG_NETFILTER_DEBUG | 53 | #ifdef CONFIG_NETFILTER_DEBUG |
54 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) | 54 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) |
55 | #else | 55 | #else |
56 | #define IP_NF_ASSERT(x) | 56 | #define IP_NF_ASSERT(x) |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #if 0 | 59 | #if 0 |
60 | /* All the better to debug you with... */ | 60 | /* All the better to debug you with... */ |
61 | #define static | 61 | #define static |
62 | #define inline | 62 | #define inline |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | void *ipt_alloc_initial_table(const struct xt_table *info) | 65 | void *ipt_alloc_initial_table(const struct xt_table *info) |
66 | { | 66 | { |
67 | return xt_alloc_initial_table(ipt, IPT); | 67 | return xt_alloc_initial_table(ipt, IPT); |
68 | } | 68 | } |
69 | EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); | 69 | EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | We keep a set of rules for each CPU, so we can avoid write-locking | 72 | We keep a set of rules for each CPU, so we can avoid write-locking |
73 | them in the softirq when updating the counters and therefore | 73 | them in the softirq when updating the counters and therefore |
74 | only need to read-lock in the softirq; doing a write_lock_bh() in user | 74 | only need to read-lock in the softirq; doing a write_lock_bh() in user |
75 | context stops packets coming through and allows user context to read | 75 | context stops packets coming through and allows user context to read |
76 | the counters or update the rules. | 76 | the counters or update the rules. |
77 | 77 | ||
78 | Hence the start of any table is given by get_table() below. */ | 78 | Hence the start of any table is given by get_table() below. */ |
79 | 79 | ||
80 | /* Returns whether matches rule or not. */ | 80 | /* Returns whether matches rule or not. */ |
81 | /* Performance critical - called for every packet */ | 81 | /* Performance critical - called for every packet */ |
82 | static inline bool | 82 | static inline bool |
83 | ip_packet_match(const struct iphdr *ip, | 83 | ip_packet_match(const struct iphdr *ip, |
84 | const char *indev, | 84 | const char *indev, |
85 | const char *outdev, | 85 | const char *outdev, |
86 | const struct ipt_ip *ipinfo, | 86 | const struct ipt_ip *ipinfo, |
87 | int isfrag) | 87 | int isfrag) |
88 | { | 88 | { |
89 | unsigned long ret; | 89 | unsigned long ret; |
90 | 90 | ||
91 | #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) | 91 | #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) |
92 | 92 | ||
93 | if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, | 93 | if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, |
94 | IPT_INV_SRCIP) || | 94 | IPT_INV_SRCIP) || |
95 | FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, | 95 | FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, |
96 | IPT_INV_DSTIP)) { | 96 | IPT_INV_DSTIP)) { |
97 | dprintf("Source or dest mismatch.\n"); | 97 | dprintf("Source or dest mismatch.\n"); |
98 | 98 | ||
99 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", | 99 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", |
100 | &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, | 100 | &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, |
101 | ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); | 101 | ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); |
102 | dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", | 102 | dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", |
103 | &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, | 103 | &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, |
104 | ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); | 104 | ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); |
105 | return false; | 105 | return false; |
106 | } | 106 | } |
107 | 107 | ||
108 | ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); | 108 | ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); |
109 | 109 | ||
110 | if (FWINV(ret != 0, IPT_INV_VIA_IN)) { | 110 | if (FWINV(ret != 0, IPT_INV_VIA_IN)) { |
111 | dprintf("VIA in mismatch (%s vs %s).%s\n", | 111 | dprintf("VIA in mismatch (%s vs %s).%s\n", |
112 | indev, ipinfo->iniface, | 112 | indev, ipinfo->iniface, |
113 | ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":""); | 113 | ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":""); |
114 | return false; | 114 | return false; |
115 | } | 115 | } |
116 | 116 | ||
117 | ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); | 117 | ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); |
118 | 118 | ||
119 | if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { | 119 | if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { |
120 | dprintf("VIA out mismatch (%s vs %s).%s\n", | 120 | dprintf("VIA out mismatch (%s vs %s).%s\n", |
121 | outdev, ipinfo->outiface, | 121 | outdev, ipinfo->outiface, |
122 | ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":""); | 122 | ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":""); |
123 | return false; | 123 | return false; |
124 | } | 124 | } |
125 | 125 | ||
126 | /* Check specific protocol */ | 126 | /* Check specific protocol */ |
127 | if (ipinfo->proto && | 127 | if (ipinfo->proto && |
128 | FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { | 128 | FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { |
129 | dprintf("Packet protocol %hi does not match %hi.%s\n", | 129 | dprintf("Packet protocol %hi does not match %hi.%s\n", |
130 | ip->protocol, ipinfo->proto, | 130 | ip->protocol, ipinfo->proto, |
131 | ipinfo->invflags&IPT_INV_PROTO ? " (INV)":""); | 131 | ipinfo->invflags&IPT_INV_PROTO ? " (INV)":""); |
132 | return false; | 132 | return false; |
133 | } | 133 | } |
134 | 134 | ||
135 | /* If we have a fragment rule but the packet is not a fragment | 135 | /* If we have a fragment rule but the packet is not a fragment |
136 | * then we return zero */ | 136 | * then we return zero */ |
137 | if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { | 137 | if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { |
138 | dprintf("Fragment rule but not fragment.%s\n", | 138 | dprintf("Fragment rule but not fragment.%s\n", |
139 | ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : ""); | 139 | ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : ""); |
140 | return false; | 140 | return false; |
141 | } | 141 | } |
142 | 142 | ||
143 | return true; | 143 | return true; |
144 | } | 144 | } |
145 | 145 | ||
146 | static bool | 146 | static bool |
147 | ip_checkentry(const struct ipt_ip *ip) | 147 | ip_checkentry(const struct ipt_ip *ip) |
148 | { | 148 | { |
149 | if (ip->flags & ~IPT_F_MASK) { | 149 | if (ip->flags & ~IPT_F_MASK) { |
150 | duprintf("Unknown flag bits set: %08X\n", | 150 | duprintf("Unknown flag bits set: %08X\n", |
151 | ip->flags & ~IPT_F_MASK); | 151 | ip->flags & ~IPT_F_MASK); |
152 | return false; | 152 | return false; |
153 | } | 153 | } |
154 | if (ip->invflags & ~IPT_INV_MASK) { | 154 | if (ip->invflags & ~IPT_INV_MASK) { |
155 | duprintf("Unknown invflag bits set: %08X\n", | 155 | duprintf("Unknown invflag bits set: %08X\n", |
156 | ip->invflags & ~IPT_INV_MASK); | 156 | ip->invflags & ~IPT_INV_MASK); |
157 | return false; | 157 | return false; |
158 | } | 158 | } |
159 | return true; | 159 | return true; |
160 | } | 160 | } |
161 | 161 | ||
162 | static unsigned int | 162 | static unsigned int |
163 | ipt_error(struct sk_buff *skb, const struct xt_action_param *par) | 163 | ipt_error(struct sk_buff *skb, const struct xt_action_param *par) |
164 | { | 164 | { |
165 | if (net_ratelimit()) | 165 | if (net_ratelimit()) |
166 | pr_info("error: `%s'\n", (const char *)par->targinfo); | 166 | pr_info("error: `%s'\n", (const char *)par->targinfo); |
167 | 167 | ||
168 | return NF_DROP; | 168 | return NF_DROP; |
169 | } | 169 | } |
170 | 170 | ||
171 | /* Performance critical */ | 171 | /* Performance critical */ |
172 | static inline struct ipt_entry * | 172 | static inline struct ipt_entry * |
173 | get_entry(const void *base, unsigned int offset) | 173 | get_entry(const void *base, unsigned int offset) |
174 | { | 174 | { |
175 | return (struct ipt_entry *)(base + offset); | 175 | return (struct ipt_entry *)(base + offset); |
176 | } | 176 | } |
177 | 177 | ||
178 | /* All zeroes == unconditional rule. */ | 178 | /* All zeroes == unconditional rule. */ |
179 | /* Mildly perf critical (only if packet tracing is on) */ | 179 | /* Mildly perf critical (only if packet tracing is on) */ |
180 | static inline bool unconditional(const struct ipt_ip *ip) | 180 | static inline bool unconditional(const struct ipt_ip *ip) |
181 | { | 181 | { |
182 | static const struct ipt_ip uncond; | 182 | static const struct ipt_ip uncond; |
183 | 183 | ||
184 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; | 184 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; |
185 | #undef FWINV | 185 | #undef FWINV |
186 | } | 186 | } |
187 | 187 | ||
188 | /* for const-correctness */ | 188 | /* for const-correctness */ |
189 | static inline const struct xt_entry_target * | 189 | static inline const struct xt_entry_target * |
190 | ipt_get_target_c(const struct ipt_entry *e) | 190 | ipt_get_target_c(const struct ipt_entry *e) |
191 | { | 191 | { |
192 | return ipt_get_target((struct ipt_entry *)e); | 192 | return ipt_get_target((struct ipt_entry *)e); |
193 | } | 193 | } |
194 | 194 | ||
195 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 195 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
196 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 196 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
197 | static const char *const hooknames[] = { | 197 | static const char *const hooknames[] = { |
198 | [NF_INET_PRE_ROUTING] = "PREROUTING", | 198 | [NF_INET_PRE_ROUTING] = "PREROUTING", |
199 | [NF_INET_LOCAL_IN] = "INPUT", | 199 | [NF_INET_LOCAL_IN] = "INPUT", |
200 | [NF_INET_FORWARD] = "FORWARD", | 200 | [NF_INET_FORWARD] = "FORWARD", |
201 | [NF_INET_LOCAL_OUT] = "OUTPUT", | 201 | [NF_INET_LOCAL_OUT] = "OUTPUT", |
202 | [NF_INET_POST_ROUTING] = "POSTROUTING", | 202 | [NF_INET_POST_ROUTING] = "POSTROUTING", |
203 | }; | 203 | }; |
204 | 204 | ||
205 | enum nf_ip_trace_comments { | 205 | enum nf_ip_trace_comments { |
206 | NF_IP_TRACE_COMMENT_RULE, | 206 | NF_IP_TRACE_COMMENT_RULE, |
207 | NF_IP_TRACE_COMMENT_RETURN, | 207 | NF_IP_TRACE_COMMENT_RETURN, |
208 | NF_IP_TRACE_COMMENT_POLICY, | 208 | NF_IP_TRACE_COMMENT_POLICY, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static const char *const comments[] = { | 211 | static const char *const comments[] = { |
212 | [NF_IP_TRACE_COMMENT_RULE] = "rule", | 212 | [NF_IP_TRACE_COMMENT_RULE] = "rule", |
213 | [NF_IP_TRACE_COMMENT_RETURN] = "return", | 213 | [NF_IP_TRACE_COMMENT_RETURN] = "return", |
214 | [NF_IP_TRACE_COMMENT_POLICY] = "policy", | 214 | [NF_IP_TRACE_COMMENT_POLICY] = "policy", |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static struct nf_loginfo trace_loginfo = { | 217 | static struct nf_loginfo trace_loginfo = { |
218 | .type = NF_LOG_TYPE_LOG, | 218 | .type = NF_LOG_TYPE_LOG, |
219 | .u = { | 219 | .u = { |
220 | .log = { | 220 | .log = { |
221 | .level = 4, | 221 | .level = 4, |
222 | .logflags = NF_LOG_MASK, | 222 | .logflags = NF_LOG_MASK, |
223 | }, | 223 | }, |
224 | }, | 224 | }, |
225 | }; | 225 | }; |
226 | 226 | ||
227 | /* Mildly perf critical (only if packet tracing is on) */ | 227 | /* Mildly perf critical (only if packet tracing is on) */ |
228 | static inline int | 228 | static inline int |
229 | get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, | 229 | get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, |
230 | const char *hookname, const char **chainname, | 230 | const char *hookname, const char **chainname, |
231 | const char **comment, unsigned int *rulenum) | 231 | const char **comment, unsigned int *rulenum) |
232 | { | 232 | { |
233 | const struct xt_standard_target *t = (void *)ipt_get_target_c(s); | 233 | const struct xt_standard_target *t = (void *)ipt_get_target_c(s); |
234 | 234 | ||
235 | if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { | 235 | if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { |
236 | /* Head of user chain: ERROR target with chainname */ | 236 | /* Head of user chain: ERROR target with chainname */ |
237 | *chainname = t->target.data; | 237 | *chainname = t->target.data; |
238 | (*rulenum) = 0; | 238 | (*rulenum) = 0; |
239 | } else if (s == e) { | 239 | } else if (s == e) { |
240 | (*rulenum)++; | 240 | (*rulenum)++; |
241 | 241 | ||
242 | if (s->target_offset == sizeof(struct ipt_entry) && | 242 | if (s->target_offset == sizeof(struct ipt_entry) && |
243 | strcmp(t->target.u.kernel.target->name, | 243 | strcmp(t->target.u.kernel.target->name, |
244 | XT_STANDARD_TARGET) == 0 && | 244 | XT_STANDARD_TARGET) == 0 && |
245 | t->verdict < 0 && | 245 | t->verdict < 0 && |
246 | unconditional(&s->ip)) { | 246 | unconditional(&s->ip)) { |
247 | /* Tail of chains: STANDARD target (return/policy) */ | 247 | /* Tail of chains: STANDARD target (return/policy) */ |
248 | *comment = *chainname == hookname | 248 | *comment = *chainname == hookname |
249 | ? comments[NF_IP_TRACE_COMMENT_POLICY] | 249 | ? comments[NF_IP_TRACE_COMMENT_POLICY] |
250 | : comments[NF_IP_TRACE_COMMENT_RETURN]; | 250 | : comments[NF_IP_TRACE_COMMENT_RETURN]; |
251 | } | 251 | } |
252 | return 1; | 252 | return 1; |
253 | } else | 253 | } else |
254 | (*rulenum)++; | 254 | (*rulenum)++; |
255 | 255 | ||
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | static void trace_packet(const struct sk_buff *skb, | 259 | static void trace_packet(const struct sk_buff *skb, |
260 | unsigned int hook, | 260 | unsigned int hook, |
261 | const struct net_device *in, | 261 | const struct net_device *in, |
262 | const struct net_device *out, | 262 | const struct net_device *out, |
263 | const char *tablename, | 263 | const char *tablename, |
264 | const struct xt_table_info *private, | 264 | const struct xt_table_info *private, |
265 | const struct ipt_entry *e) | 265 | const struct ipt_entry *e) |
266 | { | 266 | { |
267 | const void *table_base; | 267 | const void *table_base; |
268 | const struct ipt_entry *root; | 268 | const struct ipt_entry *root; |
269 | const char *hookname, *chainname, *comment; | 269 | const char *hookname, *chainname, *comment; |
270 | const struct ipt_entry *iter; | 270 | const struct ipt_entry *iter; |
271 | unsigned int rulenum = 0; | 271 | unsigned int rulenum = 0; |
272 | 272 | ||
273 | table_base = private->entries[smp_processor_id()]; | 273 | table_base = private->entries[smp_processor_id()]; |
274 | root = get_entry(table_base, private->hook_entry[hook]); | 274 | root = get_entry(table_base, private->hook_entry[hook]); |
275 | 275 | ||
276 | hookname = chainname = hooknames[hook]; | 276 | hookname = chainname = hooknames[hook]; |
277 | comment = comments[NF_IP_TRACE_COMMENT_RULE]; | 277 | comment = comments[NF_IP_TRACE_COMMENT_RULE]; |
278 | 278 | ||
279 | xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) | 279 | xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) |
280 | if (get_chainname_rulenum(iter, e, hookname, | 280 | if (get_chainname_rulenum(iter, e, hookname, |
281 | &chainname, &comment, &rulenum) != 0) | 281 | &chainname, &comment, &rulenum) != 0) |
282 | break; | 282 | break; |
283 | 283 | ||
284 | nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, | 284 | nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, |
285 | "TRACE: %s:%s:%s:%u ", | 285 | "TRACE: %s:%s:%s:%u ", |
286 | tablename, chainname, comment, rulenum); | 286 | tablename, chainname, comment, rulenum); |
287 | } | 287 | } |
288 | #endif | 288 | #endif |
289 | 289 | ||
290 | static inline __pure | 290 | static inline __pure |
291 | struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) | 291 | struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) |
292 | { | 292 | { |
293 | return (void *)entry + entry->next_offset; | 293 | return (void *)entry + entry->next_offset; |
294 | } | 294 | } |
295 | 295 | ||
296 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ | 296 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
297 | unsigned int | 297 | unsigned int |
298 | ipt_do_table(struct sk_buff *skb, | 298 | ipt_do_table(struct sk_buff *skb, |
299 | unsigned int hook, | 299 | unsigned int hook, |
300 | const struct net_device *in, | 300 | const struct net_device *in, |
301 | const struct net_device *out, | 301 | const struct net_device *out, |
302 | struct xt_table *table) | 302 | struct xt_table *table) |
303 | { | 303 | { |
304 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 304 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
305 | const struct iphdr *ip; | 305 | const struct iphdr *ip; |
306 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 306 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
307 | unsigned int verdict = NF_DROP; | 307 | unsigned int verdict = NF_DROP; |
308 | const char *indev, *outdev; | 308 | const char *indev, *outdev; |
309 | const void *table_base; | 309 | const void *table_base; |
310 | struct ipt_entry *e, **jumpstack; | 310 | struct ipt_entry *e, **jumpstack; |
311 | unsigned int *stackptr, origptr, cpu; | 311 | unsigned int *stackptr, origptr, cpu; |
312 | const struct xt_table_info *private; | 312 | const struct xt_table_info *private; |
313 | struct xt_action_param acpar; | 313 | struct xt_action_param acpar; |
314 | 314 | ||
315 | /* Initialization */ | 315 | /* Initialization */ |
316 | ip = ip_hdr(skb); | 316 | ip = ip_hdr(skb); |
317 | indev = in ? in->name : nulldevname; | 317 | indev = in ? in->name : nulldevname; |
318 | outdev = out ? out->name : nulldevname; | 318 | outdev = out ? out->name : nulldevname; |
319 | /* We handle fragments by dealing with the first fragment as | 319 | /* We handle fragments by dealing with the first fragment as |
320 | * if it was a normal packet. All other fragments are treated | 320 | * if it was a normal packet. All other fragments are treated |
321 | * normally, except that they will NEVER match rules that ask | 321 | * normally, except that they will NEVER match rules that ask |
322 | * things we don't know, ie. tcp syn flag or ports). If the | 322 | * things we don't know, ie. tcp syn flag or ports). If the |
323 | * rule is also a fragment-specific rule, non-fragments won't | 323 | * rule is also a fragment-specific rule, non-fragments won't |
324 | * match it. */ | 324 | * match it. */ |
325 | acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; | 325 | acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; |
326 | acpar.thoff = ip_hdrlen(skb); | 326 | acpar.thoff = ip_hdrlen(skb); |
327 | acpar.hotdrop = false; | 327 | acpar.hotdrop = false; |
328 | acpar.in = in; | 328 | acpar.in = in; |
329 | acpar.out = out; | 329 | acpar.out = out; |
330 | acpar.family = NFPROTO_IPV4; | 330 | acpar.family = NFPROTO_IPV4; |
331 | acpar.hooknum = hook; | 331 | acpar.hooknum = hook; |
332 | 332 | ||
333 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 333 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
334 | xt_info_rdlock_bh(); | 334 | xt_info_rdlock_bh(); |
335 | private = table->private; | 335 | private = table->private; |
336 | cpu = smp_processor_id(); | 336 | cpu = smp_processor_id(); |
337 | table_base = private->entries[cpu]; | 337 | table_base = private->entries[cpu]; |
338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; | 338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; |
339 | stackptr = per_cpu_ptr(private->stackptr, cpu); | 339 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
340 | origptr = *stackptr; | 340 | origptr = *stackptr; |
341 | 341 | ||
342 | e = get_entry(table_base, private->hook_entry[hook]); | 342 | e = get_entry(table_base, private->hook_entry[hook]); |
343 | 343 | ||
344 | pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n", | 344 | pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n", |
345 | table->name, hook, origptr, | 345 | table->name, hook, origptr, |
346 | get_entry(table_base, private->underflow[hook])); | 346 | get_entry(table_base, private->underflow[hook])); |
347 | 347 | ||
348 | do { | 348 | do { |
349 | const struct xt_entry_target *t; | 349 | const struct xt_entry_target *t; |
350 | const struct xt_entry_match *ematch; | 350 | const struct xt_entry_match *ematch; |
351 | 351 | ||
352 | IP_NF_ASSERT(e); | 352 | IP_NF_ASSERT(e); |
353 | if (!ip_packet_match(ip, indev, outdev, | 353 | if (!ip_packet_match(ip, indev, outdev, |
354 | &e->ip, acpar.fragoff)) { | 354 | &e->ip, acpar.fragoff)) { |
355 | no_match: | 355 | no_match: |
356 | e = ipt_next_entry(e); | 356 | e = ipt_next_entry(e); |
357 | continue; | 357 | continue; |
358 | } | 358 | } |
359 | 359 | ||
360 | xt_ematch_foreach(ematch, e) { | 360 | xt_ematch_foreach(ematch, e) { |
361 | acpar.match = ematch->u.kernel.match; | 361 | acpar.match = ematch->u.kernel.match; |
362 | acpar.matchinfo = ematch->data; | 362 | acpar.matchinfo = ematch->data; |
363 | if (!acpar.match->match(skb, &acpar)) | 363 | if (!acpar.match->match(skb, &acpar)) |
364 | goto no_match; | 364 | goto no_match; |
365 | } | 365 | } |
366 | 366 | ||
367 | ADD_COUNTER(e->counters, skb->len, 1); | 367 | ADD_COUNTER(e->counters, skb->len, 1); |
368 | 368 | ||
369 | t = ipt_get_target(e); | 369 | t = ipt_get_target(e); |
370 | IP_NF_ASSERT(t->u.kernel.target); | 370 | IP_NF_ASSERT(t->u.kernel.target); |
371 | 371 | ||
372 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 372 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
373 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 373 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
374 | /* The packet is traced: log it */ | 374 | /* The packet is traced: log it */ |
375 | if (unlikely(skb->nf_trace)) | 375 | if (unlikely(skb->nf_trace)) |
376 | trace_packet(skb, hook, in, out, | 376 | trace_packet(skb, hook, in, out, |
377 | table->name, private, e); | 377 | table->name, private, e); |
378 | #endif | 378 | #endif |
379 | /* Standard target? */ | 379 | /* Standard target? */ |
380 | if (!t->u.kernel.target->target) { | 380 | if (!t->u.kernel.target->target) { |
381 | int v; | 381 | int v; |
382 | 382 | ||
383 | v = ((struct xt_standard_target *)t)->verdict; | 383 | v = ((struct xt_standard_target *)t)->verdict; |
384 | if (v < 0) { | 384 | if (v < 0) { |
385 | /* Pop from stack? */ | 385 | /* Pop from stack? */ |
386 | if (v != XT_RETURN) { | 386 | if (v != XT_RETURN) { |
387 | verdict = (unsigned)(-v) - 1; | 387 | verdict = (unsigned)(-v) - 1; |
388 | break; | 388 | break; |
389 | } | 389 | } |
390 | if (*stackptr == 0) { | 390 | if (*stackptr == 0) { |
391 | e = get_entry(table_base, | 391 | e = get_entry(table_base, |
392 | private->underflow[hook]); | 392 | private->underflow[hook]); |
393 | pr_debug("Underflow (this is normal) " | 393 | pr_debug("Underflow (this is normal) " |
394 | "to %p\n", e); | 394 | "to %p\n", e); |
395 | } else { | 395 | } else { |
396 | e = jumpstack[--*stackptr]; | 396 | e = jumpstack[--*stackptr]; |
397 | pr_debug("Pulled %p out from pos %u\n", | 397 | pr_debug("Pulled %p out from pos %u\n", |
398 | e, *stackptr); | 398 | e, *stackptr); |
399 | e = ipt_next_entry(e); | 399 | e = ipt_next_entry(e); |
400 | } | 400 | } |
401 | continue; | 401 | continue; |
402 | } | 402 | } |
403 | if (table_base + v != ipt_next_entry(e) && | 403 | if (table_base + v != ipt_next_entry(e) && |
404 | !(e->ip.flags & IPT_F_GOTO)) { | 404 | !(e->ip.flags & IPT_F_GOTO)) { |
405 | if (*stackptr >= private->stacksize) { | 405 | if (*stackptr >= private->stacksize) { |
406 | verdict = NF_DROP; | 406 | verdict = NF_DROP; |
407 | break; | 407 | break; |
408 | } | 408 | } |
409 | jumpstack[(*stackptr)++] = e; | 409 | jumpstack[(*stackptr)++] = e; |
410 | pr_debug("Pushed %p into pos %u\n", | 410 | pr_debug("Pushed %p into pos %u\n", |
411 | e, *stackptr - 1); | 411 | e, *stackptr - 1); |
412 | } | 412 | } |
413 | 413 | ||
414 | e = get_entry(table_base, v); | 414 | e = get_entry(table_base, v); |
415 | continue; | 415 | continue; |
416 | } | 416 | } |
417 | 417 | ||
418 | acpar.target = t->u.kernel.target; | 418 | acpar.target = t->u.kernel.target; |
419 | acpar.targinfo = t->data; | 419 | acpar.targinfo = t->data; |
420 | 420 | ||
421 | verdict = t->u.kernel.target->target(skb, &acpar); | 421 | verdict = t->u.kernel.target->target(skb, &acpar); |
422 | /* Target might have changed stuff. */ | 422 | /* Target might have changed stuff. */ |
423 | ip = ip_hdr(skb); | 423 | ip = ip_hdr(skb); |
424 | if (verdict == XT_CONTINUE) | 424 | if (verdict == XT_CONTINUE) |
425 | e = ipt_next_entry(e); | 425 | e = ipt_next_entry(e); |
426 | else | 426 | else |
427 | /* Verdict */ | 427 | /* Verdict */ |
428 | break; | 428 | break; |
429 | } while (!acpar.hotdrop); | 429 | } while (!acpar.hotdrop); |
430 | xt_info_rdunlock_bh(); | 430 | xt_info_rdunlock_bh(); |
431 | pr_debug("Exiting %s; resetting sp from %u to %u\n", | 431 | pr_debug("Exiting %s; resetting sp from %u to %u\n", |
432 | __func__, *stackptr, origptr); | 432 | __func__, *stackptr, origptr); |
433 | *stackptr = origptr; | 433 | *stackptr = origptr; |
434 | #ifdef DEBUG_ALLOW_ALL | 434 | #ifdef DEBUG_ALLOW_ALL |
435 | return NF_ACCEPT; | 435 | return NF_ACCEPT; |
436 | #else | 436 | #else |
437 | if (acpar.hotdrop) | 437 | if (acpar.hotdrop) |
438 | return NF_DROP; | 438 | return NF_DROP; |
439 | else return verdict; | 439 | else return verdict; |
440 | #endif | 440 | #endif |
441 | } | 441 | } |
442 | 442 | ||
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops (or a bad verdict), 1 on success.  Puts the hook
 * bitmask in each entry's comefrom field.
 *
 * Walks every chain reachable from each valid hook entry point,
 * following standard-target jumps.  Because no recursion is allowed,
 * the per-entry packet counter (counters.pcnt) is temporarily hijacked
 * to store the back pointer while an entry is on the current path; it
 * is reset to 0 as the walk unwinds. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "on the current
			 * path"; meeting it again means we jumped back
			 * into an unfinished entry, i.e. a rule loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				/* Negative verdicts encode -NF_xxx - 1;
				 * anything below the legal range is bogus. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* pcnt held the back pointer while
					 * on-path; restore it to zero. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must leave room for at
					 * least one full entry header. */
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
554 | 554 | ||
555 | static void cleanup_match(struct xt_entry_match *m, struct net *net) | 555 | static void cleanup_match(struct xt_entry_match *m, struct net *net) |
556 | { | 556 | { |
557 | struct xt_mtdtor_param par; | 557 | struct xt_mtdtor_param par; |
558 | 558 | ||
559 | par.net = net; | 559 | par.net = net; |
560 | par.match = m->u.kernel.match; | 560 | par.match = m->u.kernel.match; |
561 | par.matchinfo = m->data; | 561 | par.matchinfo = m->data; |
562 | par.family = NFPROTO_IPV4; | 562 | par.family = NFPROTO_IPV4; |
563 | if (par.match->destroy != NULL) | 563 | if (par.match->destroy != NULL) |
564 | par.match->destroy(&par); | 564 | par.match->destroy(&par); |
565 | module_put(par.match->me); | 565 | module_put(par.match->me); |
566 | } | 566 | } |
567 | 567 | ||
568 | static int | 568 | static int |
569 | check_entry(const struct ipt_entry *e, const char *name) | 569 | check_entry(const struct ipt_entry *e, const char *name) |
570 | { | 570 | { |
571 | const struct xt_entry_target *t; | 571 | const struct xt_entry_target *t; |
572 | 572 | ||
573 | if (!ip_checkentry(&e->ip)) { | 573 | if (!ip_checkentry(&e->ip)) { |
574 | duprintf("ip check failed %p %s.\n", e, par->match->name); | 574 | duprintf("ip check failed %p %s.\n", e, par->match->name); |
575 | return -EINVAL; | 575 | return -EINVAL; |
576 | } | 576 | } |
577 | 577 | ||
578 | if (e->target_offset + sizeof(struct xt_entry_target) > | 578 | if (e->target_offset + sizeof(struct xt_entry_target) > |
579 | e->next_offset) | 579 | e->next_offset) |
580 | return -EINVAL; | 580 | return -EINVAL; |
581 | 581 | ||
582 | t = ipt_get_target_c(e); | 582 | t = ipt_get_target_c(e); |
583 | if (e->target_offset + t->u.target_size > e->next_offset) | 583 | if (e->target_offset + t->u.target_size > e->next_offset) |
584 | return -EINVAL; | 584 | return -EINVAL; |
585 | 585 | ||
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | static int | 589 | static int |
590 | check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) | 590 | check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) |
591 | { | 591 | { |
592 | const struct ipt_ip *ip = par->entryinfo; | 592 | const struct ipt_ip *ip = par->entryinfo; |
593 | int ret; | 593 | int ret; |
594 | 594 | ||
595 | par->match = m->u.kernel.match; | 595 | par->match = m->u.kernel.match; |
596 | par->matchinfo = m->data; | 596 | par->matchinfo = m->data; |
597 | 597 | ||
598 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), | 598 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), |
599 | ip->proto, ip->invflags & IPT_INV_PROTO); | 599 | ip->proto, ip->invflags & IPT_INV_PROTO); |
600 | if (ret < 0) { | 600 | if (ret < 0) { |
601 | duprintf("check failed for `%s'.\n", par->match->name); | 601 | duprintf("check failed for `%s'.\n", par->match->name); |
602 | return ret; | 602 | return ret; |
603 | } | 603 | } |
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
606 | 606 | ||
607 | static int | 607 | static int |
608 | find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) | 608 | find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) |
609 | { | 609 | { |
610 | struct xt_match *match; | 610 | struct xt_match *match; |
611 | int ret; | 611 | int ret; |
612 | 612 | ||
613 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, | 613 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
614 | m->u.user.revision); | 614 | m->u.user.revision); |
615 | if (IS_ERR(match)) { | 615 | if (IS_ERR(match)) { |
616 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 616 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
617 | return PTR_ERR(match); | 617 | return PTR_ERR(match); |
618 | } | 618 | } |
619 | m->u.kernel.match = match; | 619 | m->u.kernel.match = match; |
620 | 620 | ||
621 | ret = check_match(m, par); | 621 | ret = check_match(m, par); |
622 | if (ret) | 622 | if (ret) |
623 | goto err; | 623 | goto err; |
624 | 624 | ||
625 | return 0; | 625 | return 0; |
626 | err: | 626 | err: |
627 | module_put(m->u.kernel.match->me); | 627 | module_put(m->u.kernel.match->me); |
628 | return ret; | 628 | return ret; |
629 | } | 629 | } |
630 | 630 | ||
631 | static int check_target(struct ipt_entry *e, struct net *net, const char *name) | 631 | static int check_target(struct ipt_entry *e, struct net *net, const char *name) |
632 | { | 632 | { |
633 | struct xt_entry_target *t = ipt_get_target(e); | 633 | struct xt_entry_target *t = ipt_get_target(e); |
634 | struct xt_tgchk_param par = { | 634 | struct xt_tgchk_param par = { |
635 | .net = net, | 635 | .net = net, |
636 | .table = name, | 636 | .table = name, |
637 | .entryinfo = e, | 637 | .entryinfo = e, |
638 | .target = t->u.kernel.target, | 638 | .target = t->u.kernel.target, |
639 | .targinfo = t->data, | 639 | .targinfo = t->data, |
640 | .hook_mask = e->comefrom, | 640 | .hook_mask = e->comefrom, |
641 | .family = NFPROTO_IPV4, | 641 | .family = NFPROTO_IPV4, |
642 | }; | 642 | }; |
643 | int ret; | 643 | int ret; |
644 | 644 | ||
645 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), | 645 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), |
646 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); | 646 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); |
647 | if (ret < 0) { | 647 | if (ret < 0) { |
648 | duprintf("check failed for `%s'.\n", | 648 | duprintf("check failed for `%s'.\n", |
649 | t->u.kernel.target->name); | 649 | t->u.kernel.target->name); |
650 | return ret; | 650 | return ret; |
651 | } | 651 | } |
652 | return 0; | 652 | return 0; |
653 | } | 653 | } |
654 | 654 | ||
/* Fully resolve and validate one rule: every match extension plus the
 * target.  On any failure, all references taken so far are released in
 * reverse, so the caller never has to clean up a partially-checked
 * entry.  Returns 0 on success, negative errno on failure. */
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* Reject structurally invalid entries before any lookups. */
	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts matches that passed find_check_match(), so the
	 * cleanup_matches path below unwinds exactly that many. */
	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	/* Target resolved but failed validation: drop its module ref. */
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
707 | 707 | ||
708 | static bool check_underflow(const struct ipt_entry *e) | 708 | static bool check_underflow(const struct ipt_entry *e) |
709 | { | 709 | { |
710 | const struct xt_entry_target *t; | 710 | const struct xt_entry_target *t; |
711 | unsigned int verdict; | 711 | unsigned int verdict; |
712 | 712 | ||
713 | if (!unconditional(&e->ip)) | 713 | if (!unconditional(&e->ip)) |
714 | return false; | 714 | return false; |
715 | t = ipt_get_target_c(e); | 715 | t = ipt_get_target_c(e); |
716 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 716 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
717 | return false; | 717 | return false; |
718 | verdict = ((struct xt_standard_target *)t)->verdict; | 718 | verdict = ((struct xt_standard_target *)t)->verdict; |
719 | verdict = -verdict - 1; | 719 | verdict = -verdict - 1; |
720 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 720 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
721 | } | 721 | } |
722 | 722 | ||
/* First-pass validation of one entry from a userspace blob: alignment,
 * minimum size, and whether it sits exactly at a hook entry point or
 * underflow offset (recorded into newinfo if so).  Also zeroes the
 * fields the kernel owns (counters, comefrom).
 * Returns 0 on success, -EINVAL on malformed input. */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	/* Entry must be properly aligned and leave room for at least
	 * the ipt_entry header before the end of the blob. */
	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
769 | 769 | ||
770 | static void | 770 | static void |
771 | cleanup_entry(struct ipt_entry *e, struct net *net) | 771 | cleanup_entry(struct ipt_entry *e, struct net *net) |
772 | { | 772 | { |
773 | struct xt_tgdtor_param par; | 773 | struct xt_tgdtor_param par; |
774 | struct xt_entry_target *t; | 774 | struct xt_entry_target *t; |
775 | struct xt_entry_match *ematch; | 775 | struct xt_entry_match *ematch; |
776 | 776 | ||
777 | /* Cleanup all matches */ | 777 | /* Cleanup all matches */ |
778 | xt_ematch_foreach(ematch, e) | 778 | xt_ematch_foreach(ematch, e) |
779 | cleanup_match(ematch, net); | 779 | cleanup_match(ematch, net); |
780 | t = ipt_get_target(e); | 780 | t = ipt_get_target(e); |
781 | 781 | ||
782 | par.net = net; | 782 | par.net = net; |
783 | par.target = t->u.kernel.target; | 783 | par.target = t->u.kernel.target; |
784 | par.targinfo = t->data; | 784 | par.targinfo = t->data; |
785 | par.family = NFPROTO_IPV4; | 785 | par.family = NFPROTO_IPV4; |
786 | if (par.target->destroy != NULL) | 786 | if (par.target->destroy != NULL) |
787 | par.target->destroy(&par); | 787 | par.target->destroy(&par); |
788 | module_put(par.target->me); | 788 | module_put(par.target->me); |
789 | } | 789 | } |
790 | 790 | ||
/* Checks and translates the user-supplied table segment (held in
 * newinfo).  Phases:
 *   1. size/offset validation of every entry (and hook/underflow
 *      discovery),
 *   2. verify every valid hook got both an entry point and underflow,
 *   3. loop detection via mark_source_chains(),
 *   4. per-entry extension checks, with rollback of the entries
 *      already checked if any fails,
 *   5. replicate the validated blob to every other CPU's copy.
 * Returns 0 on success, negative errno on failure. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each ERROR target marks a user-defined chain head;
		 * counting them sizes the per-cpu jump stack. */
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that passed above. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
879 | 879 | ||
/* Sum per-cpu byte/packet counters of every rule into counters[].
 *
 * Reads each CPU's copy under that CPU's xt_info_locks seqlock read
 * side, retrying a rule's pair if a writer (ipt_do_table) raced with
 * us.  Per this commit, no BH disabling or write-lock is taken here,
 * so a long "iptables -L" no longer blocks softirq processing.
 * counters[] is expected to be zeroed (vzalloc'd) by the caller, since
 * every CPU's values are accumulated with ADD_COUNTER(). */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Seqlock read loop: retry until we get a
			 * consistent bcnt/pcnt pair for this rule. */
			do {
				start = read_seqbegin(lock);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqretry(lock, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
924 | 907 | ||
925 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 908 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
926 | { | 909 | { |
927 | unsigned int countersize; | 910 | unsigned int countersize; |
928 | struct xt_counters *counters; | 911 | struct xt_counters *counters; |
929 | const struct xt_table_info *private = table->private; | 912 | const struct xt_table_info *private = table->private; |
930 | 913 | ||
931 | /* We need atomic snapshot of counters: rest doesn't change | 914 | /* We need atomic snapshot of counters: rest doesn't change |
932 | (other than comefrom, which userspace doesn't care | 915 | (other than comefrom, which userspace doesn't care |
933 | about). */ | 916 | about). */ |
934 | countersize = sizeof(struct xt_counters) * private->number; | 917 | countersize = sizeof(struct xt_counters) * private->number; |
935 | counters = vmalloc(countersize); | 918 | counters = vzalloc(countersize); |
936 | 919 | ||
937 | if (counters == NULL) | 920 | if (counters == NULL) |
938 | return ERR_PTR(-ENOMEM); | 921 | return ERR_PTR(-ENOMEM); |
939 | 922 | ||
940 | get_counters(private, counters); | 923 | get_counters(private, counters); |
941 | 924 | ||
942 | return counters; | 925 | return counters; |
943 | } | 926 | } |
944 | 927 | ||
945 | static int | 928 | static int |
946 | copy_entries_to_user(unsigned int total_size, | 929 | copy_entries_to_user(unsigned int total_size, |
947 | const struct xt_table *table, | 930 | const struct xt_table *table, |
948 | void __user *userptr) | 931 | void __user *userptr) |
949 | { | 932 | { |
950 | unsigned int off, num; | 933 | unsigned int off, num; |
951 | const struct ipt_entry *e; | 934 | const struct ipt_entry *e; |
952 | struct xt_counters *counters; | 935 | struct xt_counters *counters; |
953 | const struct xt_table_info *private = table->private; | 936 | const struct xt_table_info *private = table->private; |
954 | int ret = 0; | 937 | int ret = 0; |
955 | const void *loc_cpu_entry; | 938 | const void *loc_cpu_entry; |
956 | 939 | ||
957 | counters = alloc_counters(table); | 940 | counters = alloc_counters(table); |
958 | if (IS_ERR(counters)) | 941 | if (IS_ERR(counters)) |
959 | return PTR_ERR(counters); | 942 | return PTR_ERR(counters); |
960 | 943 | ||
961 | /* choose the copy that is on our node/cpu, ... | 944 | /* choose the copy that is on our node/cpu, ... |
962 | * This choice is lazy (because current thread is | 945 | * This choice is lazy (because current thread is |
963 | * allowed to migrate to another cpu) | 946 | * allowed to migrate to another cpu) |
964 | */ | 947 | */ |
965 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 948 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
966 | if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { | 949 | if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { |
967 | ret = -EFAULT; | 950 | ret = -EFAULT; |
968 | goto free_counters; | 951 | goto free_counters; |
969 | } | 952 | } |
970 | 953 | ||
971 | /* FIXME: use iterator macros --RR */ | 954 | /* FIXME: use iterator macros --RR */ |
972 | /* ... then go back and fix counters and names */ | 955 | /* ... then go back and fix counters and names */ |
973 | for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ | 956 | for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ |
974 | unsigned int i; | 957 | unsigned int i; |
975 | const struct xt_entry_match *m; | 958 | const struct xt_entry_match *m; |
976 | const struct xt_entry_target *t; | 959 | const struct xt_entry_target *t; |
977 | 960 | ||
978 | e = (struct ipt_entry *)(loc_cpu_entry + off); | 961 | e = (struct ipt_entry *)(loc_cpu_entry + off); |
979 | if (copy_to_user(userptr + off | 962 | if (copy_to_user(userptr + off |
980 | + offsetof(struct ipt_entry, counters), | 963 | + offsetof(struct ipt_entry, counters), |
981 | &counters[num], | 964 | &counters[num], |
982 | sizeof(counters[num])) != 0) { | 965 | sizeof(counters[num])) != 0) { |
983 | ret = -EFAULT; | 966 | ret = -EFAULT; |
984 | goto free_counters; | 967 | goto free_counters; |
985 | } | 968 | } |
986 | 969 | ||
987 | for (i = sizeof(struct ipt_entry); | 970 | for (i = sizeof(struct ipt_entry); |
988 | i < e->target_offset; | 971 | i < e->target_offset; |
989 | i += m->u.match_size) { | 972 | i += m->u.match_size) { |
990 | m = (void *)e + i; | 973 | m = (void *)e + i; |
991 | 974 | ||
992 | if (copy_to_user(userptr + off + i | 975 | if (copy_to_user(userptr + off + i |
993 | + offsetof(struct xt_entry_match, | 976 | + offsetof(struct xt_entry_match, |
994 | u.user.name), | 977 | u.user.name), |
995 | m->u.kernel.match->name, | 978 | m->u.kernel.match->name, |
996 | strlen(m->u.kernel.match->name)+1) | 979 | strlen(m->u.kernel.match->name)+1) |
997 | != 0) { | 980 | != 0) { |
998 | ret = -EFAULT; | 981 | ret = -EFAULT; |
999 | goto free_counters; | 982 | goto free_counters; |
1000 | } | 983 | } |
1001 | } | 984 | } |
1002 | 985 | ||
1003 | t = ipt_get_target_c(e); | 986 | t = ipt_get_target_c(e); |
1004 | if (copy_to_user(userptr + off + e->target_offset | 987 | if (copy_to_user(userptr + off + e->target_offset |
1005 | + offsetof(struct xt_entry_target, | 988 | + offsetof(struct xt_entry_target, |
1006 | u.user.name), | 989 | u.user.name), |
1007 | t->u.kernel.target->name, | 990 | t->u.kernel.target->name, |
1008 | strlen(t->u.kernel.target->name)+1) != 0) { | 991 | strlen(t->u.kernel.target->name)+1) != 0) { |
1009 | ret = -EFAULT; | 992 | ret = -EFAULT; |
1010 | goto free_counters; | 993 | goto free_counters; |
1011 | } | 994 | } |
1012 | } | 995 | } |
1013 | 996 | ||
1014 | free_counters: | 997 | free_counters: |
1015 | vfree(counters); | 998 | vfree(counters); |
1016 | return ret; | 999 | return ret; |
1017 | } | 1000 | } |
1018 | 1001 | ||
1019 | #ifdef CONFIG_COMPAT | 1002 | #ifdef CONFIG_COMPAT |
1020 | static void compat_standard_from_user(void *dst, const void *src) | 1003 | static void compat_standard_from_user(void *dst, const void *src) |
1021 | { | 1004 | { |
1022 | int v = *(compat_int_t *)src; | 1005 | int v = *(compat_int_t *)src; |
1023 | 1006 | ||
1024 | if (v > 0) | 1007 | if (v > 0) |
1025 | v += xt_compat_calc_jump(AF_INET, v); | 1008 | v += xt_compat_calc_jump(AF_INET, v); |
1026 | memcpy(dst, &v, sizeof(v)); | 1009 | memcpy(dst, &v, sizeof(v)); |
1027 | } | 1010 | } |
1028 | 1011 | ||
1029 | static int compat_standard_to_user(void __user *dst, const void *src) | 1012 | static int compat_standard_to_user(void __user *dst, const void *src) |
1030 | { | 1013 | { |
1031 | compat_int_t cv = *(int *)src; | 1014 | compat_int_t cv = *(int *)src; |
1032 | 1015 | ||
1033 | if (cv > 0) | 1016 | if (cv > 0) |
1034 | cv -= xt_compat_calc_jump(AF_INET, cv); | 1017 | cv -= xt_compat_calc_jump(AF_INET, cv); |
1035 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 1018 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
1036 | } | 1019 | } |
1037 | 1020 | ||
1038 | static int compat_calc_entry(const struct ipt_entry *e, | 1021 | static int compat_calc_entry(const struct ipt_entry *e, |
1039 | const struct xt_table_info *info, | 1022 | const struct xt_table_info *info, |
1040 | const void *base, struct xt_table_info *newinfo) | 1023 | const void *base, struct xt_table_info *newinfo) |
1041 | { | 1024 | { |
1042 | const struct xt_entry_match *ematch; | 1025 | const struct xt_entry_match *ematch; |
1043 | const struct xt_entry_target *t; | 1026 | const struct xt_entry_target *t; |
1044 | unsigned int entry_offset; | 1027 | unsigned int entry_offset; |
1045 | int off, i, ret; | 1028 | int off, i, ret; |
1046 | 1029 | ||
1047 | off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); | 1030 | off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); |
1048 | entry_offset = (void *)e - base; | 1031 | entry_offset = (void *)e - base; |
1049 | xt_ematch_foreach(ematch, e) | 1032 | xt_ematch_foreach(ematch, e) |
1050 | off += xt_compat_match_offset(ematch->u.kernel.match); | 1033 | off += xt_compat_match_offset(ematch->u.kernel.match); |
1051 | t = ipt_get_target_c(e); | 1034 | t = ipt_get_target_c(e); |
1052 | off += xt_compat_target_offset(t->u.kernel.target); | 1035 | off += xt_compat_target_offset(t->u.kernel.target); |
1053 | newinfo->size -= off; | 1036 | newinfo->size -= off; |
1054 | ret = xt_compat_add_offset(AF_INET, entry_offset, off); | 1037 | ret = xt_compat_add_offset(AF_INET, entry_offset, off); |
1055 | if (ret) | 1038 | if (ret) |
1056 | return ret; | 1039 | return ret; |
1057 | 1040 | ||
1058 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1041 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1059 | if (info->hook_entry[i] && | 1042 | if (info->hook_entry[i] && |
1060 | (e < (struct ipt_entry *)(base + info->hook_entry[i]))) | 1043 | (e < (struct ipt_entry *)(base + info->hook_entry[i]))) |
1061 | newinfo->hook_entry[i] -= off; | 1044 | newinfo->hook_entry[i] -= off; |
1062 | if (info->underflow[i] && | 1045 | if (info->underflow[i] && |
1063 | (e < (struct ipt_entry *)(base + info->underflow[i]))) | 1046 | (e < (struct ipt_entry *)(base + info->underflow[i]))) |
1064 | newinfo->underflow[i] -= off; | 1047 | newinfo->underflow[i] -= off; |
1065 | } | 1048 | } |
1066 | return 0; | 1049 | return 0; |
1067 | } | 1050 | } |
1068 | 1051 | ||
1069 | static int compat_table_info(const struct xt_table_info *info, | 1052 | static int compat_table_info(const struct xt_table_info *info, |
1070 | struct xt_table_info *newinfo) | 1053 | struct xt_table_info *newinfo) |
1071 | { | 1054 | { |
1072 | struct ipt_entry *iter; | 1055 | struct ipt_entry *iter; |
1073 | void *loc_cpu_entry; | 1056 | void *loc_cpu_entry; |
1074 | int ret; | 1057 | int ret; |
1075 | 1058 | ||
1076 | if (!newinfo || !info) | 1059 | if (!newinfo || !info) |
1077 | return -EINVAL; | 1060 | return -EINVAL; |
1078 | 1061 | ||
1079 | /* we dont care about newinfo->entries[] */ | 1062 | /* we dont care about newinfo->entries[] */ |
1080 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | 1063 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); |
1081 | newinfo->initial_entries = 0; | 1064 | newinfo->initial_entries = 0; |
1082 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; | 1065 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; |
1083 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { | 1066 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { |
1084 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); | 1067 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); |
1085 | if (ret != 0) | 1068 | if (ret != 0) |
1086 | return ret; | 1069 | return ret; |
1087 | } | 1070 | } |
1088 | return 0; | 1071 | return 0; |
1089 | } | 1072 | } |
1090 | #endif | 1073 | #endif |
1091 | 1074 | ||
1092 | static int get_info(struct net *net, void __user *user, | 1075 | static int get_info(struct net *net, void __user *user, |
1093 | const int *len, int compat) | 1076 | const int *len, int compat) |
1094 | { | 1077 | { |
1095 | char name[XT_TABLE_MAXNAMELEN]; | 1078 | char name[XT_TABLE_MAXNAMELEN]; |
1096 | struct xt_table *t; | 1079 | struct xt_table *t; |
1097 | int ret; | 1080 | int ret; |
1098 | 1081 | ||
1099 | if (*len != sizeof(struct ipt_getinfo)) { | 1082 | if (*len != sizeof(struct ipt_getinfo)) { |
1100 | duprintf("length %u != %zu\n", *len, | 1083 | duprintf("length %u != %zu\n", *len, |
1101 | sizeof(struct ipt_getinfo)); | 1084 | sizeof(struct ipt_getinfo)); |
1102 | return -EINVAL; | 1085 | return -EINVAL; |
1103 | } | 1086 | } |
1104 | 1087 | ||
1105 | if (copy_from_user(name, user, sizeof(name)) != 0) | 1088 | if (copy_from_user(name, user, sizeof(name)) != 0) |
1106 | return -EFAULT; | 1089 | return -EFAULT; |
1107 | 1090 | ||
1108 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; | 1091 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; |
1109 | #ifdef CONFIG_COMPAT | 1092 | #ifdef CONFIG_COMPAT |
1110 | if (compat) | 1093 | if (compat) |
1111 | xt_compat_lock(AF_INET); | 1094 | xt_compat_lock(AF_INET); |
1112 | #endif | 1095 | #endif |
1113 | t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), | 1096 | t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), |
1114 | "iptable_%s", name); | 1097 | "iptable_%s", name); |
1115 | if (t && !IS_ERR(t)) { | 1098 | if (t && !IS_ERR(t)) { |
1116 | struct ipt_getinfo info; | 1099 | struct ipt_getinfo info; |
1117 | const struct xt_table_info *private = t->private; | 1100 | const struct xt_table_info *private = t->private; |
1118 | #ifdef CONFIG_COMPAT | 1101 | #ifdef CONFIG_COMPAT |
1119 | struct xt_table_info tmp; | 1102 | struct xt_table_info tmp; |
1120 | 1103 | ||
1121 | if (compat) { | 1104 | if (compat) { |
1122 | ret = compat_table_info(private, &tmp); | 1105 | ret = compat_table_info(private, &tmp); |
1123 | xt_compat_flush_offsets(AF_INET); | 1106 | xt_compat_flush_offsets(AF_INET); |
1124 | private = &tmp; | 1107 | private = &tmp; |
1125 | } | 1108 | } |
1126 | #endif | 1109 | #endif |
1127 | memset(&info, 0, sizeof(info)); | 1110 | memset(&info, 0, sizeof(info)); |
1128 | info.valid_hooks = t->valid_hooks; | 1111 | info.valid_hooks = t->valid_hooks; |
1129 | memcpy(info.hook_entry, private->hook_entry, | 1112 | memcpy(info.hook_entry, private->hook_entry, |
1130 | sizeof(info.hook_entry)); | 1113 | sizeof(info.hook_entry)); |
1131 | memcpy(info.underflow, private->underflow, | 1114 | memcpy(info.underflow, private->underflow, |
1132 | sizeof(info.underflow)); | 1115 | sizeof(info.underflow)); |
1133 | info.num_entries = private->number; | 1116 | info.num_entries = private->number; |
1134 | info.size = private->size; | 1117 | info.size = private->size; |
1135 | strcpy(info.name, name); | 1118 | strcpy(info.name, name); |
1136 | 1119 | ||
1137 | if (copy_to_user(user, &info, *len) != 0) | 1120 | if (copy_to_user(user, &info, *len) != 0) |
1138 | ret = -EFAULT; | 1121 | ret = -EFAULT; |
1139 | else | 1122 | else |
1140 | ret = 0; | 1123 | ret = 0; |
1141 | 1124 | ||
1142 | xt_table_unlock(t); | 1125 | xt_table_unlock(t); |
1143 | module_put(t->me); | 1126 | module_put(t->me); |
1144 | } else | 1127 | } else |
1145 | ret = t ? PTR_ERR(t) : -ENOENT; | 1128 | ret = t ? PTR_ERR(t) : -ENOENT; |
1146 | #ifdef CONFIG_COMPAT | 1129 | #ifdef CONFIG_COMPAT |
1147 | if (compat) | 1130 | if (compat) |
1148 | xt_compat_unlock(AF_INET); | 1131 | xt_compat_unlock(AF_INET); |
1149 | #endif | 1132 | #endif |
1150 | return ret; | 1133 | return ret; |
1151 | } | 1134 | } |
1152 | 1135 | ||
1153 | static int | 1136 | static int |
1154 | get_entries(struct net *net, struct ipt_get_entries __user *uptr, | 1137 | get_entries(struct net *net, struct ipt_get_entries __user *uptr, |
1155 | const int *len) | 1138 | const int *len) |
1156 | { | 1139 | { |
1157 | int ret; | 1140 | int ret; |
1158 | struct ipt_get_entries get; | 1141 | struct ipt_get_entries get; |
1159 | struct xt_table *t; | 1142 | struct xt_table *t; |
1160 | 1143 | ||
1161 | if (*len < sizeof(get)) { | 1144 | if (*len < sizeof(get)) { |
1162 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); | 1145 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); |
1163 | return -EINVAL; | 1146 | return -EINVAL; |
1164 | } | 1147 | } |
1165 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1148 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1166 | return -EFAULT; | 1149 | return -EFAULT; |
1167 | if (*len != sizeof(struct ipt_get_entries) + get.size) { | 1150 | if (*len != sizeof(struct ipt_get_entries) + get.size) { |
1168 | duprintf("get_entries: %u != %zu\n", | 1151 | duprintf("get_entries: %u != %zu\n", |
1169 | *len, sizeof(get) + get.size); | 1152 | *len, sizeof(get) + get.size); |
1170 | return -EINVAL; | 1153 | return -EINVAL; |
1171 | } | 1154 | } |
1172 | 1155 | ||
1173 | t = xt_find_table_lock(net, AF_INET, get.name); | 1156 | t = xt_find_table_lock(net, AF_INET, get.name); |
1174 | if (t && !IS_ERR(t)) { | 1157 | if (t && !IS_ERR(t)) { |
1175 | const struct xt_table_info *private = t->private; | 1158 | const struct xt_table_info *private = t->private; |
1176 | duprintf("t->private->number = %u\n", private->number); | 1159 | duprintf("t->private->number = %u\n", private->number); |
1177 | if (get.size == private->size) | 1160 | if (get.size == private->size) |
1178 | ret = copy_entries_to_user(private->size, | 1161 | ret = copy_entries_to_user(private->size, |
1179 | t, uptr->entrytable); | 1162 | t, uptr->entrytable); |
1180 | else { | 1163 | else { |
1181 | duprintf("get_entries: I've got %u not %u!\n", | 1164 | duprintf("get_entries: I've got %u not %u!\n", |
1182 | private->size, get.size); | 1165 | private->size, get.size); |
1183 | ret = -EAGAIN; | 1166 | ret = -EAGAIN; |
1184 | } | 1167 | } |
1185 | module_put(t->me); | 1168 | module_put(t->me); |
1186 | xt_table_unlock(t); | 1169 | xt_table_unlock(t); |
1187 | } else | 1170 | } else |
1188 | ret = t ? PTR_ERR(t) : -ENOENT; | 1171 | ret = t ? PTR_ERR(t) : -ENOENT; |
1189 | 1172 | ||
1190 | return ret; | 1173 | return ret; |
1191 | } | 1174 | } |
1192 | 1175 | ||
1193 | static int | 1176 | static int |
1194 | __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | 1177 | __do_replace(struct net *net, const char *name, unsigned int valid_hooks, |
1195 | struct xt_table_info *newinfo, unsigned int num_counters, | 1178 | struct xt_table_info *newinfo, unsigned int num_counters, |
1196 | void __user *counters_ptr) | 1179 | void __user *counters_ptr) |
1197 | { | 1180 | { |
1198 | int ret; | 1181 | int ret; |
1199 | struct xt_table *t; | 1182 | struct xt_table *t; |
1200 | struct xt_table_info *oldinfo; | 1183 | struct xt_table_info *oldinfo; |
1201 | struct xt_counters *counters; | 1184 | struct xt_counters *counters; |
1202 | void *loc_cpu_old_entry; | 1185 | void *loc_cpu_old_entry; |
1203 | struct ipt_entry *iter; | 1186 | struct ipt_entry *iter; |
1204 | 1187 | ||
1205 | ret = 0; | 1188 | ret = 0; |
1206 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1189 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1207 | if (!counters) { | 1190 | if (!counters) { |
1208 | ret = -ENOMEM; | 1191 | ret = -ENOMEM; |
1209 | goto out; | 1192 | goto out; |
1210 | } | 1193 | } |
1211 | 1194 | ||
1212 | t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), | 1195 | t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), |
1213 | "iptable_%s", name); | 1196 | "iptable_%s", name); |
1214 | if (!t || IS_ERR(t)) { | 1197 | if (!t || IS_ERR(t)) { |
1215 | ret = t ? PTR_ERR(t) : -ENOENT; | 1198 | ret = t ? PTR_ERR(t) : -ENOENT; |
1216 | goto free_newinfo_counters_untrans; | 1199 | goto free_newinfo_counters_untrans; |
1217 | } | 1200 | } |
1218 | 1201 | ||
1219 | /* You lied! */ | 1202 | /* You lied! */ |
1220 | if (valid_hooks != t->valid_hooks) { | 1203 | if (valid_hooks != t->valid_hooks) { |
1221 | duprintf("Valid hook crap: %08X vs %08X\n", | 1204 | duprintf("Valid hook crap: %08X vs %08X\n", |
1222 | valid_hooks, t->valid_hooks); | 1205 | valid_hooks, t->valid_hooks); |
1223 | ret = -EINVAL; | 1206 | ret = -EINVAL; |
1224 | goto put_module; | 1207 | goto put_module; |
1225 | } | 1208 | } |
1226 | 1209 | ||
1227 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); | 1210 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); |
1228 | if (!oldinfo) | 1211 | if (!oldinfo) |
1229 | goto put_module; | 1212 | goto put_module; |
1230 | 1213 | ||
1231 | /* Update module usage count based on number of rules */ | 1214 | /* Update module usage count based on number of rules */ |
1232 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", | 1215 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", |
1233 | oldinfo->number, oldinfo->initial_entries, newinfo->number); | 1216 | oldinfo->number, oldinfo->initial_entries, newinfo->number); |
1234 | if ((oldinfo->number > oldinfo->initial_entries) || | 1217 | if ((oldinfo->number > oldinfo->initial_entries) || |
1235 | (newinfo->number <= oldinfo->initial_entries)) | 1218 | (newinfo->number <= oldinfo->initial_entries)) |
1236 | module_put(t->me); | 1219 | module_put(t->me); |
1237 | if ((oldinfo->number > oldinfo->initial_entries) && | 1220 | if ((oldinfo->number > oldinfo->initial_entries) && |
1238 | (newinfo->number <= oldinfo->initial_entries)) | 1221 | (newinfo->number <= oldinfo->initial_entries)) |
1239 | module_put(t->me); | 1222 | module_put(t->me); |
1240 | 1223 | ||
1241 | /* Get the old counters, and synchronize with replace */ | 1224 | /* Get the old counters, and synchronize with replace */ |
1242 | get_counters(oldinfo, counters); | 1225 | get_counters(oldinfo, counters); |
1243 | 1226 | ||
1244 | /* Decrease module usage counts and free resource */ | 1227 | /* Decrease module usage counts and free resource */ |
1245 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1228 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1246 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) | 1229 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) |
1247 | cleanup_entry(iter, net); | 1230 | cleanup_entry(iter, net); |
1248 | 1231 | ||
1249 | xt_free_table_info(oldinfo); | 1232 | xt_free_table_info(oldinfo); |
1250 | if (copy_to_user(counters_ptr, counters, | 1233 | if (copy_to_user(counters_ptr, counters, |
1251 | sizeof(struct xt_counters) * num_counters) != 0) | 1234 | sizeof(struct xt_counters) * num_counters) != 0) |
1252 | ret = -EFAULT; | 1235 | ret = -EFAULT; |
1253 | vfree(counters); | 1236 | vfree(counters); |
1254 | xt_table_unlock(t); | 1237 | xt_table_unlock(t); |
1255 | return ret; | 1238 | return ret; |
1256 | 1239 | ||
1257 | put_module: | 1240 | put_module: |
1258 | module_put(t->me); | 1241 | module_put(t->me); |
1259 | xt_table_unlock(t); | 1242 | xt_table_unlock(t); |
1260 | free_newinfo_counters_untrans: | 1243 | free_newinfo_counters_untrans: |
1261 | vfree(counters); | 1244 | vfree(counters); |
1262 | out: | 1245 | out: |
1263 | return ret; | 1246 | return ret; |
1264 | } | 1247 | } |
1265 | 1248 | ||
1266 | static int | 1249 | static int |
1267 | do_replace(struct net *net, const void __user *user, unsigned int len) | 1250 | do_replace(struct net *net, const void __user *user, unsigned int len) |
1268 | { | 1251 | { |
1269 | int ret; | 1252 | int ret; |
1270 | struct ipt_replace tmp; | 1253 | struct ipt_replace tmp; |
1271 | struct xt_table_info *newinfo; | 1254 | struct xt_table_info *newinfo; |
1272 | void *loc_cpu_entry; | 1255 | void *loc_cpu_entry; |
1273 | struct ipt_entry *iter; | 1256 | struct ipt_entry *iter; |
1274 | 1257 | ||
1275 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1258 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1276 | return -EFAULT; | 1259 | return -EFAULT; |
1277 | 1260 | ||
1278 | /* overflow check */ | 1261 | /* overflow check */ |
1279 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1262 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1280 | return -ENOMEM; | 1263 | return -ENOMEM; |
1281 | 1264 | ||
1282 | newinfo = xt_alloc_table_info(tmp.size); | 1265 | newinfo = xt_alloc_table_info(tmp.size); |
1283 | if (!newinfo) | 1266 | if (!newinfo) |
1284 | return -ENOMEM; | 1267 | return -ENOMEM; |
1285 | 1268 | ||
1286 | /* choose the copy that is on our node/cpu */ | 1269 | /* choose the copy that is on our node/cpu */ |
1287 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1270 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1288 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1271 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1289 | tmp.size) != 0) { | 1272 | tmp.size) != 0) { |
1290 | ret = -EFAULT; | 1273 | ret = -EFAULT; |
1291 | goto free_newinfo; | 1274 | goto free_newinfo; |
1292 | } | 1275 | } |
1293 | 1276 | ||
1294 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); | 1277 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); |
1295 | if (ret != 0) | 1278 | if (ret != 0) |
1296 | goto free_newinfo; | 1279 | goto free_newinfo; |
1297 | 1280 | ||
1298 | duprintf("Translated table\n"); | 1281 | duprintf("Translated table\n"); |
1299 | 1282 | ||
1300 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1283 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1301 | tmp.num_counters, tmp.counters); | 1284 | tmp.num_counters, tmp.counters); |
1302 | if (ret) | 1285 | if (ret) |
1303 | goto free_newinfo_untrans; | 1286 | goto free_newinfo_untrans; |
1304 | return 0; | 1287 | return 0; |
1305 | 1288 | ||
1306 | free_newinfo_untrans: | 1289 | free_newinfo_untrans: |
1307 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1290 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1308 | cleanup_entry(iter, net); | 1291 | cleanup_entry(iter, net); |
1309 | free_newinfo: | 1292 | free_newinfo: |
1310 | xt_free_table_info(newinfo); | 1293 | xt_free_table_info(newinfo); |
1311 | return ret; | 1294 | return ret; |
1312 | } | 1295 | } |
1313 | 1296 | ||
1314 | static int | 1297 | static int |
1315 | do_add_counters(struct net *net, const void __user *user, | 1298 | do_add_counters(struct net *net, const void __user *user, |
1316 | unsigned int len, int compat) | 1299 | unsigned int len, int compat) |
1317 | { | 1300 | { |
1318 | unsigned int i, curcpu; | 1301 | unsigned int i, curcpu; |
1319 | struct xt_counters_info tmp; | 1302 | struct xt_counters_info tmp; |
1320 | struct xt_counters *paddc; | 1303 | struct xt_counters *paddc; |
1321 | unsigned int num_counters; | 1304 | unsigned int num_counters; |
1322 | const char *name; | 1305 | const char *name; |
1323 | int size; | 1306 | int size; |
1324 | void *ptmp; | 1307 | void *ptmp; |
1325 | struct xt_table *t; | 1308 | struct xt_table *t; |
1326 | const struct xt_table_info *private; | 1309 | const struct xt_table_info *private; |
1327 | int ret = 0; | 1310 | int ret = 0; |
1328 | void *loc_cpu_entry; | 1311 | void *loc_cpu_entry; |
1329 | struct ipt_entry *iter; | 1312 | struct ipt_entry *iter; |
1330 | #ifdef CONFIG_COMPAT | 1313 | #ifdef CONFIG_COMPAT |
1331 | struct compat_xt_counters_info compat_tmp; | 1314 | struct compat_xt_counters_info compat_tmp; |
1332 | 1315 | ||
1333 | if (compat) { | 1316 | if (compat) { |
1334 | ptmp = &compat_tmp; | 1317 | ptmp = &compat_tmp; |
1335 | size = sizeof(struct compat_xt_counters_info); | 1318 | size = sizeof(struct compat_xt_counters_info); |
1336 | } else | 1319 | } else |
1337 | #endif | 1320 | #endif |
1338 | { | 1321 | { |
1339 | ptmp = &tmp; | 1322 | ptmp = &tmp; |
1340 | size = sizeof(struct xt_counters_info); | 1323 | size = sizeof(struct xt_counters_info); |
1341 | } | 1324 | } |
1342 | 1325 | ||
1343 | if (copy_from_user(ptmp, user, size) != 0) | 1326 | if (copy_from_user(ptmp, user, size) != 0) |
1344 | return -EFAULT; | 1327 | return -EFAULT; |
1345 | 1328 | ||
1346 | #ifdef CONFIG_COMPAT | 1329 | #ifdef CONFIG_COMPAT |
1347 | if (compat) { | 1330 | if (compat) { |
1348 | num_counters = compat_tmp.num_counters; | 1331 | num_counters = compat_tmp.num_counters; |
1349 | name = compat_tmp.name; | 1332 | name = compat_tmp.name; |
1350 | } else | 1333 | } else |
1351 | #endif | 1334 | #endif |
1352 | { | 1335 | { |
1353 | num_counters = tmp.num_counters; | 1336 | num_counters = tmp.num_counters; |
1354 | name = tmp.name; | 1337 | name = tmp.name; |
1355 | } | 1338 | } |
1356 | 1339 | ||
1357 | if (len != size + num_counters * sizeof(struct xt_counters)) | 1340 | if (len != size + num_counters * sizeof(struct xt_counters)) |
1358 | return -EINVAL; | 1341 | return -EINVAL; |
1359 | 1342 | ||
1360 | paddc = vmalloc(len - size); | 1343 | paddc = vmalloc(len - size); |
1361 | if (!paddc) | 1344 | if (!paddc) |
1362 | return -ENOMEM; | 1345 | return -ENOMEM; |
1363 | 1346 | ||
1364 | if (copy_from_user(paddc, user + size, len - size) != 0) { | 1347 | if (copy_from_user(paddc, user + size, len - size) != 0) { |
1365 | ret = -EFAULT; | 1348 | ret = -EFAULT; |
1366 | goto free; | 1349 | goto free; |
1367 | } | 1350 | } |
1368 | 1351 | ||
1369 | t = xt_find_table_lock(net, AF_INET, name); | 1352 | t = xt_find_table_lock(net, AF_INET, name); |
1370 | if (!t || IS_ERR(t)) { | 1353 | if (!t || IS_ERR(t)) { |
1371 | ret = t ? PTR_ERR(t) : -ENOENT; | 1354 | ret = t ? PTR_ERR(t) : -ENOENT; |
1372 | goto free; | 1355 | goto free; |
1373 | } | 1356 | } |
1374 | 1357 | ||
1375 | local_bh_disable(); | 1358 | local_bh_disable(); |
1376 | private = t->private; | 1359 | private = t->private; |
1377 | if (private->number != num_counters) { | 1360 | if (private->number != num_counters) { |
1378 | ret = -EINVAL; | 1361 | ret = -EINVAL; |
1379 | goto unlock_up_free; | 1362 | goto unlock_up_free; |
1380 | } | 1363 | } |
1381 | 1364 | ||
1382 | i = 0; | 1365 | i = 0; |
1383 | /* Choose the copy that is on our node */ | 1366 | /* Choose the copy that is on our node */ |
1384 | curcpu = smp_processor_id(); | 1367 | curcpu = smp_processor_id(); |
1385 | loc_cpu_entry = private->entries[curcpu]; | 1368 | loc_cpu_entry = private->entries[curcpu]; |
1386 | xt_info_wrlock(curcpu); | 1369 | xt_info_wrlock(curcpu); |
1387 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { | 1370 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { |
1388 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); | 1371 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); |
1389 | ++i; | 1372 | ++i; |
1390 | } | 1373 | } |
1391 | xt_info_wrunlock(curcpu); | 1374 | xt_info_wrunlock(curcpu); |
1392 | unlock_up_free: | 1375 | unlock_up_free: |
1393 | local_bh_enable(); | 1376 | local_bh_enable(); |
1394 | xt_table_unlock(t); | 1377 | xt_table_unlock(t); |
1395 | module_put(t->me); | 1378 | module_put(t->me); |
1396 | free: | 1379 | free: |
1397 | vfree(paddc); | 1380 | vfree(paddc); |
1398 | 1381 | ||
1399 | return ret; | 1382 | return ret; |
1400 | } | 1383 | } |
1401 | 1384 | ||
1402 | #ifdef CONFIG_COMPAT | 1385 | #ifdef CONFIG_COMPAT |
1403 | struct compat_ipt_replace { | 1386 | struct compat_ipt_replace { |
1404 | char name[XT_TABLE_MAXNAMELEN]; | 1387 | char name[XT_TABLE_MAXNAMELEN]; |
1405 | u32 valid_hooks; | 1388 | u32 valid_hooks; |
1406 | u32 num_entries; | 1389 | u32 num_entries; |
1407 | u32 size; | 1390 | u32 size; |
1408 | u32 hook_entry[NF_INET_NUMHOOKS]; | 1391 | u32 hook_entry[NF_INET_NUMHOOKS]; |
1409 | u32 underflow[NF_INET_NUMHOOKS]; | 1392 | u32 underflow[NF_INET_NUMHOOKS]; |
1410 | u32 num_counters; | 1393 | u32 num_counters; |
1411 | compat_uptr_t counters; /* struct xt_counters * */ | 1394 | compat_uptr_t counters; /* struct xt_counters * */ |
1412 | struct compat_ipt_entry entries[0]; | 1395 | struct compat_ipt_entry entries[0]; |
1413 | }; | 1396 | }; |
1414 | 1397 | ||
1415 | static int | 1398 | static int |
1416 | compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, | 1399 | compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, |
1417 | unsigned int *size, struct xt_counters *counters, | 1400 | unsigned int *size, struct xt_counters *counters, |
1418 | unsigned int i) | 1401 | unsigned int i) |
1419 | { | 1402 | { |
1420 | struct xt_entry_target *t; | 1403 | struct xt_entry_target *t; |
1421 | struct compat_ipt_entry __user *ce; | 1404 | struct compat_ipt_entry __user *ce; |
1422 | u_int16_t target_offset, next_offset; | 1405 | u_int16_t target_offset, next_offset; |
1423 | compat_uint_t origsize; | 1406 | compat_uint_t origsize; |
1424 | const struct xt_entry_match *ematch; | 1407 | const struct xt_entry_match *ematch; |
1425 | int ret = 0; | 1408 | int ret = 0; |
1426 | 1409 | ||
1427 | origsize = *size; | 1410 | origsize = *size; |
1428 | ce = (struct compat_ipt_entry __user *)*dstptr; | 1411 | ce = (struct compat_ipt_entry __user *)*dstptr; |
1429 | if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || | 1412 | if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || |
1430 | copy_to_user(&ce->counters, &counters[i], | 1413 | copy_to_user(&ce->counters, &counters[i], |
1431 | sizeof(counters[i])) != 0) | 1414 | sizeof(counters[i])) != 0) |
1432 | return -EFAULT; | 1415 | return -EFAULT; |
1433 | 1416 | ||
1434 | *dstptr += sizeof(struct compat_ipt_entry); | 1417 | *dstptr += sizeof(struct compat_ipt_entry); |
1435 | *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); | 1418 | *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); |
1436 | 1419 | ||
1437 | xt_ematch_foreach(ematch, e) { | 1420 | xt_ematch_foreach(ematch, e) { |
1438 | ret = xt_compat_match_to_user(ematch, dstptr, size); | 1421 | ret = xt_compat_match_to_user(ematch, dstptr, size); |
1439 | if (ret != 0) | 1422 | if (ret != 0) |
1440 | return ret; | 1423 | return ret; |
1441 | } | 1424 | } |
1442 | target_offset = e->target_offset - (origsize - *size); | 1425 | target_offset = e->target_offset - (origsize - *size); |
1443 | t = ipt_get_target(e); | 1426 | t = ipt_get_target(e); |
1444 | ret = xt_compat_target_to_user(t, dstptr, size); | 1427 | ret = xt_compat_target_to_user(t, dstptr, size); |
1445 | if (ret) | 1428 | if (ret) |
1446 | return ret; | 1429 | return ret; |
1447 | next_offset = e->next_offset - (origsize - *size); | 1430 | next_offset = e->next_offset - (origsize - *size); |
1448 | if (put_user(target_offset, &ce->target_offset) != 0 || | 1431 | if (put_user(target_offset, &ce->target_offset) != 0 || |
1449 | put_user(next_offset, &ce->next_offset) != 0) | 1432 | put_user(next_offset, &ce->next_offset) != 0) |
1450 | return -EFAULT; | 1433 | return -EFAULT; |
1451 | return 0; | 1434 | return 0; |
1452 | } | 1435 | } |
1453 | 1436 | ||
1454 | static int | 1437 | static int |
1455 | compat_find_calc_match(struct xt_entry_match *m, | 1438 | compat_find_calc_match(struct xt_entry_match *m, |
1456 | const char *name, | 1439 | const char *name, |
1457 | const struct ipt_ip *ip, | 1440 | const struct ipt_ip *ip, |
1458 | unsigned int hookmask, | 1441 | unsigned int hookmask, |
1459 | int *size) | 1442 | int *size) |
1460 | { | 1443 | { |
1461 | struct xt_match *match; | 1444 | struct xt_match *match; |
1462 | 1445 | ||
1463 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, | 1446 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
1464 | m->u.user.revision); | 1447 | m->u.user.revision); |
1465 | if (IS_ERR(match)) { | 1448 | if (IS_ERR(match)) { |
1466 | duprintf("compat_check_calc_match: `%s' not found\n", | 1449 | duprintf("compat_check_calc_match: `%s' not found\n", |
1467 | m->u.user.name); | 1450 | m->u.user.name); |
1468 | return PTR_ERR(match); | 1451 | return PTR_ERR(match); |
1469 | } | 1452 | } |
1470 | m->u.kernel.match = match; | 1453 | m->u.kernel.match = match; |
1471 | *size += xt_compat_match_offset(match); | 1454 | *size += xt_compat_match_offset(match); |
1472 | return 0; | 1455 | return 0; |
1473 | } | 1456 | } |
1474 | 1457 | ||
1475 | static void compat_release_entry(struct compat_ipt_entry *e) | 1458 | static void compat_release_entry(struct compat_ipt_entry *e) |
1476 | { | 1459 | { |
1477 | struct xt_entry_target *t; | 1460 | struct xt_entry_target *t; |
1478 | struct xt_entry_match *ematch; | 1461 | struct xt_entry_match *ematch; |
1479 | 1462 | ||
1480 | /* Cleanup all matches */ | 1463 | /* Cleanup all matches */ |
1481 | xt_ematch_foreach(ematch, e) | 1464 | xt_ematch_foreach(ematch, e) |
1482 | module_put(ematch->u.kernel.match->me); | 1465 | module_put(ematch->u.kernel.match->me); |
1483 | t = compat_ipt_get_target(e); | 1466 | t = compat_ipt_get_target(e); |
1484 | module_put(t->u.kernel.target->me); | 1467 | module_put(t->u.kernel.target->me); |
1485 | } | 1468 | } |
1486 | 1469 | ||
1487 | static int | 1470 | static int |
1488 | check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | 1471 | check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, |
1489 | struct xt_table_info *newinfo, | 1472 | struct xt_table_info *newinfo, |
1490 | unsigned int *size, | 1473 | unsigned int *size, |
1491 | const unsigned char *base, | 1474 | const unsigned char *base, |
1492 | const unsigned char *limit, | 1475 | const unsigned char *limit, |
1493 | const unsigned int *hook_entries, | 1476 | const unsigned int *hook_entries, |
1494 | const unsigned int *underflows, | 1477 | const unsigned int *underflows, |
1495 | const char *name) | 1478 | const char *name) |
1496 | { | 1479 | { |
1497 | struct xt_entry_match *ematch; | 1480 | struct xt_entry_match *ematch; |
1498 | struct xt_entry_target *t; | 1481 | struct xt_entry_target *t; |
1499 | struct xt_target *target; | 1482 | struct xt_target *target; |
1500 | unsigned int entry_offset; | 1483 | unsigned int entry_offset; |
1501 | unsigned int j; | 1484 | unsigned int j; |
1502 | int ret, off, h; | 1485 | int ret, off, h; |
1503 | 1486 | ||
1504 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1487 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1505 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || | 1488 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || |
1506 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { | 1489 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { |
1507 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1490 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1508 | return -EINVAL; | 1491 | return -EINVAL; |
1509 | } | 1492 | } |
1510 | 1493 | ||
1511 | if (e->next_offset < sizeof(struct compat_ipt_entry) + | 1494 | if (e->next_offset < sizeof(struct compat_ipt_entry) + |
1512 | sizeof(struct compat_xt_entry_target)) { | 1495 | sizeof(struct compat_xt_entry_target)) { |
1513 | duprintf("checking: element %p size %u\n", | 1496 | duprintf("checking: element %p size %u\n", |
1514 | e, e->next_offset); | 1497 | e, e->next_offset); |
1515 | return -EINVAL; | 1498 | return -EINVAL; |
1516 | } | 1499 | } |
1517 | 1500 | ||
1518 | /* For purposes of check_entry casting the compat entry is fine */ | 1501 | /* For purposes of check_entry casting the compat entry is fine */ |
1519 | ret = check_entry((struct ipt_entry *)e, name); | 1502 | ret = check_entry((struct ipt_entry *)e, name); |
1520 | if (ret) | 1503 | if (ret) |
1521 | return ret; | 1504 | return ret; |
1522 | 1505 | ||
1523 | off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); | 1506 | off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); |
1524 | entry_offset = (void *)e - (void *)base; | 1507 | entry_offset = (void *)e - (void *)base; |
1525 | j = 0; | 1508 | j = 0; |
1526 | xt_ematch_foreach(ematch, e) { | 1509 | xt_ematch_foreach(ematch, e) { |
1527 | ret = compat_find_calc_match(ematch, name, | 1510 | ret = compat_find_calc_match(ematch, name, |
1528 | &e->ip, e->comefrom, &off); | 1511 | &e->ip, e->comefrom, &off); |
1529 | if (ret != 0) | 1512 | if (ret != 0) |
1530 | goto release_matches; | 1513 | goto release_matches; |
1531 | ++j; | 1514 | ++j; |
1532 | } | 1515 | } |
1533 | 1516 | ||
1534 | t = compat_ipt_get_target(e); | 1517 | t = compat_ipt_get_target(e); |
1535 | target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, | 1518 | target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, |
1536 | t->u.user.revision); | 1519 | t->u.user.revision); |
1537 | if (IS_ERR(target)) { | 1520 | if (IS_ERR(target)) { |
1538 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", | 1521 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
1539 | t->u.user.name); | 1522 | t->u.user.name); |
1540 | ret = PTR_ERR(target); | 1523 | ret = PTR_ERR(target); |
1541 | goto release_matches; | 1524 | goto release_matches; |
1542 | } | 1525 | } |
1543 | t->u.kernel.target = target; | 1526 | t->u.kernel.target = target; |
1544 | 1527 | ||
1545 | off += xt_compat_target_offset(target); | 1528 | off += xt_compat_target_offset(target); |
1546 | *size += off; | 1529 | *size += off; |
1547 | ret = xt_compat_add_offset(AF_INET, entry_offset, off); | 1530 | ret = xt_compat_add_offset(AF_INET, entry_offset, off); |
1548 | if (ret) | 1531 | if (ret) |
1549 | goto out; | 1532 | goto out; |
1550 | 1533 | ||
1551 | /* Check hooks & underflows */ | 1534 | /* Check hooks & underflows */ |
1552 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 1535 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
1553 | if ((unsigned char *)e - base == hook_entries[h]) | 1536 | if ((unsigned char *)e - base == hook_entries[h]) |
1554 | newinfo->hook_entry[h] = hook_entries[h]; | 1537 | newinfo->hook_entry[h] = hook_entries[h]; |
1555 | if ((unsigned char *)e - base == underflows[h]) | 1538 | if ((unsigned char *)e - base == underflows[h]) |
1556 | newinfo->underflow[h] = underflows[h]; | 1539 | newinfo->underflow[h] = underflows[h]; |
1557 | } | 1540 | } |
1558 | 1541 | ||
1559 | /* Clear counters and comefrom */ | 1542 | /* Clear counters and comefrom */ |
1560 | memset(&e->counters, 0, sizeof(e->counters)); | 1543 | memset(&e->counters, 0, sizeof(e->counters)); |
1561 | e->comefrom = 0; | 1544 | e->comefrom = 0; |
1562 | return 0; | 1545 | return 0; |
1563 | 1546 | ||
1564 | out: | 1547 | out: |
1565 | module_put(t->u.kernel.target->me); | 1548 | module_put(t->u.kernel.target->me); |
1566 | release_matches: | 1549 | release_matches: |
1567 | xt_ematch_foreach(ematch, e) { | 1550 | xt_ematch_foreach(ematch, e) { |
1568 | if (j-- == 0) | 1551 | if (j-- == 0) |
1569 | break; | 1552 | break; |
1570 | module_put(ematch->u.kernel.match->me); | 1553 | module_put(ematch->u.kernel.match->me); |
1571 | } | 1554 | } |
1572 | return ret; | 1555 | return ret; |
1573 | } | 1556 | } |
1574 | 1557 | ||
1575 | static int | 1558 | static int |
1576 | compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, | 1559 | compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, |
1577 | unsigned int *size, const char *name, | 1560 | unsigned int *size, const char *name, |
1578 | struct xt_table_info *newinfo, unsigned char *base) | 1561 | struct xt_table_info *newinfo, unsigned char *base) |
1579 | { | 1562 | { |
1580 | struct xt_entry_target *t; | 1563 | struct xt_entry_target *t; |
1581 | struct xt_target *target; | 1564 | struct xt_target *target; |
1582 | struct ipt_entry *de; | 1565 | struct ipt_entry *de; |
1583 | unsigned int origsize; | 1566 | unsigned int origsize; |
1584 | int ret, h; | 1567 | int ret, h; |
1585 | struct xt_entry_match *ematch; | 1568 | struct xt_entry_match *ematch; |
1586 | 1569 | ||
1587 | ret = 0; | 1570 | ret = 0; |
1588 | origsize = *size; | 1571 | origsize = *size; |
1589 | de = (struct ipt_entry *)*dstptr; | 1572 | de = (struct ipt_entry *)*dstptr; |
1590 | memcpy(de, e, sizeof(struct ipt_entry)); | 1573 | memcpy(de, e, sizeof(struct ipt_entry)); |
1591 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); | 1574 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); |
1592 | 1575 | ||
1593 | *dstptr += sizeof(struct ipt_entry); | 1576 | *dstptr += sizeof(struct ipt_entry); |
1594 | *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); | 1577 | *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); |
1595 | 1578 | ||
1596 | xt_ematch_foreach(ematch, e) { | 1579 | xt_ematch_foreach(ematch, e) { |
1597 | ret = xt_compat_match_from_user(ematch, dstptr, size); | 1580 | ret = xt_compat_match_from_user(ematch, dstptr, size); |
1598 | if (ret != 0) | 1581 | if (ret != 0) |
1599 | return ret; | 1582 | return ret; |
1600 | } | 1583 | } |
1601 | de->target_offset = e->target_offset - (origsize - *size); | 1584 | de->target_offset = e->target_offset - (origsize - *size); |
1602 | t = compat_ipt_get_target(e); | 1585 | t = compat_ipt_get_target(e); |
1603 | target = t->u.kernel.target; | 1586 | target = t->u.kernel.target; |
1604 | xt_compat_target_from_user(t, dstptr, size); | 1587 | xt_compat_target_from_user(t, dstptr, size); |
1605 | 1588 | ||
1606 | de->next_offset = e->next_offset - (origsize - *size); | 1589 | de->next_offset = e->next_offset - (origsize - *size); |
1607 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 1590 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
1608 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) | 1591 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) |
1609 | newinfo->hook_entry[h] -= origsize - *size; | 1592 | newinfo->hook_entry[h] -= origsize - *size; |
1610 | if ((unsigned char *)de - base < newinfo->underflow[h]) | 1593 | if ((unsigned char *)de - base < newinfo->underflow[h]) |
1611 | newinfo->underflow[h] -= origsize - *size; | 1594 | newinfo->underflow[h] -= origsize - *size; |
1612 | } | 1595 | } |
1613 | return ret; | 1596 | return ret; |
1614 | } | 1597 | } |
1615 | 1598 | ||
1616 | static int | 1599 | static int |
1617 | compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) | 1600 | compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) |
1618 | { | 1601 | { |
1619 | struct xt_entry_match *ematch; | 1602 | struct xt_entry_match *ematch; |
1620 | struct xt_mtchk_param mtpar; | 1603 | struct xt_mtchk_param mtpar; |
1621 | unsigned int j; | 1604 | unsigned int j; |
1622 | int ret = 0; | 1605 | int ret = 0; |
1623 | 1606 | ||
1624 | j = 0; | 1607 | j = 0; |
1625 | mtpar.net = net; | 1608 | mtpar.net = net; |
1626 | mtpar.table = name; | 1609 | mtpar.table = name; |
1627 | mtpar.entryinfo = &e->ip; | 1610 | mtpar.entryinfo = &e->ip; |
1628 | mtpar.hook_mask = e->comefrom; | 1611 | mtpar.hook_mask = e->comefrom; |
1629 | mtpar.family = NFPROTO_IPV4; | 1612 | mtpar.family = NFPROTO_IPV4; |
1630 | xt_ematch_foreach(ematch, e) { | 1613 | xt_ematch_foreach(ematch, e) { |
1631 | ret = check_match(ematch, &mtpar); | 1614 | ret = check_match(ematch, &mtpar); |
1632 | if (ret != 0) | 1615 | if (ret != 0) |
1633 | goto cleanup_matches; | 1616 | goto cleanup_matches; |
1634 | ++j; | 1617 | ++j; |
1635 | } | 1618 | } |
1636 | 1619 | ||
1637 | ret = check_target(e, net, name); | 1620 | ret = check_target(e, net, name); |
1638 | if (ret) | 1621 | if (ret) |
1639 | goto cleanup_matches; | 1622 | goto cleanup_matches; |
1640 | return 0; | 1623 | return 0; |
1641 | 1624 | ||
1642 | cleanup_matches: | 1625 | cleanup_matches: |
1643 | xt_ematch_foreach(ematch, e) { | 1626 | xt_ematch_foreach(ematch, e) { |
1644 | if (j-- == 0) | 1627 | if (j-- == 0) |
1645 | break; | 1628 | break; |
1646 | cleanup_match(ematch, net); | 1629 | cleanup_match(ematch, net); |
1647 | } | 1630 | } |
1648 | return ret; | 1631 | return ret; |
1649 | } | 1632 | } |
1650 | 1633 | ||
1651 | static int | 1634 | static int |
1652 | translate_compat_table(struct net *net, | 1635 | translate_compat_table(struct net *net, |
1653 | const char *name, | 1636 | const char *name, |
1654 | unsigned int valid_hooks, | 1637 | unsigned int valid_hooks, |
1655 | struct xt_table_info **pinfo, | 1638 | struct xt_table_info **pinfo, |
1656 | void **pentry0, | 1639 | void **pentry0, |
1657 | unsigned int total_size, | 1640 | unsigned int total_size, |
1658 | unsigned int number, | 1641 | unsigned int number, |
1659 | unsigned int *hook_entries, | 1642 | unsigned int *hook_entries, |
1660 | unsigned int *underflows) | 1643 | unsigned int *underflows) |
1661 | { | 1644 | { |
1662 | unsigned int i, j; | 1645 | unsigned int i, j; |
1663 | struct xt_table_info *newinfo, *info; | 1646 | struct xt_table_info *newinfo, *info; |
1664 | void *pos, *entry0, *entry1; | 1647 | void *pos, *entry0, *entry1; |
1665 | struct compat_ipt_entry *iter0; | 1648 | struct compat_ipt_entry *iter0; |
1666 | struct ipt_entry *iter1; | 1649 | struct ipt_entry *iter1; |
1667 | unsigned int size; | 1650 | unsigned int size; |
1668 | int ret; | 1651 | int ret; |
1669 | 1652 | ||
1670 | info = *pinfo; | 1653 | info = *pinfo; |
1671 | entry0 = *pentry0; | 1654 | entry0 = *pentry0; |
1672 | size = total_size; | 1655 | size = total_size; |
1673 | info->number = number; | 1656 | info->number = number; |
1674 | 1657 | ||
1675 | /* Init all hooks to impossible value. */ | 1658 | /* Init all hooks to impossible value. */ |
1676 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1659 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1677 | info->hook_entry[i] = 0xFFFFFFFF; | 1660 | info->hook_entry[i] = 0xFFFFFFFF; |
1678 | info->underflow[i] = 0xFFFFFFFF; | 1661 | info->underflow[i] = 0xFFFFFFFF; |
1679 | } | 1662 | } |
1680 | 1663 | ||
1681 | duprintf("translate_compat_table: size %u\n", info->size); | 1664 | duprintf("translate_compat_table: size %u\n", info->size); |
1682 | j = 0; | 1665 | j = 0; |
1683 | xt_compat_lock(AF_INET); | 1666 | xt_compat_lock(AF_INET); |
1684 | /* Walk through entries, checking offsets. */ | 1667 | /* Walk through entries, checking offsets. */ |
1685 | xt_entry_foreach(iter0, entry0, total_size) { | 1668 | xt_entry_foreach(iter0, entry0, total_size) { |
1686 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, | 1669 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, |
1687 | entry0, | 1670 | entry0, |
1688 | entry0 + total_size, | 1671 | entry0 + total_size, |
1689 | hook_entries, | 1672 | hook_entries, |
1690 | underflows, | 1673 | underflows, |
1691 | name); | 1674 | name); |
1692 | if (ret != 0) | 1675 | if (ret != 0) |
1693 | goto out_unlock; | 1676 | goto out_unlock; |
1694 | ++j; | 1677 | ++j; |
1695 | } | 1678 | } |
1696 | 1679 | ||
1697 | ret = -EINVAL; | 1680 | ret = -EINVAL; |
1698 | if (j != number) { | 1681 | if (j != number) { |
1699 | duprintf("translate_compat_table: %u not %u entries\n", | 1682 | duprintf("translate_compat_table: %u not %u entries\n", |
1700 | j, number); | 1683 | j, number); |
1701 | goto out_unlock; | 1684 | goto out_unlock; |
1702 | } | 1685 | } |
1703 | 1686 | ||
1704 | /* Check hooks all assigned */ | 1687 | /* Check hooks all assigned */ |
1705 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1688 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1706 | /* Only hooks which are valid */ | 1689 | /* Only hooks which are valid */ |
1707 | if (!(valid_hooks & (1 << i))) | 1690 | if (!(valid_hooks & (1 << i))) |
1708 | continue; | 1691 | continue; |
1709 | if (info->hook_entry[i] == 0xFFFFFFFF) { | 1692 | if (info->hook_entry[i] == 0xFFFFFFFF) { |
1710 | duprintf("Invalid hook entry %u %u\n", | 1693 | duprintf("Invalid hook entry %u %u\n", |
1711 | i, hook_entries[i]); | 1694 | i, hook_entries[i]); |
1712 | goto out_unlock; | 1695 | goto out_unlock; |
1713 | } | 1696 | } |
1714 | if (info->underflow[i] == 0xFFFFFFFF) { | 1697 | if (info->underflow[i] == 0xFFFFFFFF) { |
1715 | duprintf("Invalid underflow %u %u\n", | 1698 | duprintf("Invalid underflow %u %u\n", |
1716 | i, underflows[i]); | 1699 | i, underflows[i]); |
1717 | goto out_unlock; | 1700 | goto out_unlock; |
1718 | } | 1701 | } |
1719 | } | 1702 | } |
1720 | 1703 | ||
1721 | ret = -ENOMEM; | 1704 | ret = -ENOMEM; |
1722 | newinfo = xt_alloc_table_info(size); | 1705 | newinfo = xt_alloc_table_info(size); |
1723 | if (!newinfo) | 1706 | if (!newinfo) |
1724 | goto out_unlock; | 1707 | goto out_unlock; |
1725 | 1708 | ||
1726 | newinfo->number = number; | 1709 | newinfo->number = number; |
1727 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1710 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1728 | newinfo->hook_entry[i] = info->hook_entry[i]; | 1711 | newinfo->hook_entry[i] = info->hook_entry[i]; |
1729 | newinfo->underflow[i] = info->underflow[i]; | 1712 | newinfo->underflow[i] = info->underflow[i]; |
1730 | } | 1713 | } |
1731 | entry1 = newinfo->entries[raw_smp_processor_id()]; | 1714 | entry1 = newinfo->entries[raw_smp_processor_id()]; |
1732 | pos = entry1; | 1715 | pos = entry1; |
1733 | size = total_size; | 1716 | size = total_size; |
1734 | xt_entry_foreach(iter0, entry0, total_size) { | 1717 | xt_entry_foreach(iter0, entry0, total_size) { |
1735 | ret = compat_copy_entry_from_user(iter0, &pos, &size, | 1718 | ret = compat_copy_entry_from_user(iter0, &pos, &size, |
1736 | name, newinfo, entry1); | 1719 | name, newinfo, entry1); |
1737 | if (ret != 0) | 1720 | if (ret != 0) |
1738 | break; | 1721 | break; |
1739 | } | 1722 | } |
1740 | xt_compat_flush_offsets(AF_INET); | 1723 | xt_compat_flush_offsets(AF_INET); |
1741 | xt_compat_unlock(AF_INET); | 1724 | xt_compat_unlock(AF_INET); |
1742 | if (ret) | 1725 | if (ret) |
1743 | goto free_newinfo; | 1726 | goto free_newinfo; |
1744 | 1727 | ||
1745 | ret = -ELOOP; | 1728 | ret = -ELOOP; |
1746 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) | 1729 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) |
1747 | goto free_newinfo; | 1730 | goto free_newinfo; |
1748 | 1731 | ||
1749 | i = 0; | 1732 | i = 0; |
1750 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1733 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1751 | ret = compat_check_entry(iter1, net, name); | 1734 | ret = compat_check_entry(iter1, net, name); |
1752 | if (ret != 0) | 1735 | if (ret != 0) |
1753 | break; | 1736 | break; |
1754 | ++i; | 1737 | ++i; |
1755 | if (strcmp(ipt_get_target(iter1)->u.user.name, | 1738 | if (strcmp(ipt_get_target(iter1)->u.user.name, |
1756 | XT_ERROR_TARGET) == 0) | 1739 | XT_ERROR_TARGET) == 0) |
1757 | ++newinfo->stacksize; | 1740 | ++newinfo->stacksize; |
1758 | } | 1741 | } |
1759 | if (ret) { | 1742 | if (ret) { |
1760 | /* | 1743 | /* |
1761 | * The first i matches need cleanup_entry (calls ->destroy) | 1744 | * The first i matches need cleanup_entry (calls ->destroy) |
1762 | * because they had called ->check already. The other j-i | 1745 | * because they had called ->check already. The other j-i |
1763 | * entries need only release. | 1746 | * entries need only release. |
1764 | */ | 1747 | */ |
1765 | int skip = i; | 1748 | int skip = i; |
1766 | j -= i; | 1749 | j -= i; |
1767 | xt_entry_foreach(iter0, entry0, newinfo->size) { | 1750 | xt_entry_foreach(iter0, entry0, newinfo->size) { |
1768 | if (skip-- > 0) | 1751 | if (skip-- > 0) |
1769 | continue; | 1752 | continue; |
1770 | if (j-- == 0) | 1753 | if (j-- == 0) |
1771 | break; | 1754 | break; |
1772 | compat_release_entry(iter0); | 1755 | compat_release_entry(iter0); |
1773 | } | 1756 | } |
1774 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1757 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1775 | if (i-- == 0) | 1758 | if (i-- == 0) |
1776 | break; | 1759 | break; |
1777 | cleanup_entry(iter1, net); | 1760 | cleanup_entry(iter1, net); |
1778 | } | 1761 | } |
1779 | xt_free_table_info(newinfo); | 1762 | xt_free_table_info(newinfo); |
1780 | return ret; | 1763 | return ret; |
1781 | } | 1764 | } |
1782 | 1765 | ||
1783 | /* And one copy for every other CPU */ | 1766 | /* And one copy for every other CPU */ |
1784 | for_each_possible_cpu(i) | 1767 | for_each_possible_cpu(i) |
1785 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) | 1768 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) |
1786 | memcpy(newinfo->entries[i], entry1, newinfo->size); | 1769 | memcpy(newinfo->entries[i], entry1, newinfo->size); |
1787 | 1770 | ||
1788 | *pinfo = newinfo; | 1771 | *pinfo = newinfo; |
1789 | *pentry0 = entry1; | 1772 | *pentry0 = entry1; |
1790 | xt_free_table_info(info); | 1773 | xt_free_table_info(info); |
1791 | return 0; | 1774 | return 0; |
1792 | 1775 | ||
1793 | free_newinfo: | 1776 | free_newinfo: |
1794 | xt_free_table_info(newinfo); | 1777 | xt_free_table_info(newinfo); |
1795 | out: | 1778 | out: |
1796 | xt_entry_foreach(iter0, entry0, total_size) { | 1779 | xt_entry_foreach(iter0, entry0, total_size) { |
1797 | if (j-- == 0) | 1780 | if (j-- == 0) |
1798 | break; | 1781 | break; |
1799 | compat_release_entry(iter0); | 1782 | compat_release_entry(iter0); |
1800 | } | 1783 | } |
1801 | return ret; | 1784 | return ret; |
1802 | out_unlock: | 1785 | out_unlock: |
1803 | xt_compat_flush_offsets(AF_INET); | 1786 | xt_compat_flush_offsets(AF_INET); |
1804 | xt_compat_unlock(AF_INET); | 1787 | xt_compat_unlock(AF_INET); |
1805 | goto out; | 1788 | goto out; |
1806 | } | 1789 | } |
1807 | 1790 | ||
1808 | static int | 1791 | static int |
1809 | compat_do_replace(struct net *net, void __user *user, unsigned int len) | 1792 | compat_do_replace(struct net *net, void __user *user, unsigned int len) |
1810 | { | 1793 | { |
1811 | int ret; | 1794 | int ret; |
1812 | struct compat_ipt_replace tmp; | 1795 | struct compat_ipt_replace tmp; |
1813 | struct xt_table_info *newinfo; | 1796 | struct xt_table_info *newinfo; |
1814 | void *loc_cpu_entry; | 1797 | void *loc_cpu_entry; |
1815 | struct ipt_entry *iter; | 1798 | struct ipt_entry *iter; |
1816 | 1799 | ||
1817 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1800 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1818 | return -EFAULT; | 1801 | return -EFAULT; |
1819 | 1802 | ||
1820 | /* overflow check */ | 1803 | /* overflow check */ |
1821 | if (tmp.size >= INT_MAX / num_possible_cpus()) | 1804 | if (tmp.size >= INT_MAX / num_possible_cpus()) |
1822 | return -ENOMEM; | 1805 | return -ENOMEM; |
1823 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1806 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1824 | return -ENOMEM; | 1807 | return -ENOMEM; |
1825 | 1808 | ||
1826 | newinfo = xt_alloc_table_info(tmp.size); | 1809 | newinfo = xt_alloc_table_info(tmp.size); |
1827 | if (!newinfo) | 1810 | if (!newinfo) |
1828 | return -ENOMEM; | 1811 | return -ENOMEM; |
1829 | 1812 | ||
1830 | /* choose the copy that is on our node/cpu */ | 1813 | /* choose the copy that is on our node/cpu */ |
1831 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1814 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1832 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1815 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1833 | tmp.size) != 0) { | 1816 | tmp.size) != 0) { |
1834 | ret = -EFAULT; | 1817 | ret = -EFAULT; |
1835 | goto free_newinfo; | 1818 | goto free_newinfo; |
1836 | } | 1819 | } |
1837 | 1820 | ||
1838 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, | 1821 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, |
1839 | &newinfo, &loc_cpu_entry, tmp.size, | 1822 | &newinfo, &loc_cpu_entry, tmp.size, |
1840 | tmp.num_entries, tmp.hook_entry, | 1823 | tmp.num_entries, tmp.hook_entry, |
1841 | tmp.underflow); | 1824 | tmp.underflow); |
1842 | if (ret != 0) | 1825 | if (ret != 0) |
1843 | goto free_newinfo; | 1826 | goto free_newinfo; |
1844 | 1827 | ||
1845 | duprintf("compat_do_replace: Translated table\n"); | 1828 | duprintf("compat_do_replace: Translated table\n"); |
1846 | 1829 | ||
1847 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1830 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1848 | tmp.num_counters, compat_ptr(tmp.counters)); | 1831 | tmp.num_counters, compat_ptr(tmp.counters)); |
1849 | if (ret) | 1832 | if (ret) |
1850 | goto free_newinfo_untrans; | 1833 | goto free_newinfo_untrans; |
1851 | return 0; | 1834 | return 0; |
1852 | 1835 | ||
1853 | free_newinfo_untrans: | 1836 | free_newinfo_untrans: |
1854 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1837 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1855 | cleanup_entry(iter, net); | 1838 | cleanup_entry(iter, net); |
1856 | free_newinfo: | 1839 | free_newinfo: |
1857 | xt_free_table_info(newinfo); | 1840 | xt_free_table_info(newinfo); |
1858 | return ret; | 1841 | return ret; |
1859 | } | 1842 | } |
1860 | 1843 | ||
1861 | static int | 1844 | static int |
1862 | compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, | 1845 | compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, |
1863 | unsigned int len) | 1846 | unsigned int len) |
1864 | { | 1847 | { |
1865 | int ret; | 1848 | int ret; |
1866 | 1849 | ||
1867 | if (!capable(CAP_NET_ADMIN)) | 1850 | if (!capable(CAP_NET_ADMIN)) |
1868 | return -EPERM; | 1851 | return -EPERM; |
1869 | 1852 | ||
1870 | switch (cmd) { | 1853 | switch (cmd) { |
1871 | case IPT_SO_SET_REPLACE: | 1854 | case IPT_SO_SET_REPLACE: |
1872 | ret = compat_do_replace(sock_net(sk), user, len); | 1855 | ret = compat_do_replace(sock_net(sk), user, len); |
1873 | break; | 1856 | break; |
1874 | 1857 | ||
1875 | case IPT_SO_SET_ADD_COUNTERS: | 1858 | case IPT_SO_SET_ADD_COUNTERS: |
1876 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1859 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1877 | break; | 1860 | break; |
1878 | 1861 | ||
1879 | default: | 1862 | default: |
1880 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); | 1863 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); |
1881 | ret = -EINVAL; | 1864 | ret = -EINVAL; |
1882 | } | 1865 | } |
1883 | 1866 | ||
1884 | return ret; | 1867 | return ret; |
1885 | } | 1868 | } |
1886 | 1869 | ||
1887 | struct compat_ipt_get_entries { | 1870 | struct compat_ipt_get_entries { |
1888 | char name[XT_TABLE_MAXNAMELEN]; | 1871 | char name[XT_TABLE_MAXNAMELEN]; |
1889 | compat_uint_t size; | 1872 | compat_uint_t size; |
1890 | struct compat_ipt_entry entrytable[0]; | 1873 | struct compat_ipt_entry entrytable[0]; |
1891 | }; | 1874 | }; |
1892 | 1875 | ||
1893 | static int | 1876 | static int |
1894 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, | 1877 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, |
1895 | void __user *userptr) | 1878 | void __user *userptr) |
1896 | { | 1879 | { |
1897 | struct xt_counters *counters; | 1880 | struct xt_counters *counters; |
1898 | const struct xt_table_info *private = table->private; | 1881 | const struct xt_table_info *private = table->private; |
1899 | void __user *pos; | 1882 | void __user *pos; |
1900 | unsigned int size; | 1883 | unsigned int size; |
1901 | int ret = 0; | 1884 | int ret = 0; |
1902 | const void *loc_cpu_entry; | 1885 | const void *loc_cpu_entry; |
1903 | unsigned int i = 0; | 1886 | unsigned int i = 0; |
1904 | struct ipt_entry *iter; | 1887 | struct ipt_entry *iter; |
1905 | 1888 | ||
1906 | counters = alloc_counters(table); | 1889 | counters = alloc_counters(table); |
1907 | if (IS_ERR(counters)) | 1890 | if (IS_ERR(counters)) |
1908 | return PTR_ERR(counters); | 1891 | return PTR_ERR(counters); |
1909 | 1892 | ||
1910 | /* choose the copy that is on our node/cpu, ... | 1893 | /* choose the copy that is on our node/cpu, ... |
1911 | * This choice is lazy (because current thread is | 1894 | * This choice is lazy (because current thread is |
1912 | * allowed to migrate to another cpu) | 1895 | * allowed to migrate to another cpu) |
1913 | */ | 1896 | */ |
1914 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1897 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
1915 | pos = userptr; | 1898 | pos = userptr; |
1916 | size = total_size; | 1899 | size = total_size; |
1917 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { | 1900 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { |
1918 | ret = compat_copy_entry_to_user(iter, &pos, | 1901 | ret = compat_copy_entry_to_user(iter, &pos, |
1919 | &size, counters, i++); | 1902 | &size, counters, i++); |
1920 | if (ret != 0) | 1903 | if (ret != 0) |
1921 | break; | 1904 | break; |
1922 | } | 1905 | } |
1923 | 1906 | ||
1924 | vfree(counters); | 1907 | vfree(counters); |
1925 | return ret; | 1908 | return ret; |
1926 | } | 1909 | } |
1927 | 1910 | ||
1928 | static int | 1911 | static int |
1929 | compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, | 1912 | compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, |
1930 | int *len) | 1913 | int *len) |
1931 | { | 1914 | { |
1932 | int ret; | 1915 | int ret; |
1933 | struct compat_ipt_get_entries get; | 1916 | struct compat_ipt_get_entries get; |
1934 | struct xt_table *t; | 1917 | struct xt_table *t; |
1935 | 1918 | ||
1936 | if (*len < sizeof(get)) { | 1919 | if (*len < sizeof(get)) { |
1937 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); | 1920 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); |
1938 | return -EINVAL; | 1921 | return -EINVAL; |
1939 | } | 1922 | } |
1940 | 1923 | ||
1941 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1924 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1942 | return -EFAULT; | 1925 | return -EFAULT; |
1943 | 1926 | ||
1944 | if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { | 1927 | if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { |
1945 | duprintf("compat_get_entries: %u != %zu\n", | 1928 | duprintf("compat_get_entries: %u != %zu\n", |
1946 | *len, sizeof(get) + get.size); | 1929 | *len, sizeof(get) + get.size); |
1947 | return -EINVAL; | 1930 | return -EINVAL; |
1948 | } | 1931 | } |
1949 | 1932 | ||
1950 | xt_compat_lock(AF_INET); | 1933 | xt_compat_lock(AF_INET); |
1951 | t = xt_find_table_lock(net, AF_INET, get.name); | 1934 | t = xt_find_table_lock(net, AF_INET, get.name); |
1952 | if (t && !IS_ERR(t)) { | 1935 | if (t && !IS_ERR(t)) { |
1953 | const struct xt_table_info *private = t->private; | 1936 | const struct xt_table_info *private = t->private; |
1954 | struct xt_table_info info; | 1937 | struct xt_table_info info; |
1955 | duprintf("t->private->number = %u\n", private->number); | 1938 | duprintf("t->private->number = %u\n", private->number); |
1956 | ret = compat_table_info(private, &info); | 1939 | ret = compat_table_info(private, &info); |
1957 | if (!ret && get.size == info.size) { | 1940 | if (!ret && get.size == info.size) { |
1958 | ret = compat_copy_entries_to_user(private->size, | 1941 | ret = compat_copy_entries_to_user(private->size, |
1959 | t, uptr->entrytable); | 1942 | t, uptr->entrytable); |
1960 | } else if (!ret) { | 1943 | } else if (!ret) { |
1961 | duprintf("compat_get_entries: I've got %u not %u!\n", | 1944 | duprintf("compat_get_entries: I've got %u not %u!\n", |
1962 | private->size, get.size); | 1945 | private->size, get.size); |
1963 | ret = -EAGAIN; | 1946 | ret = -EAGAIN; |
1964 | } | 1947 | } |
1965 | xt_compat_flush_offsets(AF_INET); | 1948 | xt_compat_flush_offsets(AF_INET); |
1966 | module_put(t->me); | 1949 | module_put(t->me); |
1967 | xt_table_unlock(t); | 1950 | xt_table_unlock(t); |
1968 | } else | 1951 | } else |
1969 | ret = t ? PTR_ERR(t) : -ENOENT; | 1952 | ret = t ? PTR_ERR(t) : -ENOENT; |
1970 | 1953 | ||
1971 | xt_compat_unlock(AF_INET); | 1954 | xt_compat_unlock(AF_INET); |
1972 | return ret; | 1955 | return ret; |
1973 | } | 1956 | } |
1974 | 1957 | ||
1975 | static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); | 1958 | static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); |
1976 | 1959 | ||
1977 | static int | 1960 | static int |
1978 | compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1961 | compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1979 | { | 1962 | { |
1980 | int ret; | 1963 | int ret; |
1981 | 1964 | ||
1982 | if (!capable(CAP_NET_ADMIN)) | 1965 | if (!capable(CAP_NET_ADMIN)) |
1983 | return -EPERM; | 1966 | return -EPERM; |
1984 | 1967 | ||
1985 | switch (cmd) { | 1968 | switch (cmd) { |
1986 | case IPT_SO_GET_INFO: | 1969 | case IPT_SO_GET_INFO: |
1987 | ret = get_info(sock_net(sk), user, len, 1); | 1970 | ret = get_info(sock_net(sk), user, len, 1); |
1988 | break; | 1971 | break; |
1989 | case IPT_SO_GET_ENTRIES: | 1972 | case IPT_SO_GET_ENTRIES: |
1990 | ret = compat_get_entries(sock_net(sk), user, len); | 1973 | ret = compat_get_entries(sock_net(sk), user, len); |
1991 | break; | 1974 | break; |
1992 | default: | 1975 | default: |
1993 | ret = do_ipt_get_ctl(sk, cmd, user, len); | 1976 | ret = do_ipt_get_ctl(sk, cmd, user, len); |
1994 | } | 1977 | } |
1995 | return ret; | 1978 | return ret; |
1996 | } | 1979 | } |
1997 | #endif | 1980 | #endif |
1998 | 1981 | ||
1999 | static int | 1982 | static int |
2000 | do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 1983 | do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
2001 | { | 1984 | { |
2002 | int ret; | 1985 | int ret; |
2003 | 1986 | ||
2004 | if (!capable(CAP_NET_ADMIN)) | 1987 | if (!capable(CAP_NET_ADMIN)) |
2005 | return -EPERM; | 1988 | return -EPERM; |
2006 | 1989 | ||
2007 | switch (cmd) { | 1990 | switch (cmd) { |
2008 | case IPT_SO_SET_REPLACE: | 1991 | case IPT_SO_SET_REPLACE: |
2009 | ret = do_replace(sock_net(sk), user, len); | 1992 | ret = do_replace(sock_net(sk), user, len); |
2010 | break; | 1993 | break; |
2011 | 1994 | ||
2012 | case IPT_SO_SET_ADD_COUNTERS: | 1995 | case IPT_SO_SET_ADD_COUNTERS: |
2013 | ret = do_add_counters(sock_net(sk), user, len, 0); | 1996 | ret = do_add_counters(sock_net(sk), user, len, 0); |
2014 | break; | 1997 | break; |
2015 | 1998 | ||
2016 | default: | 1999 | default: |
2017 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); | 2000 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); |
2018 | ret = -EINVAL; | 2001 | ret = -EINVAL; |
2019 | } | 2002 | } |
2020 | 2003 | ||
2021 | return ret; | 2004 | return ret; |
2022 | } | 2005 | } |
2023 | 2006 | ||
2024 | static int | 2007 | static int |
2025 | do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 2008 | do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
2026 | { | 2009 | { |
2027 | int ret; | 2010 | int ret; |
2028 | 2011 | ||
2029 | if (!capable(CAP_NET_ADMIN)) | 2012 | if (!capable(CAP_NET_ADMIN)) |
2030 | return -EPERM; | 2013 | return -EPERM; |
2031 | 2014 | ||
2032 | switch (cmd) { | 2015 | switch (cmd) { |
2033 | case IPT_SO_GET_INFO: | 2016 | case IPT_SO_GET_INFO: |
2034 | ret = get_info(sock_net(sk), user, len, 0); | 2017 | ret = get_info(sock_net(sk), user, len, 0); |
2035 | break; | 2018 | break; |
2036 | 2019 | ||
2037 | case IPT_SO_GET_ENTRIES: | 2020 | case IPT_SO_GET_ENTRIES: |
2038 | ret = get_entries(sock_net(sk), user, len); | 2021 | ret = get_entries(sock_net(sk), user, len); |
2039 | break; | 2022 | break; |
2040 | 2023 | ||
2041 | case IPT_SO_GET_REVISION_MATCH: | 2024 | case IPT_SO_GET_REVISION_MATCH: |
2042 | case IPT_SO_GET_REVISION_TARGET: { | 2025 | case IPT_SO_GET_REVISION_TARGET: { |
2043 | struct xt_get_revision rev; | 2026 | struct xt_get_revision rev; |
2044 | int target; | 2027 | int target; |
2045 | 2028 | ||
2046 | if (*len != sizeof(rev)) { | 2029 | if (*len != sizeof(rev)) { |
2047 | ret = -EINVAL; | 2030 | ret = -EINVAL; |
2048 | break; | 2031 | break; |
2049 | } | 2032 | } |
2050 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 2033 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
2051 | ret = -EFAULT; | 2034 | ret = -EFAULT; |
2052 | break; | 2035 | break; |
2053 | } | 2036 | } |
2054 | 2037 | ||
2055 | if (cmd == IPT_SO_GET_REVISION_TARGET) | 2038 | if (cmd == IPT_SO_GET_REVISION_TARGET) |
2056 | target = 1; | 2039 | target = 1; |
2057 | else | 2040 | else |
2058 | target = 0; | 2041 | target = 0; |
2059 | 2042 | ||
2060 | try_then_request_module(xt_find_revision(AF_INET, rev.name, | 2043 | try_then_request_module(xt_find_revision(AF_INET, rev.name, |
2061 | rev.revision, | 2044 | rev.revision, |
2062 | target, &ret), | 2045 | target, &ret), |
2063 | "ipt_%s", rev.name); | 2046 | "ipt_%s", rev.name); |
2064 | break; | 2047 | break; |
2065 | } | 2048 | } |
2066 | 2049 | ||
2067 | default: | 2050 | default: |
2068 | duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); | 2051 | duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); |
2069 | ret = -EINVAL; | 2052 | ret = -EINVAL; |
2070 | } | 2053 | } |
2071 | 2054 | ||
2072 | return ret; | 2055 | return ret; |
2073 | } | 2056 | } |
2074 | 2057 | ||
2075 | struct xt_table *ipt_register_table(struct net *net, | 2058 | struct xt_table *ipt_register_table(struct net *net, |
2076 | const struct xt_table *table, | 2059 | const struct xt_table *table, |
2077 | const struct ipt_replace *repl) | 2060 | const struct ipt_replace *repl) |
2078 | { | 2061 | { |
2079 | int ret; | 2062 | int ret; |
2080 | struct xt_table_info *newinfo; | 2063 | struct xt_table_info *newinfo; |
2081 | struct xt_table_info bootstrap = {0}; | 2064 | struct xt_table_info bootstrap = {0}; |
2082 | void *loc_cpu_entry; | 2065 | void *loc_cpu_entry; |
2083 | struct xt_table *new_table; | 2066 | struct xt_table *new_table; |
2084 | 2067 | ||
2085 | newinfo = xt_alloc_table_info(repl->size); | 2068 | newinfo = xt_alloc_table_info(repl->size); |
2086 | if (!newinfo) { | 2069 | if (!newinfo) { |
2087 | ret = -ENOMEM; | 2070 | ret = -ENOMEM; |
2088 | goto out; | 2071 | goto out; |
2089 | } | 2072 | } |
2090 | 2073 | ||
2091 | /* choose the copy on our node/cpu, but dont care about preemption */ | 2074 | /* choose the copy on our node/cpu, but dont care about preemption */ |
2092 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 2075 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
2093 | memcpy(loc_cpu_entry, repl->entries, repl->size); | 2076 | memcpy(loc_cpu_entry, repl->entries, repl->size); |
2094 | 2077 | ||
2095 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); | 2078 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); |
2096 | if (ret != 0) | 2079 | if (ret != 0) |
2097 | goto out_free; | 2080 | goto out_free; |
2098 | 2081 | ||
2099 | new_table = xt_register_table(net, table, &bootstrap, newinfo); | 2082 | new_table = xt_register_table(net, table, &bootstrap, newinfo); |
2100 | if (IS_ERR(new_table)) { | 2083 | if (IS_ERR(new_table)) { |
2101 | ret = PTR_ERR(new_table); | 2084 | ret = PTR_ERR(new_table); |
2102 | goto out_free; | 2085 | goto out_free; |
2103 | } | 2086 | } |
2104 | 2087 | ||
2105 | return new_table; | 2088 | return new_table; |
2106 | 2089 | ||
2107 | out_free: | 2090 | out_free: |
2108 | xt_free_table_info(newinfo); | 2091 | xt_free_table_info(newinfo); |
2109 | out: | 2092 | out: |
2110 | return ERR_PTR(ret); | 2093 | return ERR_PTR(ret); |
2111 | } | 2094 | } |
2112 | 2095 | ||
2113 | void ipt_unregister_table(struct net *net, struct xt_table *table) | 2096 | void ipt_unregister_table(struct net *net, struct xt_table *table) |
2114 | { | 2097 | { |
2115 | struct xt_table_info *private; | 2098 | struct xt_table_info *private; |
2116 | void *loc_cpu_entry; | 2099 | void *loc_cpu_entry; |
2117 | struct module *table_owner = table->me; | 2100 | struct module *table_owner = table->me; |
2118 | struct ipt_entry *iter; | 2101 | struct ipt_entry *iter; |
2119 | 2102 | ||
2120 | private = xt_unregister_table(table); | 2103 | private = xt_unregister_table(table); |
2121 | 2104 | ||
2122 | /* Decrease module usage counts and free resources */ | 2105 | /* Decrease module usage counts and free resources */ |
2123 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 2106 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
2124 | xt_entry_foreach(iter, loc_cpu_entry, private->size) | 2107 | xt_entry_foreach(iter, loc_cpu_entry, private->size) |
2125 | cleanup_entry(iter, net); | 2108 | cleanup_entry(iter, net); |
2126 | if (private->number > private->initial_entries) | 2109 | if (private->number > private->initial_entries) |
2127 | module_put(table_owner); | 2110 | module_put(table_owner); |
2128 | xt_free_table_info(private); | 2111 | xt_free_table_info(private); |
2129 | } | 2112 | } |
2130 | 2113 | ||
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF acts as a wildcard matching any type/code. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool hit;

	if (test_type == 0xFF)
		hit = true;
	else
		hit = type == test_type &&
		      code >= min_code && code <= max_code;

	return hit ^ invert;
}
2141 | 2124 | ||
2142 | static bool | 2125 | static bool |
2143 | icmp_match(const struct sk_buff *skb, struct xt_action_param *par) | 2126 | icmp_match(const struct sk_buff *skb, struct xt_action_param *par) |
2144 | { | 2127 | { |
2145 | const struct icmphdr *ic; | 2128 | const struct icmphdr *ic; |
2146 | struct icmphdr _icmph; | 2129 | struct icmphdr _icmph; |
2147 | const struct ipt_icmp *icmpinfo = par->matchinfo; | 2130 | const struct ipt_icmp *icmpinfo = par->matchinfo; |
2148 | 2131 | ||
2149 | /* Must not be a fragment. */ | 2132 | /* Must not be a fragment. */ |
2150 | if (par->fragoff != 0) | 2133 | if (par->fragoff != 0) |
2151 | return false; | 2134 | return false; |
2152 | 2135 | ||
2153 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); | 2136 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); |
2154 | if (ic == NULL) { | 2137 | if (ic == NULL) { |
2155 | /* We've been asked to examine this packet, and we | 2138 | /* We've been asked to examine this packet, and we |
2156 | * can't. Hence, no choice but to drop. | 2139 | * can't. Hence, no choice but to drop. |
2157 | */ | 2140 | */ |
2158 | duprintf("Dropping evil ICMP tinygram.\n"); | 2141 | duprintf("Dropping evil ICMP tinygram.\n"); |
2159 | par->hotdrop = true; | 2142 | par->hotdrop = true; |
2160 | return false; | 2143 | return false; |
2161 | } | 2144 | } |
2162 | 2145 | ||
2163 | return icmp_type_code_match(icmpinfo->type, | 2146 | return icmp_type_code_match(icmpinfo->type, |
2164 | icmpinfo->code[0], | 2147 | icmpinfo->code[0], |
2165 | icmpinfo->code[1], | 2148 | icmpinfo->code[1], |
2166 | ic->type, ic->code, | 2149 | ic->type, ic->code, |
2167 | !!(icmpinfo->invflags&IPT_ICMP_INV)); | 2150 | !!(icmpinfo->invflags&IPT_ICMP_INV)); |
2168 | } | 2151 | } |
2169 | 2152 | ||
2170 | static int icmp_checkentry(const struct xt_mtchk_param *par) | 2153 | static int icmp_checkentry(const struct xt_mtchk_param *par) |
2171 | { | 2154 | { |
2172 | const struct ipt_icmp *icmpinfo = par->matchinfo; | 2155 | const struct ipt_icmp *icmpinfo = par->matchinfo; |
2173 | 2156 | ||
2174 | /* Must specify no unknown invflags */ | 2157 | /* Must specify no unknown invflags */ |
2175 | return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; | 2158 | return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; |
2176 | } | 2159 | } |
2177 | 2160 | ||
/* Built-in targets every iptables table relies on: the standard
 * verdict target and the ERROR target (used for chain names /
 * corrupted-table detection). */
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		/* no .target hook here — presumably the verdict is handled
		 * specially by the x_tables core; confirm in x_tables.c */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		/* 32-bit userland stores the verdict as compat_int_t */
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name       = XT_ERROR_TARGET,
		.target     = ipt_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family     = NFPROTO_IPV4,
	},
};
2196 | 2179 | ||
/* get/setsockopt() hooks: routes IPT_SO_* socket options on PF_INET
 * sockets to the handlers above (with compat variants for 32-bit
 * userland when CONFIG_COMPAT is enabled). */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2213 | 2196 | ||
/* Built-in matches: only "icmp", implemented by icmp_match() above and
 * restricted to IPPROTO_ICMP rules via .proto. */
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
2224 | 2207 | ||
/* Per-network-namespace setup: initialise the x_tables state for IPv4. */
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

/* Per-network-namespace teardown, mirroring ip_tables_net_init(). */
static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
2239 | 2222 | ||
2240 | static int __init ip_tables_init(void) | 2223 | static int __init ip_tables_init(void) |
2241 | { | 2224 | { |
2242 | int ret; | 2225 | int ret; |
2243 | 2226 | ||
2244 | ret = register_pernet_subsys(&ip_tables_net_ops); | 2227 | ret = register_pernet_subsys(&ip_tables_net_ops); |
2245 | if (ret < 0) | 2228 | if (ret < 0) |
2246 | goto err1; | 2229 | goto err1; |
2247 | 2230 | ||
2248 | /* Noone else will be downing sem now, so we won't sleep */ | 2231 | /* Noone else will be downing sem now, so we won't sleep */ |
2249 | ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2232 | ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2250 | if (ret < 0) | 2233 | if (ret < 0) |
2251 | goto err2; | 2234 | goto err2; |
2252 | ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2235 | ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2253 | if (ret < 0) | 2236 | if (ret < 0) |
2254 | goto err4; | 2237 | goto err4; |
2255 | 2238 | ||
2256 | /* Register setsockopt */ | 2239 | /* Register setsockopt */ |
2257 | ret = nf_register_sockopt(&ipt_sockopts); | 2240 | ret = nf_register_sockopt(&ipt_sockopts); |
2258 | if (ret < 0) | 2241 | if (ret < 0) |
2259 | goto err5; | 2242 | goto err5; |
2260 | 2243 | ||
2261 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); | 2244 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2262 | return 0; | 2245 | return 0; |
2263 | 2246 | ||
2264 | err5: | 2247 | err5: |
2265 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2248 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2266 | err4: | 2249 | err4: |
2267 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2250 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2268 | err2: | 2251 | err2: |
2269 | unregister_pernet_subsys(&ip_tables_net_ops); | 2252 | unregister_pernet_subsys(&ip_tables_net_ops); |
2270 | err1: | 2253 | err1: |
2271 | return ret; | 2254 | return ret; |
2272 | } | 2255 | } |
2273 | 2256 | ||
2274 | static void __exit ip_tables_fini(void) | 2257 | static void __exit ip_tables_fini(void) |
2275 | { | 2258 | { |
2276 | nf_unregister_sockopt(&ipt_sockopts); | 2259 | nf_unregister_sockopt(&ipt_sockopts); |
2277 | 2260 | ||
2278 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2261 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2279 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2262 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2280 | unregister_pernet_subsys(&ip_tables_net_ops); | 2263 | unregister_pernet_subsys(&ip_tables_net_ops); |
net/ipv6/netfilter/ip6_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code. | 2 | * Packet matching code. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/poison.h> | 19 | #include <linux/poison.h> |
20 | #include <linux/icmpv6.h> | 20 | #include <linux/icmpv6.h> |
21 | #include <net/ipv6.h> | 21 | #include <net/ipv6.h> |
22 | #include <net/compat.h> | 22 | #include <net/compat.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
27 | #include <linux/cpumask.h> | 27 | #include <linux/cpumask.h> |
28 | 28 | ||
29 | #include <linux/netfilter_ipv6/ip6_tables.h> | 29 | #include <linux/netfilter_ipv6/ip6_tables.h> |
30 | #include <linux/netfilter/x_tables.h> | 30 | #include <linux/netfilter/x_tables.h> |
31 | #include <net/netfilter/nf_log.h> | 31 | #include <net/netfilter/nf_log.h> |
32 | #include "../../netfilter/xt_repldata.h" | 32 | #include "../../netfilter/xt_repldata.h" |
33 | 33 | ||
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): datapath debugging, compiled out unless DEBUG_IP_FIREWALL
 * is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf(): configuration/user-interface debugging, compiled out
 * unless DEBUG_IP_FIREWALL_USER is defined above. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
65 | 65 | ||
/* Build the initial (boot-time) replace blob for @info via the generic
 * xt_repldata.h template, instantiated for ip6t/IP6T. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71 | 71 | ||
72 | /* | 72 | /* |
73 | We keep a set of rules for each CPU, so we can avoid write-locking | 73 | We keep a set of rules for each CPU, so we can avoid write-locking |
74 | them in the softirq when updating the counters and therefore | 74 | them in the softirq when updating the counters and therefore |
75 | only need to read-lock in the softirq; doing a write_lock_bh() in user | 75 | only need to read-lock in the softirq; doing a write_lock_bh() in user |
76 | context stops packets coming through and allows user context to read | 76 | context stops packets coming through and allows user context to read |
77 | the counters or update the rules. | 77 | the counters or update the rules. |
78 | 78 | ||
79 | Hence the start of any table is given by get_table() below. */ | 79 | Hence the start of any table is given by get_table() below. */ |
80 | 80 | ||
81 | /* Check for an extension */ | 81 | /* Check for an extension */ |
82 | int | 82 | int |
83 | ip6t_ext_hdr(u8 nexthdr) | 83 | ip6t_ext_hdr(u8 nexthdr) |
84 | { | 84 | { |
85 | return (nexthdr == IPPROTO_HOPOPTS) || | 85 | return (nexthdr == IPPROTO_HOPOPTS) || |
86 | (nexthdr == IPPROTO_ROUTING) || | 86 | (nexthdr == IPPROTO_ROUTING) || |
87 | (nexthdr == IPPROTO_FRAGMENT) || | 87 | (nexthdr == IPPROTO_FRAGMENT) || |
88 | (nexthdr == IPPROTO_ESP) || | 88 | (nexthdr == IPPROTO_ESP) || |
89 | (nexthdr == IPPROTO_AH) || | 89 | (nexthdr == IPPROTO_AH) || |
90 | (nexthdr == IPPROTO_NONE) || | 90 | (nexthdr == IPPROTO_NONE) || |
91 | (nexthdr == IPPROTO_DSTOPTS); | 91 | (nexthdr == IPPROTO_DSTOPTS); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* Returns whether matches rule or not. */ | 94 | /* Returns whether matches rule or not. */ |
95 | /* Performance critical - called for every packet */ | 95 | /* Performance critical - called for every packet */ |
96 | static inline bool | 96 | static inline bool |
97 | ip6_packet_match(const struct sk_buff *skb, | 97 | ip6_packet_match(const struct sk_buff *skb, |
98 | const char *indev, | 98 | const char *indev, |
99 | const char *outdev, | 99 | const char *outdev, |
100 | const struct ip6t_ip6 *ip6info, | 100 | const struct ip6t_ip6 *ip6info, |
101 | unsigned int *protoff, | 101 | unsigned int *protoff, |
102 | int *fragoff, bool *hotdrop) | 102 | int *fragoff, bool *hotdrop) |
103 | { | 103 | { |
104 | unsigned long ret; | 104 | unsigned long ret; |
105 | const struct ipv6hdr *ipv6 = ipv6_hdr(skb); | 105 | const struct ipv6hdr *ipv6 = ipv6_hdr(skb); |
106 | 106 | ||
107 | #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) | 107 | #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) |
108 | 108 | ||
109 | if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, | 109 | if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, |
110 | &ip6info->src), IP6T_INV_SRCIP) || | 110 | &ip6info->src), IP6T_INV_SRCIP) || |
111 | FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, | 111 | FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, |
112 | &ip6info->dst), IP6T_INV_DSTIP)) { | 112 | &ip6info->dst), IP6T_INV_DSTIP)) { |
113 | dprintf("Source or dest mismatch.\n"); | 113 | dprintf("Source or dest mismatch.\n"); |
114 | /* | 114 | /* |
115 | dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, | 115 | dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, |
116 | ipinfo->smsk.s_addr, ipinfo->src.s_addr, | 116 | ipinfo->smsk.s_addr, ipinfo->src.s_addr, |
117 | ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); | 117 | ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); |
118 | dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, | 118 | dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, |
119 | ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, | 119 | ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, |
120 | ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ | 120 | ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ |
121 | return false; | 121 | return false; |
122 | } | 122 | } |
123 | 123 | ||
124 | ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); | 124 | ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); |
125 | 125 | ||
126 | if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { | 126 | if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { |
127 | dprintf("VIA in mismatch (%s vs %s).%s\n", | 127 | dprintf("VIA in mismatch (%s vs %s).%s\n", |
128 | indev, ip6info->iniface, | 128 | indev, ip6info->iniface, |
129 | ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":""); | 129 | ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":""); |
130 | return false; | 130 | return false; |
131 | } | 131 | } |
132 | 132 | ||
133 | ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); | 133 | ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); |
134 | 134 | ||
135 | if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { | 135 | if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { |
136 | dprintf("VIA out mismatch (%s vs %s).%s\n", | 136 | dprintf("VIA out mismatch (%s vs %s).%s\n", |
137 | outdev, ip6info->outiface, | 137 | outdev, ip6info->outiface, |
138 | ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":""); | 138 | ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":""); |
139 | return false; | 139 | return false; |
140 | } | 140 | } |
141 | 141 | ||
142 | /* ... might want to do something with class and flowlabel here ... */ | 142 | /* ... might want to do something with class and flowlabel here ... */ |
143 | 143 | ||
144 | /* look for the desired protocol header */ | 144 | /* look for the desired protocol header */ |
145 | if((ip6info->flags & IP6T_F_PROTO)) { | 145 | if((ip6info->flags & IP6T_F_PROTO)) { |
146 | int protohdr; | 146 | int protohdr; |
147 | unsigned short _frag_off; | 147 | unsigned short _frag_off; |
148 | 148 | ||
149 | protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); | 149 | protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); |
150 | if (protohdr < 0) { | 150 | if (protohdr < 0) { |
151 | if (_frag_off == 0) | 151 | if (_frag_off == 0) |
152 | *hotdrop = true; | 152 | *hotdrop = true; |
153 | return false; | 153 | return false; |
154 | } | 154 | } |
155 | *fragoff = _frag_off; | 155 | *fragoff = _frag_off; |
156 | 156 | ||
157 | dprintf("Packet protocol %hi ?= %s%hi.\n", | 157 | dprintf("Packet protocol %hi ?= %s%hi.\n", |
158 | protohdr, | 158 | protohdr, |
159 | ip6info->invflags & IP6T_INV_PROTO ? "!":"", | 159 | ip6info->invflags & IP6T_INV_PROTO ? "!":"", |
160 | ip6info->proto); | 160 | ip6info->proto); |
161 | 161 | ||
162 | if (ip6info->proto == protohdr) { | 162 | if (ip6info->proto == protohdr) { |
163 | if(ip6info->invflags & IP6T_INV_PROTO) { | 163 | if(ip6info->invflags & IP6T_INV_PROTO) { |
164 | return false; | 164 | return false; |
165 | } | 165 | } |
166 | return true; | 166 | return true; |
167 | } | 167 | } |
168 | 168 | ||
169 | /* We need match for the '-p all', too! */ | 169 | /* We need match for the '-p all', too! */ |
170 | if ((ip6info->proto != 0) && | 170 | if ((ip6info->proto != 0) && |
171 | !(ip6info->invflags & IP6T_INV_PROTO)) | 171 | !(ip6info->invflags & IP6T_INV_PROTO)) |
172 | return false; | 172 | return false; |
173 | } | 173 | } |
174 | return true; | 174 | return true; |
175 | } | 175 | } |
176 | 176 | ||
/* should be ip6 safe */
/* Validate the IPv6 part of a rule at load time: reject any flag or
 * inversion-flag bits the kernel does not know about. */
static bool
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
		return false;
	}
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
		return false;
	}
	return true;
}
193 | 193 | ||
/* ERROR target handler: should only fire on a corrupted ruleset.
 * Logs (rate-limited) the chain name stored in targinfo and drops. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
202 | 202 | ||
/* Translate a byte offset within the table blob into an entry pointer.
 * Note: arithmetic on void* is a GNU C extension used throughout the
 * kernel. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
208 | 208 | ||
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* zero-initialized reference value to memcmp() against */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
217 | 217 | ||
/* const-qualified wrapper around ip6t_get_target(); the cast only
 * drops const for the call, the result is returned as const again. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
223 | 223 | ||
224 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 224 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
225 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 225 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
226 | /* This cries for unification! */ | 226 | /* This cries for unification! */ |
227 | static const char *const hooknames[] = { | 227 | static const char *const hooknames[] = { |
228 | [NF_INET_PRE_ROUTING] = "PREROUTING", | 228 | [NF_INET_PRE_ROUTING] = "PREROUTING", |
229 | [NF_INET_LOCAL_IN] = "INPUT", | 229 | [NF_INET_LOCAL_IN] = "INPUT", |
230 | [NF_INET_FORWARD] = "FORWARD", | 230 | [NF_INET_FORWARD] = "FORWARD", |
231 | [NF_INET_LOCAL_OUT] = "OUTPUT", | 231 | [NF_INET_LOCAL_OUT] = "OUTPUT", |
232 | [NF_INET_POST_ROUTING] = "POSTROUTING", | 232 | [NF_INET_POST_ROUTING] = "POSTROUTING", |
233 | }; | 233 | }; |
234 | 234 | ||
235 | enum nf_ip_trace_comments { | 235 | enum nf_ip_trace_comments { |
236 | NF_IP6_TRACE_COMMENT_RULE, | 236 | NF_IP6_TRACE_COMMENT_RULE, |
237 | NF_IP6_TRACE_COMMENT_RETURN, | 237 | NF_IP6_TRACE_COMMENT_RETURN, |
238 | NF_IP6_TRACE_COMMENT_POLICY, | 238 | NF_IP6_TRACE_COMMENT_POLICY, |
239 | }; | 239 | }; |
240 | 240 | ||
241 | static const char *const comments[] = { | 241 | static const char *const comments[] = { |
242 | [NF_IP6_TRACE_COMMENT_RULE] = "rule", | 242 | [NF_IP6_TRACE_COMMENT_RULE] = "rule", |
243 | [NF_IP6_TRACE_COMMENT_RETURN] = "return", | 243 | [NF_IP6_TRACE_COMMENT_RETURN] = "return", |
244 | [NF_IP6_TRACE_COMMENT_POLICY] = "policy", | 244 | [NF_IP6_TRACE_COMMENT_POLICY] = "policy", |
245 | }; | 245 | }; |
246 | 246 | ||
247 | static struct nf_loginfo trace_loginfo = { | 247 | static struct nf_loginfo trace_loginfo = { |
248 | .type = NF_LOG_TYPE_LOG, | 248 | .type = NF_LOG_TYPE_LOG, |
249 | .u = { | 249 | .u = { |
250 | .log = { | 250 | .log = { |
251 | .level = 4, | 251 | .level = 4, |
252 | .logflags = NF_LOG_MASK, | 252 | .logflags = NF_LOG_MASK, |
253 | }, | 253 | }, |
254 | }, | 254 | }, |
255 | }; | 255 | }; |
256 | 256 | ||
257 | /* Mildly perf critical (only if packet tracing is on) */ | 257 | /* Mildly perf critical (only if packet tracing is on) */ |
258 | static inline int | 258 | static inline int |
259 | get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, | 259 | get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, |
260 | const char *hookname, const char **chainname, | 260 | const char *hookname, const char **chainname, |
261 | const char **comment, unsigned int *rulenum) | 261 | const char **comment, unsigned int *rulenum) |
262 | { | 262 | { |
263 | const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); | 263 | const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); |
264 | 264 | ||
265 | if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { | 265 | if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { |
266 | /* Head of user chain: ERROR target with chainname */ | 266 | /* Head of user chain: ERROR target with chainname */ |
267 | *chainname = t->target.data; | 267 | *chainname = t->target.data; |
268 | (*rulenum) = 0; | 268 | (*rulenum) = 0; |
269 | } else if (s == e) { | 269 | } else if (s == e) { |
270 | (*rulenum)++; | 270 | (*rulenum)++; |
271 | 271 | ||
272 | if (s->target_offset == sizeof(struct ip6t_entry) && | 272 | if (s->target_offset == sizeof(struct ip6t_entry) && |
273 | strcmp(t->target.u.kernel.target->name, | 273 | strcmp(t->target.u.kernel.target->name, |
274 | XT_STANDARD_TARGET) == 0 && | 274 | XT_STANDARD_TARGET) == 0 && |
275 | t->verdict < 0 && | 275 | t->verdict < 0 && |
276 | unconditional(&s->ipv6)) { | 276 | unconditional(&s->ipv6)) { |
277 | /* Tail of chains: STANDARD target (return/policy) */ | 277 | /* Tail of chains: STANDARD target (return/policy) */ |
278 | *comment = *chainname == hookname | 278 | *comment = *chainname == hookname |
279 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] | 279 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] |
280 | : comments[NF_IP6_TRACE_COMMENT_RETURN]; | 280 | : comments[NF_IP6_TRACE_COMMENT_RETURN]; |
281 | } | 281 | } |
282 | return 1; | 282 | return 1; |
283 | } else | 283 | } else |
284 | (*rulenum)++; | 284 | (*rulenum)++; |
285 | 285 | ||
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
288 | 288 | ||
289 | static void trace_packet(const struct sk_buff *skb, | 289 | static void trace_packet(const struct sk_buff *skb, |
290 | unsigned int hook, | 290 | unsigned int hook, |
291 | const struct net_device *in, | 291 | const struct net_device *in, |
292 | const struct net_device *out, | 292 | const struct net_device *out, |
293 | const char *tablename, | 293 | const char *tablename, |
294 | const struct xt_table_info *private, | 294 | const struct xt_table_info *private, |
295 | const struct ip6t_entry *e) | 295 | const struct ip6t_entry *e) |
296 | { | 296 | { |
297 | const void *table_base; | 297 | const void *table_base; |
298 | const struct ip6t_entry *root; | 298 | const struct ip6t_entry *root; |
299 | const char *hookname, *chainname, *comment; | 299 | const char *hookname, *chainname, *comment; |
300 | const struct ip6t_entry *iter; | 300 | const struct ip6t_entry *iter; |
301 | unsigned int rulenum = 0; | 301 | unsigned int rulenum = 0; |
302 | 302 | ||
303 | table_base = private->entries[smp_processor_id()]; | 303 | table_base = private->entries[smp_processor_id()]; |
304 | root = get_entry(table_base, private->hook_entry[hook]); | 304 | root = get_entry(table_base, private->hook_entry[hook]); |
305 | 305 | ||
306 | hookname = chainname = hooknames[hook]; | 306 | hookname = chainname = hooknames[hook]; |
307 | comment = comments[NF_IP6_TRACE_COMMENT_RULE]; | 307 | comment = comments[NF_IP6_TRACE_COMMENT_RULE]; |
308 | 308 | ||
309 | xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) | 309 | xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) |
310 | if (get_chainname_rulenum(iter, e, hookname, | 310 | if (get_chainname_rulenum(iter, e, hookname, |
311 | &chainname, &comment, &rulenum) != 0) | 311 | &chainname, &comment, &rulenum) != 0) |
312 | break; | 312 | break; |
313 | 313 | ||
314 | nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, | 314 | nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, |
315 | "TRACE: %s:%s:%s:%u ", | 315 | "TRACE: %s:%s:%s:%u ", |
316 | tablename, chainname, comment, rulenum); | 316 | tablename, chainname, comment, rulenum); |
317 | } | 317 | } |
318 | #endif | 318 | #endif |
319 | 319 | ||
320 | static inline __pure struct ip6t_entry * | 320 | static inline __pure struct ip6t_entry * |
321 | ip6t_next_entry(const struct ip6t_entry *entry) | 321 | ip6t_next_entry(const struct ip6t_entry *entry) |
322 | { | 322 | { |
323 | return (void *)entry + entry->next_offset; | 323 | return (void *)entry + entry->next_offset; |
324 | } | 324 | } |
325 | 325 | ||
326 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ | 326 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
327 | unsigned int | 327 | unsigned int |
328 | ip6t_do_table(struct sk_buff *skb, | 328 | ip6t_do_table(struct sk_buff *skb, |
329 | unsigned int hook, | 329 | unsigned int hook, |
330 | const struct net_device *in, | 330 | const struct net_device *in, |
331 | const struct net_device *out, | 331 | const struct net_device *out, |
332 | struct xt_table *table) | 332 | struct xt_table *table) |
333 | { | 333 | { |
334 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 334 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
335 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 335 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
336 | unsigned int verdict = NF_DROP; | 336 | unsigned int verdict = NF_DROP; |
337 | const char *indev, *outdev; | 337 | const char *indev, *outdev; |
338 | const void *table_base; | 338 | const void *table_base; |
339 | struct ip6t_entry *e, **jumpstack; | 339 | struct ip6t_entry *e, **jumpstack; |
340 | unsigned int *stackptr, origptr, cpu; | 340 | unsigned int *stackptr, origptr, cpu; |
341 | const struct xt_table_info *private; | 341 | const struct xt_table_info *private; |
342 | struct xt_action_param acpar; | 342 | struct xt_action_param acpar; |
343 | 343 | ||
344 | /* Initialization */ | 344 | /* Initialization */ |
345 | indev = in ? in->name : nulldevname; | 345 | indev = in ? in->name : nulldevname; |
346 | outdev = out ? out->name : nulldevname; | 346 | outdev = out ? out->name : nulldevname; |
347 | /* We handle fragments by dealing with the first fragment as | 347 | /* We handle fragments by dealing with the first fragment as |
348 | * if it was a normal packet. All other fragments are treated | 348 | * if it was a normal packet. All other fragments are treated |
349 | * normally, except that they will NEVER match rules that ask | 349 | * normally, except that they will NEVER match rules that ask |
350 | * things we don't know, ie. tcp syn flag or ports). If the | 350 | * things we don't know, ie. tcp syn flag or ports). If the |
351 | * rule is also a fragment-specific rule, non-fragments won't | 351 | * rule is also a fragment-specific rule, non-fragments won't |
352 | * match it. */ | 352 | * match it. */ |
353 | acpar.hotdrop = false; | 353 | acpar.hotdrop = false; |
354 | acpar.in = in; | 354 | acpar.in = in; |
355 | acpar.out = out; | 355 | acpar.out = out; |
356 | acpar.family = NFPROTO_IPV6; | 356 | acpar.family = NFPROTO_IPV6; |
357 | acpar.hooknum = hook; | 357 | acpar.hooknum = hook; |
358 | 358 | ||
359 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 359 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
360 | 360 | ||
361 | xt_info_rdlock_bh(); | 361 | xt_info_rdlock_bh(); |
362 | private = table->private; | 362 | private = table->private; |
363 | cpu = smp_processor_id(); | 363 | cpu = smp_processor_id(); |
364 | table_base = private->entries[cpu]; | 364 | table_base = private->entries[cpu]; |
365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; | 365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; |
366 | stackptr = per_cpu_ptr(private->stackptr, cpu); | 366 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
367 | origptr = *stackptr; | 367 | origptr = *stackptr; |
368 | 368 | ||
369 | e = get_entry(table_base, private->hook_entry[hook]); | 369 | e = get_entry(table_base, private->hook_entry[hook]); |
370 | 370 | ||
371 | do { | 371 | do { |
372 | const struct xt_entry_target *t; | 372 | const struct xt_entry_target *t; |
373 | const struct xt_entry_match *ematch; | 373 | const struct xt_entry_match *ematch; |
374 | 374 | ||
375 | IP_NF_ASSERT(e); | 375 | IP_NF_ASSERT(e); |
376 | if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, | 376 | if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, |
377 | &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { | 377 | &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { |
378 | no_match: | 378 | no_match: |
379 | e = ip6t_next_entry(e); | 379 | e = ip6t_next_entry(e); |
380 | continue; | 380 | continue; |
381 | } | 381 | } |
382 | 382 | ||
383 | xt_ematch_foreach(ematch, e) { | 383 | xt_ematch_foreach(ematch, e) { |
384 | acpar.match = ematch->u.kernel.match; | 384 | acpar.match = ematch->u.kernel.match; |
385 | acpar.matchinfo = ematch->data; | 385 | acpar.matchinfo = ematch->data; |
386 | if (!acpar.match->match(skb, &acpar)) | 386 | if (!acpar.match->match(skb, &acpar)) |
387 | goto no_match; | 387 | goto no_match; |
388 | } | 388 | } |
389 | 389 | ||
390 | ADD_COUNTER(e->counters, skb->len, 1); | 390 | ADD_COUNTER(e->counters, skb->len, 1); |
391 | 391 | ||
392 | t = ip6t_get_target_c(e); | 392 | t = ip6t_get_target_c(e); |
393 | IP_NF_ASSERT(t->u.kernel.target); | 393 | IP_NF_ASSERT(t->u.kernel.target); |
394 | 394 | ||
395 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 395 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
396 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 396 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
397 | /* The packet is traced: log it */ | 397 | /* The packet is traced: log it */ |
398 | if (unlikely(skb->nf_trace)) | 398 | if (unlikely(skb->nf_trace)) |
399 | trace_packet(skb, hook, in, out, | 399 | trace_packet(skb, hook, in, out, |
400 | table->name, private, e); | 400 | table->name, private, e); |
401 | #endif | 401 | #endif |
402 | /* Standard target? */ | 402 | /* Standard target? */ |
403 | if (!t->u.kernel.target->target) { | 403 | if (!t->u.kernel.target->target) { |
404 | int v; | 404 | int v; |
405 | 405 | ||
406 | v = ((struct xt_standard_target *)t)->verdict; | 406 | v = ((struct xt_standard_target *)t)->verdict; |
407 | if (v < 0) { | 407 | if (v < 0) { |
408 | /* Pop from stack? */ | 408 | /* Pop from stack? */ |
409 | if (v != XT_RETURN) { | 409 | if (v != XT_RETURN) { |
410 | verdict = (unsigned)(-v) - 1; | 410 | verdict = (unsigned)(-v) - 1; |
411 | break; | 411 | break; |
412 | } | 412 | } |
413 | if (*stackptr == 0) | 413 | if (*stackptr == 0) |
414 | e = get_entry(table_base, | 414 | e = get_entry(table_base, |
415 | private->underflow[hook]); | 415 | private->underflow[hook]); |
416 | else | 416 | else |
417 | e = ip6t_next_entry(jumpstack[--*stackptr]); | 417 | e = ip6t_next_entry(jumpstack[--*stackptr]); |
418 | continue; | 418 | continue; |
419 | } | 419 | } |
420 | if (table_base + v != ip6t_next_entry(e) && | 420 | if (table_base + v != ip6t_next_entry(e) && |
421 | !(e->ipv6.flags & IP6T_F_GOTO)) { | 421 | !(e->ipv6.flags & IP6T_F_GOTO)) { |
422 | if (*stackptr >= private->stacksize) { | 422 | if (*stackptr >= private->stacksize) { |
423 | verdict = NF_DROP; | 423 | verdict = NF_DROP; |
424 | break; | 424 | break; |
425 | } | 425 | } |
426 | jumpstack[(*stackptr)++] = e; | 426 | jumpstack[(*stackptr)++] = e; |
427 | } | 427 | } |
428 | 428 | ||
429 | e = get_entry(table_base, v); | 429 | e = get_entry(table_base, v); |
430 | continue; | 430 | continue; |
431 | } | 431 | } |
432 | 432 | ||
433 | acpar.target = t->u.kernel.target; | 433 | acpar.target = t->u.kernel.target; |
434 | acpar.targinfo = t->data; | 434 | acpar.targinfo = t->data; |
435 | 435 | ||
436 | verdict = t->u.kernel.target->target(skb, &acpar); | 436 | verdict = t->u.kernel.target->target(skb, &acpar); |
437 | if (verdict == XT_CONTINUE) | 437 | if (verdict == XT_CONTINUE) |
438 | e = ip6t_next_entry(e); | 438 | e = ip6t_next_entry(e); |
439 | else | 439 | else |
440 | /* Verdict */ | 440 | /* Verdict */ |
441 | break; | 441 | break; |
442 | } while (!acpar.hotdrop); | 442 | } while (!acpar.hotdrop); |
443 | 443 | ||
444 | xt_info_rdunlock_bh(); | 444 | xt_info_rdunlock_bh(); |
445 | *stackptr = origptr; | 445 | *stackptr = origptr; |
446 | 446 | ||
447 | #ifdef DEBUG_ALLOW_ALL | 447 | #ifdef DEBUG_ALLOW_ALL |
448 | return NF_ACCEPT; | 448 | return NF_ACCEPT; |
449 | #else | 449 | #else |
450 | if (acpar.hotdrop) | 450 | if (acpar.hotdrop) |
451 | return NF_DROP; | 451 | return NF_DROP; |
452 | else return verdict; | 452 | else return verdict; |
453 | #endif | 453 | #endif |
454 | } | 454 | } |
455 | 455 | ||
456 | /* Figures out from what hook each rule can be called: returns 0 if | 456 | /* Figures out from what hook each rule can be called: returns 0 if |
457 | there are loops. Puts hook bitmask in comefrom. */ | 457 | there are loops. Puts hook bitmask in comefrom. */ |
458 | static int | 458 | static int |
459 | mark_source_chains(const struct xt_table_info *newinfo, | 459 | mark_source_chains(const struct xt_table_info *newinfo, |
460 | unsigned int valid_hooks, void *entry0) | 460 | unsigned int valid_hooks, void *entry0) |
461 | { | 461 | { |
462 | unsigned int hook; | 462 | unsigned int hook; |
463 | 463 | ||
464 | /* No recursion; use packet counter to save back ptrs (reset | 464 | /* No recursion; use packet counter to save back ptrs (reset |
465 | to 0 as we leave), and comefrom to save source hook bitmask */ | 465 | to 0 as we leave), and comefrom to save source hook bitmask */ |
466 | for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { | 466 | for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { |
467 | unsigned int pos = newinfo->hook_entry[hook]; | 467 | unsigned int pos = newinfo->hook_entry[hook]; |
468 | struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); | 468 | struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); |
469 | 469 | ||
470 | if (!(valid_hooks & (1 << hook))) | 470 | if (!(valid_hooks & (1 << hook))) |
471 | continue; | 471 | continue; |
472 | 472 | ||
473 | /* Set initial back pointer. */ | 473 | /* Set initial back pointer. */ |
474 | e->counters.pcnt = pos; | 474 | e->counters.pcnt = pos; |
475 | 475 | ||
476 | for (;;) { | 476 | for (;;) { |
477 | const struct xt_standard_target *t | 477 | const struct xt_standard_target *t |
478 | = (void *)ip6t_get_target_c(e); | 478 | = (void *)ip6t_get_target_c(e); |
479 | int visited = e->comefrom & (1 << hook); | 479 | int visited = e->comefrom & (1 << hook); |
480 | 480 | ||
481 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { | 481 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { |
482 | pr_err("iptables: loop hook %u pos %u %08X.\n", | 482 | pr_err("iptables: loop hook %u pos %u %08X.\n", |
483 | hook, pos, e->comefrom); | 483 | hook, pos, e->comefrom); |
484 | return 0; | 484 | return 0; |
485 | } | 485 | } |
486 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); | 486 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); |
487 | 487 | ||
488 | /* Unconditional return/END. */ | 488 | /* Unconditional return/END. */ |
489 | if ((e->target_offset == sizeof(struct ip6t_entry) && | 489 | if ((e->target_offset == sizeof(struct ip6t_entry) && |
490 | (strcmp(t->target.u.user.name, | 490 | (strcmp(t->target.u.user.name, |
491 | XT_STANDARD_TARGET) == 0) && | 491 | XT_STANDARD_TARGET) == 0) && |
492 | t->verdict < 0 && | 492 | t->verdict < 0 && |
493 | unconditional(&e->ipv6)) || visited) { | 493 | unconditional(&e->ipv6)) || visited) { |
494 | unsigned int oldpos, size; | 494 | unsigned int oldpos, size; |
495 | 495 | ||
496 | if ((strcmp(t->target.u.user.name, | 496 | if ((strcmp(t->target.u.user.name, |
497 | XT_STANDARD_TARGET) == 0) && | 497 | XT_STANDARD_TARGET) == 0) && |
498 | t->verdict < -NF_MAX_VERDICT - 1) { | 498 | t->verdict < -NF_MAX_VERDICT - 1) { |
499 | duprintf("mark_source_chains: bad " | 499 | duprintf("mark_source_chains: bad " |
500 | "negative verdict (%i)\n", | 500 | "negative verdict (%i)\n", |
501 | t->verdict); | 501 | t->verdict); |
502 | return 0; | 502 | return 0; |
503 | } | 503 | } |
504 | 504 | ||
505 | /* Return: backtrack through the last | 505 | /* Return: backtrack through the last |
506 | big jump. */ | 506 | big jump. */ |
507 | do { | 507 | do { |
508 | e->comefrom ^= (1<<NF_INET_NUMHOOKS); | 508 | e->comefrom ^= (1<<NF_INET_NUMHOOKS); |
509 | #ifdef DEBUG_IP_FIREWALL_USER | 509 | #ifdef DEBUG_IP_FIREWALL_USER |
510 | if (e->comefrom | 510 | if (e->comefrom |
511 | & (1 << NF_INET_NUMHOOKS)) { | 511 | & (1 << NF_INET_NUMHOOKS)) { |
512 | duprintf("Back unset " | 512 | duprintf("Back unset " |
513 | "on hook %u " | 513 | "on hook %u " |
514 | "rule %u\n", | 514 | "rule %u\n", |
515 | hook, pos); | 515 | hook, pos); |
516 | } | 516 | } |
517 | #endif | 517 | #endif |
518 | oldpos = pos; | 518 | oldpos = pos; |
519 | pos = e->counters.pcnt; | 519 | pos = e->counters.pcnt; |
520 | e->counters.pcnt = 0; | 520 | e->counters.pcnt = 0; |
521 | 521 | ||
522 | /* We're at the start. */ | 522 | /* We're at the start. */ |
523 | if (pos == oldpos) | 523 | if (pos == oldpos) |
524 | goto next; | 524 | goto next; |
525 | 525 | ||
526 | e = (struct ip6t_entry *) | 526 | e = (struct ip6t_entry *) |
527 | (entry0 + pos); | 527 | (entry0 + pos); |
528 | } while (oldpos == pos + e->next_offset); | 528 | } while (oldpos == pos + e->next_offset); |
529 | 529 | ||
530 | /* Move along one */ | 530 | /* Move along one */ |
531 | size = e->next_offset; | 531 | size = e->next_offset; |
532 | e = (struct ip6t_entry *) | 532 | e = (struct ip6t_entry *) |
533 | (entry0 + pos + size); | 533 | (entry0 + pos + size); |
534 | e->counters.pcnt = pos; | 534 | e->counters.pcnt = pos; |
535 | pos += size; | 535 | pos += size; |
536 | } else { | 536 | } else { |
537 | int newpos = t->verdict; | 537 | int newpos = t->verdict; |
538 | 538 | ||
539 | if (strcmp(t->target.u.user.name, | 539 | if (strcmp(t->target.u.user.name, |
540 | XT_STANDARD_TARGET) == 0 && | 540 | XT_STANDARD_TARGET) == 0 && |
541 | newpos >= 0) { | 541 | newpos >= 0) { |
542 | if (newpos > newinfo->size - | 542 | if (newpos > newinfo->size - |
543 | sizeof(struct ip6t_entry)) { | 543 | sizeof(struct ip6t_entry)) { |
544 | duprintf("mark_source_chains: " | 544 | duprintf("mark_source_chains: " |
545 | "bad verdict (%i)\n", | 545 | "bad verdict (%i)\n", |
546 | newpos); | 546 | newpos); |
547 | return 0; | 547 | return 0; |
548 | } | 548 | } |
549 | /* This a jump; chase it. */ | 549 | /* This a jump; chase it. */ |
550 | duprintf("Jump rule %u -> %u\n", | 550 | duprintf("Jump rule %u -> %u\n", |
551 | pos, newpos); | 551 | pos, newpos); |
552 | } else { | 552 | } else { |
553 | /* ... this is a fallthru */ | 553 | /* ... this is a fallthru */ |
554 | newpos = pos + e->next_offset; | 554 | newpos = pos + e->next_offset; |
555 | } | 555 | } |
556 | e = (struct ip6t_entry *) | 556 | e = (struct ip6t_entry *) |
557 | (entry0 + newpos); | 557 | (entry0 + newpos); |
558 | e->counters.pcnt = pos; | 558 | e->counters.pcnt = pos; |
559 | pos = newpos; | 559 | pos = newpos; |
560 | } | 560 | } |
561 | } | 561 | } |
562 | next: | 562 | next: |
563 | duprintf("Finished chain %u\n", hook); | 563 | duprintf("Finished chain %u\n", hook); |
564 | } | 564 | } |
565 | return 1; | 565 | return 1; |
566 | } | 566 | } |
567 | 567 | ||
568 | static void cleanup_match(struct xt_entry_match *m, struct net *net) | 568 | static void cleanup_match(struct xt_entry_match *m, struct net *net) |
569 | { | 569 | { |
570 | struct xt_mtdtor_param par; | 570 | struct xt_mtdtor_param par; |
571 | 571 | ||
572 | par.net = net; | 572 | par.net = net; |
573 | par.match = m->u.kernel.match; | 573 | par.match = m->u.kernel.match; |
574 | par.matchinfo = m->data; | 574 | par.matchinfo = m->data; |
575 | par.family = NFPROTO_IPV6; | 575 | par.family = NFPROTO_IPV6; |
576 | if (par.match->destroy != NULL) | 576 | if (par.match->destroy != NULL) |
577 | par.match->destroy(&par); | 577 | par.match->destroy(&par); |
578 | module_put(par.match->me); | 578 | module_put(par.match->me); |
579 | } | 579 | } |
580 | 580 | ||
581 | static int | 581 | static int |
582 | check_entry(const struct ip6t_entry *e, const char *name) | 582 | check_entry(const struct ip6t_entry *e, const char *name) |
583 | { | 583 | { |
584 | const struct xt_entry_target *t; | 584 | const struct xt_entry_target *t; |
585 | 585 | ||
586 | if (!ip6_checkentry(&e->ipv6)) { | 586 | if (!ip6_checkentry(&e->ipv6)) { |
587 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | 587 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); |
588 | return -EINVAL; | 588 | return -EINVAL; |
589 | } | 589 | } |
590 | 590 | ||
591 | if (e->target_offset + sizeof(struct xt_entry_target) > | 591 | if (e->target_offset + sizeof(struct xt_entry_target) > |
592 | e->next_offset) | 592 | e->next_offset) |
593 | return -EINVAL; | 593 | return -EINVAL; |
594 | 594 | ||
595 | t = ip6t_get_target_c(e); | 595 | t = ip6t_get_target_c(e); |
596 | if (e->target_offset + t->u.target_size > e->next_offset) | 596 | if (e->target_offset + t->u.target_size > e->next_offset) |
597 | return -EINVAL; | 597 | return -EINVAL; |
598 | 598 | ||
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | 601 | ||
602 | static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) | 602 | static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) |
603 | { | 603 | { |
604 | const struct ip6t_ip6 *ipv6 = par->entryinfo; | 604 | const struct ip6t_ip6 *ipv6 = par->entryinfo; |
605 | int ret; | 605 | int ret; |
606 | 606 | ||
607 | par->match = m->u.kernel.match; | 607 | par->match = m->u.kernel.match; |
608 | par->matchinfo = m->data; | 608 | par->matchinfo = m->data; |
609 | 609 | ||
610 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), | 610 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), |
611 | ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); | 611 | ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); |
612 | if (ret < 0) { | 612 | if (ret < 0) { |
613 | duprintf("ip_tables: check failed for `%s'.\n", | 613 | duprintf("ip_tables: check failed for `%s'.\n", |
614 | par.match->name); | 614 | par.match->name); |
615 | return ret; | 615 | return ret; |
616 | } | 616 | } |
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
620 | static int | 620 | static int |
621 | find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) | 621 | find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) |
622 | { | 622 | { |
623 | struct xt_match *match; | 623 | struct xt_match *match; |
624 | int ret; | 624 | int ret; |
625 | 625 | ||
626 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, | 626 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
627 | m->u.user.revision); | 627 | m->u.user.revision); |
628 | if (IS_ERR(match)) { | 628 | if (IS_ERR(match)) { |
629 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 629 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
630 | return PTR_ERR(match); | 630 | return PTR_ERR(match); |
631 | } | 631 | } |
632 | m->u.kernel.match = match; | 632 | m->u.kernel.match = match; |
633 | 633 | ||
634 | ret = check_match(m, par); | 634 | ret = check_match(m, par); |
635 | if (ret) | 635 | if (ret) |
636 | goto err; | 636 | goto err; |
637 | 637 | ||
638 | return 0; | 638 | return 0; |
639 | err: | 639 | err: |
640 | module_put(m->u.kernel.match->me); | 640 | module_put(m->u.kernel.match->me); |
641 | return ret; | 641 | return ret; |
642 | } | 642 | } |
643 | 643 | ||
644 | static int check_target(struct ip6t_entry *e, struct net *net, const char *name) | 644 | static int check_target(struct ip6t_entry *e, struct net *net, const char *name) |
645 | { | 645 | { |
646 | struct xt_entry_target *t = ip6t_get_target(e); | 646 | struct xt_entry_target *t = ip6t_get_target(e); |
647 | struct xt_tgchk_param par = { | 647 | struct xt_tgchk_param par = { |
648 | .net = net, | 648 | .net = net, |
649 | .table = name, | 649 | .table = name, |
650 | .entryinfo = e, | 650 | .entryinfo = e, |
651 | .target = t->u.kernel.target, | 651 | .target = t->u.kernel.target, |
652 | .targinfo = t->data, | 652 | .targinfo = t->data, |
653 | .hook_mask = e->comefrom, | 653 | .hook_mask = e->comefrom, |
654 | .family = NFPROTO_IPV6, | 654 | .family = NFPROTO_IPV6, |
655 | }; | 655 | }; |
656 | int ret; | 656 | int ret; |
657 | 657 | ||
658 | t = ip6t_get_target(e); | 658 | t = ip6t_get_target(e); |
659 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), | 659 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), |
660 | e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); | 660 | e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); |
661 | if (ret < 0) { | 661 | if (ret < 0) { |
662 | duprintf("ip_tables: check failed for `%s'.\n", | 662 | duprintf("ip_tables: check failed for `%s'.\n", |
663 | t->u.kernel.target->name); | 663 | t->u.kernel.target->name); |
664 | return ret; | 664 | return ret; |
665 | } | 665 | } |
666 | return 0; | 666 | return 0; |
667 | } | 667 | } |
668 | 668 | ||
669 | static int | 669 | static int |
670 | find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | 670 | find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, |
671 | unsigned int size) | 671 | unsigned int size) |
672 | { | 672 | { |
673 | struct xt_entry_target *t; | 673 | struct xt_entry_target *t; |
674 | struct xt_target *target; | 674 | struct xt_target *target; |
675 | int ret; | 675 | int ret; |
676 | unsigned int j; | 676 | unsigned int j; |
677 | struct xt_mtchk_param mtpar; | 677 | struct xt_mtchk_param mtpar; |
678 | struct xt_entry_match *ematch; | 678 | struct xt_entry_match *ematch; |
679 | 679 | ||
680 | ret = check_entry(e, name); | 680 | ret = check_entry(e, name); |
681 | if (ret) | 681 | if (ret) |
682 | return ret; | 682 | return ret; |
683 | 683 | ||
684 | j = 0; | 684 | j = 0; |
685 | mtpar.net = net; | 685 | mtpar.net = net; |
686 | mtpar.table = name; | 686 | mtpar.table = name; |
687 | mtpar.entryinfo = &e->ipv6; | 687 | mtpar.entryinfo = &e->ipv6; |
688 | mtpar.hook_mask = e->comefrom; | 688 | mtpar.hook_mask = e->comefrom; |
689 | mtpar.family = NFPROTO_IPV6; | 689 | mtpar.family = NFPROTO_IPV6; |
690 | xt_ematch_foreach(ematch, e) { | 690 | xt_ematch_foreach(ematch, e) { |
691 | ret = find_check_match(ematch, &mtpar); | 691 | ret = find_check_match(ematch, &mtpar); |
692 | if (ret != 0) | 692 | if (ret != 0) |
693 | goto cleanup_matches; | 693 | goto cleanup_matches; |
694 | ++j; | 694 | ++j; |
695 | } | 695 | } |
696 | 696 | ||
697 | t = ip6t_get_target(e); | 697 | t = ip6t_get_target(e); |
698 | target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, | 698 | target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, |
699 | t->u.user.revision); | 699 | t->u.user.revision); |
700 | if (IS_ERR(target)) { | 700 | if (IS_ERR(target)) { |
701 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 701 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
702 | ret = PTR_ERR(target); | 702 | ret = PTR_ERR(target); |
703 | goto cleanup_matches; | 703 | goto cleanup_matches; |
704 | } | 704 | } |
705 | t->u.kernel.target = target; | 705 | t->u.kernel.target = target; |
706 | 706 | ||
707 | ret = check_target(e, net, name); | 707 | ret = check_target(e, net, name); |
708 | if (ret) | 708 | if (ret) |
709 | goto err; | 709 | goto err; |
710 | return 0; | 710 | return 0; |
711 | err: | 711 | err: |
712 | module_put(t->u.kernel.target->me); | 712 | module_put(t->u.kernel.target->me); |
713 | cleanup_matches: | 713 | cleanup_matches: |
714 | xt_ematch_foreach(ematch, e) { | 714 | xt_ematch_foreach(ematch, e) { |
715 | if (j-- == 0) | 715 | if (j-- == 0) |
716 | break; | 716 | break; |
717 | cleanup_match(ematch, net); | 717 | cleanup_match(ematch, net); |
718 | } | 718 | } |
719 | return ret; | 719 | return ret; |
720 | } | 720 | } |
721 | 721 | ||
722 | static bool check_underflow(const struct ip6t_entry *e) | 722 | static bool check_underflow(const struct ip6t_entry *e) |
723 | { | 723 | { |
724 | const struct xt_entry_target *t; | 724 | const struct xt_entry_target *t; |
725 | unsigned int verdict; | 725 | unsigned int verdict; |
726 | 726 | ||
727 | if (!unconditional(&e->ipv6)) | 727 | if (!unconditional(&e->ipv6)) |
728 | return false; | 728 | return false; |
729 | t = ip6t_get_target_c(e); | 729 | t = ip6t_get_target_c(e); |
730 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 730 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
731 | return false; | 731 | return false; |
732 | verdict = ((struct xt_standard_target *)t)->verdict; | 732 | verdict = ((struct xt_standard_target *)t)->verdict; |
733 | verdict = -verdict - 1; | 733 | verdict = -verdict - 1; |
734 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 734 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
735 | } | 735 | } |
736 | 736 | ||
737 | static int | 737 | static int |
738 | check_entry_size_and_hooks(struct ip6t_entry *e, | 738 | check_entry_size_and_hooks(struct ip6t_entry *e, |
739 | struct xt_table_info *newinfo, | 739 | struct xt_table_info *newinfo, |
740 | const unsigned char *base, | 740 | const unsigned char *base, |
741 | const unsigned char *limit, | 741 | const unsigned char *limit, |
742 | const unsigned int *hook_entries, | 742 | const unsigned int *hook_entries, |
743 | const unsigned int *underflows, | 743 | const unsigned int *underflows, |
744 | unsigned int valid_hooks) | 744 | unsigned int valid_hooks) |
745 | { | 745 | { |
746 | unsigned int h; | 746 | unsigned int h; |
747 | 747 | ||
748 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || | 748 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || |
749 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { | 749 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { |
750 | duprintf("Bad offset %p\n", e); | 750 | duprintf("Bad offset %p\n", e); |
751 | return -EINVAL; | 751 | return -EINVAL; |
752 | } | 752 | } |
753 | 753 | ||
754 | if (e->next_offset | 754 | if (e->next_offset |
755 | < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { | 755 | < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { |
756 | duprintf("checking: element %p size %u\n", | 756 | duprintf("checking: element %p size %u\n", |
757 | e, e->next_offset); | 757 | e, e->next_offset); |
758 | return -EINVAL; | 758 | return -EINVAL; |
759 | } | 759 | } |
760 | 760 | ||
761 | /* Check hooks & underflows */ | 761 | /* Check hooks & underflows */ |
762 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 762 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
763 | if (!(valid_hooks & (1 << h))) | 763 | if (!(valid_hooks & (1 << h))) |
764 | continue; | 764 | continue; |
765 | if ((unsigned char *)e - base == hook_entries[h]) | 765 | if ((unsigned char *)e - base == hook_entries[h]) |
766 | newinfo->hook_entry[h] = hook_entries[h]; | 766 | newinfo->hook_entry[h] = hook_entries[h]; |
767 | if ((unsigned char *)e - base == underflows[h]) { | 767 | if ((unsigned char *)e - base == underflows[h]) { |
768 | if (!check_underflow(e)) { | 768 | if (!check_underflow(e)) { |
769 | pr_err("Underflows must be unconditional and " | 769 | pr_err("Underflows must be unconditional and " |
770 | "use the STANDARD target with " | 770 | "use the STANDARD target with " |
771 | "ACCEPT/DROP\n"); | 771 | "ACCEPT/DROP\n"); |
772 | return -EINVAL; | 772 | return -EINVAL; |
773 | } | 773 | } |
774 | newinfo->underflow[h] = underflows[h]; | 774 | newinfo->underflow[h] = underflows[h]; |
775 | } | 775 | } |
776 | } | 776 | } |
777 | 777 | ||
778 | /* Clear counters and comefrom */ | 778 | /* Clear counters and comefrom */ |
779 | e->counters = ((struct xt_counters) { 0, 0 }); | 779 | e->counters = ((struct xt_counters) { 0, 0 }); |
780 | e->comefrom = 0; | 780 | e->comefrom = 0; |
781 | return 0; | 781 | return 0; |
782 | } | 782 | } |
783 | 783 | ||
784 | static void cleanup_entry(struct ip6t_entry *e, struct net *net) | 784 | static void cleanup_entry(struct ip6t_entry *e, struct net *net) |
785 | { | 785 | { |
786 | struct xt_tgdtor_param par; | 786 | struct xt_tgdtor_param par; |
787 | struct xt_entry_target *t; | 787 | struct xt_entry_target *t; |
788 | struct xt_entry_match *ematch; | 788 | struct xt_entry_match *ematch; |
789 | 789 | ||
790 | /* Cleanup all matches */ | 790 | /* Cleanup all matches */ |
791 | xt_ematch_foreach(ematch, e) | 791 | xt_ematch_foreach(ematch, e) |
792 | cleanup_match(ematch, net); | 792 | cleanup_match(ematch, net); |
793 | t = ip6t_get_target(e); | 793 | t = ip6t_get_target(e); |
794 | 794 | ||
795 | par.net = net; | 795 | par.net = net; |
796 | par.target = t->u.kernel.target; | 796 | par.target = t->u.kernel.target; |
797 | par.targinfo = t->data; | 797 | par.targinfo = t->data; |
798 | par.family = NFPROTO_IPV6; | 798 | par.family = NFPROTO_IPV6; |
799 | if (par.target->destroy != NULL) | 799 | if (par.target->destroy != NULL) |
800 | par.target->destroy(&par); | 800 | par.target->destroy(&par); |
801 | module_put(par.target->me); | 801 | module_put(par.target->me); |
802 | } | 802 | } |
803 | 803 | ||
804 | /* Checks and translates the user-supplied table segment (held in | 804 | /* Checks and translates the user-supplied table segment (held in |
805 | newinfo) */ | 805 | newinfo) */ |
806 | static int | 806 | static int |
807 | translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | 807 | translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, |
808 | const struct ip6t_replace *repl) | 808 | const struct ip6t_replace *repl) |
809 | { | 809 | { |
810 | struct ip6t_entry *iter; | 810 | struct ip6t_entry *iter; |
811 | unsigned int i; | 811 | unsigned int i; |
812 | int ret = 0; | 812 | int ret = 0; |
813 | 813 | ||
814 | newinfo->size = repl->size; | 814 | newinfo->size = repl->size; |
815 | newinfo->number = repl->num_entries; | 815 | newinfo->number = repl->num_entries; |
816 | 816 | ||
817 | /* Init all hooks to impossible value. */ | 817 | /* Init all hooks to impossible value. */ |
818 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 818 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
819 | newinfo->hook_entry[i] = 0xFFFFFFFF; | 819 | newinfo->hook_entry[i] = 0xFFFFFFFF; |
820 | newinfo->underflow[i] = 0xFFFFFFFF; | 820 | newinfo->underflow[i] = 0xFFFFFFFF; |
821 | } | 821 | } |
822 | 822 | ||
823 | duprintf("translate_table: size %u\n", newinfo->size); | 823 | duprintf("translate_table: size %u\n", newinfo->size); |
824 | i = 0; | 824 | i = 0; |
825 | /* Walk through entries, checking offsets. */ | 825 | /* Walk through entries, checking offsets. */ |
826 | xt_entry_foreach(iter, entry0, newinfo->size) { | 826 | xt_entry_foreach(iter, entry0, newinfo->size) { |
827 | ret = check_entry_size_and_hooks(iter, newinfo, entry0, | 827 | ret = check_entry_size_and_hooks(iter, newinfo, entry0, |
828 | entry0 + repl->size, | 828 | entry0 + repl->size, |
829 | repl->hook_entry, | 829 | repl->hook_entry, |
830 | repl->underflow, | 830 | repl->underflow, |
831 | repl->valid_hooks); | 831 | repl->valid_hooks); |
832 | if (ret != 0) | 832 | if (ret != 0) |
833 | return ret; | 833 | return ret; |
834 | ++i; | 834 | ++i; |
835 | if (strcmp(ip6t_get_target(iter)->u.user.name, | 835 | if (strcmp(ip6t_get_target(iter)->u.user.name, |
836 | XT_ERROR_TARGET) == 0) | 836 | XT_ERROR_TARGET) == 0) |
837 | ++newinfo->stacksize; | 837 | ++newinfo->stacksize; |
838 | } | 838 | } |
839 | 839 | ||
840 | if (i != repl->num_entries) { | 840 | if (i != repl->num_entries) { |
841 | duprintf("translate_table: %u not %u entries\n", | 841 | duprintf("translate_table: %u not %u entries\n", |
842 | i, repl->num_entries); | 842 | i, repl->num_entries); |
843 | return -EINVAL; | 843 | return -EINVAL; |
844 | } | 844 | } |
845 | 845 | ||
846 | /* Check hooks all assigned */ | 846 | /* Check hooks all assigned */ |
847 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 847 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
848 | /* Only hooks which are valid */ | 848 | /* Only hooks which are valid */ |
849 | if (!(repl->valid_hooks & (1 << i))) | 849 | if (!(repl->valid_hooks & (1 << i))) |
850 | continue; | 850 | continue; |
851 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { | 851 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { |
852 | duprintf("Invalid hook entry %u %u\n", | 852 | duprintf("Invalid hook entry %u %u\n", |
853 | i, repl->hook_entry[i]); | 853 | i, repl->hook_entry[i]); |
854 | return -EINVAL; | 854 | return -EINVAL; |
855 | } | 855 | } |
856 | if (newinfo->underflow[i] == 0xFFFFFFFF) { | 856 | if (newinfo->underflow[i] == 0xFFFFFFFF) { |
857 | duprintf("Invalid underflow %u %u\n", | 857 | duprintf("Invalid underflow %u %u\n", |
858 | i, repl->underflow[i]); | 858 | i, repl->underflow[i]); |
859 | return -EINVAL; | 859 | return -EINVAL; |
860 | } | 860 | } |
861 | } | 861 | } |
862 | 862 | ||
863 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) | 863 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) |
864 | return -ELOOP; | 864 | return -ELOOP; |
865 | 865 | ||
866 | /* Finally, each sanity check must pass */ | 866 | /* Finally, each sanity check must pass */ |
867 | i = 0; | 867 | i = 0; |
868 | xt_entry_foreach(iter, entry0, newinfo->size) { | 868 | xt_entry_foreach(iter, entry0, newinfo->size) { |
869 | ret = find_check_entry(iter, net, repl->name, repl->size); | 869 | ret = find_check_entry(iter, net, repl->name, repl->size); |
870 | if (ret != 0) | 870 | if (ret != 0) |
871 | break; | 871 | break; |
872 | ++i; | 872 | ++i; |
873 | } | 873 | } |
874 | 874 | ||
875 | if (ret != 0) { | 875 | if (ret != 0) { |
876 | xt_entry_foreach(iter, entry0, newinfo->size) { | 876 | xt_entry_foreach(iter, entry0, newinfo->size) { |
877 | if (i-- == 0) | 877 | if (i-- == 0) |
878 | break; | 878 | break; |
879 | cleanup_entry(iter, net); | 879 | cleanup_entry(iter, net); |
880 | } | 880 | } |
881 | return ret; | 881 | return ret; |
882 | } | 882 | } |
883 | 883 | ||
884 | /* And one copy for every other CPU */ | 884 | /* And one copy for every other CPU */ |
885 | for_each_possible_cpu(i) { | 885 | for_each_possible_cpu(i) { |
886 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) | 886 | if (newinfo->entries[i] && newinfo->entries[i] != entry0) |
887 | memcpy(newinfo->entries[i], entry0, newinfo->size); | 887 | memcpy(newinfo->entries[i], entry0, newinfo->size); |
888 | } | 888 | } |
889 | 889 | ||
890 | return ret; | 890 | return ret; |
891 | } | 891 | } |
892 | 892 | ||
893 | static void | 893 | static void |
894 | get_counters(const struct xt_table_info *t, | 894 | get_counters(const struct xt_table_info *t, |
895 | struct xt_counters counters[]) | 895 | struct xt_counters counters[]) |
896 | { | 896 | { |
897 | struct ip6t_entry *iter; | 897 | struct ip6t_entry *iter; |
898 | unsigned int cpu; | 898 | unsigned int cpu; |
899 | unsigned int i; | 899 | unsigned int i; |
900 | unsigned int curcpu = get_cpu(); | ||
901 | 900 | ||
902 | /* Instead of clearing (by a previous call to memset()) | ||
903 | * the counters and using adds, we set the counters | ||
904 | * with data used by 'current' CPU | ||
905 | * | ||
906 | * Bottom half has to be disabled to prevent deadlock | ||
907 | * if new softirq were to run and call ipt_do_table | ||
908 | */ | ||
909 | local_bh_disable(); | ||
910 | i = 0; | ||
911 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
912 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
913 | iter->counters.pcnt); | ||
914 | ++i; | ||
915 | } | ||
916 | local_bh_enable(); | ||
917 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
918 | * (preemption is disabled) | ||
919 | */ | ||
920 | |||
921 | for_each_possible_cpu(cpu) { | 901 | for_each_possible_cpu(cpu) { |
922 | if (cpu == curcpu) | 902 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
923 | continue; | 903 | |
924 | i = 0; | 904 | i = 0; |
925 | local_bh_disable(); | ||
926 | xt_info_wrlock(cpu); | ||
927 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 905 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
928 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 906 | u64 bcnt, pcnt; |
929 | iter->counters.pcnt); | 907 | unsigned int start; |
908 | |||
909 | do { | ||
910 | start = read_seqbegin(lock); | ||
911 | bcnt = iter->counters.bcnt; | ||
912 | pcnt = iter->counters.pcnt; | ||
913 | } while (read_seqretry(lock, start)); | ||
914 | |||
915 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
930 | ++i; | 916 | ++i; |
931 | } | 917 | } |
932 | xt_info_wrunlock(cpu); | ||
933 | local_bh_enable(); | ||
934 | } | 918 | } |
935 | put_cpu(); | ||
936 | } | 919 | } |
937 | 920 | ||
938 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 921 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
939 | { | 922 | { |
940 | unsigned int countersize; | 923 | unsigned int countersize; |
941 | struct xt_counters *counters; | 924 | struct xt_counters *counters; |
942 | const struct xt_table_info *private = table->private; | 925 | const struct xt_table_info *private = table->private; |
943 | 926 | ||
944 | /* We need atomic snapshot of counters: rest doesn't change | 927 | /* We need atomic snapshot of counters: rest doesn't change |
945 | (other than comefrom, which userspace doesn't care | 928 | (other than comefrom, which userspace doesn't care |
946 | about). */ | 929 | about). */ |
947 | countersize = sizeof(struct xt_counters) * private->number; | 930 | countersize = sizeof(struct xt_counters) * private->number; |
948 | counters = vmalloc(countersize); | 931 | counters = vzalloc(countersize); |
949 | 932 | ||
950 | if (counters == NULL) | 933 | if (counters == NULL) |
951 | return ERR_PTR(-ENOMEM); | 934 | return ERR_PTR(-ENOMEM); |
952 | 935 | ||
953 | get_counters(private, counters); | 936 | get_counters(private, counters); |
954 | 937 | ||
955 | return counters; | 938 | return counters; |
956 | } | 939 | } |
957 | 940 | ||
958 | static int | 941 | static int |
959 | copy_entries_to_user(unsigned int total_size, | 942 | copy_entries_to_user(unsigned int total_size, |
960 | const struct xt_table *table, | 943 | const struct xt_table *table, |
961 | void __user *userptr) | 944 | void __user *userptr) |
962 | { | 945 | { |
963 | unsigned int off, num; | 946 | unsigned int off, num; |
964 | const struct ip6t_entry *e; | 947 | const struct ip6t_entry *e; |
965 | struct xt_counters *counters; | 948 | struct xt_counters *counters; |
966 | const struct xt_table_info *private = table->private; | 949 | const struct xt_table_info *private = table->private; |
967 | int ret = 0; | 950 | int ret = 0; |
968 | const void *loc_cpu_entry; | 951 | const void *loc_cpu_entry; |
969 | 952 | ||
970 | counters = alloc_counters(table); | 953 | counters = alloc_counters(table); |
971 | if (IS_ERR(counters)) | 954 | if (IS_ERR(counters)) |
972 | return PTR_ERR(counters); | 955 | return PTR_ERR(counters); |
973 | 956 | ||
974 | /* choose the copy that is on our node/cpu, ... | 957 | /* choose the copy that is on our node/cpu, ... |
975 | * This choice is lazy (because current thread is | 958 | * This choice is lazy (because current thread is |
976 | * allowed to migrate to another cpu) | 959 | * allowed to migrate to another cpu) |
977 | */ | 960 | */ |
978 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 961 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
979 | if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { | 962 | if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { |
980 | ret = -EFAULT; | 963 | ret = -EFAULT; |
981 | goto free_counters; | 964 | goto free_counters; |
982 | } | 965 | } |
983 | 966 | ||
984 | /* FIXME: use iterator macros --RR */ | 967 | /* FIXME: use iterator macros --RR */ |
985 | /* ... then go back and fix counters and names */ | 968 | /* ... then go back and fix counters and names */ |
986 | for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ | 969 | for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ |
987 | unsigned int i; | 970 | unsigned int i; |
988 | const struct xt_entry_match *m; | 971 | const struct xt_entry_match *m; |
989 | const struct xt_entry_target *t; | 972 | const struct xt_entry_target *t; |
990 | 973 | ||
991 | e = (struct ip6t_entry *)(loc_cpu_entry + off); | 974 | e = (struct ip6t_entry *)(loc_cpu_entry + off); |
992 | if (copy_to_user(userptr + off | 975 | if (copy_to_user(userptr + off |
993 | + offsetof(struct ip6t_entry, counters), | 976 | + offsetof(struct ip6t_entry, counters), |
994 | &counters[num], | 977 | &counters[num], |
995 | sizeof(counters[num])) != 0) { | 978 | sizeof(counters[num])) != 0) { |
996 | ret = -EFAULT; | 979 | ret = -EFAULT; |
997 | goto free_counters; | 980 | goto free_counters; |
998 | } | 981 | } |
999 | 982 | ||
1000 | for (i = sizeof(struct ip6t_entry); | 983 | for (i = sizeof(struct ip6t_entry); |
1001 | i < e->target_offset; | 984 | i < e->target_offset; |
1002 | i += m->u.match_size) { | 985 | i += m->u.match_size) { |
1003 | m = (void *)e + i; | 986 | m = (void *)e + i; |
1004 | 987 | ||
1005 | if (copy_to_user(userptr + off + i | 988 | if (copy_to_user(userptr + off + i |
1006 | + offsetof(struct xt_entry_match, | 989 | + offsetof(struct xt_entry_match, |
1007 | u.user.name), | 990 | u.user.name), |
1008 | m->u.kernel.match->name, | 991 | m->u.kernel.match->name, |
1009 | strlen(m->u.kernel.match->name)+1) | 992 | strlen(m->u.kernel.match->name)+1) |
1010 | != 0) { | 993 | != 0) { |
1011 | ret = -EFAULT; | 994 | ret = -EFAULT; |
1012 | goto free_counters; | 995 | goto free_counters; |
1013 | } | 996 | } |
1014 | } | 997 | } |
1015 | 998 | ||
1016 | t = ip6t_get_target_c(e); | 999 | t = ip6t_get_target_c(e); |
1017 | if (copy_to_user(userptr + off + e->target_offset | 1000 | if (copy_to_user(userptr + off + e->target_offset |
1018 | + offsetof(struct xt_entry_target, | 1001 | + offsetof(struct xt_entry_target, |
1019 | u.user.name), | 1002 | u.user.name), |
1020 | t->u.kernel.target->name, | 1003 | t->u.kernel.target->name, |
1021 | strlen(t->u.kernel.target->name)+1) != 0) { | 1004 | strlen(t->u.kernel.target->name)+1) != 0) { |
1022 | ret = -EFAULT; | 1005 | ret = -EFAULT; |
1023 | goto free_counters; | 1006 | goto free_counters; |
1024 | } | 1007 | } |
1025 | } | 1008 | } |
1026 | 1009 | ||
1027 | free_counters: | 1010 | free_counters: |
1028 | vfree(counters); | 1011 | vfree(counters); |
1029 | return ret; | 1012 | return ret; |
1030 | } | 1013 | } |
1031 | 1014 | ||
1032 | #ifdef CONFIG_COMPAT | 1015 | #ifdef CONFIG_COMPAT |
1033 | static void compat_standard_from_user(void *dst, const void *src) | 1016 | static void compat_standard_from_user(void *dst, const void *src) |
1034 | { | 1017 | { |
1035 | int v = *(compat_int_t *)src; | 1018 | int v = *(compat_int_t *)src; |
1036 | 1019 | ||
1037 | if (v > 0) | 1020 | if (v > 0) |
1038 | v += xt_compat_calc_jump(AF_INET6, v); | 1021 | v += xt_compat_calc_jump(AF_INET6, v); |
1039 | memcpy(dst, &v, sizeof(v)); | 1022 | memcpy(dst, &v, sizeof(v)); |
1040 | } | 1023 | } |
1041 | 1024 | ||
1042 | static int compat_standard_to_user(void __user *dst, const void *src) | 1025 | static int compat_standard_to_user(void __user *dst, const void *src) |
1043 | { | 1026 | { |
1044 | compat_int_t cv = *(int *)src; | 1027 | compat_int_t cv = *(int *)src; |
1045 | 1028 | ||
1046 | if (cv > 0) | 1029 | if (cv > 0) |
1047 | cv -= xt_compat_calc_jump(AF_INET6, cv); | 1030 | cv -= xt_compat_calc_jump(AF_INET6, cv); |
1048 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 1031 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
1049 | } | 1032 | } |
1050 | 1033 | ||
1051 | static int compat_calc_entry(const struct ip6t_entry *e, | 1034 | static int compat_calc_entry(const struct ip6t_entry *e, |
1052 | const struct xt_table_info *info, | 1035 | const struct xt_table_info *info, |
1053 | const void *base, struct xt_table_info *newinfo) | 1036 | const void *base, struct xt_table_info *newinfo) |
1054 | { | 1037 | { |
1055 | const struct xt_entry_match *ematch; | 1038 | const struct xt_entry_match *ematch; |
1056 | const struct xt_entry_target *t; | 1039 | const struct xt_entry_target *t; |
1057 | unsigned int entry_offset; | 1040 | unsigned int entry_offset; |
1058 | int off, i, ret; | 1041 | int off, i, ret; |
1059 | 1042 | ||
1060 | off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); | 1043 | off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); |
1061 | entry_offset = (void *)e - base; | 1044 | entry_offset = (void *)e - base; |
1062 | xt_ematch_foreach(ematch, e) | 1045 | xt_ematch_foreach(ematch, e) |
1063 | off += xt_compat_match_offset(ematch->u.kernel.match); | 1046 | off += xt_compat_match_offset(ematch->u.kernel.match); |
1064 | t = ip6t_get_target_c(e); | 1047 | t = ip6t_get_target_c(e); |
1065 | off += xt_compat_target_offset(t->u.kernel.target); | 1048 | off += xt_compat_target_offset(t->u.kernel.target); |
1066 | newinfo->size -= off; | 1049 | newinfo->size -= off; |
1067 | ret = xt_compat_add_offset(AF_INET6, entry_offset, off); | 1050 | ret = xt_compat_add_offset(AF_INET6, entry_offset, off); |
1068 | if (ret) | 1051 | if (ret) |
1069 | return ret; | 1052 | return ret; |
1070 | 1053 | ||
1071 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1054 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1072 | if (info->hook_entry[i] && | 1055 | if (info->hook_entry[i] && |
1073 | (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) | 1056 | (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) |
1074 | newinfo->hook_entry[i] -= off; | 1057 | newinfo->hook_entry[i] -= off; |
1075 | if (info->underflow[i] && | 1058 | if (info->underflow[i] && |
1076 | (e < (struct ip6t_entry *)(base + info->underflow[i]))) | 1059 | (e < (struct ip6t_entry *)(base + info->underflow[i]))) |
1077 | newinfo->underflow[i] -= off; | 1060 | newinfo->underflow[i] -= off; |
1078 | } | 1061 | } |
1079 | return 0; | 1062 | return 0; |
1080 | } | 1063 | } |
1081 | 1064 | ||
1082 | static int compat_table_info(const struct xt_table_info *info, | 1065 | static int compat_table_info(const struct xt_table_info *info, |
1083 | struct xt_table_info *newinfo) | 1066 | struct xt_table_info *newinfo) |
1084 | { | 1067 | { |
1085 | struct ip6t_entry *iter; | 1068 | struct ip6t_entry *iter; |
1086 | void *loc_cpu_entry; | 1069 | void *loc_cpu_entry; |
1087 | int ret; | 1070 | int ret; |
1088 | 1071 | ||
1089 | if (!newinfo || !info) | 1072 | if (!newinfo || !info) |
1090 | return -EINVAL; | 1073 | return -EINVAL; |
1091 | 1074 | ||
1092 | /* we dont care about newinfo->entries[] */ | 1075 | /* we dont care about newinfo->entries[] */ |
1093 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | 1076 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); |
1094 | newinfo->initial_entries = 0; | 1077 | newinfo->initial_entries = 0; |
1095 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; | 1078 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; |
1096 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { | 1079 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { |
1097 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); | 1080 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); |
1098 | if (ret != 0) | 1081 | if (ret != 0) |
1099 | return ret; | 1082 | return ret; |
1100 | } | 1083 | } |
1101 | return 0; | 1084 | return 0; |
1102 | } | 1085 | } |
1103 | #endif | 1086 | #endif |
1104 | 1087 | ||
1105 | static int get_info(struct net *net, void __user *user, | 1088 | static int get_info(struct net *net, void __user *user, |
1106 | const int *len, int compat) | 1089 | const int *len, int compat) |
1107 | { | 1090 | { |
1108 | char name[XT_TABLE_MAXNAMELEN]; | 1091 | char name[XT_TABLE_MAXNAMELEN]; |
1109 | struct xt_table *t; | 1092 | struct xt_table *t; |
1110 | int ret; | 1093 | int ret; |
1111 | 1094 | ||
1112 | if (*len != sizeof(struct ip6t_getinfo)) { | 1095 | if (*len != sizeof(struct ip6t_getinfo)) { |
1113 | duprintf("length %u != %zu\n", *len, | 1096 | duprintf("length %u != %zu\n", *len, |
1114 | sizeof(struct ip6t_getinfo)); | 1097 | sizeof(struct ip6t_getinfo)); |
1115 | return -EINVAL; | 1098 | return -EINVAL; |
1116 | } | 1099 | } |
1117 | 1100 | ||
1118 | if (copy_from_user(name, user, sizeof(name)) != 0) | 1101 | if (copy_from_user(name, user, sizeof(name)) != 0) |
1119 | return -EFAULT; | 1102 | return -EFAULT; |
1120 | 1103 | ||
1121 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; | 1104 | name[XT_TABLE_MAXNAMELEN-1] = '\0'; |
1122 | #ifdef CONFIG_COMPAT | 1105 | #ifdef CONFIG_COMPAT |
1123 | if (compat) | 1106 | if (compat) |
1124 | xt_compat_lock(AF_INET6); | 1107 | xt_compat_lock(AF_INET6); |
1125 | #endif | 1108 | #endif |
1126 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), | 1109 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), |
1127 | "ip6table_%s", name); | 1110 | "ip6table_%s", name); |
1128 | if (t && !IS_ERR(t)) { | 1111 | if (t && !IS_ERR(t)) { |
1129 | struct ip6t_getinfo info; | 1112 | struct ip6t_getinfo info; |
1130 | const struct xt_table_info *private = t->private; | 1113 | const struct xt_table_info *private = t->private; |
1131 | #ifdef CONFIG_COMPAT | 1114 | #ifdef CONFIG_COMPAT |
1132 | struct xt_table_info tmp; | 1115 | struct xt_table_info tmp; |
1133 | 1116 | ||
1134 | if (compat) { | 1117 | if (compat) { |
1135 | ret = compat_table_info(private, &tmp); | 1118 | ret = compat_table_info(private, &tmp); |
1136 | xt_compat_flush_offsets(AF_INET6); | 1119 | xt_compat_flush_offsets(AF_INET6); |
1137 | private = &tmp; | 1120 | private = &tmp; |
1138 | } | 1121 | } |
1139 | #endif | 1122 | #endif |
1140 | memset(&info, 0, sizeof(info)); | 1123 | memset(&info, 0, sizeof(info)); |
1141 | info.valid_hooks = t->valid_hooks; | 1124 | info.valid_hooks = t->valid_hooks; |
1142 | memcpy(info.hook_entry, private->hook_entry, | 1125 | memcpy(info.hook_entry, private->hook_entry, |
1143 | sizeof(info.hook_entry)); | 1126 | sizeof(info.hook_entry)); |
1144 | memcpy(info.underflow, private->underflow, | 1127 | memcpy(info.underflow, private->underflow, |
1145 | sizeof(info.underflow)); | 1128 | sizeof(info.underflow)); |
1146 | info.num_entries = private->number; | 1129 | info.num_entries = private->number; |
1147 | info.size = private->size; | 1130 | info.size = private->size; |
1148 | strcpy(info.name, name); | 1131 | strcpy(info.name, name); |
1149 | 1132 | ||
1150 | if (copy_to_user(user, &info, *len) != 0) | 1133 | if (copy_to_user(user, &info, *len) != 0) |
1151 | ret = -EFAULT; | 1134 | ret = -EFAULT; |
1152 | else | 1135 | else |
1153 | ret = 0; | 1136 | ret = 0; |
1154 | 1137 | ||
1155 | xt_table_unlock(t); | 1138 | xt_table_unlock(t); |
1156 | module_put(t->me); | 1139 | module_put(t->me); |
1157 | } else | 1140 | } else |
1158 | ret = t ? PTR_ERR(t) : -ENOENT; | 1141 | ret = t ? PTR_ERR(t) : -ENOENT; |
1159 | #ifdef CONFIG_COMPAT | 1142 | #ifdef CONFIG_COMPAT |
1160 | if (compat) | 1143 | if (compat) |
1161 | xt_compat_unlock(AF_INET6); | 1144 | xt_compat_unlock(AF_INET6); |
1162 | #endif | 1145 | #endif |
1163 | return ret; | 1146 | return ret; |
1164 | } | 1147 | } |
1165 | 1148 | ||
1166 | static int | 1149 | static int |
1167 | get_entries(struct net *net, struct ip6t_get_entries __user *uptr, | 1150 | get_entries(struct net *net, struct ip6t_get_entries __user *uptr, |
1168 | const int *len) | 1151 | const int *len) |
1169 | { | 1152 | { |
1170 | int ret; | 1153 | int ret; |
1171 | struct ip6t_get_entries get; | 1154 | struct ip6t_get_entries get; |
1172 | struct xt_table *t; | 1155 | struct xt_table *t; |
1173 | 1156 | ||
1174 | if (*len < sizeof(get)) { | 1157 | if (*len < sizeof(get)) { |
1175 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); | 1158 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); |
1176 | return -EINVAL; | 1159 | return -EINVAL; |
1177 | } | 1160 | } |
1178 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1161 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1179 | return -EFAULT; | 1162 | return -EFAULT; |
1180 | if (*len != sizeof(struct ip6t_get_entries) + get.size) { | 1163 | if (*len != sizeof(struct ip6t_get_entries) + get.size) { |
1181 | duprintf("get_entries: %u != %zu\n", | 1164 | duprintf("get_entries: %u != %zu\n", |
1182 | *len, sizeof(get) + get.size); | 1165 | *len, sizeof(get) + get.size); |
1183 | return -EINVAL; | 1166 | return -EINVAL; |
1184 | } | 1167 | } |
1185 | 1168 | ||
1186 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1169 | t = xt_find_table_lock(net, AF_INET6, get.name); |
1187 | if (t && !IS_ERR(t)) { | 1170 | if (t && !IS_ERR(t)) { |
1188 | struct xt_table_info *private = t->private; | 1171 | struct xt_table_info *private = t->private; |
1189 | duprintf("t->private->number = %u\n", private->number); | 1172 | duprintf("t->private->number = %u\n", private->number); |
1190 | if (get.size == private->size) | 1173 | if (get.size == private->size) |
1191 | ret = copy_entries_to_user(private->size, | 1174 | ret = copy_entries_to_user(private->size, |
1192 | t, uptr->entrytable); | 1175 | t, uptr->entrytable); |
1193 | else { | 1176 | else { |
1194 | duprintf("get_entries: I've got %u not %u!\n", | 1177 | duprintf("get_entries: I've got %u not %u!\n", |
1195 | private->size, get.size); | 1178 | private->size, get.size); |
1196 | ret = -EAGAIN; | 1179 | ret = -EAGAIN; |
1197 | } | 1180 | } |
1198 | module_put(t->me); | 1181 | module_put(t->me); |
1199 | xt_table_unlock(t); | 1182 | xt_table_unlock(t); |
1200 | } else | 1183 | } else |
1201 | ret = t ? PTR_ERR(t) : -ENOENT; | 1184 | ret = t ? PTR_ERR(t) : -ENOENT; |
1202 | 1185 | ||
1203 | return ret; | 1186 | return ret; |
1204 | } | 1187 | } |
1205 | 1188 | ||
1206 | static int | 1189 | static int |
1207 | __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | 1190 | __do_replace(struct net *net, const char *name, unsigned int valid_hooks, |
1208 | struct xt_table_info *newinfo, unsigned int num_counters, | 1191 | struct xt_table_info *newinfo, unsigned int num_counters, |
1209 | void __user *counters_ptr) | 1192 | void __user *counters_ptr) |
1210 | { | 1193 | { |
1211 | int ret; | 1194 | int ret; |
1212 | struct xt_table *t; | 1195 | struct xt_table *t; |
1213 | struct xt_table_info *oldinfo; | 1196 | struct xt_table_info *oldinfo; |
1214 | struct xt_counters *counters; | 1197 | struct xt_counters *counters; |
1215 | const void *loc_cpu_old_entry; | 1198 | const void *loc_cpu_old_entry; |
1216 | struct ip6t_entry *iter; | 1199 | struct ip6t_entry *iter; |
1217 | 1200 | ||
1218 | ret = 0; | 1201 | ret = 0; |
1219 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1202 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1220 | if (!counters) { | 1203 | if (!counters) { |
1221 | ret = -ENOMEM; | 1204 | ret = -ENOMEM; |
1222 | goto out; | 1205 | goto out; |
1223 | } | 1206 | } |
1224 | 1207 | ||
1225 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), | 1208 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), |
1226 | "ip6table_%s", name); | 1209 | "ip6table_%s", name); |
1227 | if (!t || IS_ERR(t)) { | 1210 | if (!t || IS_ERR(t)) { |
1228 | ret = t ? PTR_ERR(t) : -ENOENT; | 1211 | ret = t ? PTR_ERR(t) : -ENOENT; |
1229 | goto free_newinfo_counters_untrans; | 1212 | goto free_newinfo_counters_untrans; |
1230 | } | 1213 | } |
1231 | 1214 | ||
1232 | /* You lied! */ | 1215 | /* You lied! */ |
1233 | if (valid_hooks != t->valid_hooks) { | 1216 | if (valid_hooks != t->valid_hooks) { |
1234 | duprintf("Valid hook crap: %08X vs %08X\n", | 1217 | duprintf("Valid hook crap: %08X vs %08X\n", |
1235 | valid_hooks, t->valid_hooks); | 1218 | valid_hooks, t->valid_hooks); |
1236 | ret = -EINVAL; | 1219 | ret = -EINVAL; |
1237 | goto put_module; | 1220 | goto put_module; |
1238 | } | 1221 | } |
1239 | 1222 | ||
1240 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); | 1223 | oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); |
1241 | if (!oldinfo) | 1224 | if (!oldinfo) |
1242 | goto put_module; | 1225 | goto put_module; |
1243 | 1226 | ||
1244 | /* Update module usage count based on number of rules */ | 1227 | /* Update module usage count based on number of rules */ |
1245 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", | 1228 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", |
1246 | oldinfo->number, oldinfo->initial_entries, newinfo->number); | 1229 | oldinfo->number, oldinfo->initial_entries, newinfo->number); |
1247 | if ((oldinfo->number > oldinfo->initial_entries) || | 1230 | if ((oldinfo->number > oldinfo->initial_entries) || |
1248 | (newinfo->number <= oldinfo->initial_entries)) | 1231 | (newinfo->number <= oldinfo->initial_entries)) |
1249 | module_put(t->me); | 1232 | module_put(t->me); |
1250 | if ((oldinfo->number > oldinfo->initial_entries) && | 1233 | if ((oldinfo->number > oldinfo->initial_entries) && |
1251 | (newinfo->number <= oldinfo->initial_entries)) | 1234 | (newinfo->number <= oldinfo->initial_entries)) |
1252 | module_put(t->me); | 1235 | module_put(t->me); |
1253 | 1236 | ||
1254 | /* Get the old counters, and synchronize with replace */ | 1237 | /* Get the old counters, and synchronize with replace */ |
1255 | get_counters(oldinfo, counters); | 1238 | get_counters(oldinfo, counters); |
1256 | 1239 | ||
1257 | /* Decrease module usage counts and free resource */ | 1240 | /* Decrease module usage counts and free resource */ |
1258 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1241 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1259 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) | 1242 | xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) |
1260 | cleanup_entry(iter, net); | 1243 | cleanup_entry(iter, net); |
1261 | 1244 | ||
1262 | xt_free_table_info(oldinfo); | 1245 | xt_free_table_info(oldinfo); |
1263 | if (copy_to_user(counters_ptr, counters, | 1246 | if (copy_to_user(counters_ptr, counters, |
1264 | sizeof(struct xt_counters) * num_counters) != 0) | 1247 | sizeof(struct xt_counters) * num_counters) != 0) |
1265 | ret = -EFAULT; | 1248 | ret = -EFAULT; |
1266 | vfree(counters); | 1249 | vfree(counters); |
1267 | xt_table_unlock(t); | 1250 | xt_table_unlock(t); |
1268 | return ret; | 1251 | return ret; |
1269 | 1252 | ||
1270 | put_module: | 1253 | put_module: |
1271 | module_put(t->me); | 1254 | module_put(t->me); |
1272 | xt_table_unlock(t); | 1255 | xt_table_unlock(t); |
1273 | free_newinfo_counters_untrans: | 1256 | free_newinfo_counters_untrans: |
1274 | vfree(counters); | 1257 | vfree(counters); |
1275 | out: | 1258 | out: |
1276 | return ret; | 1259 | return ret; |
1277 | } | 1260 | } |
1278 | 1261 | ||
/*
 * IP6T_SO_SET_REPLACE handler: copy a whole replacement ruleset from
 * userspace, translate/verify it, and swap it into the named table via
 * __do_replace().
 *
 * Returns 0 on success or a negative errno.  On any failure after
 * translation the freshly translated entries are torn down with
 * cleanup_entry() before the table info is freed.
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check: num_counters * sizeof(struct xt_counters) is
	 * computed later (in __do_replace); refuse values that would wrap. */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	/* the entry blob follows the ip6t_replace header in the user buffer */
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* translation succeeded, so each entry holds match/target refs
	 * that must be released before the table info is freed */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1326 | 1309 | ||
1327 | static int | 1310 | static int |
1328 | do_add_counters(struct net *net, const void __user *user, unsigned int len, | 1311 | do_add_counters(struct net *net, const void __user *user, unsigned int len, |
1329 | int compat) | 1312 | int compat) |
1330 | { | 1313 | { |
1331 | unsigned int i, curcpu; | 1314 | unsigned int i, curcpu; |
1332 | struct xt_counters_info tmp; | 1315 | struct xt_counters_info tmp; |
1333 | struct xt_counters *paddc; | 1316 | struct xt_counters *paddc; |
1334 | unsigned int num_counters; | 1317 | unsigned int num_counters; |
1335 | char *name; | 1318 | char *name; |
1336 | int size; | 1319 | int size; |
1337 | void *ptmp; | 1320 | void *ptmp; |
1338 | struct xt_table *t; | 1321 | struct xt_table *t; |
1339 | const struct xt_table_info *private; | 1322 | const struct xt_table_info *private; |
1340 | int ret = 0; | 1323 | int ret = 0; |
1341 | const void *loc_cpu_entry; | 1324 | const void *loc_cpu_entry; |
1342 | struct ip6t_entry *iter; | 1325 | struct ip6t_entry *iter; |
1343 | #ifdef CONFIG_COMPAT | 1326 | #ifdef CONFIG_COMPAT |
1344 | struct compat_xt_counters_info compat_tmp; | 1327 | struct compat_xt_counters_info compat_tmp; |
1345 | 1328 | ||
1346 | if (compat) { | 1329 | if (compat) { |
1347 | ptmp = &compat_tmp; | 1330 | ptmp = &compat_tmp; |
1348 | size = sizeof(struct compat_xt_counters_info); | 1331 | size = sizeof(struct compat_xt_counters_info); |
1349 | } else | 1332 | } else |
1350 | #endif | 1333 | #endif |
1351 | { | 1334 | { |
1352 | ptmp = &tmp; | 1335 | ptmp = &tmp; |
1353 | size = sizeof(struct xt_counters_info); | 1336 | size = sizeof(struct xt_counters_info); |
1354 | } | 1337 | } |
1355 | 1338 | ||
1356 | if (copy_from_user(ptmp, user, size) != 0) | 1339 | if (copy_from_user(ptmp, user, size) != 0) |
1357 | return -EFAULT; | 1340 | return -EFAULT; |
1358 | 1341 | ||
1359 | #ifdef CONFIG_COMPAT | 1342 | #ifdef CONFIG_COMPAT |
1360 | if (compat) { | 1343 | if (compat) { |
1361 | num_counters = compat_tmp.num_counters; | 1344 | num_counters = compat_tmp.num_counters; |
1362 | name = compat_tmp.name; | 1345 | name = compat_tmp.name; |
1363 | } else | 1346 | } else |
1364 | #endif | 1347 | #endif |
1365 | { | 1348 | { |
1366 | num_counters = tmp.num_counters; | 1349 | num_counters = tmp.num_counters; |
1367 | name = tmp.name; | 1350 | name = tmp.name; |
1368 | } | 1351 | } |
1369 | 1352 | ||
1370 | if (len != size + num_counters * sizeof(struct xt_counters)) | 1353 | if (len != size + num_counters * sizeof(struct xt_counters)) |
1371 | return -EINVAL; | 1354 | return -EINVAL; |
1372 | 1355 | ||
1373 | paddc = vmalloc(len - size); | 1356 | paddc = vmalloc(len - size); |
1374 | if (!paddc) | 1357 | if (!paddc) |
1375 | return -ENOMEM; | 1358 | return -ENOMEM; |
1376 | 1359 | ||
1377 | if (copy_from_user(paddc, user + size, len - size) != 0) { | 1360 | if (copy_from_user(paddc, user + size, len - size) != 0) { |
1378 | ret = -EFAULT; | 1361 | ret = -EFAULT; |
1379 | goto free; | 1362 | goto free; |
1380 | } | 1363 | } |
1381 | 1364 | ||
1382 | t = xt_find_table_lock(net, AF_INET6, name); | 1365 | t = xt_find_table_lock(net, AF_INET6, name); |
1383 | if (!t || IS_ERR(t)) { | 1366 | if (!t || IS_ERR(t)) { |
1384 | ret = t ? PTR_ERR(t) : -ENOENT; | 1367 | ret = t ? PTR_ERR(t) : -ENOENT; |
1385 | goto free; | 1368 | goto free; |
1386 | } | 1369 | } |
1387 | 1370 | ||
1388 | 1371 | ||
1389 | local_bh_disable(); | 1372 | local_bh_disable(); |
1390 | private = t->private; | 1373 | private = t->private; |
1391 | if (private->number != num_counters) { | 1374 | if (private->number != num_counters) { |
1392 | ret = -EINVAL; | 1375 | ret = -EINVAL; |
1393 | goto unlock_up_free; | 1376 | goto unlock_up_free; |
1394 | } | 1377 | } |
1395 | 1378 | ||
1396 | i = 0; | 1379 | i = 0; |
1397 | /* Choose the copy that is on our node */ | 1380 | /* Choose the copy that is on our node */ |
1398 | curcpu = smp_processor_id(); | 1381 | curcpu = smp_processor_id(); |
1399 | xt_info_wrlock(curcpu); | 1382 | xt_info_wrlock(curcpu); |
1400 | loc_cpu_entry = private->entries[curcpu]; | 1383 | loc_cpu_entry = private->entries[curcpu]; |
1401 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { | 1384 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { |
1402 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); | 1385 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); |
1403 | ++i; | 1386 | ++i; |
1404 | } | 1387 | } |
1405 | xt_info_wrunlock(curcpu); | 1388 | xt_info_wrunlock(curcpu); |
1406 | 1389 | ||
1407 | unlock_up_free: | 1390 | unlock_up_free: |
1408 | local_bh_enable(); | 1391 | local_bh_enable(); |
1409 | xt_table_unlock(t); | 1392 | xt_table_unlock(t); |
1410 | module_put(t->me); | 1393 | module_put(t->me); |
1411 | free: | 1394 | free: |
1412 | vfree(paddc); | 1395 | vfree(paddc); |
1413 | 1396 | ||
1414 | return ret; | 1397 | return ret; |
1415 | } | 1398 | } |
1416 | 1399 | ||
1417 | #ifdef CONFIG_COMPAT | 1400 | #ifdef CONFIG_COMPAT |
/*
 * Layout of the IP6T_SO_SET_REPLACE argument as seen from a 32-bit
 * (compat) userland process.  Mirrors struct ip6t_replace field for
 * field, but with fixed 32-bit sizes and a compat user pointer so the
 * structure has the same layout the 32-bit binary produced.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];	/* byte offsets into entries[] */
	u32 underflow[NF_INET_NUMHOOKS];	/* byte offsets into entries[] */
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];	/* variable-length tail */
};
1429 | 1412 | ||
/*
 * Convert one native ip6t_entry (plus its counters[i] snapshot) into the
 * compat (32-bit) layout directly in the userspace buffer at *dstptr.
 *
 * *dstptr advances past everything written; *size is reduced by the total
 * native-vs-compat size difference so target_offset/next_offset can be
 * rewritten relative to the compat layout.  Returns 0 or -EFAULT /
 * a compat-conversion error.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* copy the fixed header, then overwrite its counters field with
	 * the snapshot taken for this entry */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* each match/target conversion advances *dstptr and shrinks *size
	 * by its own native-vs-compat delta */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; offsets in the
	 * compat entry must account for it */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1468 | 1451 | ||
1469 | static int | 1452 | static int |
1470 | compat_find_calc_match(struct xt_entry_match *m, | 1453 | compat_find_calc_match(struct xt_entry_match *m, |
1471 | const char *name, | 1454 | const char *name, |
1472 | const struct ip6t_ip6 *ipv6, | 1455 | const struct ip6t_ip6 *ipv6, |
1473 | unsigned int hookmask, | 1456 | unsigned int hookmask, |
1474 | int *size) | 1457 | int *size) |
1475 | { | 1458 | { |
1476 | struct xt_match *match; | 1459 | struct xt_match *match; |
1477 | 1460 | ||
1478 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, | 1461 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
1479 | m->u.user.revision); | 1462 | m->u.user.revision); |
1480 | if (IS_ERR(match)) { | 1463 | if (IS_ERR(match)) { |
1481 | duprintf("compat_check_calc_match: `%s' not found\n", | 1464 | duprintf("compat_check_calc_match: `%s' not found\n", |
1482 | m->u.user.name); | 1465 | m->u.user.name); |
1483 | return PTR_ERR(match); | 1466 | return PTR_ERR(match); |
1484 | } | 1467 | } |
1485 | m->u.kernel.match = match; | 1468 | m->u.kernel.match = match; |
1486 | *size += xt_compat_match_offset(match); | 1469 | *size += xt_compat_match_offset(match); |
1487 | return 0; | 1470 | return 0; |
1488 | } | 1471 | } |
1489 | 1472 | ||
1490 | static void compat_release_entry(struct compat_ip6t_entry *e) | 1473 | static void compat_release_entry(struct compat_ip6t_entry *e) |
1491 | { | 1474 | { |
1492 | struct xt_entry_target *t; | 1475 | struct xt_entry_target *t; |
1493 | struct xt_entry_match *ematch; | 1476 | struct xt_entry_match *ematch; |
1494 | 1477 | ||
1495 | /* Cleanup all matches */ | 1478 | /* Cleanup all matches */ |
1496 | xt_ematch_foreach(ematch, e) | 1479 | xt_ematch_foreach(ematch, e) |
1497 | module_put(ematch->u.kernel.match->me); | 1480 | module_put(ematch->u.kernel.match->me); |
1498 | t = compat_ip6t_get_target(e); | 1481 | t = compat_ip6t_get_target(e); |
1499 | module_put(t->u.kernel.target->me); | 1482 | module_put(t->u.kernel.target->me); |
1500 | } | 1483 | } |
1501 | 1484 | ||
/*
 * Validate one compat-layout entry: alignment, bounds, minimum size,
 * then resolve all of its matches and its target.  Accumulates in 'off'
 * the total native-vs-compat size delta for this entry, adds *size by it,
 * and records the per-entry delta via xt_compat_add_offset() for later
 * offset translation.  Also latches hook_entry/underflow markers into
 * newinfo when this entry sits at one of the given byte offsets.
 *
 * On error, releases the module references taken so far ('j' counts the
 * matches successfully resolved) and returns a negative errno.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* entry must be aligned for the compat layout and must leave room
	 * for at least its own header before 'limit' */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* smallest legal entry: header plus an (empty-data) target record */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* base delta between the native and compat fixed headers; each
	 * match/target below adds its own delta */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;	/* count resolved matches for the unwind path */
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* remember this entry's delta for later offset rewriting */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* release only the first j matches — the ones actually resolved */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1589 | 1572 | ||
/*
 * Expand one compat-layout entry into the native ip6t_entry layout at
 * *dstptr, growing *size by the native-vs-compat delta and advancing
 * *dstptr past everything written.  target_offset/next_offset are
 * rewritten for the native layout, and any hook_entry/underflow marker in
 * newinfo that lies beyond this entry is shifted by the same delta.
 *
 * Assumes the entry already passed check_compat_entry_size_and_hooks(),
 * so u.kernel.target is valid.  Returns 0 or a match-conversion error.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* each conversion advances *dstptr and grows *size by its delta */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth: adjust entry-local offsets
	 * to the expanded native layout */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* shift any hook/underflow byte offset that points past this entry */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1630 | 1613 | ||
/*
 * Run the ->check phase for an entry that was converted from compat
 * layout: check every match, then the target.  On failure, only the 'j'
 * matches whose check succeeded are cleaned up (cleanup_match calls their
 * ->destroy); the rest were never checked and need nothing here.
 *
 * Returns 0 on success or the first check's negative errno.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	/* common check parameters shared by every match of this entry */
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;	/* count successfully checked matches for unwind */
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* undo only the first j matches */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1665 | 1648 | ||
1666 | static int | 1649 | static int |
1667 | translate_compat_table(struct net *net, | 1650 | translate_compat_table(struct net *net, |
1668 | const char *name, | 1651 | const char *name, |
1669 | unsigned int valid_hooks, | 1652 | unsigned int valid_hooks, |
1670 | struct xt_table_info **pinfo, | 1653 | struct xt_table_info **pinfo, |
1671 | void **pentry0, | 1654 | void **pentry0, |
1672 | unsigned int total_size, | 1655 | unsigned int total_size, |
1673 | unsigned int number, | 1656 | unsigned int number, |
1674 | unsigned int *hook_entries, | 1657 | unsigned int *hook_entries, |
1675 | unsigned int *underflows) | 1658 | unsigned int *underflows) |
1676 | { | 1659 | { |
1677 | unsigned int i, j; | 1660 | unsigned int i, j; |
1678 | struct xt_table_info *newinfo, *info; | 1661 | struct xt_table_info *newinfo, *info; |
1679 | void *pos, *entry0, *entry1; | 1662 | void *pos, *entry0, *entry1; |
1680 | struct compat_ip6t_entry *iter0; | 1663 | struct compat_ip6t_entry *iter0; |
1681 | struct ip6t_entry *iter1; | 1664 | struct ip6t_entry *iter1; |
1682 | unsigned int size; | 1665 | unsigned int size; |
1683 | int ret = 0; | 1666 | int ret = 0; |
1684 | 1667 | ||
1685 | info = *pinfo; | 1668 | info = *pinfo; |
1686 | entry0 = *pentry0; | 1669 | entry0 = *pentry0; |
1687 | size = total_size; | 1670 | size = total_size; |
1688 | info->number = number; | 1671 | info->number = number; |
1689 | 1672 | ||
1690 | /* Init all hooks to impossible value. */ | 1673 | /* Init all hooks to impossible value. */ |
1691 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1674 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1692 | info->hook_entry[i] = 0xFFFFFFFF; | 1675 | info->hook_entry[i] = 0xFFFFFFFF; |
1693 | info->underflow[i] = 0xFFFFFFFF; | 1676 | info->underflow[i] = 0xFFFFFFFF; |
1694 | } | 1677 | } |
1695 | 1678 | ||
1696 | duprintf("translate_compat_table: size %u\n", info->size); | 1679 | duprintf("translate_compat_table: size %u\n", info->size); |
1697 | j = 0; | 1680 | j = 0; |
1698 | xt_compat_lock(AF_INET6); | 1681 | xt_compat_lock(AF_INET6); |
1699 | /* Walk through entries, checking offsets. */ | 1682 | /* Walk through entries, checking offsets. */ |
1700 | xt_entry_foreach(iter0, entry0, total_size) { | 1683 | xt_entry_foreach(iter0, entry0, total_size) { |
1701 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, | 1684 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, |
1702 | entry0, | 1685 | entry0, |
1703 | entry0 + total_size, | 1686 | entry0 + total_size, |
1704 | hook_entries, | 1687 | hook_entries, |
1705 | underflows, | 1688 | underflows, |
1706 | name); | 1689 | name); |
1707 | if (ret != 0) | 1690 | if (ret != 0) |
1708 | goto out_unlock; | 1691 | goto out_unlock; |
1709 | ++j; | 1692 | ++j; |
1710 | } | 1693 | } |
1711 | 1694 | ||
1712 | ret = -EINVAL; | 1695 | ret = -EINVAL; |
1713 | if (j != number) { | 1696 | if (j != number) { |
1714 | duprintf("translate_compat_table: %u not %u entries\n", | 1697 | duprintf("translate_compat_table: %u not %u entries\n", |
1715 | j, number); | 1698 | j, number); |
1716 | goto out_unlock; | 1699 | goto out_unlock; |
1717 | } | 1700 | } |
1718 | 1701 | ||
1719 | /* Check hooks all assigned */ | 1702 | /* Check hooks all assigned */ |
1720 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1703 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1721 | /* Only hooks which are valid */ | 1704 | /* Only hooks which are valid */ |
1722 | if (!(valid_hooks & (1 << i))) | 1705 | if (!(valid_hooks & (1 << i))) |
1723 | continue; | 1706 | continue; |
1724 | if (info->hook_entry[i] == 0xFFFFFFFF) { | 1707 | if (info->hook_entry[i] == 0xFFFFFFFF) { |
1725 | duprintf("Invalid hook entry %u %u\n", | 1708 | duprintf("Invalid hook entry %u %u\n", |
1726 | i, hook_entries[i]); | 1709 | i, hook_entries[i]); |
1727 | goto out_unlock; | 1710 | goto out_unlock; |
1728 | } | 1711 | } |
1729 | if (info->underflow[i] == 0xFFFFFFFF) { | 1712 | if (info->underflow[i] == 0xFFFFFFFF) { |
1730 | duprintf("Invalid underflow %u %u\n", | 1713 | duprintf("Invalid underflow %u %u\n", |
1731 | i, underflows[i]); | 1714 | i, underflows[i]); |
1732 | goto out_unlock; | 1715 | goto out_unlock; |
1733 | } | 1716 | } |
1734 | } | 1717 | } |
1735 | 1718 | ||
1736 | ret = -ENOMEM; | 1719 | ret = -ENOMEM; |
1737 | newinfo = xt_alloc_table_info(size); | 1720 | newinfo = xt_alloc_table_info(size); |
1738 | if (!newinfo) | 1721 | if (!newinfo) |
1739 | goto out_unlock; | 1722 | goto out_unlock; |
1740 | 1723 | ||
1741 | newinfo->number = number; | 1724 | newinfo->number = number; |
1742 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1725 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1743 | newinfo->hook_entry[i] = info->hook_entry[i]; | 1726 | newinfo->hook_entry[i] = info->hook_entry[i]; |
1744 | newinfo->underflow[i] = info->underflow[i]; | 1727 | newinfo->underflow[i] = info->underflow[i]; |
1745 | } | 1728 | } |
1746 | entry1 = newinfo->entries[raw_smp_processor_id()]; | 1729 | entry1 = newinfo->entries[raw_smp_processor_id()]; |
1747 | pos = entry1; | 1730 | pos = entry1; |
1748 | size = total_size; | 1731 | size = total_size; |
1749 | xt_entry_foreach(iter0, entry0, total_size) { | 1732 | xt_entry_foreach(iter0, entry0, total_size) { |
1750 | ret = compat_copy_entry_from_user(iter0, &pos, &size, | 1733 | ret = compat_copy_entry_from_user(iter0, &pos, &size, |
1751 | name, newinfo, entry1); | 1734 | name, newinfo, entry1); |
1752 | if (ret != 0) | 1735 | if (ret != 0) |
1753 | break; | 1736 | break; |
1754 | } | 1737 | } |
1755 | xt_compat_flush_offsets(AF_INET6); | 1738 | xt_compat_flush_offsets(AF_INET6); |
1756 | xt_compat_unlock(AF_INET6); | 1739 | xt_compat_unlock(AF_INET6); |
1757 | if (ret) | 1740 | if (ret) |
1758 | goto free_newinfo; | 1741 | goto free_newinfo; |
1759 | 1742 | ||
1760 | ret = -ELOOP; | 1743 | ret = -ELOOP; |
1761 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) | 1744 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) |
1762 | goto free_newinfo; | 1745 | goto free_newinfo; |
1763 | 1746 | ||
1764 | i = 0; | 1747 | i = 0; |
1765 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1748 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1766 | ret = compat_check_entry(iter1, net, name); | 1749 | ret = compat_check_entry(iter1, net, name); |
1767 | if (ret != 0) | 1750 | if (ret != 0) |
1768 | break; | 1751 | break; |
1769 | ++i; | 1752 | ++i; |
1770 | if (strcmp(ip6t_get_target(iter1)->u.user.name, | 1753 | if (strcmp(ip6t_get_target(iter1)->u.user.name, |
1771 | XT_ERROR_TARGET) == 0) | 1754 | XT_ERROR_TARGET) == 0) |
1772 | ++newinfo->stacksize; | 1755 | ++newinfo->stacksize; |
1773 | } | 1756 | } |
1774 | if (ret) { | 1757 | if (ret) { |
1775 | /* | 1758 | /* |
1776 | * The first i matches need cleanup_entry (calls ->destroy) | 1759 | * The first i matches need cleanup_entry (calls ->destroy) |
1777 | * because they had called ->check already. The other j-i | 1760 | * because they had called ->check already. The other j-i |
1778 | * entries need only release. | 1761 | * entries need only release. |
1779 | */ | 1762 | */ |
1780 | int skip = i; | 1763 | int skip = i; |
1781 | j -= i; | 1764 | j -= i; |
1782 | xt_entry_foreach(iter0, entry0, newinfo->size) { | 1765 | xt_entry_foreach(iter0, entry0, newinfo->size) { |
1783 | if (skip-- > 0) | 1766 | if (skip-- > 0) |
1784 | continue; | 1767 | continue; |
1785 | if (j-- == 0) | 1768 | if (j-- == 0) |
1786 | break; | 1769 | break; |
1787 | compat_release_entry(iter0); | 1770 | compat_release_entry(iter0); |
1788 | } | 1771 | } |
1789 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1772 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1790 | if (i-- == 0) | 1773 | if (i-- == 0) |
1791 | break; | 1774 | break; |
1792 | cleanup_entry(iter1, net); | 1775 | cleanup_entry(iter1, net); |
1793 | } | 1776 | } |
1794 | xt_free_table_info(newinfo); | 1777 | xt_free_table_info(newinfo); |
1795 | return ret; | 1778 | return ret; |
1796 | } | 1779 | } |
1797 | 1780 | ||
1798 | /* And one copy for every other CPU */ | 1781 | /* And one copy for every other CPU */ |
1799 | for_each_possible_cpu(i) | 1782 | for_each_possible_cpu(i) |
1800 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) | 1783 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) |
1801 | memcpy(newinfo->entries[i], entry1, newinfo->size); | 1784 | memcpy(newinfo->entries[i], entry1, newinfo->size); |
1802 | 1785 | ||
1803 | *pinfo = newinfo; | 1786 | *pinfo = newinfo; |
1804 | *pentry0 = entry1; | 1787 | *pentry0 = entry1; |
1805 | xt_free_table_info(info); | 1788 | xt_free_table_info(info); |
1806 | return 0; | 1789 | return 0; |
1807 | 1790 | ||
1808 | free_newinfo: | 1791 | free_newinfo: |
1809 | xt_free_table_info(newinfo); | 1792 | xt_free_table_info(newinfo); |
1810 | out: | 1793 | out: |
1811 | xt_entry_foreach(iter0, entry0, total_size) { | 1794 | xt_entry_foreach(iter0, entry0, total_size) { |
1812 | if (j-- == 0) | 1795 | if (j-- == 0) |
1813 | break; | 1796 | break; |
1814 | compat_release_entry(iter0); | 1797 | compat_release_entry(iter0); |
1815 | } | 1798 | } |
1816 | return ret; | 1799 | return ret; |
1817 | out_unlock: | 1800 | out_unlock: |
1818 | xt_compat_flush_offsets(AF_INET6); | 1801 | xt_compat_flush_offsets(AF_INET6); |
1819 | xt_compat_unlock(AF_INET6); | 1802 | xt_compat_unlock(AF_INET6); |
1820 | goto out; | 1803 | goto out; |
1821 | } | 1804 | } |
1822 | 1805 | ||
1823 | static int | 1806 | static int |
1824 | compat_do_replace(struct net *net, void __user *user, unsigned int len) | 1807 | compat_do_replace(struct net *net, void __user *user, unsigned int len) |
1825 | { | 1808 | { |
1826 | int ret; | 1809 | int ret; |
1827 | struct compat_ip6t_replace tmp; | 1810 | struct compat_ip6t_replace tmp; |
1828 | struct xt_table_info *newinfo; | 1811 | struct xt_table_info *newinfo; |
1829 | void *loc_cpu_entry; | 1812 | void *loc_cpu_entry; |
1830 | struct ip6t_entry *iter; | 1813 | struct ip6t_entry *iter; |
1831 | 1814 | ||
1832 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1815 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1833 | return -EFAULT; | 1816 | return -EFAULT; |
1834 | 1817 | ||
1835 | /* overflow check */ | 1818 | /* overflow check */ |
1836 | if (tmp.size >= INT_MAX / num_possible_cpus()) | 1819 | if (tmp.size >= INT_MAX / num_possible_cpus()) |
1837 | return -ENOMEM; | 1820 | return -ENOMEM; |
1838 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1821 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1839 | return -ENOMEM; | 1822 | return -ENOMEM; |
1840 | 1823 | ||
1841 | newinfo = xt_alloc_table_info(tmp.size); | 1824 | newinfo = xt_alloc_table_info(tmp.size); |
1842 | if (!newinfo) | 1825 | if (!newinfo) |
1843 | return -ENOMEM; | 1826 | return -ENOMEM; |
1844 | 1827 | ||
1845 | /* choose the copy that is on our node/cpu */ | 1828 | /* choose the copy that is on our node/cpu */ |
1846 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1829 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1847 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1830 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1848 | tmp.size) != 0) { | 1831 | tmp.size) != 0) { |
1849 | ret = -EFAULT; | 1832 | ret = -EFAULT; |
1850 | goto free_newinfo; | 1833 | goto free_newinfo; |
1851 | } | 1834 | } |
1852 | 1835 | ||
1853 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, | 1836 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, |
1854 | &newinfo, &loc_cpu_entry, tmp.size, | 1837 | &newinfo, &loc_cpu_entry, tmp.size, |
1855 | tmp.num_entries, tmp.hook_entry, | 1838 | tmp.num_entries, tmp.hook_entry, |
1856 | tmp.underflow); | 1839 | tmp.underflow); |
1857 | if (ret != 0) | 1840 | if (ret != 0) |
1858 | goto free_newinfo; | 1841 | goto free_newinfo; |
1859 | 1842 | ||
1860 | duprintf("compat_do_replace: Translated table\n"); | 1843 | duprintf("compat_do_replace: Translated table\n"); |
1861 | 1844 | ||
1862 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1845 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1863 | tmp.num_counters, compat_ptr(tmp.counters)); | 1846 | tmp.num_counters, compat_ptr(tmp.counters)); |
1864 | if (ret) | 1847 | if (ret) |
1865 | goto free_newinfo_untrans; | 1848 | goto free_newinfo_untrans; |
1866 | return 0; | 1849 | return 0; |
1867 | 1850 | ||
1868 | free_newinfo_untrans: | 1851 | free_newinfo_untrans: |
1869 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1852 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1870 | cleanup_entry(iter, net); | 1853 | cleanup_entry(iter, net); |
1871 | free_newinfo: | 1854 | free_newinfo: |
1872 | xt_free_table_info(newinfo); | 1855 | xt_free_table_info(newinfo); |
1873 | return ret; | 1856 | return ret; |
1874 | } | 1857 | } |
1875 | 1858 | ||
1876 | static int | 1859 | static int |
1877 | compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, | 1860 | compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, |
1878 | unsigned int len) | 1861 | unsigned int len) |
1879 | { | 1862 | { |
1880 | int ret; | 1863 | int ret; |
1881 | 1864 | ||
1882 | if (!capable(CAP_NET_ADMIN)) | 1865 | if (!capable(CAP_NET_ADMIN)) |
1883 | return -EPERM; | 1866 | return -EPERM; |
1884 | 1867 | ||
1885 | switch (cmd) { | 1868 | switch (cmd) { |
1886 | case IP6T_SO_SET_REPLACE: | 1869 | case IP6T_SO_SET_REPLACE: |
1887 | ret = compat_do_replace(sock_net(sk), user, len); | 1870 | ret = compat_do_replace(sock_net(sk), user, len); |
1888 | break; | 1871 | break; |
1889 | 1872 | ||
1890 | case IP6T_SO_SET_ADD_COUNTERS: | 1873 | case IP6T_SO_SET_ADD_COUNTERS: |
1891 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1874 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1892 | break; | 1875 | break; |
1893 | 1876 | ||
1894 | default: | 1877 | default: |
1895 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); | 1878 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); |
1896 | ret = -EINVAL; | 1879 | ret = -EINVAL; |
1897 | } | 1880 | } |
1898 | 1881 | ||
1899 | return ret; | 1882 | return ret; |
1900 | } | 1883 | } |
1901 | 1884 | ||
1902 | struct compat_ip6t_get_entries { | 1885 | struct compat_ip6t_get_entries { |
1903 | char name[XT_TABLE_MAXNAMELEN]; | 1886 | char name[XT_TABLE_MAXNAMELEN]; |
1904 | compat_uint_t size; | 1887 | compat_uint_t size; |
1905 | struct compat_ip6t_entry entrytable[0]; | 1888 | struct compat_ip6t_entry entrytable[0]; |
1906 | }; | 1889 | }; |
1907 | 1890 | ||
1908 | static int | 1891 | static int |
1909 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, | 1892 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, |
1910 | void __user *userptr) | 1893 | void __user *userptr) |
1911 | { | 1894 | { |
1912 | struct xt_counters *counters; | 1895 | struct xt_counters *counters; |
1913 | const struct xt_table_info *private = table->private; | 1896 | const struct xt_table_info *private = table->private; |
1914 | void __user *pos; | 1897 | void __user *pos; |
1915 | unsigned int size; | 1898 | unsigned int size; |
1916 | int ret = 0; | 1899 | int ret = 0; |
1917 | const void *loc_cpu_entry; | 1900 | const void *loc_cpu_entry; |
1918 | unsigned int i = 0; | 1901 | unsigned int i = 0; |
1919 | struct ip6t_entry *iter; | 1902 | struct ip6t_entry *iter; |
1920 | 1903 | ||
1921 | counters = alloc_counters(table); | 1904 | counters = alloc_counters(table); |
1922 | if (IS_ERR(counters)) | 1905 | if (IS_ERR(counters)) |
1923 | return PTR_ERR(counters); | 1906 | return PTR_ERR(counters); |
1924 | 1907 | ||
1925 | /* choose the copy that is on our node/cpu, ... | 1908 | /* choose the copy that is on our node/cpu, ... |
1926 | * This choice is lazy (because current thread is | 1909 | * This choice is lazy (because current thread is |
1927 | * allowed to migrate to another cpu) | 1910 | * allowed to migrate to another cpu) |
1928 | */ | 1911 | */ |
1929 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1912 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
1930 | pos = userptr; | 1913 | pos = userptr; |
1931 | size = total_size; | 1914 | size = total_size; |
1932 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { | 1915 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { |
1933 | ret = compat_copy_entry_to_user(iter, &pos, | 1916 | ret = compat_copy_entry_to_user(iter, &pos, |
1934 | &size, counters, i++); | 1917 | &size, counters, i++); |
1935 | if (ret != 0) | 1918 | if (ret != 0) |
1936 | break; | 1919 | break; |
1937 | } | 1920 | } |
1938 | 1921 | ||
1939 | vfree(counters); | 1922 | vfree(counters); |
1940 | return ret; | 1923 | return ret; |
1941 | } | 1924 | } |
1942 | 1925 | ||
1943 | static int | 1926 | static int |
1944 | compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, | 1927 | compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, |
1945 | int *len) | 1928 | int *len) |
1946 | { | 1929 | { |
1947 | int ret; | 1930 | int ret; |
1948 | struct compat_ip6t_get_entries get; | 1931 | struct compat_ip6t_get_entries get; |
1949 | struct xt_table *t; | 1932 | struct xt_table *t; |
1950 | 1933 | ||
1951 | if (*len < sizeof(get)) { | 1934 | if (*len < sizeof(get)) { |
1952 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); | 1935 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); |
1953 | return -EINVAL; | 1936 | return -EINVAL; |
1954 | } | 1937 | } |
1955 | 1938 | ||
1956 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1939 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1957 | return -EFAULT; | 1940 | return -EFAULT; |
1958 | 1941 | ||
1959 | if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { | 1942 | if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { |
1960 | duprintf("compat_get_entries: %u != %zu\n", | 1943 | duprintf("compat_get_entries: %u != %zu\n", |
1961 | *len, sizeof(get) + get.size); | 1944 | *len, sizeof(get) + get.size); |
1962 | return -EINVAL; | 1945 | return -EINVAL; |
1963 | } | 1946 | } |
1964 | 1947 | ||
1965 | xt_compat_lock(AF_INET6); | 1948 | xt_compat_lock(AF_INET6); |
1966 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1949 | t = xt_find_table_lock(net, AF_INET6, get.name); |
1967 | if (t && !IS_ERR(t)) { | 1950 | if (t && !IS_ERR(t)) { |
1968 | const struct xt_table_info *private = t->private; | 1951 | const struct xt_table_info *private = t->private; |
1969 | struct xt_table_info info; | 1952 | struct xt_table_info info; |
1970 | duprintf("t->private->number = %u\n", private->number); | 1953 | duprintf("t->private->number = %u\n", private->number); |
1971 | ret = compat_table_info(private, &info); | 1954 | ret = compat_table_info(private, &info); |
1972 | if (!ret && get.size == info.size) { | 1955 | if (!ret && get.size == info.size) { |
1973 | ret = compat_copy_entries_to_user(private->size, | 1956 | ret = compat_copy_entries_to_user(private->size, |
1974 | t, uptr->entrytable); | 1957 | t, uptr->entrytable); |
1975 | } else if (!ret) { | 1958 | } else if (!ret) { |
1976 | duprintf("compat_get_entries: I've got %u not %u!\n", | 1959 | duprintf("compat_get_entries: I've got %u not %u!\n", |
1977 | private->size, get.size); | 1960 | private->size, get.size); |
1978 | ret = -EAGAIN; | 1961 | ret = -EAGAIN; |
1979 | } | 1962 | } |
1980 | xt_compat_flush_offsets(AF_INET6); | 1963 | xt_compat_flush_offsets(AF_INET6); |
1981 | module_put(t->me); | 1964 | module_put(t->me); |
1982 | xt_table_unlock(t); | 1965 | xt_table_unlock(t); |
1983 | } else | 1966 | } else |
1984 | ret = t ? PTR_ERR(t) : -ENOENT; | 1967 | ret = t ? PTR_ERR(t) : -ENOENT; |
1985 | 1968 | ||
1986 | xt_compat_unlock(AF_INET6); | 1969 | xt_compat_unlock(AF_INET6); |
1987 | return ret; | 1970 | return ret; |
1988 | } | 1971 | } |
1989 | 1972 | ||
1990 | static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); | 1973 | static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); |
1991 | 1974 | ||
1992 | static int | 1975 | static int |
1993 | compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1976 | compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1994 | { | 1977 | { |
1995 | int ret; | 1978 | int ret; |
1996 | 1979 | ||
1997 | if (!capable(CAP_NET_ADMIN)) | 1980 | if (!capable(CAP_NET_ADMIN)) |
1998 | return -EPERM; | 1981 | return -EPERM; |
1999 | 1982 | ||
2000 | switch (cmd) { | 1983 | switch (cmd) { |
2001 | case IP6T_SO_GET_INFO: | 1984 | case IP6T_SO_GET_INFO: |
2002 | ret = get_info(sock_net(sk), user, len, 1); | 1985 | ret = get_info(sock_net(sk), user, len, 1); |
2003 | break; | 1986 | break; |
2004 | case IP6T_SO_GET_ENTRIES: | 1987 | case IP6T_SO_GET_ENTRIES: |
2005 | ret = compat_get_entries(sock_net(sk), user, len); | 1988 | ret = compat_get_entries(sock_net(sk), user, len); |
2006 | break; | 1989 | break; |
2007 | default: | 1990 | default: |
2008 | ret = do_ip6t_get_ctl(sk, cmd, user, len); | 1991 | ret = do_ip6t_get_ctl(sk, cmd, user, len); |
2009 | } | 1992 | } |
2010 | return ret; | 1993 | return ret; |
2011 | } | 1994 | } |
2012 | #endif | 1995 | #endif |
2013 | 1996 | ||
2014 | static int | 1997 | static int |
2015 | do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 1998 | do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
2016 | { | 1999 | { |
2017 | int ret; | 2000 | int ret; |
2018 | 2001 | ||
2019 | if (!capable(CAP_NET_ADMIN)) | 2002 | if (!capable(CAP_NET_ADMIN)) |
2020 | return -EPERM; | 2003 | return -EPERM; |
2021 | 2004 | ||
2022 | switch (cmd) { | 2005 | switch (cmd) { |
2023 | case IP6T_SO_SET_REPLACE: | 2006 | case IP6T_SO_SET_REPLACE: |
2024 | ret = do_replace(sock_net(sk), user, len); | 2007 | ret = do_replace(sock_net(sk), user, len); |
2025 | break; | 2008 | break; |
2026 | 2009 | ||
2027 | case IP6T_SO_SET_ADD_COUNTERS: | 2010 | case IP6T_SO_SET_ADD_COUNTERS: |
2028 | ret = do_add_counters(sock_net(sk), user, len, 0); | 2011 | ret = do_add_counters(sock_net(sk), user, len, 0); |
2029 | break; | 2012 | break; |
2030 | 2013 | ||
2031 | default: | 2014 | default: |
2032 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); | 2015 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); |
2033 | ret = -EINVAL; | 2016 | ret = -EINVAL; |
2034 | } | 2017 | } |
2035 | 2018 | ||
2036 | return ret; | 2019 | return ret; |
2037 | } | 2020 | } |
2038 | 2021 | ||
2039 | static int | 2022 | static int |
2040 | do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 2023 | do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
2041 | { | 2024 | { |
2042 | int ret; | 2025 | int ret; |
2043 | 2026 | ||
2044 | if (!capable(CAP_NET_ADMIN)) | 2027 | if (!capable(CAP_NET_ADMIN)) |
2045 | return -EPERM; | 2028 | return -EPERM; |
2046 | 2029 | ||
2047 | switch (cmd) { | 2030 | switch (cmd) { |
2048 | case IP6T_SO_GET_INFO: | 2031 | case IP6T_SO_GET_INFO: |
2049 | ret = get_info(sock_net(sk), user, len, 0); | 2032 | ret = get_info(sock_net(sk), user, len, 0); |
2050 | break; | 2033 | break; |
2051 | 2034 | ||
2052 | case IP6T_SO_GET_ENTRIES: | 2035 | case IP6T_SO_GET_ENTRIES: |
2053 | ret = get_entries(sock_net(sk), user, len); | 2036 | ret = get_entries(sock_net(sk), user, len); |
2054 | break; | 2037 | break; |
2055 | 2038 | ||
2056 | case IP6T_SO_GET_REVISION_MATCH: | 2039 | case IP6T_SO_GET_REVISION_MATCH: |
2057 | case IP6T_SO_GET_REVISION_TARGET: { | 2040 | case IP6T_SO_GET_REVISION_TARGET: { |
2058 | struct xt_get_revision rev; | 2041 | struct xt_get_revision rev; |
2059 | int target; | 2042 | int target; |
2060 | 2043 | ||
2061 | if (*len != sizeof(rev)) { | 2044 | if (*len != sizeof(rev)) { |
2062 | ret = -EINVAL; | 2045 | ret = -EINVAL; |
2063 | break; | 2046 | break; |
2064 | } | 2047 | } |
2065 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 2048 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
2066 | ret = -EFAULT; | 2049 | ret = -EFAULT; |
2067 | break; | 2050 | break; |
2068 | } | 2051 | } |
2069 | 2052 | ||
2070 | if (cmd == IP6T_SO_GET_REVISION_TARGET) | 2053 | if (cmd == IP6T_SO_GET_REVISION_TARGET) |
2071 | target = 1; | 2054 | target = 1; |
2072 | else | 2055 | else |
2073 | target = 0; | 2056 | target = 0; |
2074 | 2057 | ||
2075 | try_then_request_module(xt_find_revision(AF_INET6, rev.name, | 2058 | try_then_request_module(xt_find_revision(AF_INET6, rev.name, |
2076 | rev.revision, | 2059 | rev.revision, |
2077 | target, &ret), | 2060 | target, &ret), |
2078 | "ip6t_%s", rev.name); | 2061 | "ip6t_%s", rev.name); |
2079 | break; | 2062 | break; |
2080 | } | 2063 | } |
2081 | 2064 | ||
2082 | default: | 2065 | default: |
2083 | duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); | 2066 | duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); |
2084 | ret = -EINVAL; | 2067 | ret = -EINVAL; |
2085 | } | 2068 | } |
2086 | 2069 | ||
2087 | return ret; | 2070 | return ret; |
2088 | } | 2071 | } |
2089 | 2072 | ||
2090 | struct xt_table *ip6t_register_table(struct net *net, | 2073 | struct xt_table *ip6t_register_table(struct net *net, |
2091 | const struct xt_table *table, | 2074 | const struct xt_table *table, |
2092 | const struct ip6t_replace *repl) | 2075 | const struct ip6t_replace *repl) |
2093 | { | 2076 | { |
2094 | int ret; | 2077 | int ret; |
2095 | struct xt_table_info *newinfo; | 2078 | struct xt_table_info *newinfo; |
2096 | struct xt_table_info bootstrap = {0}; | 2079 | struct xt_table_info bootstrap = {0}; |
2097 | void *loc_cpu_entry; | 2080 | void *loc_cpu_entry; |
2098 | struct xt_table *new_table; | 2081 | struct xt_table *new_table; |
2099 | 2082 | ||
2100 | newinfo = xt_alloc_table_info(repl->size); | 2083 | newinfo = xt_alloc_table_info(repl->size); |
2101 | if (!newinfo) { | 2084 | if (!newinfo) { |
2102 | ret = -ENOMEM; | 2085 | ret = -ENOMEM; |
2103 | goto out; | 2086 | goto out; |
2104 | } | 2087 | } |
2105 | 2088 | ||
2106 | /* choose the copy on our node/cpu, but dont care about preemption */ | 2089 | /* choose the copy on our node/cpu, but dont care about preemption */ |
2107 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 2090 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
2108 | memcpy(loc_cpu_entry, repl->entries, repl->size); | 2091 | memcpy(loc_cpu_entry, repl->entries, repl->size); |
2109 | 2092 | ||
2110 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); | 2093 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); |
2111 | if (ret != 0) | 2094 | if (ret != 0) |
2112 | goto out_free; | 2095 | goto out_free; |
2113 | 2096 | ||
2114 | new_table = xt_register_table(net, table, &bootstrap, newinfo); | 2097 | new_table = xt_register_table(net, table, &bootstrap, newinfo); |
2115 | if (IS_ERR(new_table)) { | 2098 | if (IS_ERR(new_table)) { |
2116 | ret = PTR_ERR(new_table); | 2099 | ret = PTR_ERR(new_table); |
2117 | goto out_free; | 2100 | goto out_free; |
2118 | } | 2101 | } |
2119 | return new_table; | 2102 | return new_table; |
2120 | 2103 | ||
2121 | out_free: | 2104 | out_free: |
2122 | xt_free_table_info(newinfo); | 2105 | xt_free_table_info(newinfo); |
2123 | out: | 2106 | out: |
2124 | return ERR_PTR(ret); | 2107 | return ERR_PTR(ret); |
2125 | } | 2108 | } |
2126 | 2109 | ||
2127 | void ip6t_unregister_table(struct net *net, struct xt_table *table) | 2110 | void ip6t_unregister_table(struct net *net, struct xt_table *table) |
2128 | { | 2111 | { |
2129 | struct xt_table_info *private; | 2112 | struct xt_table_info *private; |
2130 | void *loc_cpu_entry; | 2113 | void *loc_cpu_entry; |
2131 | struct module *table_owner = table->me; | 2114 | struct module *table_owner = table->me; |
2132 | struct ip6t_entry *iter; | 2115 | struct ip6t_entry *iter; |
2133 | 2116 | ||
2134 | private = xt_unregister_table(table); | 2117 | private = xt_unregister_table(table); |
2135 | 2118 | ||
2136 | /* Decrease module usage counts and free resources */ | 2119 | /* Decrease module usage counts and free resources */ |
2137 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 2120 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
2138 | xt_entry_foreach(iter, loc_cpu_entry, private->size) | 2121 | xt_entry_foreach(iter, loc_cpu_entry, private->size) |
2139 | cleanup_entry(iter, net); | 2122 | cleanup_entry(iter, net); |
2140 | if (private->number > private->initial_entries) | 2123 | if (private->number > private->initial_entries) |
2141 | module_put(table_owner); | 2124 | module_put(table_owner); |
2142 | xt_free_table_info(private); | 2125 | xt_free_table_info(private); |
2143 | } | 2126 | } |
2144 | 2127 | ||
/* Returns true if @type/@code fall inside the configured type and
 * inclusive code range; the result is XOR-flipped when @invert is set.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
2154 | 2137 | ||
2155 | static bool | 2138 | static bool |
2156 | icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) | 2139 | icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) |
2157 | { | 2140 | { |
2158 | const struct icmp6hdr *ic; | 2141 | const struct icmp6hdr *ic; |
2159 | struct icmp6hdr _icmph; | 2142 | struct icmp6hdr _icmph; |
2160 | const struct ip6t_icmp *icmpinfo = par->matchinfo; | 2143 | const struct ip6t_icmp *icmpinfo = par->matchinfo; |
2161 | 2144 | ||
2162 | /* Must not be a fragment. */ | 2145 | /* Must not be a fragment. */ |
2163 | if (par->fragoff != 0) | 2146 | if (par->fragoff != 0) |
2164 | return false; | 2147 | return false; |
2165 | 2148 | ||
2166 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); | 2149 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); |
2167 | if (ic == NULL) { | 2150 | if (ic == NULL) { |
2168 | /* We've been asked to examine this packet, and we | 2151 | /* We've been asked to examine this packet, and we |
2169 | * can't. Hence, no choice but to drop. | 2152 | * can't. Hence, no choice but to drop. |
2170 | */ | 2153 | */ |
2171 | duprintf("Dropping evil ICMP tinygram.\n"); | 2154 | duprintf("Dropping evil ICMP tinygram.\n"); |
2172 | par->hotdrop = true; | 2155 | par->hotdrop = true; |
2173 | return false; | 2156 | return false; |
2174 | } | 2157 | } |
2175 | 2158 | ||
2176 | return icmp6_type_code_match(icmpinfo->type, | 2159 | return icmp6_type_code_match(icmpinfo->type, |
2177 | icmpinfo->code[0], | 2160 | icmpinfo->code[0], |
2178 | icmpinfo->code[1], | 2161 | icmpinfo->code[1], |
2179 | ic->icmp6_type, ic->icmp6_code, | 2162 | ic->icmp6_type, ic->icmp6_code, |
2180 | !!(icmpinfo->invflags&IP6T_ICMP_INV)); | 2163 | !!(icmpinfo->invflags&IP6T_ICMP_INV)); |
2181 | } | 2164 | } |
2182 | 2165 | ||
2183 | /* Called when user tries to insert an entry of this type. */ | 2166 | /* Called when user tries to insert an entry of this type. */ |
2184 | static int icmp6_checkentry(const struct xt_mtchk_param *par) | 2167 | static int icmp6_checkentry(const struct xt_mtchk_param *par) |
2185 | { | 2168 | { |
2186 | const struct ip6t_icmp *icmpinfo = par->matchinfo; | 2169 | const struct ip6t_icmp *icmpinfo = par->matchinfo; |
2187 | 2170 | ||
2188 | /* Must specify no unknown invflags */ | 2171 | /* Must specify no unknown invflags */ |
2189 | return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; | 2172 | return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; |
2190 | } | 2173 | } |
2191 | 2174 | ||
2192 | /* The built-in targets: standard (NULL) and error. */ | 2175 | /* The built-in targets: standard (NULL) and error. */ |
2193 | static struct xt_target ip6t_builtin_tg[] __read_mostly = { | 2176 | static struct xt_target ip6t_builtin_tg[] __read_mostly = { |
2194 | { | 2177 | { |
2195 | .name = XT_STANDARD_TARGET, | 2178 | .name = XT_STANDARD_TARGET, |
2196 | .targetsize = sizeof(int), | 2179 | .targetsize = sizeof(int), |
2197 | .family = NFPROTO_IPV6, | 2180 | .family = NFPROTO_IPV6, |
2198 | #ifdef CONFIG_COMPAT | 2181 | #ifdef CONFIG_COMPAT |
2199 | .compatsize = sizeof(compat_int_t), | 2182 | .compatsize = sizeof(compat_int_t), |
2200 | .compat_from_user = compat_standard_from_user, | 2183 | .compat_from_user = compat_standard_from_user, |
2201 | .compat_to_user = compat_standard_to_user, | 2184 | .compat_to_user = compat_standard_to_user, |
2202 | #endif | 2185 | #endif |
2203 | }, | 2186 | }, |
2204 | { | 2187 | { |
2205 | .name = XT_ERROR_TARGET, | 2188 | .name = XT_ERROR_TARGET, |
2206 | .target = ip6t_error, | 2189 | .target = ip6t_error, |
2207 | .targetsize = XT_FUNCTION_MAXNAMELEN, | 2190 | .targetsize = XT_FUNCTION_MAXNAMELEN, |
2208 | .family = NFPROTO_IPV6, | 2191 | .family = NFPROTO_IPV6, |
2209 | }, | 2192 | }, |
2210 | }; | 2193 | }; |
2211 | 2194 | ||
2212 | static struct nf_sockopt_ops ip6t_sockopts = { | 2195 | static struct nf_sockopt_ops ip6t_sockopts = { |
2213 | .pf = PF_INET6, | 2196 | .pf = PF_INET6, |
2214 | .set_optmin = IP6T_BASE_CTL, | 2197 | .set_optmin = IP6T_BASE_CTL, |
2215 | .set_optmax = IP6T_SO_SET_MAX+1, | 2198 | .set_optmax = IP6T_SO_SET_MAX+1, |
2216 | .set = do_ip6t_set_ctl, | 2199 | .set = do_ip6t_set_ctl, |
2217 | #ifdef CONFIG_COMPAT | 2200 | #ifdef CONFIG_COMPAT |
2218 | .compat_set = compat_do_ip6t_set_ctl, | 2201 | .compat_set = compat_do_ip6t_set_ctl, |
2219 | #endif | 2202 | #endif |
2220 | .get_optmin = IP6T_BASE_CTL, | 2203 | .get_optmin = IP6T_BASE_CTL, |
2221 | .get_optmax = IP6T_SO_GET_MAX+1, | 2204 | .get_optmax = IP6T_SO_GET_MAX+1, |
2222 | .get = do_ip6t_get_ctl, | 2205 | .get = do_ip6t_get_ctl, |
2223 | #ifdef CONFIG_COMPAT | 2206 | #ifdef CONFIG_COMPAT |
2224 | .compat_get = compat_do_ip6t_get_ctl, | 2207 | .compat_get = compat_do_ip6t_get_ctl, |
2225 | #endif | 2208 | #endif |
2226 | .owner = THIS_MODULE, | 2209 | .owner = THIS_MODULE, |
2227 | }; | 2210 | }; |
2228 | 2211 | ||
2229 | static struct xt_match ip6t_builtin_mt[] __read_mostly = { | 2212 | static struct xt_match ip6t_builtin_mt[] __read_mostly = { |
2230 | { | 2213 | { |
2231 | .name = "icmp6", | 2214 | .name = "icmp6", |
2232 | .match = icmp6_match, | 2215 | .match = icmp6_match, |
2233 | .matchsize = sizeof(struct ip6t_icmp), | 2216 | .matchsize = sizeof(struct ip6t_icmp), |
2234 | .checkentry = icmp6_checkentry, | 2217 | .checkentry = icmp6_checkentry, |
2235 | .proto = IPPROTO_ICMPV6, | 2218 | .proto = IPPROTO_ICMPV6, |
2236 | .family = NFPROTO_IPV6, | 2219 | .family = NFPROTO_IPV6, |
2237 | }, | 2220 | }, |
2238 | }; | 2221 | }; |
2239 | 2222 | ||
2240 | static int __net_init ip6_tables_net_init(struct net *net) | 2223 | static int __net_init ip6_tables_net_init(struct net *net) |
2241 | { | 2224 | { |
2242 | return xt_proto_init(net, NFPROTO_IPV6); | 2225 | return xt_proto_init(net, NFPROTO_IPV6); |
2243 | } | 2226 | } |
2244 | 2227 | ||
2245 | static void __net_exit ip6_tables_net_exit(struct net *net) | 2228 | static void __net_exit ip6_tables_net_exit(struct net *net) |
2246 | { | 2229 | { |
2247 | xt_proto_fini(net, NFPROTO_IPV6); | 2230 | xt_proto_fini(net, NFPROTO_IPV6); |
2248 | } | 2231 | } |
2249 | 2232 | ||
2250 | static struct pernet_operations ip6_tables_net_ops = { | 2233 | static struct pernet_operations ip6_tables_net_ops = { |
2251 | .init = ip6_tables_net_init, | 2234 | .init = ip6_tables_net_init, |
2252 | .exit = ip6_tables_net_exit, | 2235 | .exit = ip6_tables_net_exit, |
2253 | }; | 2236 | }; |
2254 | 2237 | ||
2255 | static int __init ip6_tables_init(void) | 2238 | static int __init ip6_tables_init(void) |
2256 | { | 2239 | { |
2257 | int ret; | 2240 | int ret; |
2258 | 2241 | ||
2259 | ret = register_pernet_subsys(&ip6_tables_net_ops); | 2242 | ret = register_pernet_subsys(&ip6_tables_net_ops); |
2260 | if (ret < 0) | 2243 | if (ret < 0) |
2261 | goto err1; | 2244 | goto err1; |
2262 | 2245 | ||
2263 | /* Noone else will be downing sem now, so we won't sleep */ | 2246 | /* Noone else will be downing sem now, so we won't sleep */ |
2264 | ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2247 | ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2265 | if (ret < 0) | 2248 | if (ret < 0) |
2266 | goto err2; | 2249 | goto err2; |
2267 | ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2250 | ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2268 | if (ret < 0) | 2251 | if (ret < 0) |
2269 | goto err4; | 2252 | goto err4; |
2270 | 2253 | ||
2271 | /* Register setsockopt */ | 2254 | /* Register setsockopt */ |
2272 | ret = nf_register_sockopt(&ip6t_sockopts); | 2255 | ret = nf_register_sockopt(&ip6t_sockopts); |
2273 | if (ret < 0) | 2256 | if (ret < 0) |
2274 | goto err5; | 2257 | goto err5; |
2275 | 2258 | ||
2276 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); | 2259 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2277 | return 0; | 2260 | return 0; |
2278 | 2261 | ||
2279 | err5: | 2262 | err5: |
2280 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2263 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2281 | err4: | 2264 | err4: |
2282 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2265 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2283 | err2: | 2266 | err2: |
2284 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2267 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2285 | err1: | 2268 | err1: |
2286 | return ret; | 2269 | return ret; |
2287 | } | 2270 | } |
2288 | 2271 | ||
2289 | static void __exit ip6_tables_fini(void) | 2272 | static void __exit ip6_tables_fini(void) |
2290 | { | 2273 | { |
2291 | nf_unregister_sockopt(&ip6t_sockopts); | 2274 | nf_unregister_sockopt(&ip6t_sockopts); |
2292 | 2275 | ||
2293 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2276 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2294 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2277 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2295 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2278 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2296 | } | 2279 | } |
2297 | 2280 | ||
2298 | /* | 2281 | /* |
2299 | * find the offset to specified header or the protocol number of last header | 2282 | * find the offset to specified header or the protocol number of last header |
2300 | * if target < 0. "last header" is transport protocol header, ESP, or | 2283 | * if target < 0. "last header" is transport protocol header, ESP, or |
2301 | * "No next header". | 2284 | * "No next header". |
2302 | * | 2285 | * |
2303 | * If target header is found, its offset is set in *offset and return protocol | 2286 | * If target header is found, its offset is set in *offset and return protocol |
2304 | * number. Otherwise, return -1. | 2287 | * number. Otherwise, return -1. |
2305 | * | 2288 | * |
2306 | * If the first fragment doesn't contain the final protocol header or | 2289 | * If the first fragment doesn't contain the final protocol header or |
2307 | * NEXTHDR_NONE it is considered invalid. | 2290 | * NEXTHDR_NONE it is considered invalid. |
2308 | * | 2291 | * |
2309 | * Note that non-1st fragment is special case that "the protocol number | 2292 | * Note that non-1st fragment is special case that "the protocol number |
2310 | * of last header" is "next header" field in Fragment header. In this case, | 2293 | * of last header" is "next header" field in Fragment header. In this case, |
2311 | * *offset is meaningless and fragment offset is stored in *fragoff if fragoff | 2294 | * *offset is meaningless and fragment offset is stored in *fragoff if fragoff |
2312 | * isn't NULL. | 2295 | * isn't NULL. |
2313 | * | 2296 | * |
2314 | */ | 2297 | */ |
2315 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | 2298 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, |
2316 | int target, unsigned short *fragoff) | 2299 | int target, unsigned short *fragoff) |
2317 | { | 2300 | { |
2318 | unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); | 2301 | unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); |
2319 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 2302 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
2320 | unsigned int len = skb->len - start; | 2303 | unsigned int len = skb->len - start; |
2321 | 2304 | ||
2322 | if (fragoff) | 2305 | if (fragoff) |
2323 | *fragoff = 0; | 2306 | *fragoff = 0; |
2324 | 2307 | ||
2325 | while (nexthdr != target) { | 2308 | while (nexthdr != target) { |
2326 | struct ipv6_opt_hdr _hdr, *hp; | 2309 | struct ipv6_opt_hdr _hdr, *hp; |
2327 | unsigned int hdrlen; | 2310 | unsigned int hdrlen; |
2328 | 2311 | ||
2329 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { | 2312 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { |
2330 | if (target < 0) | 2313 | if (target < 0) |
2331 | break; | 2314 | break; |
2332 | return -ENOENT; | 2315 | return -ENOENT; |
2333 | } | 2316 | } |
2334 | 2317 | ||
2335 | hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); | 2318 | hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); |
2336 | if (hp == NULL) | 2319 | if (hp == NULL) |
2337 | return -EBADMSG; | 2320 | return -EBADMSG; |
2338 | if (nexthdr == NEXTHDR_FRAGMENT) { | 2321 | if (nexthdr == NEXTHDR_FRAGMENT) { |
2339 | unsigned short _frag_off; | 2322 | unsigned short _frag_off; |
2340 | __be16 *fp; | 2323 | __be16 *fp; |
2341 | fp = skb_header_pointer(skb, | 2324 | fp = skb_header_pointer(skb, |
2342 | start+offsetof(struct frag_hdr, | 2325 | start+offsetof(struct frag_hdr, |
2343 | frag_off), | 2326 | frag_off), |
2344 | sizeof(_frag_off), | 2327 | sizeof(_frag_off), |
2345 | &_frag_off); | 2328 | &_frag_off); |
2346 | if (fp == NULL) | 2329 | if (fp == NULL) |
2347 | return -EBADMSG; | 2330 | return -EBADMSG; |
2348 | 2331 | ||
2349 | _frag_off = ntohs(*fp) & ~0x7; | 2332 | _frag_off = ntohs(*fp) & ~0x7; |
2350 | if (_frag_off) { | 2333 | if (_frag_off) { |
2351 | if (target < 0 && | 2334 | if (target < 0 && |
2352 | ((!ipv6_ext_hdr(hp->nexthdr)) || | 2335 | ((!ipv6_ext_hdr(hp->nexthdr)) || |
2353 | hp->nexthdr == NEXTHDR_NONE)) { | 2336 | hp->nexthdr == NEXTHDR_NONE)) { |
2354 | if (fragoff) | 2337 | if (fragoff) |
2355 | *fragoff = _frag_off; | 2338 | *fragoff = _frag_off; |
2356 | return hp->nexthdr; | 2339 | return hp->nexthdr; |
2357 | } | 2340 | } |
2358 | return -ENOENT; | 2341 | return -ENOENT; |
2359 | } | 2342 | } |
2360 | hdrlen = 8; | 2343 | hdrlen = 8; |
2361 | } else if (nexthdr == NEXTHDR_AUTH) | 2344 | } else if (nexthdr == NEXTHDR_AUTH) |
2362 | hdrlen = (hp->hdrlen + 2) << 2; | 2345 | hdrlen = (hp->hdrlen + 2) << 2; |
2363 | else | 2346 | else |
2364 | hdrlen = ipv6_optlen(hp); | 2347 | hdrlen = ipv6_optlen(hp); |
2365 | 2348 | ||
2366 | nexthdr = hp->nexthdr; | 2349 | nexthdr = hp->nexthdr; |
2367 | len -= hdrlen; | 2350 | len -= hdrlen; |
2368 | start += hdrlen; | 2351 | start += hdrlen; |
2369 | } | 2352 | } |
2370 | 2353 | ||
2371 | *offset = start; | 2354 | *offset = start; |
2372 | return nexthdr; | 2355 | return nexthdr; |
2373 | } | 2356 | } |
2374 | 2357 | ||
2375 | EXPORT_SYMBOL(ip6t_register_table); | 2358 | EXPORT_SYMBOL(ip6t_register_table); |
net/netfilter/x_tables.c
1 | /* | 1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | 2 | * x_tables core - Backend for {ip,ip6,arp}_tables |
3 | * | 3 | * |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | 4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> |
5 | * | 5 | * |
6 | * Based on existing ip_tables code which is | 6 | * Based on existing ip_tables code which is |
7 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 7 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
8 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | 8 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/socket.h> | 17 | #include <linux/socket.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <net/net_namespace.h> | 26 | #include <net/net_namespace.h> |
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | #include <linux/netfilter_arp.h> | 29 | #include <linux/netfilter_arp.h> |
30 | #include <linux/netfilter_ipv4/ip_tables.h> | 30 | #include <linux/netfilter_ipv4/ip_tables.h> |
31 | #include <linux/netfilter_ipv6/ip6_tables.h> | 31 | #include <linux/netfilter_ipv6/ip6_tables.h> |
32 | #include <linux/netfilter_arp/arp_tables.h> | 32 | #include <linux/netfilter_arp/arp_tables.h> |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
36 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); | 36 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
37 | 37 | ||
38 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | 38 | #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) |
39 | 39 | ||
40 | struct compat_delta { | 40 | struct compat_delta { |
41 | struct compat_delta *next; | 41 | struct compat_delta *next; |
42 | unsigned int offset; | 42 | unsigned int offset; |
43 | int delta; | 43 | int delta; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | struct xt_af { | 46 | struct xt_af { |
47 | struct mutex mutex; | 47 | struct mutex mutex; |
48 | struct list_head match; | 48 | struct list_head match; |
49 | struct list_head target; | 49 | struct list_head target; |
50 | #ifdef CONFIG_COMPAT | 50 | #ifdef CONFIG_COMPAT |
51 | struct mutex compat_mutex; | 51 | struct mutex compat_mutex; |
52 | struct compat_delta *compat_offsets; | 52 | struct compat_delta *compat_offsets; |
53 | #endif | 53 | #endif |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static struct xt_af *xt; | 56 | static struct xt_af *xt; |
57 | 57 | ||
58 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { | 58 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { |
59 | [NFPROTO_UNSPEC] = "x", | 59 | [NFPROTO_UNSPEC] = "x", |
60 | [NFPROTO_IPV4] = "ip", | 60 | [NFPROTO_IPV4] = "ip", |
61 | [NFPROTO_ARP] = "arp", | 61 | [NFPROTO_ARP] = "arp", |
62 | [NFPROTO_BRIDGE] = "eb", | 62 | [NFPROTO_BRIDGE] = "eb", |
63 | [NFPROTO_IPV6] = "ip6", | 63 | [NFPROTO_IPV6] = "ip6", |
64 | }; | 64 | }; |
65 | 65 | ||
66 | /* Allow this many total (re)entries. */ | 66 | /* Allow this many total (re)entries. */ |
67 | static const unsigned int xt_jumpstack_multiplier = 2; | 67 | static const unsigned int xt_jumpstack_multiplier = 2; |
68 | 68 | ||
69 | /* Registration hooks for targets. */ | 69 | /* Registration hooks for targets. */ |
70 | int | 70 | int |
71 | xt_register_target(struct xt_target *target) | 71 | xt_register_target(struct xt_target *target) |
72 | { | 72 | { |
73 | u_int8_t af = target->family; | 73 | u_int8_t af = target->family; |
74 | int ret; | 74 | int ret; |
75 | 75 | ||
76 | ret = mutex_lock_interruptible(&xt[af].mutex); | 76 | ret = mutex_lock_interruptible(&xt[af].mutex); |
77 | if (ret != 0) | 77 | if (ret != 0) |
78 | return ret; | 78 | return ret; |
79 | list_add(&target->list, &xt[af].target); | 79 | list_add(&target->list, &xt[af].target); |
80 | mutex_unlock(&xt[af].mutex); | 80 | mutex_unlock(&xt[af].mutex); |
81 | return ret; | 81 | return ret; |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(xt_register_target); | 83 | EXPORT_SYMBOL(xt_register_target); |
84 | 84 | ||
85 | void | 85 | void |
86 | xt_unregister_target(struct xt_target *target) | 86 | xt_unregister_target(struct xt_target *target) |
87 | { | 87 | { |
88 | u_int8_t af = target->family; | 88 | u_int8_t af = target->family; |
89 | 89 | ||
90 | mutex_lock(&xt[af].mutex); | 90 | mutex_lock(&xt[af].mutex); |
91 | list_del(&target->list); | 91 | list_del(&target->list); |
92 | mutex_unlock(&xt[af].mutex); | 92 | mutex_unlock(&xt[af].mutex); |
93 | } | 93 | } |
94 | EXPORT_SYMBOL(xt_unregister_target); | 94 | EXPORT_SYMBOL(xt_unregister_target); |
95 | 95 | ||
96 | int | 96 | int |
97 | xt_register_targets(struct xt_target *target, unsigned int n) | 97 | xt_register_targets(struct xt_target *target, unsigned int n) |
98 | { | 98 | { |
99 | unsigned int i; | 99 | unsigned int i; |
100 | int err = 0; | 100 | int err = 0; |
101 | 101 | ||
102 | for (i = 0; i < n; i++) { | 102 | for (i = 0; i < n; i++) { |
103 | err = xt_register_target(&target[i]); | 103 | err = xt_register_target(&target[i]); |
104 | if (err) | 104 | if (err) |
105 | goto err; | 105 | goto err; |
106 | } | 106 | } |
107 | return err; | 107 | return err; |
108 | 108 | ||
109 | err: | 109 | err: |
110 | if (i > 0) | 110 | if (i > 0) |
111 | xt_unregister_targets(target, i); | 111 | xt_unregister_targets(target, i); |
112 | return err; | 112 | return err; |
113 | } | 113 | } |
114 | EXPORT_SYMBOL(xt_register_targets); | 114 | EXPORT_SYMBOL(xt_register_targets); |
115 | 115 | ||
116 | void | 116 | void |
117 | xt_unregister_targets(struct xt_target *target, unsigned int n) | 117 | xt_unregister_targets(struct xt_target *target, unsigned int n) |
118 | { | 118 | { |
119 | while (n-- > 0) | 119 | while (n-- > 0) |
120 | xt_unregister_target(&target[n]); | 120 | xt_unregister_target(&target[n]); |
121 | } | 121 | } |
122 | EXPORT_SYMBOL(xt_unregister_targets); | 122 | EXPORT_SYMBOL(xt_unregister_targets); |
123 | 123 | ||
124 | int | 124 | int |
125 | xt_register_match(struct xt_match *match) | 125 | xt_register_match(struct xt_match *match) |
126 | { | 126 | { |
127 | u_int8_t af = match->family; | 127 | u_int8_t af = match->family; |
128 | int ret; | 128 | int ret; |
129 | 129 | ||
130 | ret = mutex_lock_interruptible(&xt[af].mutex); | 130 | ret = mutex_lock_interruptible(&xt[af].mutex); |
131 | if (ret != 0) | 131 | if (ret != 0) |
132 | return ret; | 132 | return ret; |
133 | 133 | ||
134 | list_add(&match->list, &xt[af].match); | 134 | list_add(&match->list, &xt[af].match); |
135 | mutex_unlock(&xt[af].mutex); | 135 | mutex_unlock(&xt[af].mutex); |
136 | 136 | ||
137 | return ret; | 137 | return ret; |
138 | } | 138 | } |
139 | EXPORT_SYMBOL(xt_register_match); | 139 | EXPORT_SYMBOL(xt_register_match); |
140 | 140 | ||
141 | void | 141 | void |
142 | xt_unregister_match(struct xt_match *match) | 142 | xt_unregister_match(struct xt_match *match) |
143 | { | 143 | { |
144 | u_int8_t af = match->family; | 144 | u_int8_t af = match->family; |
145 | 145 | ||
146 | mutex_lock(&xt[af].mutex); | 146 | mutex_lock(&xt[af].mutex); |
147 | list_del(&match->list); | 147 | list_del(&match->list); |
148 | mutex_unlock(&xt[af].mutex); | 148 | mutex_unlock(&xt[af].mutex); |
149 | } | 149 | } |
150 | EXPORT_SYMBOL(xt_unregister_match); | 150 | EXPORT_SYMBOL(xt_unregister_match); |
151 | 151 | ||
152 | int | 152 | int |
153 | xt_register_matches(struct xt_match *match, unsigned int n) | 153 | xt_register_matches(struct xt_match *match, unsigned int n) |
154 | { | 154 | { |
155 | unsigned int i; | 155 | unsigned int i; |
156 | int err = 0; | 156 | int err = 0; |
157 | 157 | ||
158 | for (i = 0; i < n; i++) { | 158 | for (i = 0; i < n; i++) { |
159 | err = xt_register_match(&match[i]); | 159 | err = xt_register_match(&match[i]); |
160 | if (err) | 160 | if (err) |
161 | goto err; | 161 | goto err; |
162 | } | 162 | } |
163 | return err; | 163 | return err; |
164 | 164 | ||
165 | err: | 165 | err: |
166 | if (i > 0) | 166 | if (i > 0) |
167 | xt_unregister_matches(match, i); | 167 | xt_unregister_matches(match, i); |
168 | return err; | 168 | return err; |
169 | } | 169 | } |
170 | EXPORT_SYMBOL(xt_register_matches); | 170 | EXPORT_SYMBOL(xt_register_matches); |
171 | 171 | ||
172 | void | 172 | void |
173 | xt_unregister_matches(struct xt_match *match, unsigned int n) | 173 | xt_unregister_matches(struct xt_match *match, unsigned int n) |
174 | { | 174 | { |
175 | while (n-- > 0) | 175 | while (n-- > 0) |
176 | xt_unregister_match(&match[n]); | 176 | xt_unregister_match(&match[n]); |
177 | } | 177 | } |
178 | EXPORT_SYMBOL(xt_unregister_matches); | 178 | EXPORT_SYMBOL(xt_unregister_matches); |
179 | 179 | ||
180 | 180 | ||
181 | /* | 181 | /* |
182 | * These are weird, but module loading must not be done with mutex | 182 | * These are weird, but module loading must not be done with mutex |
183 | * held (since they will register), and we have to have a single | 183 | * held (since they will register), and we have to have a single |
184 | * function to use try_then_request_module(). | 184 | * function to use try_then_request_module(). |
185 | */ | 185 | */ |
186 | 186 | ||
187 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | 187 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ |
188 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) | 188 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) |
189 | { | 189 | { |
190 | struct xt_match *m; | 190 | struct xt_match *m; |
191 | int err = 0; | 191 | int err = 0; |
192 | 192 | ||
193 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) | 193 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) |
194 | return ERR_PTR(-EINTR); | 194 | return ERR_PTR(-EINTR); |
195 | 195 | ||
196 | list_for_each_entry(m, &xt[af].match, list) { | 196 | list_for_each_entry(m, &xt[af].match, list) { |
197 | if (strcmp(m->name, name) == 0) { | 197 | if (strcmp(m->name, name) == 0) { |
198 | if (m->revision == revision) { | 198 | if (m->revision == revision) { |
199 | if (try_module_get(m->me)) { | 199 | if (try_module_get(m->me)) { |
200 | mutex_unlock(&xt[af].mutex); | 200 | mutex_unlock(&xt[af].mutex); |
201 | return m; | 201 | return m; |
202 | } | 202 | } |
203 | } else | 203 | } else |
204 | err = -EPROTOTYPE; /* Found something. */ | 204 | err = -EPROTOTYPE; /* Found something. */ |
205 | } | 205 | } |
206 | } | 206 | } |
207 | mutex_unlock(&xt[af].mutex); | 207 | mutex_unlock(&xt[af].mutex); |
208 | 208 | ||
209 | if (af != NFPROTO_UNSPEC) | 209 | if (af != NFPROTO_UNSPEC) |
210 | /* Try searching again in the family-independent list */ | 210 | /* Try searching again in the family-independent list */ |
211 | return xt_find_match(NFPROTO_UNSPEC, name, revision); | 211 | return xt_find_match(NFPROTO_UNSPEC, name, revision); |
212 | 212 | ||
213 | return ERR_PTR(err); | 213 | return ERR_PTR(err); |
214 | } | 214 | } |
215 | EXPORT_SYMBOL(xt_find_match); | 215 | EXPORT_SYMBOL(xt_find_match); |
216 | 216 | ||
217 | struct xt_match * | 217 | struct xt_match * |
218 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | 218 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) |
219 | { | 219 | { |
220 | struct xt_match *match; | 220 | struct xt_match *match; |
221 | 221 | ||
222 | match = try_then_request_module(xt_find_match(nfproto, name, revision), | 222 | match = try_then_request_module(xt_find_match(nfproto, name, revision), |
223 | "%st_%s", xt_prefix[nfproto], name); | 223 | "%st_%s", xt_prefix[nfproto], name); |
224 | return (match != NULL) ? match : ERR_PTR(-ENOENT); | 224 | return (match != NULL) ? match : ERR_PTR(-ENOENT); |
225 | } | 225 | } |
226 | EXPORT_SYMBOL_GPL(xt_request_find_match); | 226 | EXPORT_SYMBOL_GPL(xt_request_find_match); |
227 | 227 | ||
228 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ | 228 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
229 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) | 229 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
230 | { | 230 | { |
231 | struct xt_target *t; | 231 | struct xt_target *t; |
232 | int err = 0; | 232 | int err = 0; |
233 | 233 | ||
234 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) | 234 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) |
235 | return ERR_PTR(-EINTR); | 235 | return ERR_PTR(-EINTR); |
236 | 236 | ||
237 | list_for_each_entry(t, &xt[af].target, list) { | 237 | list_for_each_entry(t, &xt[af].target, list) { |
238 | if (strcmp(t->name, name) == 0) { | 238 | if (strcmp(t->name, name) == 0) { |
239 | if (t->revision == revision) { | 239 | if (t->revision == revision) { |
240 | if (try_module_get(t->me)) { | 240 | if (try_module_get(t->me)) { |
241 | mutex_unlock(&xt[af].mutex); | 241 | mutex_unlock(&xt[af].mutex); |
242 | return t; | 242 | return t; |
243 | } | 243 | } |
244 | } else | 244 | } else |
245 | err = -EPROTOTYPE; /* Found something. */ | 245 | err = -EPROTOTYPE; /* Found something. */ |
246 | } | 246 | } |
247 | } | 247 | } |
248 | mutex_unlock(&xt[af].mutex); | 248 | mutex_unlock(&xt[af].mutex); |
249 | 249 | ||
250 | if (af != NFPROTO_UNSPEC) | 250 | if (af != NFPROTO_UNSPEC) |
251 | /* Try searching again in the family-independent list */ | 251 | /* Try searching again in the family-independent list */ |
252 | return xt_find_target(NFPROTO_UNSPEC, name, revision); | 252 | return xt_find_target(NFPROTO_UNSPEC, name, revision); |
253 | 253 | ||
254 | return ERR_PTR(err); | 254 | return ERR_PTR(err); |
255 | } | 255 | } |
256 | EXPORT_SYMBOL(xt_find_target); | 256 | EXPORT_SYMBOL(xt_find_target); |
257 | 257 | ||
258 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) | 258 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
259 | { | 259 | { |
260 | struct xt_target *target; | 260 | struct xt_target *target; |
261 | 261 | ||
262 | target = try_then_request_module(xt_find_target(af, name, revision), | 262 | target = try_then_request_module(xt_find_target(af, name, revision), |
263 | "%st_%s", xt_prefix[af], name); | 263 | "%st_%s", xt_prefix[af], name); |
264 | return (target != NULL) ? target : ERR_PTR(-ENOENT); | 264 | return (target != NULL) ? target : ERR_PTR(-ENOENT); |
265 | } | 265 | } |
266 | EXPORT_SYMBOL_GPL(xt_request_find_target); | 266 | EXPORT_SYMBOL_GPL(xt_request_find_target); |
267 | 267 | ||
268 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) | 268 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
269 | { | 269 | { |
270 | const struct xt_match *m; | 270 | const struct xt_match *m; |
271 | int have_rev = 0; | 271 | int have_rev = 0; |
272 | 272 | ||
273 | list_for_each_entry(m, &xt[af].match, list) { | 273 | list_for_each_entry(m, &xt[af].match, list) { |
274 | if (strcmp(m->name, name) == 0) { | 274 | if (strcmp(m->name, name) == 0) { |
275 | if (m->revision > *bestp) | 275 | if (m->revision > *bestp) |
276 | *bestp = m->revision; | 276 | *bestp = m->revision; |
277 | if (m->revision == revision) | 277 | if (m->revision == revision) |
278 | have_rev = 1; | 278 | have_rev = 1; |
279 | } | 279 | } |
280 | } | 280 | } |
281 | 281 | ||
282 | if (af != NFPROTO_UNSPEC && !have_rev) | 282 | if (af != NFPROTO_UNSPEC && !have_rev) |
283 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | 283 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); |
284 | 284 | ||
285 | return have_rev; | 285 | return have_rev; |
286 | } | 286 | } |
287 | 287 | ||
288 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) | 288 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
289 | { | 289 | { |
290 | const struct xt_target *t; | 290 | const struct xt_target *t; |
291 | int have_rev = 0; | 291 | int have_rev = 0; |
292 | 292 | ||
293 | list_for_each_entry(t, &xt[af].target, list) { | 293 | list_for_each_entry(t, &xt[af].target, list) { |
294 | if (strcmp(t->name, name) == 0) { | 294 | if (strcmp(t->name, name) == 0) { |
295 | if (t->revision > *bestp) | 295 | if (t->revision > *bestp) |
296 | *bestp = t->revision; | 296 | *bestp = t->revision; |
297 | if (t->revision == revision) | 297 | if (t->revision == revision) |
298 | have_rev = 1; | 298 | have_rev = 1; |
299 | } | 299 | } |
300 | } | 300 | } |
301 | 301 | ||
302 | if (af != NFPROTO_UNSPEC && !have_rev) | 302 | if (af != NFPROTO_UNSPEC && !have_rev) |
303 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | 303 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); |
304 | 304 | ||
305 | return have_rev; | 305 | return have_rev; |
306 | } | 306 | } |
307 | 307 | ||
308 | /* Returns true or false (if no such extension at all) */ | 308 | /* Returns true or false (if no such extension at all) */ |
309 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, | 309 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
310 | int *err) | 310 | int *err) |
311 | { | 311 | { |
312 | int have_rev, best = -1; | 312 | int have_rev, best = -1; |
313 | 313 | ||
314 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) { | 314 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) { |
315 | *err = -EINTR; | 315 | *err = -EINTR; |
316 | return 1; | 316 | return 1; |
317 | } | 317 | } |
318 | if (target == 1) | 318 | if (target == 1) |
319 | have_rev = target_revfn(af, name, revision, &best); | 319 | have_rev = target_revfn(af, name, revision, &best); |
320 | else | 320 | else |
321 | have_rev = match_revfn(af, name, revision, &best); | 321 | have_rev = match_revfn(af, name, revision, &best); |
322 | mutex_unlock(&xt[af].mutex); | 322 | mutex_unlock(&xt[af].mutex); |
323 | 323 | ||
324 | /* Nothing at all? Return 0 to try loading module. */ | 324 | /* Nothing at all? Return 0 to try loading module. */ |
325 | if (best == -1) { | 325 | if (best == -1) { |
326 | *err = -ENOENT; | 326 | *err = -ENOENT; |
327 | return 0; | 327 | return 0; |
328 | } | 328 | } |
329 | 329 | ||
330 | *err = best; | 330 | *err = best; |
331 | if (!have_rev) | 331 | if (!have_rev) |
332 | *err = -EPROTONOSUPPORT; | 332 | *err = -EPROTONOSUPPORT; |
333 | return 1; | 333 | return 1; |
334 | } | 334 | } |
335 | EXPORT_SYMBOL_GPL(xt_find_revision); | 335 | EXPORT_SYMBOL_GPL(xt_find_revision); |
336 | 336 | ||
337 | static char *textify_hooks(char *buf, size_t size, unsigned int mask) | 337 | static char *textify_hooks(char *buf, size_t size, unsigned int mask) |
338 | { | 338 | { |
339 | static const char *const names[] = { | 339 | static const char *const names[] = { |
340 | "PREROUTING", "INPUT", "FORWARD", | 340 | "PREROUTING", "INPUT", "FORWARD", |
341 | "OUTPUT", "POSTROUTING", "BROUTING", | 341 | "OUTPUT", "POSTROUTING", "BROUTING", |
342 | }; | 342 | }; |
343 | unsigned int i; | 343 | unsigned int i; |
344 | char *p = buf; | 344 | char *p = buf; |
345 | bool np = false; | 345 | bool np = false; |
346 | int res; | 346 | int res; |
347 | 347 | ||
348 | *p = '\0'; | 348 | *p = '\0'; |
349 | for (i = 0; i < ARRAY_SIZE(names); ++i) { | 349 | for (i = 0; i < ARRAY_SIZE(names); ++i) { |
350 | if (!(mask & (1 << i))) | 350 | if (!(mask & (1 << i))) |
351 | continue; | 351 | continue; |
352 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); | 352 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); |
353 | if (res > 0) { | 353 | if (res > 0) { |
354 | size -= res; | 354 | size -= res; |
355 | p += res; | 355 | p += res; |
356 | } | 356 | } |
357 | np = true; | 357 | np = true; |
358 | } | 358 | } |
359 | 359 | ||
360 | return buf; | 360 | return buf; |
361 | } | 361 | } |
362 | 362 | ||
363 | int xt_check_match(struct xt_mtchk_param *par, | 363 | int xt_check_match(struct xt_mtchk_param *par, |
364 | unsigned int size, u_int8_t proto, bool inv_proto) | 364 | unsigned int size, u_int8_t proto, bool inv_proto) |
365 | { | 365 | { |
366 | int ret; | 366 | int ret; |
367 | 367 | ||
368 | if (XT_ALIGN(par->match->matchsize) != size && | 368 | if (XT_ALIGN(par->match->matchsize) != size && |
369 | par->match->matchsize != -1) { | 369 | par->match->matchsize != -1) { |
370 | /* | 370 | /* |
371 | * ebt_among is exempt from centralized matchsize checking | 371 | * ebt_among is exempt from centralized matchsize checking |
372 | * because it uses a dynamic-size data set. | 372 | * because it uses a dynamic-size data set. |
373 | */ | 373 | */ |
374 | pr_err("%s_tables: %s.%u match: invalid size " | 374 | pr_err("%s_tables: %s.%u match: invalid size " |
375 | "%u (kernel) != (user) %u\n", | 375 | "%u (kernel) != (user) %u\n", |
376 | xt_prefix[par->family], par->match->name, | 376 | xt_prefix[par->family], par->match->name, |
377 | par->match->revision, | 377 | par->match->revision, |
378 | XT_ALIGN(par->match->matchsize), size); | 378 | XT_ALIGN(par->match->matchsize), size); |
379 | return -EINVAL; | 379 | return -EINVAL; |
380 | } | 380 | } |
381 | if (par->match->table != NULL && | 381 | if (par->match->table != NULL && |
382 | strcmp(par->match->table, par->table) != 0) { | 382 | strcmp(par->match->table, par->table) != 0) { |
383 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", | 383 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", |
384 | xt_prefix[par->family], par->match->name, | 384 | xt_prefix[par->family], par->match->name, |
385 | par->match->table, par->table); | 385 | par->match->table, par->table); |
386 | return -EINVAL; | 386 | return -EINVAL; |
387 | } | 387 | } |
388 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { | 388 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { |
389 | char used[64], allow[64]; | 389 | char used[64], allow[64]; |
390 | 390 | ||
391 | pr_err("%s_tables: %s match: used from hooks %s, but only " | 391 | pr_err("%s_tables: %s match: used from hooks %s, but only " |
392 | "valid from %s\n", | 392 | "valid from %s\n", |
393 | xt_prefix[par->family], par->match->name, | 393 | xt_prefix[par->family], par->match->name, |
394 | textify_hooks(used, sizeof(used), par->hook_mask), | 394 | textify_hooks(used, sizeof(used), par->hook_mask), |
395 | textify_hooks(allow, sizeof(allow), par->match->hooks)); | 395 | textify_hooks(allow, sizeof(allow), par->match->hooks)); |
396 | return -EINVAL; | 396 | return -EINVAL; |
397 | } | 397 | } |
398 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { | 398 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { |
399 | pr_err("%s_tables: %s match: only valid for protocol %u\n", | 399 | pr_err("%s_tables: %s match: only valid for protocol %u\n", |
400 | xt_prefix[par->family], par->match->name, | 400 | xt_prefix[par->family], par->match->name, |
401 | par->match->proto); | 401 | par->match->proto); |
402 | return -EINVAL; | 402 | return -EINVAL; |
403 | } | 403 | } |
404 | if (par->match->checkentry != NULL) { | 404 | if (par->match->checkentry != NULL) { |
405 | ret = par->match->checkentry(par); | 405 | ret = par->match->checkentry(par); |
406 | if (ret < 0) | 406 | if (ret < 0) |
407 | return ret; | 407 | return ret; |
408 | else if (ret > 0) | 408 | else if (ret > 0) |
409 | /* Flag up potential errors. */ | 409 | /* Flag up potential errors. */ |
410 | return -EIO; | 410 | return -EIO; |
411 | } | 411 | } |
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
414 | EXPORT_SYMBOL_GPL(xt_check_match); | 414 | EXPORT_SYMBOL_GPL(xt_check_match); |
415 | 415 | ||
#ifdef CONFIG_COMPAT
/*
 * Record an (offset, delta) pair describing how much the native layout
 * of the entry at @offset differs from its compat (32-bit) layout, for
 * address family @af.  The recorded pairs are consumed by
 * xt_compat_calc_jump() to fix up jump targets during translation.
 *
 * Returns 0 on success or -ENOMEM if the node cannot be allocated.
 */
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		/* Insert right behind the head.  List order is irrelevant
		 * to xt_compat_calc_jump(), which sums matching deltas
		 * over the whole list. */
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
438 | 438 | ||
439 | void xt_compat_flush_offsets(u_int8_t af) | 439 | void xt_compat_flush_offsets(u_int8_t af) |
440 | { | 440 | { |
441 | struct compat_delta *tmp, *next; | 441 | struct compat_delta *tmp, *next; |
442 | 442 | ||
443 | if (xt[af].compat_offsets) { | 443 | if (xt[af].compat_offsets) { |
444 | for (tmp = xt[af].compat_offsets; tmp; tmp = next) { | 444 | for (tmp = xt[af].compat_offsets; tmp; tmp = next) { |
445 | next = tmp->next; | 445 | next = tmp->next; |
446 | kfree(tmp); | 446 | kfree(tmp); |
447 | } | 447 | } |
448 | xt[af].compat_offsets = NULL; | 448 | xt[af].compat_offsets = NULL; |
449 | } | 449 | } |
450 | } | 450 | } |
451 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | 451 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); |
452 | 452 | ||
453 | int xt_compat_calc_jump(u_int8_t af, unsigned int offset) | 453 | int xt_compat_calc_jump(u_int8_t af, unsigned int offset) |
454 | { | 454 | { |
455 | struct compat_delta *tmp; | 455 | struct compat_delta *tmp; |
456 | int delta; | 456 | int delta; |
457 | 457 | ||
458 | for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) | 458 | for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) |
459 | if (tmp->offset < offset) | 459 | if (tmp->offset < offset) |
460 | delta += tmp->delta; | 460 | delta += tmp->delta; |
461 | return delta; | 461 | return delta; |
462 | } | 462 | } |
463 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | 463 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); |
464 | 464 | ||
465 | int xt_compat_match_offset(const struct xt_match *match) | 465 | int xt_compat_match_offset(const struct xt_match *match) |
466 | { | 466 | { |
467 | u_int16_t csize = match->compatsize ? : match->matchsize; | 467 | u_int16_t csize = match->compatsize ? : match->matchsize; |
468 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | 468 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); |
469 | } | 469 | } |
470 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | 470 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); |
471 | 471 | ||
/*
 * Translate one compat match entry into its native in-kernel layout.
 *
 * @m initially points at the compat source (aliased below as @cm) and
 * is then repointed at the destination; *@dstptr is the native
 * destination cursor, advanced past the converted match on return, and
 * *@size grows by the native/compat size difference.  Always returns 0.
 */
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		/* Extension knows how to widen its own payload. */
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* Zero the alignment padding so no stale kernel memory leaks in. */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
498 | 498 | ||
/*
 * Copy one native match entry out to userspace in compat layout.
 *
 * *@dstptr is the user-space destination cursor, advanced past the
 * written match on success; *@size shrinks by the native/compat size
 * difference.  Returns 0 on success or -EFAULT if any userspace copy
 * fails.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	/* Header first: shrunken size and the match's canonical name. */
	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		/* Extension narrows its own payload. */
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
527 | 527 | ||
/*
 * Validate a target extension against the rule that references it:
 * payload size, table restriction, allowed hooks and protocol, then the
 * extension's own checkentry() callback.
 *
 * Returns 0 if the target may be used, a negative errno otherwise.  A
 * positive checkentry() result is flagged as -EIO.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	/* Userspace and kernel must agree on the aligned payload size. */
	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	/* Some targets are restricted to one specific table. */
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	/* The rule's hook mask must be a subset of the allowed hooks. */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->target->hooks));
		return -EINVAL;
	}
	/* Protocol-bound targets refuse other protocols and inversion. */
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
575 | 575 | ||
576 | #ifdef CONFIG_COMPAT | 576 | #ifdef CONFIG_COMPAT |
577 | int xt_compat_target_offset(const struct xt_target *target) | 577 | int xt_compat_target_offset(const struct xt_target *target) |
578 | { | 578 | { |
579 | u_int16_t csize = target->compatsize ? : target->targetsize; | 579 | u_int16_t csize = target->compatsize ? : target->targetsize; |
580 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); | 580 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); |
581 | } | 581 | } |
582 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | 582 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); |
583 | 583 | ||
/*
 * Translate one compat target entry into its native in-kernel layout.
 *
 * Mirror image of xt_compat_match_from_user(): @t aliases the compat
 * source (@ct), then is repointed at *@dstptr; the cursor advances past
 * the converted target and *@size grows by the layout difference.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		/* Extension knows how to widen its own payload. */
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* Zero the alignment padding so no stale kernel memory leaks in. */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
609 | 609 | ||
/*
 * Copy one native target entry out to userspace in compat layout.
 *
 * *@dstptr advances past the written target on success and *@size
 * shrinks by the layout difference.  Returns 0 on success or -EFAULT
 * if any userspace copy fails.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	/* Header first: shrunken size and the target's canonical name. */
	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		/* Extension narrows its own payload. */
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
638 | 638 | ||
/*
 * Allocate a table descriptor plus one @size-byte copy of the rule blob
 * per possible CPU -- kmalloc'ed when it fits in a page, vmalloc'ed
 * otherwise.  Returns NULL on allocation failure or an absurd @size.
 */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		/* Keep each copy NUMA-local to its CPU. */
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							GFP_KERNEL,
							cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			/* Releases all copies allocated so far. */
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);
672 | 672 | ||
/*
 * Release everything hanging off a table descriptor: the per-cpu rule
 * blobs, the per-cpu jump stacks (possibly partially allocated -- see
 * xt_jumpstack_alloc()), the jump-stack pointer array, the percpu stack
 * index, and finally the descriptor itself.  Tolerates NULL members.
 */
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	/* Allocation strategy (kmalloc vs vmalloc) is keyed off size,
	 * exactly as in xt_alloc_table_info(). */
	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
704 | 704 | ||
/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
/*
 * On success the per-family mutex is held (caller releases it via
 * xt_table_unlock()) and a reference on the owning module has been
 * taken.  Returns NULL if no such table exists, ERR_PTR(-EINTR) if the
 * mutex wait was interrupted.
 */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;	/* mutex intentionally kept held */
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
721 | 721 | ||
/* Drop the per-family mutex taken by xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
727 | 727 | ||
#ifdef CONFIG_COMPAT
/* Serialize compat translation for one address family. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);
734 | 734 | ||
/* Counterpart of xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
741 | 741 | ||
/*
 * Per-cpu lock state shared between the packet-processing fast path and
 * the counter readers.  NOTE(review): all users live outside this file;
 * see the declaration in x_tables.h for the locking rules.
 */
DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
744 | 744 | ||
745 | static int xt_jumpstack_alloc(struct xt_table_info *i) | 745 | static int xt_jumpstack_alloc(struct xt_table_info *i) |
746 | { | 746 | { |
747 | unsigned int size; | 747 | unsigned int size; |
748 | int cpu; | 748 | int cpu; |
749 | 749 | ||
750 | i->stackptr = alloc_percpu(unsigned int); | 750 | i->stackptr = alloc_percpu(unsigned int); |
751 | if (i->stackptr == NULL) | 751 | if (i->stackptr == NULL) |
752 | return -ENOMEM; | 752 | return -ENOMEM; |
753 | 753 | ||
754 | size = sizeof(void **) * nr_cpu_ids; | 754 | size = sizeof(void **) * nr_cpu_ids; |
755 | if (size > PAGE_SIZE) | 755 | if (size > PAGE_SIZE) |
756 | i->jumpstack = vmalloc(size); | 756 | i->jumpstack = vmalloc(size); |
757 | else | 757 | else |
758 | i->jumpstack = kmalloc(size, GFP_KERNEL); | 758 | i->jumpstack = kmalloc(size, GFP_KERNEL); |
759 | if (i->jumpstack == NULL) | 759 | if (i->jumpstack == NULL) |
760 | return -ENOMEM; | 760 | return -ENOMEM; |
761 | memset(i->jumpstack, 0, size); | 761 | memset(i->jumpstack, 0, size); |
762 | 762 | ||
763 | i->stacksize *= xt_jumpstack_multiplier; | 763 | i->stacksize *= xt_jumpstack_multiplier; |
764 | size = sizeof(void *) * i->stacksize; | 764 | size = sizeof(void *) * i->stacksize; |
765 | for_each_possible_cpu(cpu) { | 765 | for_each_possible_cpu(cpu) { |
766 | if (size > PAGE_SIZE) | 766 | if (size > PAGE_SIZE) |
767 | i->jumpstack[cpu] = vmalloc_node(size, | 767 | i->jumpstack[cpu] = vmalloc_node(size, |
768 | cpu_to_node(cpu)); | 768 | cpu_to_node(cpu)); |
769 | else | 769 | else |
770 | i->jumpstack[cpu] = kmalloc_node(size, | 770 | i->jumpstack[cpu] = kmalloc_node(size, |
771 | GFP_KERNEL, cpu_to_node(cpu)); | 771 | GFP_KERNEL, cpu_to_node(cpu)); |
772 | if (i->jumpstack[cpu] == NULL) | 772 | if (i->jumpstack[cpu] == NULL) |
773 | /* | 773 | /* |
774 | * Freeing will be done later on by the callers. The | 774 | * Freeing will be done later on by the callers. The |
775 | * chain is: xt_replace_table -> __do_replace -> | 775 | * chain is: xt_replace_table -> __do_replace -> |
776 | * do_replace -> xt_free_table_info. | 776 | * do_replace -> xt_free_table_info. |
777 | */ | 777 | */ |
778 | return -ENOMEM; | 778 | return -ENOMEM; |
779 | } | 779 | } |
780 | 780 | ||
781 | return 0; | 781 | return 0; |
782 | } | 782 | } |
783 | 783 | ||
/*
 * Swap @table's active rule blob for @newinfo after verifying that
 * userspace's idea of the old entry count (@num_counters) still
 * matches.  The swap runs with BH disabled on this CPU.
 *
 * Returns the previous private info (for the caller to harvest
 * counters from and free), or NULL with *@error set to a negative
 * errno.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
826 | 826 | ||
/*
 * Register a copy of @input_table in namespace @net, installing
 * @bootstrap as a placeholder and then swapping in @newinfo via
 * xt_replace_table().  Returns the live table or ERR_PTR() on error
 * (-EEXIST for a duplicate name, -ENOMEM, -EINTR, or whatever the
 * replace step reports).
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	/* On failure xt_replace_table() has stored the errno in ret. */
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

 unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
879 | 879 | ||
/*
 * Unlink @table from its namespace list and free the descriptor.
 * Returns the private xt_table_info, whose ownership passes to the
 * caller for counter harvesting and freeing.
 */
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
893 | 893 | ||
#ifdef CONFIG_PROC_FS
/* Per-open state for the /proc table-name iterator: netns-aware
 * seq_file state plus the address family being listed. */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
/* seq_file ->start: take the per-family mutex (held until ->stop) and
 * position the iterator in this namespace's table list. */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}
908 | 908 | ||
/* seq_file ->next: advance to the following table, mutex still held. */
static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}
917 | 917 | ||
/* seq_file ->stop: release the mutex taken in ->start. */
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}
925 | 925 | ||
/* seq_file ->show: emit one table name per line; tables with an empty
 * name are suppressed. */
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}
935 | 935 | ||
/* Iterator callbacks for the /proc table-name listing. */
static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};
942 | 942 | ||
/* ->open: set up the netns-aware seq iterator and stash the address
 * family that was encoded in the proc entry's ->data pointer. */
static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}
956 | 956 | ||
957 | static const struct file_operations xt_table_ops = { | 957 | static const struct file_operations xt_table_ops = { |
958 | .owner = THIS_MODULE, | 958 | .owner = THIS_MODULE, |
959 | .open = xt_table_open, | 959 | .open = xt_table_open, |
960 | .read = seq_read, | 960 | .read = seq_read, |
961 | .llseek = seq_lseek, | 961 | .llseek = seq_lseek, |
962 | .release = seq_release_net, | 962 | .release = seq_release_net, |
963 | }; | 963 | }; |
964 | 964 | ||
965 | /* | 965 | /* |
966 | * Traverse state for ip{,6}_{tables,matches} for helping crossing | 966 | * Traverse state for ip{,6}_{tables,matches} for helping crossing |
967 | * the multi-AF mutexes. | 967 | * the multi-AF mutexes. |
968 | */ | 968 | */ |
969 | struct nf_mttg_trav { | 969 | struct nf_mttg_trav { |
970 | struct list_head *head, *curr; | 970 | struct list_head *head, *curr; |
971 | uint8_t class, nfproto; | 971 | uint8_t class, nfproto; |
972 | }; | 972 | }; |
973 | 973 | ||
974 | enum { | 974 | enum { |
975 | MTTG_TRAV_INIT, | 975 | MTTG_TRAV_INIT, |
976 | MTTG_TRAV_NFP_UNSPEC, | 976 | MTTG_TRAV_NFP_UNSPEC, |
977 | MTTG_TRAV_NFP_SPEC, | 977 | MTTG_TRAV_NFP_SPEC, |
978 | MTTG_TRAV_DONE, | 978 | MTTG_TRAV_DONE, |
979 | }; | 979 | }; |
980 | 980 | ||
981 | static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, | 981 | static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, |
982 | bool is_target) | 982 | bool is_target) |
983 | { | 983 | { |
984 | static const uint8_t next_class[] = { | 984 | static const uint8_t next_class[] = { |
985 | [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, | 985 | [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, |
986 | [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, | 986 | [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, |
987 | }; | 987 | }; |
988 | struct nf_mttg_trav *trav = seq->private; | 988 | struct nf_mttg_trav *trav = seq->private; |
989 | 989 | ||
990 | switch (trav->class) { | 990 | switch (trav->class) { |
991 | case MTTG_TRAV_INIT: | 991 | case MTTG_TRAV_INIT: |
992 | trav->class = MTTG_TRAV_NFP_UNSPEC; | 992 | trav->class = MTTG_TRAV_NFP_UNSPEC; |
993 | mutex_lock(&xt[NFPROTO_UNSPEC].mutex); | 993 | mutex_lock(&xt[NFPROTO_UNSPEC].mutex); |
994 | trav->head = trav->curr = is_target ? | 994 | trav->head = trav->curr = is_target ? |
995 | &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; | 995 | &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; |
996 | break; | 996 | break; |
997 | case MTTG_TRAV_NFP_UNSPEC: | 997 | case MTTG_TRAV_NFP_UNSPEC: |
998 | trav->curr = trav->curr->next; | 998 | trav->curr = trav->curr->next; |
999 | if (trav->curr != trav->head) | 999 | if (trav->curr != trav->head) |
1000 | break; | 1000 | break; |
1001 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | 1001 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); |
1002 | mutex_lock(&xt[trav->nfproto].mutex); | 1002 | mutex_lock(&xt[trav->nfproto].mutex); |
1003 | trav->head = trav->curr = is_target ? | 1003 | trav->head = trav->curr = is_target ? |
1004 | &xt[trav->nfproto].target : &xt[trav->nfproto].match; | 1004 | &xt[trav->nfproto].target : &xt[trav->nfproto].match; |
1005 | trav->class = next_class[trav->class]; | 1005 | trav->class = next_class[trav->class]; |
1006 | break; | 1006 | break; |
1007 | case MTTG_TRAV_NFP_SPEC: | 1007 | case MTTG_TRAV_NFP_SPEC: |
1008 | trav->curr = trav->curr->next; | 1008 | trav->curr = trav->curr->next; |
1009 | if (trav->curr != trav->head) | 1009 | if (trav->curr != trav->head) |
1010 | break; | 1010 | break; |
1011 | /* fallthru, _stop will unlock */ | 1011 | /* fallthru, _stop will unlock */ |
1012 | default: | 1012 | default: |
1013 | return NULL; | 1013 | return NULL; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | if (ppos != NULL) | 1016 | if (ppos != NULL) |
1017 | ++*ppos; | 1017 | ++*ppos; |
1018 | return trav; | 1018 | return trav; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, | 1021 | static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, |
1022 | bool is_target) | 1022 | bool is_target) |
1023 | { | 1023 | { |
1024 | struct nf_mttg_trav *trav = seq->private; | 1024 | struct nf_mttg_trav *trav = seq->private; |
1025 | unsigned int j; | 1025 | unsigned int j; |
1026 | 1026 | ||
1027 | trav->class = MTTG_TRAV_INIT; | 1027 | trav->class = MTTG_TRAV_INIT; |
1028 | for (j = 0; j < *pos; ++j) | 1028 | for (j = 0; j < *pos; ++j) |
1029 | if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) | 1029 | if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) |
1030 | return NULL; | 1030 | return NULL; |
1031 | return trav; | 1031 | return trav; |
1032 | } | 1032 | } |
1033 | 1033 | ||
1034 | static void xt_mttg_seq_stop(struct seq_file *seq, void *v) | 1034 | static void xt_mttg_seq_stop(struct seq_file *seq, void *v) |
1035 | { | 1035 | { |
1036 | struct nf_mttg_trav *trav = seq->private; | 1036 | struct nf_mttg_trav *trav = seq->private; |
1037 | 1037 | ||
1038 | switch (trav->class) { | 1038 | switch (trav->class) { |
1039 | case MTTG_TRAV_NFP_UNSPEC: | 1039 | case MTTG_TRAV_NFP_UNSPEC: |
1040 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | 1040 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); |
1041 | break; | 1041 | break; |
1042 | case MTTG_TRAV_NFP_SPEC: | 1042 | case MTTG_TRAV_NFP_SPEC: |
1043 | mutex_unlock(&xt[trav->nfproto].mutex); | 1043 | mutex_unlock(&xt[trav->nfproto].mutex); |
1044 | break; | 1044 | break; |
1045 | } | 1045 | } |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) | 1048 | static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) |
1049 | { | 1049 | { |
1050 | return xt_mttg_seq_start(seq, pos, false); | 1050 | return xt_mttg_seq_start(seq, pos, false); |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) | 1053 | static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
1054 | { | 1054 | { |
1055 | return xt_mttg_seq_next(seq, v, ppos, false); | 1055 | return xt_mttg_seq_next(seq, v, ppos, false); |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | static int xt_match_seq_show(struct seq_file *seq, void *v) | 1058 | static int xt_match_seq_show(struct seq_file *seq, void *v) |
1059 | { | 1059 | { |
1060 | const struct nf_mttg_trav *trav = seq->private; | 1060 | const struct nf_mttg_trav *trav = seq->private; |
1061 | const struct xt_match *match; | 1061 | const struct xt_match *match; |
1062 | 1062 | ||
1063 | switch (trav->class) { | 1063 | switch (trav->class) { |
1064 | case MTTG_TRAV_NFP_UNSPEC: | 1064 | case MTTG_TRAV_NFP_UNSPEC: |
1065 | case MTTG_TRAV_NFP_SPEC: | 1065 | case MTTG_TRAV_NFP_SPEC: |
1066 | if (trav->curr == trav->head) | 1066 | if (trav->curr == trav->head) |
1067 | return 0; | 1067 | return 0; |
1068 | match = list_entry(trav->curr, struct xt_match, list); | 1068 | match = list_entry(trav->curr, struct xt_match, list); |
1069 | return (*match->name == '\0') ? 0 : | 1069 | return (*match->name == '\0') ? 0 : |
1070 | seq_printf(seq, "%s\n", match->name); | 1070 | seq_printf(seq, "%s\n", match->name); |
1071 | } | 1071 | } |
1072 | return 0; | 1072 | return 0; |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | static const struct seq_operations xt_match_seq_ops = { | 1075 | static const struct seq_operations xt_match_seq_ops = { |
1076 | .start = xt_match_seq_start, | 1076 | .start = xt_match_seq_start, |
1077 | .next = xt_match_seq_next, | 1077 | .next = xt_match_seq_next, |
1078 | .stop = xt_mttg_seq_stop, | 1078 | .stop = xt_mttg_seq_stop, |
1079 | .show = xt_match_seq_show, | 1079 | .show = xt_match_seq_show, |
1080 | }; | 1080 | }; |
1081 | 1081 | ||
1082 | static int xt_match_open(struct inode *inode, struct file *file) | 1082 | static int xt_match_open(struct inode *inode, struct file *file) |
1083 | { | 1083 | { |
1084 | struct seq_file *seq; | 1084 | struct seq_file *seq; |
1085 | struct nf_mttg_trav *trav; | 1085 | struct nf_mttg_trav *trav; |
1086 | int ret; | 1086 | int ret; |
1087 | 1087 | ||
1088 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); | 1088 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); |
1089 | if (trav == NULL) | 1089 | if (trav == NULL) |
1090 | return -ENOMEM; | 1090 | return -ENOMEM; |
1091 | 1091 | ||
1092 | ret = seq_open(file, &xt_match_seq_ops); | 1092 | ret = seq_open(file, &xt_match_seq_ops); |
1093 | if (ret < 0) { | 1093 | if (ret < 0) { |
1094 | kfree(trav); | 1094 | kfree(trav); |
1095 | return ret; | 1095 | return ret; |
1096 | } | 1096 | } |
1097 | 1097 | ||
1098 | seq = file->private_data; | 1098 | seq = file->private_data; |
1099 | seq->private = trav; | 1099 | seq->private = trav; |
1100 | trav->nfproto = (unsigned long)PDE(inode)->data; | 1100 | trav->nfproto = (unsigned long)PDE(inode)->data; |
1101 | return 0; | 1101 | return 0; |
1102 | } | 1102 | } |
1103 | 1103 | ||
1104 | static const struct file_operations xt_match_ops = { | 1104 | static const struct file_operations xt_match_ops = { |
1105 | .owner = THIS_MODULE, | 1105 | .owner = THIS_MODULE, |
1106 | .open = xt_match_open, | 1106 | .open = xt_match_open, |
1107 | .read = seq_read, | 1107 | .read = seq_read, |
1108 | .llseek = seq_lseek, | 1108 | .llseek = seq_lseek, |
1109 | .release = seq_release_private, | 1109 | .release = seq_release_private, |
1110 | }; | 1110 | }; |
1111 | 1111 | ||
1112 | static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) | 1112 | static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) |
1113 | { | 1113 | { |
1114 | return xt_mttg_seq_start(seq, pos, true); | 1114 | return xt_mttg_seq_start(seq, pos, true); |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) | 1117 | static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
1118 | { | 1118 | { |
1119 | return xt_mttg_seq_next(seq, v, ppos, true); | 1119 | return xt_mttg_seq_next(seq, v, ppos, true); |
1120 | } | 1120 | } |
1121 | 1121 | ||
1122 | static int xt_target_seq_show(struct seq_file *seq, void *v) | 1122 | static int xt_target_seq_show(struct seq_file *seq, void *v) |
1123 | { | 1123 | { |
1124 | const struct nf_mttg_trav *trav = seq->private; | 1124 | const struct nf_mttg_trav *trav = seq->private; |
1125 | const struct xt_target *target; | 1125 | const struct xt_target *target; |
1126 | 1126 | ||
1127 | switch (trav->class) { | 1127 | switch (trav->class) { |
1128 | case MTTG_TRAV_NFP_UNSPEC: | 1128 | case MTTG_TRAV_NFP_UNSPEC: |
1129 | case MTTG_TRAV_NFP_SPEC: | 1129 | case MTTG_TRAV_NFP_SPEC: |
1130 | if (trav->curr == trav->head) | 1130 | if (trav->curr == trav->head) |
1131 | return 0; | 1131 | return 0; |
1132 | target = list_entry(trav->curr, struct xt_target, list); | 1132 | target = list_entry(trav->curr, struct xt_target, list); |
1133 | return (*target->name == '\0') ? 0 : | 1133 | return (*target->name == '\0') ? 0 : |
1134 | seq_printf(seq, "%s\n", target->name); | 1134 | seq_printf(seq, "%s\n", target->name); |
1135 | } | 1135 | } |
1136 | return 0; | 1136 | return 0; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | static const struct seq_operations xt_target_seq_ops = { | 1139 | static const struct seq_operations xt_target_seq_ops = { |
1140 | .start = xt_target_seq_start, | 1140 | .start = xt_target_seq_start, |
1141 | .next = xt_target_seq_next, | 1141 | .next = xt_target_seq_next, |
1142 | .stop = xt_mttg_seq_stop, | 1142 | .stop = xt_mttg_seq_stop, |
1143 | .show = xt_target_seq_show, | 1143 | .show = xt_target_seq_show, |
1144 | }; | 1144 | }; |
1145 | 1145 | ||
1146 | static int xt_target_open(struct inode *inode, struct file *file) | 1146 | static int xt_target_open(struct inode *inode, struct file *file) |
1147 | { | 1147 | { |
1148 | struct seq_file *seq; | 1148 | struct seq_file *seq; |
1149 | struct nf_mttg_trav *trav; | 1149 | struct nf_mttg_trav *trav; |
1150 | int ret; | 1150 | int ret; |
1151 | 1151 | ||
1152 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); | 1152 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); |
1153 | if (trav == NULL) | 1153 | if (trav == NULL) |
1154 | return -ENOMEM; | 1154 | return -ENOMEM; |
1155 | 1155 | ||
1156 | ret = seq_open(file, &xt_target_seq_ops); | 1156 | ret = seq_open(file, &xt_target_seq_ops); |
1157 | if (ret < 0) { | 1157 | if (ret < 0) { |
1158 | kfree(trav); | 1158 | kfree(trav); |
1159 | return ret; | 1159 | return ret; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | seq = file->private_data; | 1162 | seq = file->private_data; |
1163 | seq->private = trav; | 1163 | seq->private = trav; |
1164 | trav->nfproto = (unsigned long)PDE(inode)->data; | 1164 | trav->nfproto = (unsigned long)PDE(inode)->data; |
1165 | return 0; | 1165 | return 0; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | static const struct file_operations xt_target_ops = { | 1168 | static const struct file_operations xt_target_ops = { |
1169 | .owner = THIS_MODULE, | 1169 | .owner = THIS_MODULE, |
1170 | .open = xt_target_open, | 1170 | .open = xt_target_open, |
1171 | .read = seq_read, | 1171 | .read = seq_read, |
1172 | .llseek = seq_lseek, | 1172 | .llseek = seq_lseek, |
1173 | .release = seq_release_private, | 1173 | .release = seq_release_private, |
1174 | }; | 1174 | }; |
1175 | 1175 | ||
1176 | #define FORMAT_TABLES "_tables_names" | 1176 | #define FORMAT_TABLES "_tables_names" |
1177 | #define FORMAT_MATCHES "_tables_matches" | 1177 | #define FORMAT_MATCHES "_tables_matches" |
1178 | #define FORMAT_TARGETS "_tables_targets" | 1178 | #define FORMAT_TARGETS "_tables_targets" |
1179 | 1179 | ||
1180 | #endif /* CONFIG_PROC_FS */ | 1180 | #endif /* CONFIG_PROC_FS */ |
1181 | 1181 | ||
1182 | /** | 1182 | /** |
1183 | * xt_hook_link - set up hooks for a new table | 1183 | * xt_hook_link - set up hooks for a new table |
1184 | * @table: table with metadata needed to set up hooks | 1184 | * @table: table with metadata needed to set up hooks |
1185 | * @fn: Hook function | 1185 | * @fn: Hook function |
1186 | * | 1186 | * |
1187 | * This function will take care of creating and registering the necessary | 1187 | * This function will take care of creating and registering the necessary |
1188 | * Netfilter hooks for XT tables. | 1188 | * Netfilter hooks for XT tables. |
1189 | */ | 1189 | */ |
1190 | struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn) | 1190 | struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn) |
1191 | { | 1191 | { |
1192 | unsigned int hook_mask = table->valid_hooks; | 1192 | unsigned int hook_mask = table->valid_hooks; |
1193 | uint8_t i, num_hooks = hweight32(hook_mask); | 1193 | uint8_t i, num_hooks = hweight32(hook_mask); |
1194 | uint8_t hooknum; | 1194 | uint8_t hooknum; |
1195 | struct nf_hook_ops *ops; | 1195 | struct nf_hook_ops *ops; |
1196 | int ret; | 1196 | int ret; |
1197 | 1197 | ||
1198 | ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); | 1198 | ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); |
1199 | if (ops == NULL) | 1199 | if (ops == NULL) |
1200 | return ERR_PTR(-ENOMEM); | 1200 | return ERR_PTR(-ENOMEM); |
1201 | 1201 | ||
1202 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; | 1202 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; |
1203 | hook_mask >>= 1, ++hooknum) { | 1203 | hook_mask >>= 1, ++hooknum) { |
1204 | if (!(hook_mask & 1)) | 1204 | if (!(hook_mask & 1)) |
1205 | continue; | 1205 | continue; |
1206 | ops[i].hook = fn; | 1206 | ops[i].hook = fn; |
1207 | ops[i].owner = table->me; | 1207 | ops[i].owner = table->me; |
1208 | ops[i].pf = table->af; | 1208 | ops[i].pf = table->af; |
1209 | ops[i].hooknum = hooknum; | 1209 | ops[i].hooknum = hooknum; |
1210 | ops[i].priority = table->priority; | 1210 | ops[i].priority = table->priority; |
1211 | ++i; | 1211 | ++i; |
1212 | } | 1212 | } |
1213 | 1213 | ||
1214 | ret = nf_register_hooks(ops, num_hooks); | 1214 | ret = nf_register_hooks(ops, num_hooks); |
1215 | if (ret < 0) { | 1215 | if (ret < 0) { |
1216 | kfree(ops); | 1216 | kfree(ops); |
1217 | return ERR_PTR(ret); | 1217 | return ERR_PTR(ret); |
1218 | } | 1218 | } |
1219 | 1219 | ||
1220 | return ops; | 1220 | return ops; |
1221 | } | 1221 | } |
1222 | EXPORT_SYMBOL_GPL(xt_hook_link); | 1222 | EXPORT_SYMBOL_GPL(xt_hook_link); |
1223 | 1223 | ||
1224 | /** | 1224 | /** |
1225 | * xt_hook_unlink - remove hooks for a table | 1225 | * xt_hook_unlink - remove hooks for a table |
1226 | * @ops: nf_hook_ops array as returned by nf_hook_link | 1226 | * @ops: nf_hook_ops array as returned by nf_hook_link |
1227 | * @hook_mask: the very same mask that was passed to nf_hook_link | 1227 | * @hook_mask: the very same mask that was passed to nf_hook_link |
1228 | */ | 1228 | */ |
1229 | void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops) | 1229 | void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops) |
1230 | { | 1230 | { |
1231 | nf_unregister_hooks(ops, hweight32(table->valid_hooks)); | 1231 | nf_unregister_hooks(ops, hweight32(table->valid_hooks)); |
1232 | kfree(ops); | 1232 | kfree(ops); |
1233 | } | 1233 | } |
1234 | EXPORT_SYMBOL_GPL(xt_hook_unlink); | 1234 | EXPORT_SYMBOL_GPL(xt_hook_unlink); |
1235 | 1235 | ||
1236 | int xt_proto_init(struct net *net, u_int8_t af) | 1236 | int xt_proto_init(struct net *net, u_int8_t af) |
1237 | { | 1237 | { |
1238 | #ifdef CONFIG_PROC_FS | 1238 | #ifdef CONFIG_PROC_FS |
1239 | char buf[XT_FUNCTION_MAXNAMELEN]; | 1239 | char buf[XT_FUNCTION_MAXNAMELEN]; |
1240 | struct proc_dir_entry *proc; | 1240 | struct proc_dir_entry *proc; |
1241 | #endif | 1241 | #endif |
1242 | 1242 | ||
1243 | if (af >= ARRAY_SIZE(xt_prefix)) | 1243 | if (af >= ARRAY_SIZE(xt_prefix)) |
1244 | return -EINVAL; | 1244 | return -EINVAL; |
1245 | 1245 | ||
1246 | 1246 | ||
1247 | #ifdef CONFIG_PROC_FS | 1247 | #ifdef CONFIG_PROC_FS |
1248 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1248 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1249 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); | 1249 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
1250 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, | 1250 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, |
1251 | (void *)(unsigned long)af); | 1251 | (void *)(unsigned long)af); |
1252 | if (!proc) | 1252 | if (!proc) |
1253 | goto out; | 1253 | goto out; |
1254 | 1254 | ||
1255 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1255 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1256 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); | 1256 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
1257 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, | 1257 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, |
1258 | (void *)(unsigned long)af); | 1258 | (void *)(unsigned long)af); |
1259 | if (!proc) | 1259 | if (!proc) |
1260 | goto out_remove_tables; | 1260 | goto out_remove_tables; |
1261 | 1261 | ||
1262 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1262 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1263 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); | 1263 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
1264 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, | 1264 | proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, |
1265 | (void *)(unsigned long)af); | 1265 | (void *)(unsigned long)af); |
1266 | if (!proc) | 1266 | if (!proc) |
1267 | goto out_remove_matches; | 1267 | goto out_remove_matches; |
1268 | #endif | 1268 | #endif |
1269 | 1269 | ||
1270 | return 0; | 1270 | return 0; |
1271 | 1271 | ||
1272 | #ifdef CONFIG_PROC_FS | 1272 | #ifdef CONFIG_PROC_FS |
1273 | out_remove_matches: | 1273 | out_remove_matches: |
1274 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1274 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1275 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); | 1275 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
1276 | proc_net_remove(net, buf); | 1276 | proc_net_remove(net, buf); |
1277 | 1277 | ||
1278 | out_remove_tables: | 1278 | out_remove_tables: |
1279 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1279 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1280 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); | 1280 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
1281 | proc_net_remove(net, buf); | 1281 | proc_net_remove(net, buf); |
1282 | out: | 1282 | out: |
1283 | return -1; | 1283 | return -1; |
1284 | #endif | 1284 | #endif |
1285 | } | 1285 | } |
1286 | EXPORT_SYMBOL_GPL(xt_proto_init); | 1286 | EXPORT_SYMBOL_GPL(xt_proto_init); |
1287 | 1287 | ||
1288 | void xt_proto_fini(struct net *net, u_int8_t af) | 1288 | void xt_proto_fini(struct net *net, u_int8_t af) |
1289 | { | 1289 | { |
1290 | #ifdef CONFIG_PROC_FS | 1290 | #ifdef CONFIG_PROC_FS |
1291 | char buf[XT_FUNCTION_MAXNAMELEN]; | 1291 | char buf[XT_FUNCTION_MAXNAMELEN]; |
1292 | 1292 | ||
1293 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1293 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1294 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); | 1294 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
1295 | proc_net_remove(net, buf); | 1295 | proc_net_remove(net, buf); |
1296 | 1296 | ||
1297 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1297 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1298 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); | 1298 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
1299 | proc_net_remove(net, buf); | 1299 | proc_net_remove(net, buf); |
1300 | 1300 | ||
1301 | strlcpy(buf, xt_prefix[af], sizeof(buf)); | 1301 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
1302 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); | 1302 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
1303 | proc_net_remove(net, buf); | 1303 | proc_net_remove(net, buf); |
1304 | #endif /*CONFIG_PROC_FS*/ | 1304 | #endif /*CONFIG_PROC_FS*/ |
1305 | } | 1305 | } |
1306 | EXPORT_SYMBOL_GPL(xt_proto_fini); | 1306 | EXPORT_SYMBOL_GPL(xt_proto_fini); |
1307 | 1307 | ||
1308 | static int __net_init xt_net_init(struct net *net) | 1308 | static int __net_init xt_net_init(struct net *net) |
1309 | { | 1309 | { |
1310 | int i; | 1310 | int i; |
1311 | 1311 | ||
1312 | for (i = 0; i < NFPROTO_NUMPROTO; i++) | 1312 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
1313 | INIT_LIST_HEAD(&net->xt.tables[i]); | 1313 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1314 | return 0; | 1314 | return 0; |
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | static struct pernet_operations xt_net_ops = { | 1317 | static struct pernet_operations xt_net_ops = { |
1318 | .init = xt_net_init, | 1318 | .init = xt_net_init, |
1319 | }; | 1319 | }; |
1320 | 1320 | ||
1321 | static int __init xt_init(void) | 1321 | static int __init xt_init(void) |
1322 | { | 1322 | { |
1323 | unsigned int i; | 1323 | unsigned int i; |
1324 | int rv; | 1324 | int rv; |
1325 | 1325 | ||
1326 | for_each_possible_cpu(i) { | 1326 | for_each_possible_cpu(i) { |
1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); | 1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); |
1328 | spin_lock_init(&lock->lock); | 1328 | |
1329 | seqlock_init(&lock->lock); | ||
1329 | lock->readers = 0; | 1330 | lock->readers = 0; |
1330 | } | 1331 | } |
1331 | 1332 | ||
1332 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); | 1333 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
1333 | if (!xt) | 1334 | if (!xt) |
1334 | return -ENOMEM; | 1335 | return -ENOMEM; |
1335 | 1336 | ||
1336 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { | 1337 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
1337 | mutex_init(&xt[i].mutex); | 1338 | mutex_init(&xt[i].mutex); |
1338 | #ifdef CONFIG_COMPAT | 1339 | #ifdef CONFIG_COMPAT |
1339 | mutex_init(&xt[i].compat_mutex); | 1340 | mutex_init(&xt[i].compat_mutex); |
1340 | xt[i].compat_offsets = NULL; | 1341 | xt[i].compat_offsets = NULL; |
1341 | #endif | 1342 | #endif |
1342 | INIT_LIST_HEAD(&xt[i].target); | 1343 | INIT_LIST_HEAD(&xt[i].target); |
1343 | INIT_LIST_HEAD(&xt[i].match); | 1344 | INIT_LIST_HEAD(&xt[i].match); |
1344 | } | 1345 | } |
1345 | rv = register_pernet_subsys(&xt_net_ops); | 1346 | rv = register_pernet_subsys(&xt_net_ops); |
1346 | if (rv < 0) | 1347 | if (rv < 0) |
1347 | kfree(xt); | 1348 | kfree(xt); |
1348 | return rv; | 1349 | return rv; |
1349 | } | 1350 | } |
1350 | 1351 | ||
1351 | static void __exit xt_fini(void) | 1352 | static void __exit xt_fini(void) |
1352 | { | 1353 | { |
1353 | unregister_pernet_subsys(&xt_net_ops); | 1354 | unregister_pernet_subsys(&xt_net_ops); |
1354 | kfree(xt); | 1355 | kfree(xt); |
1355 | } | 1356 | } |
1356 | 1357 | ||
1357 | module_init(xt_init); | 1358 | module_init(xt_init); |
1358 | module_exit(xt_fini); | 1359 | module_exit(xt_fini); |
1359 | 1360 | ||
1360 | 1361 |