Commit e3eaa9910b380530cfd2c0670fcd3f627674da8a

Authored by Jan Engelhardt
Parent: 2b95efe7f6

netfilter: xtables: generate initial table on-demand

The static initial tables are pretty large, and once the net
namespace has been instantiated they just hang around for nothing.
This commit removes them and instead generates the initial tables on
demand at runtime.

Size shrinks by 7735 bytes (x86_64).

Signed-off-by: Jan Engelhardt <jengelh@medozas.de>

Showing 18 changed files with 141 additions and 334 deletions
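Each of the three headers below gains a single declaration, a *_alloc_initial_table() helper that builds a table's bootstrap ruleset in memory when it is asked for. As a rough illustration of the intended call pattern, here is a sketch of a per-namespace init function using the IPv4 variant; only ipt_alloc_initial_table() and ipt_register_table() come from these headers, while the function itself, the "packet_filter" table and the net->ipv4.iptable_filter pointer are assumed names for the example and are not part of this diff:

/* Sketch only -- illustrative caller, not taken from this commit. */
static int __net_init iptable_filter_net_init(struct net *net)
{
        struct ipt_replace *repl;

        /* Build the initial ruleset at runtime instead of carrying a
         * static struct ipt_replace template in the kernel image.
         */
        repl = ipt_alloc_initial_table(&packet_filter);
        if (repl == NULL)
                return -ENOMEM;

        net->ipv4.iptable_filter =
                ipt_register_table(net, &packet_filter, repl);

        /* The entries have been copied into the registered table. */
        kfree(repl);

        if (IS_ERR(net->ipv4.iptable_filter))
                return PTR_ERR(net->ipv4.iptable_filter);
        return 0;
}

The arpt_ and ip6t_ variants declared further down follow the same shape for arptables and ip6tables.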

include/linux/netfilter_arp/arp_tables.h
/*
 * Format of an ARP firewall descriptor
 *
 * src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in
 * network byte order.
 * flags are stored in host byte order (of course).
 */

#ifndef _ARPTABLES_H
#define _ARPTABLES_H

#ifdef __KERNEL__
#include <linux/if.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#endif
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/netfilter_arp.h>

#include <linux/netfilter/x_tables.h>

#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN

#define ARPT_DEV_ADDR_LEN_MAX 16

struct arpt_devaddr_info {
        char addr[ARPT_DEV_ADDR_LEN_MAX];
        char mask[ARPT_DEV_ADDR_LEN_MAX];
};

/* Yes, Virginia, you have to zero the padding. */
struct arpt_arp {
        /* Source and target IP addr */
        struct in_addr src, tgt;
        /* Mask for src and target IP addr */
        struct in_addr smsk, tmsk;

        /* Device hw address length, src+target device addresses */
        u_int8_t arhln, arhln_mask;
        struct arpt_devaddr_info src_devaddr;
        struct arpt_devaddr_info tgt_devaddr;

        /* ARP operation code. */
        __be16 arpop, arpop_mask;

        /* ARP hardware address and protocol address format. */
        __be16 arhrd, arhrd_mask;
        __be16 arpro, arpro_mask;

        /* The protocol address length is only accepted if it is 4
         * so there is no use in offering a way to do filtering on it.
         */

        char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
        unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];

        /* Flags word */
        u_int8_t flags;
        /* Inverse flags */
        u_int16_t invflags;
};

#define arpt_entry_target xt_entry_target
#define arpt_standard_target xt_standard_target

/* Values for "flag" field in struct arpt_ip (general arp structure).
 * No flags defined yet.
 */
#define ARPT_F_MASK 0x00 /* All possible flag bits mask. */

/* Values for "inv" field in struct arpt_arp. */
#define ARPT_INV_VIA_IN 0x0001 /* Invert the sense of IN IFACE. */
#define ARPT_INV_VIA_OUT 0x0002 /* Invert the sense of OUT IFACE */
#define ARPT_INV_SRCIP 0x0004 /* Invert the sense of SRC IP. */
#define ARPT_INV_TGTIP 0x0008 /* Invert the sense of TGT IP. */
#define ARPT_INV_SRCDEVADDR 0x0010 /* Invert the sense of SRC DEV ADDR. */
#define ARPT_INV_TGTDEVADDR 0x0020 /* Invert the sense of TGT DEV ADDR. */
#define ARPT_INV_ARPOP 0x0040 /* Invert the sense of ARP OP. */
#define ARPT_INV_ARPHRD 0x0080 /* Invert the sense of ARP HRD. */
#define ARPT_INV_ARPPRO 0x0100 /* Invert the sense of ARP PRO. */
#define ARPT_INV_ARPHLN 0x0200 /* Invert the sense of ARP HLN. */
#define ARPT_INV_MASK 0x03FF /* All possible flag bits mask. */

/* This structure defines each of the firewall rules. Consists of 3
   parts which are 1) general ARP header stuff 2) match specific
   stuff 3) the target to perform if the rule matches */
struct arpt_entry
{
        struct arpt_arp arp;

        /* Size of arpt_entry + matches */
        u_int16_t target_offset;
        /* Size of arpt_entry + matches + target */
        u_int16_t next_offset;

        /* Back pointer */
        unsigned int comefrom;

        /* Packet and byte counters. */
        struct xt_counters counters;

        /* The matches (if any), then the target. */
        unsigned char elems[0];
};

/*
 * New IP firewall options for [gs]etsockopt at the RAW IP level.
 * Unlike BSD Linux inherits IP options so you don't have to use a raw
 * socket for this. Instead we check rights in the calls.
 *
 * ATTENTION: check linux/in.h before adding new number here.
 */
#define ARPT_BASE_CTL 96

#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL)
#define ARPT_SO_SET_ADD_COUNTERS (ARPT_BASE_CTL + 1)
#define ARPT_SO_SET_MAX ARPT_SO_SET_ADD_COUNTERS

#define ARPT_SO_GET_INFO (ARPT_BASE_CTL)
#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1)
/* #define ARPT_SO_GET_REVISION_MATCH (APRT_BASE_CTL + 2) */
#define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3)
#define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET)

/* CONTINUE verdict for targets */
#define ARPT_CONTINUE XT_CONTINUE

/* For standard target */
#define ARPT_RETURN XT_RETURN

/* The argument to ARPT_SO_GET_INFO */
struct arpt_getinfo {
        /* Which table: caller fills this in. */
        char name[ARPT_TABLE_MAXNAMELEN];

        /* Kernel fills these in. */
        /* Which hook entry points are valid: bitmask */
        unsigned int valid_hooks;

        /* Hook entry points: one per netfilter hook. */
        unsigned int hook_entry[NF_ARP_NUMHOOKS];

        /* Underflow points. */
        unsigned int underflow[NF_ARP_NUMHOOKS];

        /* Number of entries */
        unsigned int num_entries;

        /* Size of entries. */
        unsigned int size;
};

/* The argument to ARPT_SO_SET_REPLACE. */
struct arpt_replace {
        /* Which table. */
        char name[ARPT_TABLE_MAXNAMELEN];

        /* Which hook entry points are valid: bitmask. You can't
           change this. */
        unsigned int valid_hooks;

        /* Number of entries */
        unsigned int num_entries;

        /* Total size of new entries */
        unsigned int size;

        /* Hook entry points. */
        unsigned int hook_entry[NF_ARP_NUMHOOKS];

        /* Underflow points. */
        unsigned int underflow[NF_ARP_NUMHOOKS];

        /* Information about old entries: */
        /* Number of counters (must be equal to current number of entries). */
        unsigned int num_counters;
        /* The old entries' counters. */
        struct xt_counters __user *counters;

        /* The entries (hang off end: not really an array). */
        struct arpt_entry entries[0];
};

/* The argument to ARPT_SO_ADD_COUNTERS. */
#define arpt_counters_info xt_counters_info
#define arpt_counters xt_counters

/* The argument to ARPT_SO_GET_ENTRIES. */
struct arpt_get_entries {
        /* Which table: user fills this in. */
        char name[ARPT_TABLE_MAXNAMELEN];

        /* User fills this in: total entry size. */
        unsigned int size;

        /* The entries. */
        struct arpt_entry entrytable[0];
};

/* Standard return verdict, or do jump. */
#define ARPT_STANDARD_TARGET XT_STANDARD_TARGET
/* Error verdict. */
#define ARPT_ERROR_TARGET XT_ERROR_TARGET

/* Helper functions */
static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e)
{
        return (void *)e + e->target_offset;
}

/* fn returns 0 to continue iteration */
#define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
        XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args)

/*
 * Main firewall chains definitions and global var's definitions.
 */
#ifdef __KERNEL__

/* Standard entry. */
struct arpt_standard {
        struct arpt_entry entry;
        struct arpt_standard_target target;
};

struct arpt_error_target {
        struct arpt_entry_target target;
        char errorname[ARPT_FUNCTION_MAXNAMELEN];
};

struct arpt_error {
        struct arpt_entry entry;
        struct arpt_error_target target;
};

#define ARPT_ENTRY_INIT(__size) \
{ \
        .target_offset = sizeof(struct arpt_entry), \
        .next_offset = (__size), \
}

#define ARPT_STANDARD_INIT(__verdict) \
{ \
        .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
        .target = XT_TARGET_INIT(ARPT_STANDARD_TARGET, \
                                 sizeof(struct arpt_standard_target)), \
        .target.verdict = -(__verdict) - 1, \
}

#define ARPT_ERROR_INIT \
{ \
        .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \
        .target = XT_TARGET_INIT(ARPT_ERROR_TARGET, \
                                 sizeof(struct arpt_error_target)), \
        .target.errorname = "ERROR", \
}

+extern void *arpt_alloc_initial_table(const struct xt_table *);
extern struct xt_table *arpt_register_table(struct net *net,
                                            const struct xt_table *table,
                                            const struct arpt_replace *repl);
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
                                  unsigned int hook,
                                  const struct net_device *in,
                                  const struct net_device *out,
                                  struct xt_table *table);

#define ARPT_ALIGN(s) XT_ALIGN(s)

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_arpt_entry {
        struct arpt_arp arp;
        u_int16_t target_offset;
        u_int16_t next_offset;
        compat_uint_t comefrom;
        struct compat_xt_counters counters;
        unsigned char elems[0];
};

static inline struct arpt_entry_target *
compat_arpt_get_target(struct compat_arpt_entry *e)
{
        return (void *)e + e->target_offset;
}

#define COMPAT_ARPT_ALIGN(s) COMPAT_XT_ALIGN(s)

/* fn returns 0 to continue iteration */
#define COMPAT_ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
        XT_ENTRY_ITERATE(struct compat_arpt_entry, entries, size, fn, ## args)

#define COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
        XT_ENTRY_ITERATE_CONTINUE(struct compat_arpt_entry, entries, size, n, \
                                  fn, ## args)

#endif /* CONFIG_COMPAT */
#endif /*__KERNEL__*/
#endif /* _ARPTABLES_H */

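For context on why the on-demand construction is cheap, the ARPT_ENTRY_INIT / ARPT_STANDARD_INIT / ARPT_ERROR_INIT macros above already describe a complete bootstrap ruleset: one ACCEPT rule per hook plus a terminating ERROR entry, laid out exactly the way the old static templates were declared (a struct arpt_replace immediately followed by the entries). The sketch below shows one way such an allocator could be written; it is an illustration only, assuming every NF_ARP_NUMHOOKS hook is valid for the table, and is not the helper this commit actually adds:

/* Sketch only -- not the in-tree implementation. */
struct arpt_bootstrap {
        struct arpt_replace repl;
        struct arpt_standard entries[NF_ARP_NUMHOOKS]; /* one rule per hook */
        struct arpt_error term;                        /* table terminator */
};

static void *example_arpt_alloc_initial_table(const struct xt_table *info)
{
        struct arpt_bootstrap *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        unsigned int hook;

        if (tbl == NULL)
                return NULL;

        strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name) - 1);
        tbl->repl.valid_hooks = info->valid_hooks;
        tbl->repl.num_entries = NF_ARP_NUMHOOKS + 1;
        tbl->repl.size = sizeof(tbl->entries) + sizeof(tbl->term);

        for (hook = 0; hook < NF_ARP_NUMHOOKS; ++hook) {
                tbl->repl.hook_entry[hook] = hook * sizeof(struct arpt_standard);
                tbl->repl.underflow[hook]  = hook * sizeof(struct arpt_standard);
                /* Default policy: accept everything on this hook. */
                tbl->entries[hook] = (struct arpt_standard)
                        ARPT_STANDARD_INIT(NF_ACCEPT);
        }
        tbl->term = (struct arpt_error)ARPT_ERROR_INIT;
        return tbl;
}

The caller (see the sketch after the commit summary) frees this template again once the table has been registered.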
include/linux/netfilter_ipv4/ip_tables.h
/*
 * 25-Jul-1998 Major changes to allow for ip chain table
 *
 * 3-Jan-2000 Named tables to allow packet selection for different uses.
 */

/*
 * Format of an IP firewall descriptor
 *
 * src, dst, src_mask, dst_mask are always stored in network byte order.
 * flags are stored in host byte order (of course).
 * Port numbers are stored in HOST byte order.
 */

#ifndef _IPTABLES_H
#define _IPTABLES_H

#ifdef __KERNEL__
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#endif
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/netfilter_ipv4.h>

#include <linux/netfilter/x_tables.h>

#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define ipt_match xt_match
#define ipt_target xt_target
#define ipt_table xt_table
#define ipt_get_revision xt_get_revision

/* Yes, Virginia, you have to zero the padding. */
struct ipt_ip {
        /* Source and destination IP addr */
        struct in_addr src, dst;
        /* Mask for src and dest IP addr */
        struct in_addr smsk, dmsk;
        char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
        unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];

        /* Protocol, 0 = ANY */
        u_int16_t proto;

        /* Flags word */
        u_int8_t flags;
        /* Inverse flags */
        u_int8_t invflags;
};

#define ipt_entry_match xt_entry_match
#define ipt_entry_target xt_entry_target
#define ipt_standard_target xt_standard_target

#define ipt_counters xt_counters

/* Values for "flag" field in struct ipt_ip (general ip structure). */
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
#define IPT_F_MASK 0x03 /* All possible flag bits mask. */

/* Values for "inv" field in struct ipt_ip. */
#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
#define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */
#define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */
#define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */
#define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */
#define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */
#define IPT_INV_PROTO XT_INV_PROTO
#define IPT_INV_MASK 0x7F /* All possible flag bits mask. */

/* This structure defines each of the firewall rules. Consists of 3
   parts which are 1) general IP header stuff 2) match specific
   stuff 3) the target to perform if the rule matches */
struct ipt_entry {
        struct ipt_ip ip;

        /* Mark with fields that we care about. */
        unsigned int nfcache;

        /* Size of ipt_entry + matches */
        u_int16_t target_offset;
        /* Size of ipt_entry + matches + target */
        u_int16_t next_offset;

        /* Back pointer */
        unsigned int comefrom;

        /* Packet and byte counters. */
        struct xt_counters counters;

        /* The matches (if any), then the target. */
        unsigned char elems[0];
};

/*
 * New IP firewall options for [gs]etsockopt at the RAW IP level.
 * Unlike BSD Linux inherits IP options so you don't have to use a raw
 * socket for this. Instead we check rights in the calls.
 *
 * ATTENTION: check linux/in.h before adding new number here.
 */
#define IPT_BASE_CTL 64

#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
#define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1)
#define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS

#define IPT_SO_GET_INFO (IPT_BASE_CTL)
#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
#define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2)
#define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3)
#define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET

#define IPT_CONTINUE XT_CONTINUE
#define IPT_RETURN XT_RETURN

#include <linux/netfilter/xt_tcpudp.h>
#define ipt_udp xt_udp
#define ipt_tcp xt_tcp

#define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT
#define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT
#define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS
#define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION
#define IPT_TCP_INV_MASK XT_TCP_INV_MASK

#define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT
#define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT
#define IPT_UDP_INV_MASK XT_UDP_INV_MASK

/* ICMP matching stuff */
struct ipt_icmp {
        u_int8_t type;          /* type to match */
        u_int8_t code[2];       /* range of code */
        u_int8_t invflags;      /* Inverse flags */
};

/* Values for "inv" field for struct ipt_icmp. */
#define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */

/* The argument to IPT_SO_GET_INFO */
struct ipt_getinfo {
        /* Which table: caller fills this in. */
        char name[IPT_TABLE_MAXNAMELEN];

        /* Kernel fills these in. */
        /* Which hook entry points are valid: bitmask */
        unsigned int valid_hooks;

        /* Hook entry points: one per netfilter hook. */
        unsigned int hook_entry[NF_INET_NUMHOOKS];

        /* Underflow points. */
        unsigned int underflow[NF_INET_NUMHOOKS];

        /* Number of entries */
        unsigned int num_entries;

        /* Size of entries. */
        unsigned int size;
};

/* The argument to IPT_SO_SET_REPLACE. */
struct ipt_replace {
        /* Which table. */
        char name[IPT_TABLE_MAXNAMELEN];

        /* Which hook entry points are valid: bitmask. You can't
           change this. */
        unsigned int valid_hooks;

        /* Number of entries */
        unsigned int num_entries;

        /* Total size of new entries */
        unsigned int size;

        /* Hook entry points. */
        unsigned int hook_entry[NF_INET_NUMHOOKS];

        /* Underflow points. */
        unsigned int underflow[NF_INET_NUMHOOKS];

        /* Information about old entries: */
        /* Number of counters (must be equal to current number of entries). */
        unsigned int num_counters;
        /* The old entries' counters. */
        struct xt_counters __user *counters;

        /* The entries (hang off end: not really an array). */
        struct ipt_entry entries[0];
};

/* The argument to IPT_SO_ADD_COUNTERS. */
#define ipt_counters_info xt_counters_info

/* The argument to IPT_SO_GET_ENTRIES. */
struct ipt_get_entries {
        /* Which table: user fills this in. */
        char name[IPT_TABLE_MAXNAMELEN];

        /* User fills this in: total entry size. */
        unsigned int size;

        /* The entries. */
        struct ipt_entry entrytable[0];
};

/* Standard return verdict, or do jump. */
#define IPT_STANDARD_TARGET XT_STANDARD_TARGET
/* Error verdict. */
#define IPT_ERROR_TARGET XT_ERROR_TARGET

/* Helper functions */
static __inline__ struct ipt_entry_target *
ipt_get_target(struct ipt_entry *e)
{
        return (void *)e + e->target_offset;
}

/* fn returns 0 to continue iteration */
#define IPT_MATCH_ITERATE(e, fn, args...) \
        XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args)

/* fn returns 0 to continue iteration */
#define IPT_ENTRY_ITERATE(entries, size, fn, args...) \
        XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args)

/*
 * Main firewall chains definitions and global var's definitions.
 */
#ifdef __KERNEL__

#include <linux/init.h>
extern void ipt_init(void) __init;

extern struct xt_table *ipt_register_table(struct net *net,
                                           const struct xt_table *table,
                                           const struct ipt_replace *repl);
extern void ipt_unregister_table(struct net *net, struct xt_table *table);

/* Standard entry. */
struct ipt_standard {
        struct ipt_entry entry;
        struct ipt_standard_target target;
};

struct ipt_error_target {
        struct ipt_entry_target target;
        char errorname[IPT_FUNCTION_MAXNAMELEN];
};

struct ipt_error {
        struct ipt_entry entry;
        struct ipt_error_target target;
};

#define IPT_ENTRY_INIT(__size) \
{ \
        .target_offset = sizeof(struct ipt_entry), \
        .next_offset = (__size), \
}

#define IPT_STANDARD_INIT(__verdict) \
{ \
        .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \
        .target = XT_TARGET_INIT(IPT_STANDARD_TARGET, \
                                 sizeof(struct xt_standard_target)), \
        .target.verdict = -(__verdict) - 1, \
}

#define IPT_ERROR_INIT \
{ \
        .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \
        .target = XT_TARGET_INIT(IPT_ERROR_TARGET, \
                                 sizeof(struct ipt_error_target)), \
        .target.errorname = "ERROR", \
}

+extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
                                 unsigned int hook,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 struct xt_table *table);

#define IPT_ALIGN(s) XT_ALIGN(s)

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_ipt_entry {
        struct ipt_ip ip;
        compat_uint_t nfcache;
        u_int16_t target_offset;
        u_int16_t next_offset;
        compat_uint_t comefrom;
        struct compat_xt_counters counters;
        unsigned char elems[0];
};

/* Helper functions */
static inline struct ipt_entry_target *
compat_ipt_get_target(struct compat_ipt_entry *e)
{
        return (void *)e + e->target_offset;
}

#define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s)

/* fn returns 0 to continue iteration */
#define COMPAT_IPT_MATCH_ITERATE(e, fn, args...) \
        XT_MATCH_ITERATE(struct compat_ipt_entry, e, fn, ## args)

/* fn returns 0 to continue iteration */
#define COMPAT_IPT_ENTRY_ITERATE(entries, size, fn, args...) \
        XT_ENTRY_ITERATE(struct compat_ipt_entry, entries, size, fn, ## args)

/* fn returns 0 to continue iteration */
#define COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
        XT_ENTRY_ITERATE_CONTINUE(struct compat_ipt_entry, entries, size, n, \
                                  fn, ## args)

#endif /* CONFIG_COMPAT */
#endif /*__KERNEL__*/
#endif /* _IPTABLES_H */

include/linux/netfilter_ipv6/ip6_tables.h
1 /* 1 /*
2 * 25-Jul-1998 Major changes to allow for ip chain table 2 * 25-Jul-1998 Major changes to allow for ip chain table
3 * 3 *
4 * 3-Jan-2000 Named tables to allow packet selection for different uses. 4 * 3-Jan-2000 Named tables to allow packet selection for different uses.
5 */ 5 */
6 6
7 /* 7 /*
8 * Format of an IP6 firewall descriptor 8 * Format of an IP6 firewall descriptor
9 * 9 *
10 * src, dst, src_mask, dst_mask are always stored in network byte order. 10 * src, dst, src_mask, dst_mask are always stored in network byte order.
11 * flags are stored in host byte order (of course). 11 * flags are stored in host byte order (of course).
12 * Port numbers are stored in HOST byte order. 12 * Port numbers are stored in HOST byte order.
13 */ 13 */
14 14
15 #ifndef _IP6_TABLES_H 15 #ifndef _IP6_TABLES_H
16 #define _IP6_TABLES_H 16 #define _IP6_TABLES_H
17 17
18 #ifdef __KERNEL__ 18 #ifdef __KERNEL__
19 #include <linux/if.h> 19 #include <linux/if.h>
20 #include <linux/in6.h> 20 #include <linux/in6.h>
21 #include <linux/ipv6.h> 21 #include <linux/ipv6.h>
22 #include <linux/skbuff.h> 22 #include <linux/skbuff.h>
23 #endif 23 #endif
24 #include <linux/types.h> 24 #include <linux/types.h>
25 #include <linux/compiler.h> 25 #include <linux/compiler.h>
26 #include <linux/netfilter_ipv6.h> 26 #include <linux/netfilter_ipv6.h>
27 27
28 #include <linux/netfilter/x_tables.h> 28 #include <linux/netfilter/x_tables.h>
29 29
30 #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 30 #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
31 #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 31 #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
32 32
33 #define ip6t_match xt_match 33 #define ip6t_match xt_match
34 #define ip6t_target xt_target 34 #define ip6t_target xt_target
35 #define ip6t_table xt_table 35 #define ip6t_table xt_table
36 #define ip6t_get_revision xt_get_revision 36 #define ip6t_get_revision xt_get_revision
37 37
38 /* Yes, Virginia, you have to zero the padding. */ 38 /* Yes, Virginia, you have to zero the padding. */
39 struct ip6t_ip6 { 39 struct ip6t_ip6 {
40 /* Source and destination IP6 addr */ 40 /* Source and destination IP6 addr */
41 struct in6_addr src, dst; 41 struct in6_addr src, dst;
42 /* Mask for src and dest IP6 addr */ 42 /* Mask for src and dest IP6 addr */
43 struct in6_addr smsk, dmsk; 43 struct in6_addr smsk, dmsk;
44 char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 44 char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
45 unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 45 unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];
46 46
47 /* Upper protocol number 47 /* Upper protocol number
48 * - The allowed value is 0 (any) or protocol number of last parsable 48 * - The allowed value is 0 (any) or protocol number of last parsable
49 * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or 49 * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or
50 * the non IPv6 extension headers. 50 * the non IPv6 extension headers.
51 * - The protocol numbers of IPv6 extension headers except of ESP and 51 * - The protocol numbers of IPv6 extension headers except of ESP and
52 * MH do not match any packets. 52 * MH do not match any packets.
53 * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol. 53 * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol.
54 */ 54 */
55 u_int16_t proto; 55 u_int16_t proto;
56 /* TOS to match iff flags & IP6T_F_TOS */ 56 /* TOS to match iff flags & IP6T_F_TOS */
57 u_int8_t tos; 57 u_int8_t tos;
58 58
59 /* Flags word */ 59 /* Flags word */
60 u_int8_t flags; 60 u_int8_t flags;
61 /* Inverse flags */ 61 /* Inverse flags */
62 u_int8_t invflags; 62 u_int8_t invflags;
63 }; 63 };
64 64
65 #define ip6t_entry_match xt_entry_match 65 #define ip6t_entry_match xt_entry_match
66 #define ip6t_entry_target xt_entry_target 66 #define ip6t_entry_target xt_entry_target
67 #define ip6t_standard_target xt_standard_target 67 #define ip6t_standard_target xt_standard_target
68 68
69 #define ip6t_counters xt_counters 69 #define ip6t_counters xt_counters
70 70
71 /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ 71 /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */
72 #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper 72 #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper
73 protocols */ 73 protocols */
74 #define IP6T_F_TOS 0x02 /* Match the TOS. */ 74 #define IP6T_F_TOS 0x02 /* Match the TOS. */
75 #define IP6T_F_GOTO 0x04 /* Set if jump is a goto */ 75 #define IP6T_F_GOTO 0x04 /* Set if jump is a goto */
76 #define IP6T_F_MASK 0x07 /* All possible flag bits mask. */ 76 #define IP6T_F_MASK 0x07 /* All possible flag bits mask. */
77 77
78 /* Values for "inv" field in struct ip6t_ip6. */ 78 /* Values for "inv" field in struct ip6t_ip6. */
79 #define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 79 #define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
80 #define IP6T_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ 80 #define IP6T_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */
81 #define IP6T_INV_TOS 0x04 /* Invert the sense of TOS. */ 81 #define IP6T_INV_TOS 0x04 /* Invert the sense of TOS. */
82 #define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ 82 #define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */
83 #define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ 83 #define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */
84 #define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. */ 84 #define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. */
85 #define IP6T_INV_PROTO XT_INV_PROTO 85 #define IP6T_INV_PROTO XT_INV_PROTO
86 #define IP6T_INV_MASK 0x7F /* All possible flag bits mask. */ 86 #define IP6T_INV_MASK 0x7F /* All possible flag bits mask. */
87 87
88 /* This structure defines each of the firewall rules. Consists of 3 88 /* This structure defines each of the firewall rules. Consists of 3
89 parts which are 1) general IP header stuff 2) match specific 89 parts which are 1) general IP header stuff 2) match specific
90 stuff 3) the target to perform if the rule matches */ 90 stuff 3) the target to perform if the rule matches */
91 struct ip6t_entry { 91 struct ip6t_entry {
92 struct ip6t_ip6 ipv6; 92 struct ip6t_ip6 ipv6;
93 93
94 /* Mark with fields that we care about. */ 94 /* Mark with fields that we care about. */
95 unsigned int nfcache; 95 unsigned int nfcache;
96 96
97 /* Size of ipt_entry + matches */ 97 /* Size of ipt_entry + matches */
98 u_int16_t target_offset; 98 u_int16_t target_offset;
99 /* Size of ipt_entry + matches + target */ 99 /* Size of ipt_entry + matches + target */
100 u_int16_t next_offset; 100 u_int16_t next_offset;
101 101
102 /* Back pointer */ 102 /* Back pointer */
103 unsigned int comefrom; 103 unsigned int comefrom;
104 104
105 /* Packet and byte counters. */ 105 /* Packet and byte counters. */
106 struct xt_counters counters; 106 struct xt_counters counters;
107 107
108 /* The matches (if any), then the target. */ 108 /* The matches (if any), then the target. */
109 unsigned char elems[0]; 109 unsigned char elems[0];
110 }; 110 };
111 111
112 /* Standard entry */ 112 /* Standard entry */
113 struct ip6t_standard { 113 struct ip6t_standard {
114 struct ip6t_entry entry; 114 struct ip6t_entry entry;
115 struct ip6t_standard_target target; 115 struct ip6t_standard_target target;
116 }; 116 };
117 117
118 struct ip6t_error_target { 118 struct ip6t_error_target {
119 struct ip6t_entry_target target; 119 struct ip6t_entry_target target;
120 char errorname[IP6T_FUNCTION_MAXNAMELEN]; 120 char errorname[IP6T_FUNCTION_MAXNAMELEN];
121 }; 121 };
122 122
123 struct ip6t_error { 123 struct ip6t_error {
124 struct ip6t_entry entry; 124 struct ip6t_entry entry;
125 struct ip6t_error_target target; 125 struct ip6t_error_target target;
126 }; 126 };
127 127
128 #define IP6T_ENTRY_INIT(__size) \ 128 #define IP6T_ENTRY_INIT(__size) \
129 { \ 129 { \
130 .target_offset = sizeof(struct ip6t_entry), \ 130 .target_offset = sizeof(struct ip6t_entry), \
131 .next_offset = (__size), \ 131 .next_offset = (__size), \
132 } 132 }
133 133
134 #define IP6T_STANDARD_INIT(__verdict) \ 134 #define IP6T_STANDARD_INIT(__verdict) \
135 { \ 135 { \
136 .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ 136 .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \
137 .target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \ 137 .target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \
138 sizeof(struct ip6t_standard_target)), \ 138 sizeof(struct ip6t_standard_target)), \
139 .target.verdict = -(__verdict) - 1, \ 139 .target.verdict = -(__verdict) - 1, \
140 } 140 }
141 141
142 #define IP6T_ERROR_INIT \ 142 #define IP6T_ERROR_INIT \
143 { \ 143 { \
144 .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ 144 .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \
145 .target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \ 145 .target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \
146 sizeof(struct ip6t_error_target)), \ 146 sizeof(struct ip6t_error_target)), \
147 .target.errorname = "ERROR", \ 147 .target.errorname = "ERROR", \
148 } 148 }
149 149
150 /* 150 /*
151 * New IP firewall options for [gs]etsockopt at the RAW IP level. 151 * New IP firewall options for [gs]etsockopt at the RAW IP level.
152 * Unlike BSD Linux inherits IP options so you don't have to use 152 * Unlike BSD Linux inherits IP options so you don't have to use
153 * a raw socket for this. Instead we check rights in the calls. 153 * a raw socket for this. Instead we check rights in the calls.
154 * 154 *
155 * ATTENTION: check linux/in6.h before adding new number here. 155 * ATTENTION: check linux/in6.h before adding new number here.
156 */ 156 */
157 #define IP6T_BASE_CTL 64 157 #define IP6T_BASE_CTL 64
158 158
159 #define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL) 159 #define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL)
160 #define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1) 160 #define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1)
161 #define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS 161 #define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS
162 162
163 #define IP6T_SO_GET_INFO (IP6T_BASE_CTL) 163 #define IP6T_SO_GET_INFO (IP6T_BASE_CTL)
164 #define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1) 164 #define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1)
165 #define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 4) 165 #define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 4)
166 #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) 166 #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5)
167 #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET 167 #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET
168 168
169 /* CONTINUE verdict for targets */ 169 /* CONTINUE verdict for targets */
170 #define IP6T_CONTINUE XT_CONTINUE 170 #define IP6T_CONTINUE XT_CONTINUE
171 171
172 /* For standard target */ 172 /* For standard target */
173 #define IP6T_RETURN XT_RETURN 173 #define IP6T_RETURN XT_RETURN
174 174
175 /* TCP/UDP matching stuff */ 175 /* TCP/UDP matching stuff */
176 #include <linux/netfilter/xt_tcpudp.h> 176 #include <linux/netfilter/xt_tcpudp.h>
177 177
178 #define ip6t_tcp xt_tcp 178 #define ip6t_tcp xt_tcp
179 #define ip6t_udp xt_udp 179 #define ip6t_udp xt_udp
180 180
181 /* Values for "inv" field in struct ipt_tcp. */ 181 /* Values for "inv" field in struct ipt_tcp. */
182 #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT 182 #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT
183 #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT 183 #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT
184 #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS 184 #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS
185 #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION 185 #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION
186 #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK 186 #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK
187 187
188 /* Values for "invflags" field in struct ipt_udp. */ 188 /* Values for "invflags" field in struct ipt_udp. */
189 #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT 189 #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT
190 #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT 190 #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT
191 #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK 191 #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK
192 192
193 /* ICMP matching stuff */ 193 /* ICMP matching stuff */
194 struct ip6t_icmp { 194 struct ip6t_icmp {
195 u_int8_t type; /* type to match */ 195 u_int8_t type; /* type to match */
196 u_int8_t code[2]; /* range of code */ 196 u_int8_t code[2]; /* range of code */
197 u_int8_t invflags; /* Inverse flags */ 197 u_int8_t invflags; /* Inverse flags */
198 }; 198 };
199 199
200 /* Values for "inv" field for struct ipt_icmp. */ 200 /* Values for "inv" field for struct ipt_icmp. */
201 #define IP6T_ICMP_INV 0x01 /* Invert the sense of type/code test */ 201 #define IP6T_ICMP_INV 0x01 /* Invert the sense of type/code test */
202 202
203 /* The argument to IP6T_SO_GET_INFO */ 203 /* The argument to IP6T_SO_GET_INFO */
204 struct ip6t_getinfo { 204 struct ip6t_getinfo {
205 /* Which table: caller fills this in. */ 205 /* Which table: caller fills this in. */
206 char name[IP6T_TABLE_MAXNAMELEN]; 206 char name[IP6T_TABLE_MAXNAMELEN];
207 207
208 /* Kernel fills these in. */ 208 /* Kernel fills these in. */
209 /* Which hook entry points are valid: bitmask */ 209 /* Which hook entry points are valid: bitmask */
210 unsigned int valid_hooks; 210 unsigned int valid_hooks;
211 211
212 /* Hook entry points: one per netfilter hook. */ 212 /* Hook entry points: one per netfilter hook. */
213 unsigned int hook_entry[NF_INET_NUMHOOKS]; 213 unsigned int hook_entry[NF_INET_NUMHOOKS];
214 214
215 /* Underflow points. */ 215 /* Underflow points. */
216 unsigned int underflow[NF_INET_NUMHOOKS]; 216 unsigned int underflow[NF_INET_NUMHOOKS];
217 217
218 /* Number of entries */ 218 /* Number of entries */
219 unsigned int num_entries; 219 unsigned int num_entries;
220 220
221 /* Size of entries. */ 221 /* Size of entries. */
222 unsigned int size; 222 unsigned int size;
223 }; 223 };
224 224
225 /* The argument to IP6T_SO_SET_REPLACE. */ 225 /* The argument to IP6T_SO_SET_REPLACE. */
226 struct ip6t_replace { 226 struct ip6t_replace {
227 /* Which table. */ 227 /* Which table. */
228 char name[IP6T_TABLE_MAXNAMELEN]; 228 char name[IP6T_TABLE_MAXNAMELEN];
229 229
230 /* Which hook entry points are valid: bitmask. You can't 230 /* Which hook entry points are valid: bitmask. You can't
231 change this. */ 231 change this. */
232 unsigned int valid_hooks; 232 unsigned int valid_hooks;
233 233
234 /* Number of entries */ 234 /* Number of entries */
235 unsigned int num_entries; 235 unsigned int num_entries;
236 236
237 /* Total size of new entries */ 237 /* Total size of new entries */
238 unsigned int size; 238 unsigned int size;
239 239
240 /* Hook entry points. */ 240 /* Hook entry points. */
241 unsigned int hook_entry[NF_INET_NUMHOOKS]; 241 unsigned int hook_entry[NF_INET_NUMHOOKS];
242 242
243 /* Underflow points. */ 243 /* Underflow points. */
244 unsigned int underflow[NF_INET_NUMHOOKS]; 244 unsigned int underflow[NF_INET_NUMHOOKS];
245 245
246 /* Information about old entries: */ 246 /* Information about old entries: */
247 /* Number of counters (must be equal to current number of entries). */ 247 /* Number of counters (must be equal to current number of entries). */
248 unsigned int num_counters; 248 unsigned int num_counters;
249 /* The old entries' counters. */ 249 /* The old entries' counters. */
250 struct xt_counters __user *counters; 250 struct xt_counters __user *counters;
251 251
252 /* The entries (hang off end: not really an array). */ 252 /* The entries (hang off end: not really an array). */
253 struct ip6t_entry entries[0]; 253 struct ip6t_entry entries[0];
254 }; 254 };
255 255
256 /* The argument to IP6T_SO_ADD_COUNTERS. */ 256 /* The argument to IP6T_SO_ADD_COUNTERS. */
257 #define ip6t_counters_info xt_counters_info 257 #define ip6t_counters_info xt_counters_info
258 258
259 /* The argument to IP6T_SO_GET_ENTRIES. */ 259 /* The argument to IP6T_SO_GET_ENTRIES. */
260 struct ip6t_get_entries { 260 struct ip6t_get_entries {
261 /* Which table: user fills this in. */ 261 /* Which table: user fills this in. */
262 char name[IP6T_TABLE_MAXNAMELEN]; 262 char name[IP6T_TABLE_MAXNAMELEN];
263 263
264 /* User fills this in: total entry size. */ 264 /* User fills this in: total entry size. */
265 unsigned int size; 265 unsigned int size;
266 266
267 /* The entries. */ 267 /* The entries. */
268 struct ip6t_entry entrytable[0]; 268 struct ip6t_entry entrytable[0];
269 }; 269 };
270 270
271 /* Standard return verdict, or do jump. */ 271 /* Standard return verdict, or do jump. */
272 #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET 272 #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET
273 /* Error verdict. */ 273 /* Error verdict. */
274 #define IP6T_ERROR_TARGET XT_ERROR_TARGET 274 #define IP6T_ERROR_TARGET XT_ERROR_TARGET
275 275
276 /* Helper functions */ 276 /* Helper functions */
277 static __inline__ struct ip6t_entry_target * 277 static __inline__ struct ip6t_entry_target *
278 ip6t_get_target(struct ip6t_entry *e) 278 ip6t_get_target(struct ip6t_entry *e)
279 { 279 {
280 return (void *)e + e->target_offset; 280 return (void *)e + e->target_offset;
281 } 281 }
282 282
283 /* fn returns 0 to continue iteration */ 283 /* fn returns 0 to continue iteration */
284 #define IP6T_MATCH_ITERATE(e, fn, args...) \ 284 #define IP6T_MATCH_ITERATE(e, fn, args...) \
285 XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) 285 XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args)
286 286
287 /* fn returns 0 to continue iteration */ 287 /* fn returns 0 to continue iteration */
288 #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ 288 #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \
289 XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) 289 XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args)
290 290
291 /* 291 /*
292 * Main firewall chain definitions and global variable definitions. 292 * Main firewall chain definitions and global variable definitions.
293 */ 293 */
294 294
295 #ifdef __KERNEL__ 295 #ifdef __KERNEL__
296 296
297 #include <linux/init.h> 297 #include <linux/init.h>
298 extern void ip6t_init(void) __init; 298 extern void ip6t_init(void) __init;
299 299
300 extern void *ip6t_alloc_initial_table(const struct xt_table *);
300 extern struct xt_table *ip6t_register_table(struct net *net, 301 extern struct xt_table *ip6t_register_table(struct net *net,
301 const struct xt_table *table, 302 const struct xt_table *table,
302 const struct ip6t_replace *repl); 303 const struct ip6t_replace *repl);
303 extern void ip6t_unregister_table(struct net *net, struct xt_table *table); 304 extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
304 extern unsigned int ip6t_do_table(struct sk_buff *skb, 305 extern unsigned int ip6t_do_table(struct sk_buff *skb,
305 unsigned int hook, 306 unsigned int hook,
306 const struct net_device *in, 307 const struct net_device *in,
307 const struct net_device *out, 308 const struct net_device *out,
308 struct xt_table *table); 309 struct xt_table *table);
309 310
310 /* Check for an extension */ 311 /* Check for an extension */
311 extern int ip6t_ext_hdr(u8 nexthdr); 312 extern int ip6t_ext_hdr(u8 nexthdr);
312 /* find specified header and get offset to it */ 313 /* find specified header and get offset to it */
313 extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 314 extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
314 int target, unsigned short *fragoff); 315 int target, unsigned short *fragoff);
315 316
316 extern int ip6_masked_addrcmp(const struct in6_addr *addr1, 317 extern int ip6_masked_addrcmp(const struct in6_addr *addr1,
317 const struct in6_addr *mask, 318 const struct in6_addr *mask,
318 const struct in6_addr *addr2); 319 const struct in6_addr *addr2);
319 320
320 #define IP6T_ALIGN(s) XT_ALIGN(s) 321 #define IP6T_ALIGN(s) XT_ALIGN(s)
321 322
322 #ifdef CONFIG_COMPAT 323 #ifdef CONFIG_COMPAT
323 #include <net/compat.h> 324 #include <net/compat.h>
324 325
325 struct compat_ip6t_entry { 326 struct compat_ip6t_entry {
326 struct ip6t_ip6 ipv6; 327 struct ip6t_ip6 ipv6;
327 compat_uint_t nfcache; 328 compat_uint_t nfcache;
328 u_int16_t target_offset; 329 u_int16_t target_offset;
329 u_int16_t next_offset; 330 u_int16_t next_offset;
330 compat_uint_t comefrom; 331 compat_uint_t comefrom;
331 struct compat_xt_counters counters; 332 struct compat_xt_counters counters;
332 unsigned char elems[0]; 333 unsigned char elems[0];
333 }; 334 };
334 335
335 static inline struct ip6t_entry_target * 336 static inline struct ip6t_entry_target *
336 compat_ip6t_get_target(struct compat_ip6t_entry *e) 337 compat_ip6t_get_target(struct compat_ip6t_entry *e)
337 { 338 {
338 return (void *)e + e->target_offset; 339 return (void *)e + e->target_offset;
339 } 340 }
340 341
341 #define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s) 342 #define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s)
342 343
343 /* fn returns 0 to continue iteration */ 344 /* fn returns 0 to continue iteration */
344 #define COMPAT_IP6T_MATCH_ITERATE(e, fn, args...) \ 345 #define COMPAT_IP6T_MATCH_ITERATE(e, fn, args...) \
345 XT_MATCH_ITERATE(struct compat_ip6t_entry, e, fn, ## args) 346 XT_MATCH_ITERATE(struct compat_ip6t_entry, e, fn, ## args)
346 347
347 /* fn returns 0 to continue iteration */ 348 /* fn returns 0 to continue iteration */
348 #define COMPAT_IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ 349 #define COMPAT_IP6T_ENTRY_ITERATE(entries, size, fn, args...) \
349 XT_ENTRY_ITERATE(struct compat_ip6t_entry, entries, size, fn, ## args) 350 XT_ENTRY_ITERATE(struct compat_ip6t_entry, entries, size, fn, ## args)
350 351
351 #define COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \ 352 #define COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entries, size, n, fn, args...) \
352 XT_ENTRY_ITERATE_CONTINUE(struct compat_ip6t_entry, entries, size, n, \ 353 XT_ENTRY_ITERATE_CONTINUE(struct compat_ip6t_entry, entries, size, n, \
353 fn, ## args) 354 fn, ## args)
354 355
355 #endif /* CONFIG_COMPAT */ 356 #endif /* CONFIG_COMPAT */
356 #endif /*__KERNEL__*/ 357 #endif /*__KERNEL__*/
357 #endif /* _IP6_TABLES_H */ 358 #endif /* _IP6_TABLES_H */
358 359
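The only substantive change in this header is the ip6t_alloc_initial_table() declaration added just before ip6t_register_table(). The intended call sequence is: build the default ruleset at runtime, hand it to ip6t_register_table(), then free the blob. A minimal sketch follows; the xt_table instance name (packet_filter), the pernet init function name, and the use of kfree() for the generated blob are assumptions for illustration, not text quoted from this commit.

	static int __net_init ip6table_filter_net_init(struct net *net)
	{
		struct ip6t_replace *repl;

		repl = ip6t_alloc_initial_table(&packet_filter);
		if (repl == NULL)
			return -ENOMEM;
		/* the generated default policy could be adjusted here */
		net->ipv6.ip6table_filter =
			ip6t_register_table(net, &packet_filter, repl);
		kfree(repl);	/* blob is only needed while registering */
		if (IS_ERR(net->ipv6.ip6table_filter))
			return PTR_ERR(net->ipv6.ip6table_filter);
		return 0;
	}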
net/ipv4/netfilter/arp_tables.c
1 /* 1 /*
2 * Packet matching code for ARP packets. 2 * Packet matching code for ARP packets.
3 * 3 *
4 * Based heavily, if not almost entirely, upon ip_tables.c framework. 4 * Based heavily, if not almost entirely, upon ip_tables.c framework.
5 * 5 *
6 * Some ARP specific bits are: 6 * Some ARP specific bits are:
7 * 7 *
8 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 8 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
9 * 9 *
10 */ 10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/skbuff.h> 13 #include <linux/skbuff.h>
14 #include <linux/netdevice.h> 14 #include <linux/netdevice.h>
15 #include <linux/capability.h> 15 #include <linux/capability.h>
16 #include <linux/if_arp.h> 16 #include <linux/if_arp.h>
17 #include <linux/kmod.h> 17 #include <linux/kmod.h>
18 #include <linux/vmalloc.h> 18 #include <linux/vmalloc.h>
19 #include <linux/proc_fs.h> 19 #include <linux/proc_fs.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/mutex.h> 22 #include <linux/mutex.h>
23 #include <linux/err.h> 23 #include <linux/err.h>
24 #include <net/compat.h> 24 #include <net/compat.h>
25 #include <net/sock.h> 25 #include <net/sock.h>
26 #include <asm/uaccess.h> 26 #include <asm/uaccess.h>
27 27
28 #include <linux/netfilter/x_tables.h> 28 #include <linux/netfilter/x_tables.h>
29 #include <linux/netfilter_arp/arp_tables.h> 29 #include <linux/netfilter_arp/arp_tables.h>
30 #include "../../netfilter/xt_repldata.h"
30 31
31 MODULE_LICENSE("GPL"); 32 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 33 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
33 MODULE_DESCRIPTION("arptables core"); 34 MODULE_DESCRIPTION("arptables core");
34 35
35 /*#define DEBUG_ARP_TABLES*/ 36 /*#define DEBUG_ARP_TABLES*/
36 /*#define DEBUG_ARP_TABLES_USER*/ 37 /*#define DEBUG_ARP_TABLES_USER*/
37 38
38 #ifdef DEBUG_ARP_TABLES 39 #ifdef DEBUG_ARP_TABLES
39 #define dprintf(format, args...) printk(format , ## args) 40 #define dprintf(format, args...) printk(format , ## args)
40 #else 41 #else
41 #define dprintf(format, args...) 42 #define dprintf(format, args...)
42 #endif 43 #endif
43 44
44 #ifdef DEBUG_ARP_TABLES_USER 45 #ifdef DEBUG_ARP_TABLES_USER
45 #define duprintf(format, args...) printk(format , ## args) 46 #define duprintf(format, args...) printk(format , ## args)
46 #else 47 #else
47 #define duprintf(format, args...) 48 #define duprintf(format, args...)
48 #endif 49 #endif
49 50
50 #ifdef CONFIG_NETFILTER_DEBUG 51 #ifdef CONFIG_NETFILTER_DEBUG
51 #define ARP_NF_ASSERT(x) \ 52 #define ARP_NF_ASSERT(x) \
52 do { \ 53 do { \
53 if (!(x)) \ 54 if (!(x)) \
54 printk("ARP_NF_ASSERT: %s:%s:%u\n", \ 55 printk("ARP_NF_ASSERT: %s:%s:%u\n", \
55 __func__, __FILE__, __LINE__); \ 56 __func__, __FILE__, __LINE__); \
56 } while(0) 57 } while(0)
57 #else 58 #else
58 #define ARP_NF_ASSERT(x) 59 #define ARP_NF_ASSERT(x)
59 #endif 60 #endif
61
62 void *arpt_alloc_initial_table(const struct xt_table *info)
63 {
64 return xt_alloc_initial_table(arpt, ARPT);
65 }
66 EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
60 67
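arpt_alloc_initial_table() is the runtime replacement for the removed static initial table: it expands xt_alloc_initial_table(arpt, ARPT) from the newly included xt_repldata.h, which kzalloc()s a replace blob sized for the table's valid hooks. Roughly, the returned blob has the shape sketched below; this is an illustrative assumption (the three-hook case corresponds to arptable_filter), the real layout comes from the macro rather than a named struct.

	struct arpt_initial_blob {
		struct arpt_replace  repl;       /* name, valid_hooks, sizes, hook/underflow offsets */
		struct arpt_standard entries[3]; /* one unconditional ACCEPT policy rule per hook */
		struct arpt_error    term;       /* trailing ERROR rule terminating the ruleset */
	};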
61 static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, 68 static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
62 const char *hdr_addr, int len) 69 const char *hdr_addr, int len)
63 { 70 {
64 int i, ret; 71 int i, ret;
65 72
66 if (len > ARPT_DEV_ADDR_LEN_MAX) 73 if (len > ARPT_DEV_ADDR_LEN_MAX)
67 len = ARPT_DEV_ADDR_LEN_MAX; 74 len = ARPT_DEV_ADDR_LEN_MAX;
68 75
69 ret = 0; 76 ret = 0;
70 for (i = 0; i < len; i++) 77 for (i = 0; i < len; i++)
71 ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; 78 ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
72 79
73 return (ret != 0); 80 return (ret != 0);
74 } 81 }
75 82
76 /* 83 /*
77 * Unfortunately, _b and _mask are not aligned to an int (or long int) 84 * Unfortunately, _b and _mask are not aligned to an int (or long int)
78 * Some arches don't care; unrolling the loop is a win on them. 85 * Some arches don't care; unrolling the loop is a win on them.
79 * For other arches, we only have 16-bit alignment. 86 * For other arches, we only have 16-bit alignment.
80 */ 87 */
81 static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) 88 static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
82 { 89 {
83 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 90 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
84 unsigned long ret = ifname_compare_aligned(_a, _b, _mask); 91 unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
85 #else 92 #else
86 unsigned long ret = 0; 93 unsigned long ret = 0;
87 const u16 *a = (const u16 *)_a; 94 const u16 *a = (const u16 *)_a;
88 const u16 *b = (const u16 *)_b; 95 const u16 *b = (const u16 *)_b;
89 const u16 *mask = (const u16 *)_mask; 96 const u16 *mask = (const u16 *)_mask;
90 int i; 97 int i;
91 98
92 for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) 99 for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
93 ret |= (a[i] ^ b[i]) & mask[i]; 100 ret |= (a[i] ^ b[i]) & mask[i];
94 #endif 101 #endif
95 return ret; 102 return ret;
96 } 103 }
97 104
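ifname_compare() folds the whole IFNAMSIZ buffer in 16-bit chunks, XORing the rule's name against the packet's device name and ANDing with the per-byte mask, so a zero return means "matches". The mask is what makes interface wildcards work: only the bytes the rule cares about carry 0xff. A hedged example, assuming the usual arptables userspace encoding of a trailing '+' wildcard:

	/* "-i eth+": only the literal prefix is masked in; the rest is don't-care. */
	static const char iface[IFNAMSIZ] = "eth";
	static const char mask[IFNAMSIZ]  = "\xff\xff\xff";	/* remaining bytes stay 0 */

	/* ifname_compare("eth0",  iface, mask) == 0  -> rule matches        */
	/* ifname_compare("wlan0", iface, mask) != 0  -> rule does not match */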
98 /* Returns whether packet matches rule or not. */ 105 /* Returns whether packet matches rule or not. */
99 static inline int arp_packet_match(const struct arphdr *arphdr, 106 static inline int arp_packet_match(const struct arphdr *arphdr,
100 struct net_device *dev, 107 struct net_device *dev,
101 const char *indev, 108 const char *indev,
102 const char *outdev, 109 const char *outdev,
103 const struct arpt_arp *arpinfo) 110 const struct arpt_arp *arpinfo)
104 { 111 {
105 const char *arpptr = (char *)(arphdr + 1); 112 const char *arpptr = (char *)(arphdr + 1);
106 const char *src_devaddr, *tgt_devaddr; 113 const char *src_devaddr, *tgt_devaddr;
107 __be32 src_ipaddr, tgt_ipaddr; 114 __be32 src_ipaddr, tgt_ipaddr;
108 long ret; 115 long ret;
109 116
110 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) 117 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
111 118
112 if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, 119 if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
113 ARPT_INV_ARPOP)) { 120 ARPT_INV_ARPOP)) {
114 dprintf("ARP operation field mismatch.\n"); 121 dprintf("ARP operation field mismatch.\n");
115 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", 122 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
116 arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); 123 arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
117 return 0; 124 return 0;
118 } 125 }
119 126
120 if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, 127 if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
121 ARPT_INV_ARPHRD)) { 128 ARPT_INV_ARPHRD)) {
122 dprintf("ARP hardware address format mismatch.\n"); 129 dprintf("ARP hardware address format mismatch.\n");
123 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", 130 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
124 arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); 131 arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
125 return 0; 132 return 0;
126 } 133 }
127 134
128 if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, 135 if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
129 ARPT_INV_ARPPRO)) { 136 ARPT_INV_ARPPRO)) {
130 dprintf("ARP protocol address format mismatch.\n"); 137 dprintf("ARP protocol address format mismatch.\n");
131 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", 138 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
132 arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); 139 arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
133 return 0; 140 return 0;
134 } 141 }
135 142
136 if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, 143 if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
137 ARPT_INV_ARPHLN)) { 144 ARPT_INV_ARPHLN)) {
138 dprintf("ARP hardware address length mismatch.\n"); 145 dprintf("ARP hardware address length mismatch.\n");
139 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", 146 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
140 arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); 147 arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
141 return 0; 148 return 0;
142 } 149 }
143 150
144 src_devaddr = arpptr; 151 src_devaddr = arpptr;
145 arpptr += dev->addr_len; 152 arpptr += dev->addr_len;
146 memcpy(&src_ipaddr, arpptr, sizeof(u32)); 153 memcpy(&src_ipaddr, arpptr, sizeof(u32));
147 arpptr += sizeof(u32); 154 arpptr += sizeof(u32);
148 tgt_devaddr = arpptr; 155 tgt_devaddr = arpptr;
149 arpptr += dev->addr_len; 156 arpptr += dev->addr_len;
150 memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); 157 memcpy(&tgt_ipaddr, arpptr, sizeof(u32));
151 158
152 if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), 159 if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
153 ARPT_INV_SRCDEVADDR) || 160 ARPT_INV_SRCDEVADDR) ||
154 FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), 161 FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
155 ARPT_INV_TGTDEVADDR)) { 162 ARPT_INV_TGTDEVADDR)) {
156 dprintf("Source or target device address mismatch.\n"); 163 dprintf("Source or target device address mismatch.\n");
157 164
158 return 0; 165 return 0;
159 } 166 }
160 167
161 if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, 168 if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
162 ARPT_INV_SRCIP) || 169 ARPT_INV_SRCIP) ||
163 FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), 170 FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
164 ARPT_INV_TGTIP)) { 171 ARPT_INV_TGTIP)) {
165 dprintf("Source or target IP address mismatch.\n"); 172 dprintf("Source or target IP address mismatch.\n");
166 173
167 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", 174 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
168 &src_ipaddr, 175 &src_ipaddr,
169 &arpinfo->smsk.s_addr, 176 &arpinfo->smsk.s_addr,
170 &arpinfo->src.s_addr, 177 &arpinfo->src.s_addr,
171 arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); 178 arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
172 dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", 179 dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
173 &tgt_ipaddr, 180 &tgt_ipaddr,
174 &arpinfo->tmsk.s_addr, 181 &arpinfo->tmsk.s_addr,
175 &arpinfo->tgt.s_addr, 182 &arpinfo->tgt.s_addr,
176 arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); 183 arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
177 return 0; 184 return 0;
178 } 185 }
179 186
180 /* Look for ifname matches. */ 187 /* Look for ifname matches. */
181 ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); 188 ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
182 189
183 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { 190 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
184 dprintf("VIA in mismatch (%s vs %s).%s\n", 191 dprintf("VIA in mismatch (%s vs %s).%s\n",
185 indev, arpinfo->iniface, 192 indev, arpinfo->iniface,
186 arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":""); 193 arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
187 return 0; 194 return 0;
188 } 195 }
189 196
190 ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); 197 ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
191 198
192 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { 199 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
193 dprintf("VIA out mismatch (%s vs %s).%s\n", 200 dprintf("VIA out mismatch (%s vs %s).%s\n",
194 outdev, arpinfo->outiface, 201 outdev, arpinfo->outiface,
195 arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":""); 202 arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
196 return 0; 203 return 0;
197 } 204 }
198 205
199 return 1; 206 return 1;
200 #undef FWINV 207 #undef FWINV
201 } 208 }
202 209
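The FWINV() macro used throughout arp_packet_match() is the standard xtables inversion idiom: the raw mismatch test is XORed with the corresponding ARPT_INV_* flag, so a '!' in the rule flips the sense of a comparison without needing a second code path. A standalone illustration (not part of the patch):

	/* true when the opcode part of a rule matches this packet */
	static bool opcode_rule_matches(__be16 ar_op, __be16 want, __be16 mask,
					u16 invflags)
	{
		bool mismatch = (ar_op & mask) != want;
		bool inverted = invflags & ARPT_INV_ARPOP;	/* "! --opcode" given */

		return !(mismatch ^ inverted);
	}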
203 static inline int arp_checkentry(const struct arpt_arp *arp) 210 static inline int arp_checkentry(const struct arpt_arp *arp)
204 { 211 {
205 if (arp->flags & ~ARPT_F_MASK) { 212 if (arp->flags & ~ARPT_F_MASK) {
206 duprintf("Unknown flag bits set: %08X\n", 213 duprintf("Unknown flag bits set: %08X\n",
207 arp->flags & ~ARPT_F_MASK); 214 arp->flags & ~ARPT_F_MASK);
208 return 0; 215 return 0;
209 } 216 }
210 if (arp->invflags & ~ARPT_INV_MASK) { 217 if (arp->invflags & ~ARPT_INV_MASK) {
211 duprintf("Unknown invflag bits set: %08X\n", 218 duprintf("Unknown invflag bits set: %08X\n",
212 arp->invflags & ~ARPT_INV_MASK); 219 arp->invflags & ~ARPT_INV_MASK);
213 return 0; 220 return 0;
214 } 221 }
215 222
216 return 1; 223 return 1;
217 } 224 }
218 225
219 static unsigned int 226 static unsigned int
220 arpt_error(struct sk_buff *skb, const struct xt_target_param *par) 227 arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
221 { 228 {
222 if (net_ratelimit()) 229 if (net_ratelimit())
223 printk("arp_tables: error: '%s'\n", 230 printk("arp_tables: error: '%s'\n",
224 (const char *)par->targinfo); 231 (const char *)par->targinfo);
225 232
226 return NF_DROP; 233 return NF_DROP;
227 } 234 }
228 235
229 static inline struct arpt_entry *get_entry(void *base, unsigned int offset) 236 static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
230 { 237 {
231 return (struct arpt_entry *)(base + offset); 238 return (struct arpt_entry *)(base + offset);
232 } 239 }
233 240
234 static inline __pure 241 static inline __pure
235 struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) 242 struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
236 { 243 {
237 return (void *)entry + entry->next_offset; 244 return (void *)entry + entry->next_offset;
238 } 245 }
239 246
240 unsigned int arpt_do_table(struct sk_buff *skb, 247 unsigned int arpt_do_table(struct sk_buff *skb,
241 unsigned int hook, 248 unsigned int hook,
242 const struct net_device *in, 249 const struct net_device *in,
243 const struct net_device *out, 250 const struct net_device *out,
244 struct xt_table *table) 251 struct xt_table *table)
245 { 252 {
246 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 253 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
247 unsigned int verdict = NF_DROP; 254 unsigned int verdict = NF_DROP;
248 const struct arphdr *arp; 255 const struct arphdr *arp;
249 bool hotdrop = false; 256 bool hotdrop = false;
250 struct arpt_entry *e, *back; 257 struct arpt_entry *e, *back;
251 const char *indev, *outdev; 258 const char *indev, *outdev;
252 void *table_base; 259 void *table_base;
253 const struct xt_table_info *private; 260 const struct xt_table_info *private;
254 struct xt_target_param tgpar; 261 struct xt_target_param tgpar;
255 262
256 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 263 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
257 return NF_DROP; 264 return NF_DROP;
258 265
259 indev = in ? in->name : nulldevname; 266 indev = in ? in->name : nulldevname;
260 outdev = out ? out->name : nulldevname; 267 outdev = out ? out->name : nulldevname;
261 268
262 xt_info_rdlock_bh(); 269 xt_info_rdlock_bh();
263 private = table->private; 270 private = table->private;
264 table_base = private->entries[smp_processor_id()]; 271 table_base = private->entries[smp_processor_id()];
265 272
266 e = get_entry(table_base, private->hook_entry[hook]); 273 e = get_entry(table_base, private->hook_entry[hook]);
267 back = get_entry(table_base, private->underflow[hook]); 274 back = get_entry(table_base, private->underflow[hook]);
268 275
269 tgpar.in = in; 276 tgpar.in = in;
270 tgpar.out = out; 277 tgpar.out = out;
271 tgpar.hooknum = hook; 278 tgpar.hooknum = hook;
272 tgpar.family = NFPROTO_ARP; 279 tgpar.family = NFPROTO_ARP;
273 280
274 arp = arp_hdr(skb); 281 arp = arp_hdr(skb);
275 do { 282 do {
276 struct arpt_entry_target *t; 283 struct arpt_entry_target *t;
277 int hdr_len; 284 int hdr_len;
278 285
279 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { 286 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
280 e = arpt_next_entry(e); 287 e = arpt_next_entry(e);
281 continue; 288 continue;
282 } 289 }
283 290
284 hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + 291 hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
285 (2 * skb->dev->addr_len); 292 (2 * skb->dev->addr_len);
286 ADD_COUNTER(e->counters, hdr_len, 1); 293 ADD_COUNTER(e->counters, hdr_len, 1);
287 294
288 t = arpt_get_target(e); 295 t = arpt_get_target(e);
289 296
290 /* Standard target? */ 297 /* Standard target? */
291 if (!t->u.kernel.target->target) { 298 if (!t->u.kernel.target->target) {
292 int v; 299 int v;
293 300
294 v = ((struct arpt_standard_target *)t)->verdict; 301 v = ((struct arpt_standard_target *)t)->verdict;
295 if (v < 0) { 302 if (v < 0) {
296 /* Pop from stack? */ 303 /* Pop from stack? */
297 if (v != ARPT_RETURN) { 304 if (v != ARPT_RETURN) {
298 verdict = (unsigned)(-v) - 1; 305 verdict = (unsigned)(-v) - 1;
299 break; 306 break;
300 } 307 }
301 e = back; 308 e = back;
302 back = get_entry(table_base, back->comefrom); 309 back = get_entry(table_base, back->comefrom);
303 continue; 310 continue;
304 } 311 }
305 if (table_base + v 312 if (table_base + v
306 != arpt_next_entry(e)) { 313 != arpt_next_entry(e)) {
307 /* Save old back ptr in next entry */ 314 /* Save old back ptr in next entry */
308 struct arpt_entry *next = arpt_next_entry(e); 315 struct arpt_entry *next = arpt_next_entry(e);
309 next->comefrom = (void *)back - table_base; 316 next->comefrom = (void *)back - table_base;
310 317
311 /* set back pointer to next entry */ 318 /* set back pointer to next entry */
312 back = next; 319 back = next;
313 } 320 }
314 321
315 e = get_entry(table_base, v); 322 e = get_entry(table_base, v);
316 continue; 323 continue;
317 } 324 }
318 325
319 /* Targets which reenter must return 326 /* Targets which reenter must return
320 * abs. verdicts 327 * abs. verdicts
321 */ 328 */
322 tgpar.target = t->u.kernel.target; 329 tgpar.target = t->u.kernel.target;
323 tgpar.targinfo = t->data; 330 tgpar.targinfo = t->data;
324 verdict = t->u.kernel.target->target(skb, &tgpar); 331 verdict = t->u.kernel.target->target(skb, &tgpar);
325 332
326 /* Target might have changed stuff. */ 333 /* Target might have changed stuff. */
327 arp = arp_hdr(skb); 334 arp = arp_hdr(skb);
328 335
329 if (verdict == ARPT_CONTINUE) 336 if (verdict == ARPT_CONTINUE)
330 e = arpt_next_entry(e); 337 e = arpt_next_entry(e);
331 else 338 else
332 /* Verdict */ 339 /* Verdict */
333 break; 340 break;
334 } while (!hotdrop); 341 } while (!hotdrop);
335 xt_info_rdunlock_bh(); 342 xt_info_rdunlock_bh();
336 343
337 if (hotdrop) 344 if (hotdrop)
338 return NF_DROP; 345 return NF_DROP;
339 else 346 else
340 return verdict; 347 return verdict;
341 } 348 }
342 349
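The standard-target branch above relies on the xtables verdict encoding: built-in verdicts are stored as small negative numbers, v = -verdict - 1, so the decode step in the loop ("verdict = (unsigned)(-v) - 1") recovers NF_ACCEPT or NF_DROP, ARPT_RETURN instead pops the saved back pointer, and any non-negative v is a byte offset to jump to within the blob. Two hedged helpers (not from the patch) spelling out that mapping:

	static int encode_std_verdict(unsigned int nf_verdict)
	{
		return -(int)nf_verdict - 1;	/* NF_DROP (0) -> -1, NF_ACCEPT (1) -> -2 */
	}

	static unsigned int decode_std_verdict(int v)
	{
		return (unsigned int)(-v) - 1;	/* inverse: -1 -> NF_DROP, -2 -> NF_ACCEPT */
	}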
343 /* All zeroes == unconditional rule. */ 350 /* All zeroes == unconditional rule. */
344 static inline bool unconditional(const struct arpt_arp *arp) 351 static inline bool unconditional(const struct arpt_arp *arp)
345 { 352 {
346 static const struct arpt_arp uncond; 353 static const struct arpt_arp uncond;
347 354
348 return memcmp(arp, &uncond, sizeof(uncond)) == 0; 355 return memcmp(arp, &uncond, sizeof(uncond)) == 0;
349 } 356 }
350 357
351 /* Figures out from what hook each rule can be called: returns 0 if 358 /* Figures out from what hook each rule can be called: returns 0 if
352 * there are loops. Puts hook bitmask in comefrom. 359 * there are loops. Puts hook bitmask in comefrom.
353 */ 360 */
354 static int mark_source_chains(struct xt_table_info *newinfo, 361 static int mark_source_chains(struct xt_table_info *newinfo,
355 unsigned int valid_hooks, void *entry0) 362 unsigned int valid_hooks, void *entry0)
356 { 363 {
357 unsigned int hook; 364 unsigned int hook;
358 365
359 /* No recursion; use packet counter to save back ptrs (reset 366 /* No recursion; use packet counter to save back ptrs (reset
360 * to 0 as we leave), and comefrom to save source hook bitmask. 367 * to 0 as we leave), and comefrom to save source hook bitmask.
361 */ 368 */
362 for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { 369 for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
363 unsigned int pos = newinfo->hook_entry[hook]; 370 unsigned int pos = newinfo->hook_entry[hook];
364 struct arpt_entry *e 371 struct arpt_entry *e
365 = (struct arpt_entry *)(entry0 + pos); 372 = (struct arpt_entry *)(entry0 + pos);
366 373
367 if (!(valid_hooks & (1 << hook))) 374 if (!(valid_hooks & (1 << hook)))
368 continue; 375 continue;
369 376
370 /* Set initial back pointer. */ 377 /* Set initial back pointer. */
371 e->counters.pcnt = pos; 378 e->counters.pcnt = pos;
372 379
373 for (;;) { 380 for (;;) {
374 const struct arpt_standard_target *t 381 const struct arpt_standard_target *t
375 = (void *)arpt_get_target(e); 382 = (void *)arpt_get_target(e);
376 int visited = e->comefrom & (1 << hook); 383 int visited = e->comefrom & (1 << hook);
377 384
378 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { 385 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
379 printk("arptables: loop hook %u pos %u %08X.\n", 386 printk("arptables: loop hook %u pos %u %08X.\n",
380 hook, pos, e->comefrom); 387 hook, pos, e->comefrom);
381 return 0; 388 return 0;
382 } 389 }
383 e->comefrom 390 e->comefrom
384 |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); 391 |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
385 392
386 /* Unconditional return/END. */ 393 /* Unconditional return/END. */
387 if ((e->target_offset == sizeof(struct arpt_entry) && 394 if ((e->target_offset == sizeof(struct arpt_entry) &&
388 (strcmp(t->target.u.user.name, 395 (strcmp(t->target.u.user.name,
389 ARPT_STANDARD_TARGET) == 0) && 396 ARPT_STANDARD_TARGET) == 0) &&
390 t->verdict < 0 && unconditional(&e->arp)) || 397 t->verdict < 0 && unconditional(&e->arp)) ||
391 visited) { 398 visited) {
392 unsigned int oldpos, size; 399 unsigned int oldpos, size;
393 400
394 if ((strcmp(t->target.u.user.name, 401 if ((strcmp(t->target.u.user.name,
395 ARPT_STANDARD_TARGET) == 0) && 402 ARPT_STANDARD_TARGET) == 0) &&
396 t->verdict < -NF_MAX_VERDICT - 1) { 403 t->verdict < -NF_MAX_VERDICT - 1) {
397 duprintf("mark_source_chains: bad " 404 duprintf("mark_source_chains: bad "
398 "negative verdict (%i)\n", 405 "negative verdict (%i)\n",
399 t->verdict); 406 t->verdict);
400 return 0; 407 return 0;
401 } 408 }
402 409
403 /* Return: backtrack through the last 410 /* Return: backtrack through the last
404 * big jump. 411 * big jump.
405 */ 412 */
406 do { 413 do {
407 e->comefrom ^= (1<<NF_ARP_NUMHOOKS); 414 e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
408 oldpos = pos; 415 oldpos = pos;
409 pos = e->counters.pcnt; 416 pos = e->counters.pcnt;
410 e->counters.pcnt = 0; 417 e->counters.pcnt = 0;
411 418
412 /* We're at the start. */ 419 /* We're at the start. */
413 if (pos == oldpos) 420 if (pos == oldpos)
414 goto next; 421 goto next;
415 422
416 e = (struct arpt_entry *) 423 e = (struct arpt_entry *)
417 (entry0 + pos); 424 (entry0 + pos);
418 } while (oldpos == pos + e->next_offset); 425 } while (oldpos == pos + e->next_offset);
419 426
420 /* Move along one */ 427 /* Move along one */
421 size = e->next_offset; 428 size = e->next_offset;
422 e = (struct arpt_entry *) 429 e = (struct arpt_entry *)
423 (entry0 + pos + size); 430 (entry0 + pos + size);
424 e->counters.pcnt = pos; 431 e->counters.pcnt = pos;
425 pos += size; 432 pos += size;
426 } else { 433 } else {
427 int newpos = t->verdict; 434 int newpos = t->verdict;
428 435
429 if (strcmp(t->target.u.user.name, 436 if (strcmp(t->target.u.user.name,
430 ARPT_STANDARD_TARGET) == 0 && 437 ARPT_STANDARD_TARGET) == 0 &&
431 newpos >= 0) { 438 newpos >= 0) {
432 if (newpos > newinfo->size - 439 if (newpos > newinfo->size -
433 sizeof(struct arpt_entry)) { 440 sizeof(struct arpt_entry)) {
434 duprintf("mark_source_chains: " 441 duprintf("mark_source_chains: "
435 "bad verdict (%i)\n", 442 "bad verdict (%i)\n",
436 newpos); 443 newpos);
437 return 0; 444 return 0;
438 } 445 }
439 446
440 /* This is a jump; chase it. */ 447 /* This is a jump; chase it. */
441 duprintf("Jump rule %u -> %u\n", 448 duprintf("Jump rule %u -> %u\n",
442 pos, newpos); 449 pos, newpos);
443 } else { 450 } else {
444 /* ... this is a fallthru */ 451 /* ... this is a fallthru */
445 newpos = pos + e->next_offset; 452 newpos = pos + e->next_offset;
446 } 453 }
447 e = (struct arpt_entry *) 454 e = (struct arpt_entry *)
448 (entry0 + newpos); 455 (entry0 + newpos);
449 e->counters.pcnt = pos; 456 e->counters.pcnt = pos;
450 pos = newpos; 457 pos = newpos;
451 } 458 }
452 } 459 }
453 next: 460 next:
454 duprintf("Finished chain %u\n", hook); 461 duprintf("Finished chain %u\n", hook);
455 } 462 }
456 return 1; 463 return 1;
457 } 464 }
458 465
459 static inline int check_entry(struct arpt_entry *e, const char *name) 466 static inline int check_entry(struct arpt_entry *e, const char *name)
460 { 467 {
461 const struct arpt_entry_target *t; 468 const struct arpt_entry_target *t;
462 469
463 if (!arp_checkentry(&e->arp)) { 470 if (!arp_checkentry(&e->arp)) {
464 duprintf("arp_tables: arp check failed %p %s.\n", e, name); 471 duprintf("arp_tables: arp check failed %p %s.\n", e, name);
465 return -EINVAL; 472 return -EINVAL;
466 } 473 }
467 474
468 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) 475 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
469 return -EINVAL; 476 return -EINVAL;
470 477
471 t = arpt_get_target(e); 478 t = arpt_get_target(e);
472 if (e->target_offset + t->u.target_size > e->next_offset) 479 if (e->target_offset + t->u.target_size > e->next_offset)
473 return -EINVAL; 480 return -EINVAL;
474 481
475 return 0; 482 return 0;
476 } 483 }
477 484
478 static inline int check_target(struct arpt_entry *e, const char *name) 485 static inline int check_target(struct arpt_entry *e, const char *name)
479 { 486 {
480 struct arpt_entry_target *t = arpt_get_target(e); 487 struct arpt_entry_target *t = arpt_get_target(e);
481 int ret; 488 int ret;
482 struct xt_tgchk_param par = { 489 struct xt_tgchk_param par = {
483 .table = name, 490 .table = name,
484 .entryinfo = e, 491 .entryinfo = e,
485 .target = t->u.kernel.target, 492 .target = t->u.kernel.target,
486 .targinfo = t->data, 493 .targinfo = t->data,
487 .hook_mask = e->comefrom, 494 .hook_mask = e->comefrom,
488 .family = NFPROTO_ARP, 495 .family = NFPROTO_ARP,
489 }; 496 };
490 497
491 ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); 498 ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
492 if (ret < 0) { 499 if (ret < 0) {
493 duprintf("arp_tables: check failed for `%s'.\n", 500 duprintf("arp_tables: check failed for `%s'.\n",
494 t->u.kernel.target->name); 501 t->u.kernel.target->name);
495 return ret; 502 return ret;
496 } 503 }
497 return 0; 504 return 0;
498 } 505 }
499 506
500 static inline int 507 static inline int
501 find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 508 find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
502 unsigned int *i) 509 unsigned int *i)
503 { 510 {
504 struct arpt_entry_target *t; 511 struct arpt_entry_target *t;
505 struct xt_target *target; 512 struct xt_target *target;
506 int ret; 513 int ret;
507 514
508 ret = check_entry(e, name); 515 ret = check_entry(e, name);
509 if (ret) 516 if (ret)
510 return ret; 517 return ret;
511 518
512 t = arpt_get_target(e); 519 t = arpt_get_target(e);
513 target = try_then_request_module(xt_find_target(NFPROTO_ARP, 520 target = try_then_request_module(xt_find_target(NFPROTO_ARP,
514 t->u.user.name, 521 t->u.user.name,
515 t->u.user.revision), 522 t->u.user.revision),
516 "arpt_%s", t->u.user.name); 523 "arpt_%s", t->u.user.name);
517 if (IS_ERR(target) || !target) { 524 if (IS_ERR(target) || !target) {
518 duprintf("find_check_entry: `%s' not found\n", t->u.user.name); 525 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
519 ret = target ? PTR_ERR(target) : -ENOENT; 526 ret = target ? PTR_ERR(target) : -ENOENT;
520 goto out; 527 goto out;
521 } 528 }
522 t->u.kernel.target = target; 529 t->u.kernel.target = target;
523 530
524 ret = check_target(e, name); 531 ret = check_target(e, name);
525 if (ret) 532 if (ret)
526 goto err; 533 goto err;
527 534
528 (*i)++; 535 (*i)++;
529 return 0; 536 return 0;
530 err: 537 err:
531 module_put(t->u.kernel.target->me); 538 module_put(t->u.kernel.target->me);
532 out: 539 out:
533 return ret; 540 return ret;
534 } 541 }
535 542
536 static bool check_underflow(struct arpt_entry *e) 543 static bool check_underflow(struct arpt_entry *e)
537 { 544 {
538 const struct arpt_entry_target *t; 545 const struct arpt_entry_target *t;
539 unsigned int verdict; 546 unsigned int verdict;
540 547
541 if (!unconditional(&e->arp)) 548 if (!unconditional(&e->arp))
542 return false; 549 return false;
543 t = arpt_get_target(e); 550 t = arpt_get_target(e);
544 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 551 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
545 return false; 552 return false;
546 verdict = ((struct arpt_standard_target *)t)->verdict; 553 verdict = ((struct arpt_standard_target *)t)->verdict;
547 verdict = -verdict - 1; 554 verdict = -verdict - 1;
548 return verdict == NF_DROP || verdict == NF_ACCEPT; 555 return verdict == NF_DROP || verdict == NF_ACCEPT;
549 } 556 }
550 557
551 static inline int check_entry_size_and_hooks(struct arpt_entry *e, 558 static inline int check_entry_size_and_hooks(struct arpt_entry *e,
552 struct xt_table_info *newinfo, 559 struct xt_table_info *newinfo,
553 unsigned char *base, 560 unsigned char *base,
554 unsigned char *limit, 561 unsigned char *limit,
555 const unsigned int *hook_entries, 562 const unsigned int *hook_entries,
556 const unsigned int *underflows, 563 const unsigned int *underflows,
557 unsigned int valid_hooks, 564 unsigned int valid_hooks,
558 unsigned int *i) 565 unsigned int *i)
559 { 566 {
560 unsigned int h; 567 unsigned int h;
561 568
562 if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || 569 if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
563 (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { 570 (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
564 duprintf("Bad offset %p\n", e); 571 duprintf("Bad offset %p\n", e);
565 return -EINVAL; 572 return -EINVAL;
566 } 573 }
567 574
568 if (e->next_offset 575 if (e->next_offset
569 < sizeof(struct arpt_entry) + sizeof(struct arpt_entry_target)) { 576 < sizeof(struct arpt_entry) + sizeof(struct arpt_entry_target)) {
570 duprintf("checking: element %p size %u\n", 577 duprintf("checking: element %p size %u\n",
571 e, e->next_offset); 578 e, e->next_offset);
572 return -EINVAL; 579 return -EINVAL;
573 } 580 }
574 581
575 /* Check hooks & underflows */ 582 /* Check hooks & underflows */
576 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 583 for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
577 if (!(valid_hooks & (1 << h))) 584 if (!(valid_hooks & (1 << h)))
578 continue; 585 continue;
579 if ((unsigned char *)e - base == hook_entries[h]) 586 if ((unsigned char *)e - base == hook_entries[h])
580 newinfo->hook_entry[h] = hook_entries[h]; 587 newinfo->hook_entry[h] = hook_entries[h];
581 if ((unsigned char *)e - base == underflows[h]) { 588 if ((unsigned char *)e - base == underflows[h]) {
582 if (!check_underflow(e)) { 589 if (!check_underflow(e)) {
583 pr_err("Underflows must be unconditional and " 590 pr_err("Underflows must be unconditional and "
584 "use the STANDARD target with " 591 "use the STANDARD target with "
585 "ACCEPT/DROP\n"); 592 "ACCEPT/DROP\n");
586 return -EINVAL; 593 return -EINVAL;
587 } 594 }
588 newinfo->underflow[h] = underflows[h]; 595 newinfo->underflow[h] = underflows[h];
589 } 596 }
590 } 597 }
591 598
592 /* Clear counters and comefrom */ 599 /* Clear counters and comefrom */
593 e->counters = ((struct xt_counters) { 0, 0 }); 600 e->counters = ((struct xt_counters) { 0, 0 });
594 e->comefrom = 0; 601 e->comefrom = 0;
595 602
596 (*i)++; 603 (*i)++;
597 return 0; 604 return 0;
598 } 605 }
599 606
600 static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i) 607 static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
601 { 608 {
602 struct xt_tgdtor_param par; 609 struct xt_tgdtor_param par;
603 struct arpt_entry_target *t; 610 struct arpt_entry_target *t;
604 611
605 if (i && (*i)-- == 0) 612 if (i && (*i)-- == 0)
606 return 1; 613 return 1;
607 614
608 t = arpt_get_target(e); 615 t = arpt_get_target(e);
609 par.target = t->u.kernel.target; 616 par.target = t->u.kernel.target;
610 par.targinfo = t->data; 617 par.targinfo = t->data;
611 par.family = NFPROTO_ARP; 618 par.family = NFPROTO_ARP;
612 if (par.target->destroy != NULL) 619 if (par.target->destroy != NULL)
613 par.target->destroy(&par); 620 par.target->destroy(&par);
614 module_put(par.target->me); 621 module_put(par.target->me);
615 return 0; 622 return 0;
616 } 623 }
617 624
618 /* Checks and translates the user-supplied table segment (held in 625 /* Checks and translates the user-supplied table segment (held in
619 * newinfo). 626 * newinfo).
620 */ 627 */
621 static int translate_table(const char *name, 628 static int translate_table(const char *name,
622 unsigned int valid_hooks, 629 unsigned int valid_hooks,
623 struct xt_table_info *newinfo, 630 struct xt_table_info *newinfo,
624 void *entry0, 631 void *entry0,
625 unsigned int size, 632 unsigned int size,
626 unsigned int number, 633 unsigned int number,
627 const unsigned int *hook_entries, 634 const unsigned int *hook_entries,
628 const unsigned int *underflows) 635 const unsigned int *underflows)
629 { 636 {
630 unsigned int i; 637 unsigned int i;
631 int ret; 638 int ret;
632 639
633 newinfo->size = size; 640 newinfo->size = size;
634 newinfo->number = number; 641 newinfo->number = number;
635 642
636 /* Init all hooks to impossible value. */ 643 /* Init all hooks to impossible value. */
637 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 644 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
638 newinfo->hook_entry[i] = 0xFFFFFFFF; 645 newinfo->hook_entry[i] = 0xFFFFFFFF;
639 newinfo->underflow[i] = 0xFFFFFFFF; 646 newinfo->underflow[i] = 0xFFFFFFFF;
640 } 647 }
641 648
642 duprintf("translate_table: size %u\n", newinfo->size); 649 duprintf("translate_table: size %u\n", newinfo->size);
643 i = 0; 650 i = 0;
644 651
645 /* Walk through entries, checking offsets. */ 652 /* Walk through entries, checking offsets. */
646 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 653 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
647 check_entry_size_and_hooks, 654 check_entry_size_and_hooks,
648 newinfo, 655 newinfo,
649 entry0, 656 entry0,
650 entry0 + size, 657 entry0 + size,
651 hook_entries, underflows, valid_hooks, &i); 658 hook_entries, underflows, valid_hooks, &i);
652 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); 659 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
653 if (ret != 0) 660 if (ret != 0)
654 return ret; 661 return ret;
655 662
656 if (i != number) { 663 if (i != number) {
657 duprintf("translate_table: %u not %u entries\n", 664 duprintf("translate_table: %u not %u entries\n",
658 i, number); 665 i, number);
659 return -EINVAL; 666 return -EINVAL;
660 } 667 }
661 668
662 /* Check hooks all assigned */ 669 /* Check hooks all assigned */
663 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 670 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
664 /* Only hooks which are valid */ 671 /* Only hooks which are valid */
665 if (!(valid_hooks & (1 << i))) 672 if (!(valid_hooks & (1 << i)))
666 continue; 673 continue;
667 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 674 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
668 duprintf("Invalid hook entry %u %u\n", 675 duprintf("Invalid hook entry %u %u\n",
669 i, hook_entries[i]); 676 i, hook_entries[i]);
670 return -EINVAL; 677 return -EINVAL;
671 } 678 }
672 if (newinfo->underflow[i] == 0xFFFFFFFF) { 679 if (newinfo->underflow[i] == 0xFFFFFFFF) {
673 duprintf("Invalid underflow %u %u\n", 680 duprintf("Invalid underflow %u %u\n",
674 i, underflows[i]); 681 i, underflows[i]);
675 return -EINVAL; 682 return -EINVAL;
676 } 683 }
677 } 684 }
678 685
679 if (!mark_source_chains(newinfo, valid_hooks, entry0)) { 686 if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
680 duprintf("Looping hook\n"); 687 duprintf("Looping hook\n");
681 return -ELOOP; 688 return -ELOOP;
682 } 689 }
683 690
684 /* Finally, each sanity check must pass */ 691 /* Finally, each sanity check must pass */
685 i = 0; 692 i = 0;
686 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 693 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
687 find_check_entry, name, size, &i); 694 find_check_entry, name, size, &i);
688 695
689 if (ret != 0) { 696 if (ret != 0) {
690 ARPT_ENTRY_ITERATE(entry0, newinfo->size, 697 ARPT_ENTRY_ITERATE(entry0, newinfo->size,
691 cleanup_entry, &i); 698 cleanup_entry, &i);
692 return ret; 699 return ret;
693 } 700 }
694 701
695 /* And one copy for every other CPU */ 702 /* And one copy for every other CPU */
696 for_each_possible_cpu(i) { 703 for_each_possible_cpu(i) {
697 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 704 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
698 memcpy(newinfo->entries[i], entry0, newinfo->size); 705 memcpy(newinfo->entries[i], entry0, newinfo->size);
699 } 706 }
700 707
701 return ret; 708 return ret;
702 } 709 }
703 710
704 /* Gets counters. */ 711 /* Gets counters. */
705 static inline int add_entry_to_counter(const struct arpt_entry *e, 712 static inline int add_entry_to_counter(const struct arpt_entry *e,
706 struct xt_counters total[], 713 struct xt_counters total[],
707 unsigned int *i) 714 unsigned int *i)
708 { 715 {
709 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 716 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
710 717
711 (*i)++; 718 (*i)++;
712 return 0; 719 return 0;
713 } 720 }
714 721
715 static inline int set_entry_to_counter(const struct arpt_entry *e, 722 static inline int set_entry_to_counter(const struct arpt_entry *e,
716 struct xt_counters total[], 723 struct xt_counters total[],
717 unsigned int *i) 724 unsigned int *i)
718 { 725 {
719 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 726 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
720 727
721 (*i)++; 728 (*i)++;
722 return 0; 729 return 0;
723 } 730 }
724 731
725 static void get_counters(const struct xt_table_info *t, 732 static void get_counters(const struct xt_table_info *t,
726 struct xt_counters counters[]) 733 struct xt_counters counters[])
727 { 734 {
728 unsigned int cpu; 735 unsigned int cpu;
729 unsigned int i; 736 unsigned int i;
730 unsigned int curcpu; 737 unsigned int curcpu;
731 738
732 /* Instead of clearing (by a previous call to memset()) 739 /* Instead of clearing (by a previous call to memset())
733 * the counters and using adds, we set the counters 740 * the counters and using adds, we set the counters
734 * with data used by 'current' CPU 741 * with data used by 'current' CPU
735 * 742 *
736 * Bottom half has to be disabled to prevent deadlock 743 * Bottom half has to be disabled to prevent deadlock
737 * if a new softirq were to run and call arpt_do_table 744 * if a new softirq were to run and call arpt_do_table
738 */ 745 */
739 local_bh_disable(); 746 local_bh_disable();
740 curcpu = smp_processor_id(); 747 curcpu = smp_processor_id();
741 748
742 i = 0; 749 i = 0;
743 ARPT_ENTRY_ITERATE(t->entries[curcpu], 750 ARPT_ENTRY_ITERATE(t->entries[curcpu],
744 t->size, 751 t->size,
745 set_entry_to_counter, 752 set_entry_to_counter,
746 counters, 753 counters,
747 &i); 754 &i);
748 755
749 for_each_possible_cpu(cpu) { 756 for_each_possible_cpu(cpu) {
750 if (cpu == curcpu) 757 if (cpu == curcpu)
751 continue; 758 continue;
752 i = 0; 759 i = 0;
753 xt_info_wrlock(cpu); 760 xt_info_wrlock(cpu);
754 ARPT_ENTRY_ITERATE(t->entries[cpu], 761 ARPT_ENTRY_ITERATE(t->entries[cpu],
755 t->size, 762 t->size,
756 add_entry_to_counter, 763 add_entry_to_counter,
757 counters, 764 counters,
758 &i); 765 &i);
759 xt_info_wrunlock(cpu); 766 xt_info_wrunlock(cpu);
760 } 767 }
761 local_bh_enable(); 768 local_bh_enable();
762 } 769 }
763 770
764 static struct xt_counters *alloc_counters(struct xt_table *table) 771 static struct xt_counters *alloc_counters(struct xt_table *table)
765 { 772 {
766 unsigned int countersize; 773 unsigned int countersize;
767 struct xt_counters *counters; 774 struct xt_counters *counters;
768 struct xt_table_info *private = table->private; 775 struct xt_table_info *private = table->private;
769 776
770 /* We need atomic snapshot of counters: rest doesn't change 777 /* We need atomic snapshot of counters: rest doesn't change
771 * (other than comefrom, which userspace doesn't care 778 * (other than comefrom, which userspace doesn't care
772 * about). 779 * about).
773 */ 780 */
774 countersize = sizeof(struct xt_counters) * private->number; 781 countersize = sizeof(struct xt_counters) * private->number;
775 counters = vmalloc_node(countersize, numa_node_id()); 782 counters = vmalloc_node(countersize, numa_node_id());
776 783
777 if (counters == NULL) 784 if (counters == NULL)
778 return ERR_PTR(-ENOMEM); 785 return ERR_PTR(-ENOMEM);
779 786
780 get_counters(private, counters); 787 get_counters(private, counters);
781 788
782 return counters; 789 return counters;
783 } 790 }
784 791
785 static int copy_entries_to_user(unsigned int total_size, 792 static int copy_entries_to_user(unsigned int total_size,
786 struct xt_table *table, 793 struct xt_table *table,
787 void __user *userptr) 794 void __user *userptr)
788 { 795 {
789 unsigned int off, num; 796 unsigned int off, num;
790 struct arpt_entry *e; 797 struct arpt_entry *e;
791 struct xt_counters *counters; 798 struct xt_counters *counters;
792 struct xt_table_info *private = table->private; 799 struct xt_table_info *private = table->private;
793 int ret = 0; 800 int ret = 0;
794 void *loc_cpu_entry; 801 void *loc_cpu_entry;
795 802
796 counters = alloc_counters(table); 803 counters = alloc_counters(table);
797 if (IS_ERR(counters)) 804 if (IS_ERR(counters))
798 return PTR_ERR(counters); 805 return PTR_ERR(counters);
799 806
800 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 807 loc_cpu_entry = private->entries[raw_smp_processor_id()];
801 /* ... then copy entire thing ... */ 808 /* ... then copy entire thing ... */
802 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 809 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
803 ret = -EFAULT; 810 ret = -EFAULT;
804 goto free_counters; 811 goto free_counters;
805 } 812 }
806 813
807 /* FIXME: use iterator macros --RR */ 814 /* FIXME: use iterator macros --RR */
808 /* ... then go back and fix counters and names */ 815 /* ... then go back and fix counters and names */
809 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 816 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
810 struct arpt_entry_target *t; 817 struct arpt_entry_target *t;
811 818
812 e = (struct arpt_entry *)(loc_cpu_entry + off); 819 e = (struct arpt_entry *)(loc_cpu_entry + off);
813 if (copy_to_user(userptr + off 820 if (copy_to_user(userptr + off
814 + offsetof(struct arpt_entry, counters), 821 + offsetof(struct arpt_entry, counters),
815 &counters[num], 822 &counters[num],
816 sizeof(counters[num])) != 0) { 823 sizeof(counters[num])) != 0) {
817 ret = -EFAULT; 824 ret = -EFAULT;
818 goto free_counters; 825 goto free_counters;
819 } 826 }
820 827
821 t = arpt_get_target(e); 828 t = arpt_get_target(e);
822 if (copy_to_user(userptr + off + e->target_offset 829 if (copy_to_user(userptr + off + e->target_offset
823 + offsetof(struct arpt_entry_target, 830 + offsetof(struct arpt_entry_target,
824 u.user.name), 831 u.user.name),
825 t->u.kernel.target->name, 832 t->u.kernel.target->name,
826 strlen(t->u.kernel.target->name)+1) != 0) { 833 strlen(t->u.kernel.target->name)+1) != 0) {
827 ret = -EFAULT; 834 ret = -EFAULT;
828 goto free_counters; 835 goto free_counters;
829 } 836 }
830 } 837 }
831 838
832 free_counters: 839 free_counters:
833 vfree(counters); 840 vfree(counters);
834 return ret; 841 return ret;
835 } 842 }
836 843
837 #ifdef CONFIG_COMPAT 844 #ifdef CONFIG_COMPAT
838 static void compat_standard_from_user(void *dst, void *src) 845 static void compat_standard_from_user(void *dst, void *src)
839 { 846 {
840 int v = *(compat_int_t *)src; 847 int v = *(compat_int_t *)src;
841 848
842 if (v > 0) 849 if (v > 0)
843 v += xt_compat_calc_jump(NFPROTO_ARP, v); 850 v += xt_compat_calc_jump(NFPROTO_ARP, v);
844 memcpy(dst, &v, sizeof(v)); 851 memcpy(dst, &v, sizeof(v));
845 } 852 }
846 853
847 static int compat_standard_to_user(void __user *dst, void *src) 854 static int compat_standard_to_user(void __user *dst, void *src)
848 { 855 {
849 compat_int_t cv = *(int *)src; 856 compat_int_t cv = *(int *)src;
850 857
851 if (cv > 0) 858 if (cv > 0)
852 cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); 859 cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
853 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 860 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
854 } 861 }
855 862
856 static int compat_calc_entry(struct arpt_entry *e, 863 static int compat_calc_entry(struct arpt_entry *e,
857 const struct xt_table_info *info, 864 const struct xt_table_info *info,
858 void *base, struct xt_table_info *newinfo) 865 void *base, struct xt_table_info *newinfo)
859 { 866 {
860 struct arpt_entry_target *t; 867 struct arpt_entry_target *t;
861 unsigned int entry_offset; 868 unsigned int entry_offset;
862 int off, i, ret; 869 int off, i, ret;
863 870
864 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 871 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
865 entry_offset = (void *)e - base; 872 entry_offset = (void *)e - base;
866 873
867 t = arpt_get_target(e); 874 t = arpt_get_target(e);
868 off += xt_compat_target_offset(t->u.kernel.target); 875 off += xt_compat_target_offset(t->u.kernel.target);
869 newinfo->size -= off; 876 newinfo->size -= off;
870 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 877 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
871 if (ret) 878 if (ret)
872 return ret; 879 return ret;
873 880
874 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 881 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
875 if (info->hook_entry[i] && 882 if (info->hook_entry[i] &&
876 (e < (struct arpt_entry *)(base + info->hook_entry[i]))) 883 (e < (struct arpt_entry *)(base + info->hook_entry[i])))
877 newinfo->hook_entry[i] -= off; 884 newinfo->hook_entry[i] -= off;
878 if (info->underflow[i] && 885 if (info->underflow[i] &&
879 (e < (struct arpt_entry *)(base + info->underflow[i]))) 886 (e < (struct arpt_entry *)(base + info->underflow[i])))
880 newinfo->underflow[i] -= off; 887 newinfo->underflow[i] -= off;
881 } 888 }
882 return 0; 889 return 0;
883 } 890 }
884 891
885 static int compat_table_info(const struct xt_table_info *info, 892 static int compat_table_info(const struct xt_table_info *info,
886 struct xt_table_info *newinfo) 893 struct xt_table_info *newinfo)
887 { 894 {
888 void *loc_cpu_entry; 895 void *loc_cpu_entry;
889 896
890 if (!newinfo || !info) 897 if (!newinfo || !info)
891 return -EINVAL; 898 return -EINVAL;
892 899
893 /* we don't care about newinfo->entries[] */ 900 /* we don't care about newinfo->entries[] */
894 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 901 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
895 newinfo->initial_entries = 0; 902 newinfo->initial_entries = 0;
896 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 903 loc_cpu_entry = info->entries[raw_smp_processor_id()];
897 return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 904 return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
898 compat_calc_entry, info, loc_cpu_entry, 905 compat_calc_entry, info, loc_cpu_entry,
899 newinfo); 906 newinfo);
900 } 907 }
901 #endif 908 #endif
902 909
903 static int get_info(struct net *net, void __user *user, int *len, int compat) 910 static int get_info(struct net *net, void __user *user, int *len, int compat)
904 { 911 {
905 char name[ARPT_TABLE_MAXNAMELEN]; 912 char name[ARPT_TABLE_MAXNAMELEN];
906 struct xt_table *t; 913 struct xt_table *t;
907 int ret; 914 int ret;
908 915
909 if (*len != sizeof(struct arpt_getinfo)) { 916 if (*len != sizeof(struct arpt_getinfo)) {
910 duprintf("length %u != %Zu\n", *len, 917 duprintf("length %u != %Zu\n", *len,
911 sizeof(struct arpt_getinfo)); 918 sizeof(struct arpt_getinfo));
912 return -EINVAL; 919 return -EINVAL;
913 } 920 }
914 921
915 if (copy_from_user(name, user, sizeof(name)) != 0) 922 if (copy_from_user(name, user, sizeof(name)) != 0)
916 return -EFAULT; 923 return -EFAULT;
917 924
918 name[ARPT_TABLE_MAXNAMELEN-1] = '\0'; 925 name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
919 #ifdef CONFIG_COMPAT 926 #ifdef CONFIG_COMPAT
920 if (compat) 927 if (compat)
921 xt_compat_lock(NFPROTO_ARP); 928 xt_compat_lock(NFPROTO_ARP);
922 #endif 929 #endif
923 t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), 930 t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
924 "arptable_%s", name); 931 "arptable_%s", name);
925 if (t && !IS_ERR(t)) { 932 if (t && !IS_ERR(t)) {
926 struct arpt_getinfo info; 933 struct arpt_getinfo info;
927 const struct xt_table_info *private = t->private; 934 const struct xt_table_info *private = t->private;
928 #ifdef CONFIG_COMPAT 935 #ifdef CONFIG_COMPAT
929 struct xt_table_info tmp; 936 struct xt_table_info tmp;
930 937
931 if (compat) { 938 if (compat) {
932 ret = compat_table_info(private, &tmp); 939 ret = compat_table_info(private, &tmp);
933 xt_compat_flush_offsets(NFPROTO_ARP); 940 xt_compat_flush_offsets(NFPROTO_ARP);
934 private = &tmp; 941 private = &tmp;
935 } 942 }
936 #endif 943 #endif
937 info.valid_hooks = t->valid_hooks; 944 info.valid_hooks = t->valid_hooks;
938 memcpy(info.hook_entry, private->hook_entry, 945 memcpy(info.hook_entry, private->hook_entry,
939 sizeof(info.hook_entry)); 946 sizeof(info.hook_entry));
940 memcpy(info.underflow, private->underflow, 947 memcpy(info.underflow, private->underflow,
941 sizeof(info.underflow)); 948 sizeof(info.underflow));
942 info.num_entries = private->number; 949 info.num_entries = private->number;
943 info.size = private->size; 950 info.size = private->size;
944 strcpy(info.name, name); 951 strcpy(info.name, name);
945 952
946 if (copy_to_user(user, &info, *len) != 0) 953 if (copy_to_user(user, &info, *len) != 0)
947 ret = -EFAULT; 954 ret = -EFAULT;
948 else 955 else
949 ret = 0; 956 ret = 0;
950 xt_table_unlock(t); 957 xt_table_unlock(t);
951 module_put(t->me); 958 module_put(t->me);
952 } else 959 } else
953 ret = t ? PTR_ERR(t) : -ENOENT; 960 ret = t ? PTR_ERR(t) : -ENOENT;
954 #ifdef CONFIG_COMPAT 961 #ifdef CONFIG_COMPAT
955 if (compat) 962 if (compat)
956 xt_compat_unlock(NFPROTO_ARP); 963 xt_compat_unlock(NFPROTO_ARP);
957 #endif 964 #endif
958 return ret; 965 return ret;
959 } 966 }
960 967
961 static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, 968 static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
962 int *len) 969 int *len)
963 { 970 {
964 int ret; 971 int ret;
965 struct arpt_get_entries get; 972 struct arpt_get_entries get;
966 struct xt_table *t; 973 struct xt_table *t;
967 974
968 if (*len < sizeof(get)) { 975 if (*len < sizeof(get)) {
969 duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); 976 duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
970 return -EINVAL; 977 return -EINVAL;
971 } 978 }
972 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 979 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
973 return -EFAULT; 980 return -EFAULT;
974 if (*len != sizeof(struct arpt_get_entries) + get.size) { 981 if (*len != sizeof(struct arpt_get_entries) + get.size) {
975 duprintf("get_entries: %u != %Zu\n", *len, 982 duprintf("get_entries: %u != %Zu\n", *len,
976 sizeof(struct arpt_get_entries) + get.size); 983 sizeof(struct arpt_get_entries) + get.size);
977 return -EINVAL; 984 return -EINVAL;
978 } 985 }
979 986
980 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 987 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
981 if (t && !IS_ERR(t)) { 988 if (t && !IS_ERR(t)) {
982 const struct xt_table_info *private = t->private; 989 const struct xt_table_info *private = t->private;
983 990
984 duprintf("t->private->number = %u\n", 991 duprintf("t->private->number = %u\n",
985 private->number); 992 private->number);
986 if (get.size == private->size) 993 if (get.size == private->size)
987 ret = copy_entries_to_user(private->size, 994 ret = copy_entries_to_user(private->size,
988 t, uptr->entrytable); 995 t, uptr->entrytable);
989 else { 996 else {
990 duprintf("get_entries: I've got %u not %u!\n", 997 duprintf("get_entries: I've got %u not %u!\n",
991 private->size, get.size); 998 private->size, get.size);
992 ret = -EAGAIN; 999 ret = -EAGAIN;
993 } 1000 }
994 module_put(t->me); 1001 module_put(t->me);
995 xt_table_unlock(t); 1002 xt_table_unlock(t);
996 } else 1003 } else
997 ret = t ? PTR_ERR(t) : -ENOENT; 1004 ret = t ? PTR_ERR(t) : -ENOENT;
998 1005
999 return ret; 1006 return ret;
1000 } 1007 }
1001 1008
1002 static int __do_replace(struct net *net, const char *name, 1009 static int __do_replace(struct net *net, const char *name,
1003 unsigned int valid_hooks, 1010 unsigned int valid_hooks,
1004 struct xt_table_info *newinfo, 1011 struct xt_table_info *newinfo,
1005 unsigned int num_counters, 1012 unsigned int num_counters,
1006 void __user *counters_ptr) 1013 void __user *counters_ptr)
1007 { 1014 {
1008 int ret; 1015 int ret;
1009 struct xt_table *t; 1016 struct xt_table *t;
1010 struct xt_table_info *oldinfo; 1017 struct xt_table_info *oldinfo;
1011 struct xt_counters *counters; 1018 struct xt_counters *counters;
1012 void *loc_cpu_old_entry; 1019 void *loc_cpu_old_entry;
1013 1020
1014 ret = 0; 1021 ret = 0;
1015 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1022 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1016 numa_node_id()); 1023 numa_node_id());
1017 if (!counters) { 1024 if (!counters) {
1018 ret = -ENOMEM; 1025 ret = -ENOMEM;
1019 goto out; 1026 goto out;
1020 } 1027 }
1021 1028
1022 t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), 1029 t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
1023 "arptable_%s", name); 1030 "arptable_%s", name);
1024 if (!t || IS_ERR(t)) { 1031 if (!t || IS_ERR(t)) {
1025 ret = t ? PTR_ERR(t) : -ENOENT; 1032 ret = t ? PTR_ERR(t) : -ENOENT;
1026 goto free_newinfo_counters_untrans; 1033 goto free_newinfo_counters_untrans;
1027 } 1034 }
1028 1035
1029 /* You lied! */ 1036 /* You lied! */
1030 if (valid_hooks != t->valid_hooks) { 1037 if (valid_hooks != t->valid_hooks) {
1031 duprintf("Valid hook crap: %08X vs %08X\n", 1038 duprintf("Valid hook crap: %08X vs %08X\n",
1032 valid_hooks, t->valid_hooks); 1039 valid_hooks, t->valid_hooks);
1033 ret = -EINVAL; 1040 ret = -EINVAL;
1034 goto put_module; 1041 goto put_module;
1035 } 1042 }
1036 1043
1037 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); 1044 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1038 if (!oldinfo) 1045 if (!oldinfo)
1039 goto put_module; 1046 goto put_module;
1040 1047
1041 /* Update module usage count based on number of rules */ 1048 /* Update module usage count based on number of rules */
1042 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", 1049 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1043 oldinfo->number, oldinfo->initial_entries, newinfo->number); 1050 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1044 if ((oldinfo->number > oldinfo->initial_entries) || 1051 if ((oldinfo->number > oldinfo->initial_entries) ||
1045 (newinfo->number <= oldinfo->initial_entries)) 1052 (newinfo->number <= oldinfo->initial_entries))
1046 module_put(t->me); 1053 module_put(t->me);
1047 if ((oldinfo->number > oldinfo->initial_entries) && 1054 if ((oldinfo->number > oldinfo->initial_entries) &&
1048 (newinfo->number <= oldinfo->initial_entries)) 1055 (newinfo->number <= oldinfo->initial_entries))
1049 module_put(t->me); 1056 module_put(t->me);
1050 1057
1051 /* Get the old counters, and synchronize with replace */ 1058 /* Get the old counters, and synchronize with replace */
1052 get_counters(oldinfo, counters); 1059 get_counters(oldinfo, counters);
1053 1060
1054 /* Decrease module usage counts and free resource */ 1061 /* Decrease module usage counts and free resource */
1055 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1062 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1056 ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1063 ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1057 NULL); 1064 NULL);
1058 1065
1059 xt_free_table_info(oldinfo); 1066 xt_free_table_info(oldinfo);
1060 if (copy_to_user(counters_ptr, counters, 1067 if (copy_to_user(counters_ptr, counters,
1061 sizeof(struct xt_counters) * num_counters) != 0) 1068 sizeof(struct xt_counters) * num_counters) != 0)
1062 ret = -EFAULT; 1069 ret = -EFAULT;
1063 vfree(counters); 1070 vfree(counters);
1064 xt_table_unlock(t); 1071 xt_table_unlock(t);
1065 return ret; 1072 return ret;
1066 1073
1067 put_module: 1074 put_module:
1068 module_put(t->me); 1075 module_put(t->me);
1069 xt_table_unlock(t); 1076 xt_table_unlock(t);
1070 free_newinfo_counters_untrans: 1077 free_newinfo_counters_untrans:
1071 vfree(counters); 1078 vfree(counters);
1072 out: 1079 out:
1073 return ret; 1080 return ret;
1074 } 1081 }
1075 1082
1076 static int do_replace(struct net *net, void __user *user, unsigned int len) 1083 static int do_replace(struct net *net, void __user *user, unsigned int len)
1077 { 1084 {
1078 int ret; 1085 int ret;
1079 struct arpt_replace tmp; 1086 struct arpt_replace tmp;
1080 struct xt_table_info *newinfo; 1087 struct xt_table_info *newinfo;
1081 void *loc_cpu_entry; 1088 void *loc_cpu_entry;
1082 1089
1083 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1090 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1084 return -EFAULT; 1091 return -EFAULT;
1085 1092
1086 /* overflow check */ 1093 /* overflow check */
1087 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1094 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1088 return -ENOMEM; 1095 return -ENOMEM;
1089 1096
1090 newinfo = xt_alloc_table_info(tmp.size); 1097 newinfo = xt_alloc_table_info(tmp.size);
1091 if (!newinfo) 1098 if (!newinfo)
1092 return -ENOMEM; 1099 return -ENOMEM;
1093 1100
1094 /* choose the copy that is on our node/cpu */ 1101 /* choose the copy that is on our node/cpu */
1095 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1102 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1096 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), 1103 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1097 tmp.size) != 0) { 1104 tmp.size) != 0) {
1098 ret = -EFAULT; 1105 ret = -EFAULT;
1099 goto free_newinfo; 1106 goto free_newinfo;
1100 } 1107 }
1101 1108
1102 ret = translate_table(tmp.name, tmp.valid_hooks, 1109 ret = translate_table(tmp.name, tmp.valid_hooks,
1103 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, 1110 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1104 tmp.hook_entry, tmp.underflow); 1111 tmp.hook_entry, tmp.underflow);
1105 if (ret != 0) 1112 if (ret != 0)
1106 goto free_newinfo; 1113 goto free_newinfo;
1107 1114
1108 duprintf("arp_tables: Translated table\n"); 1115 duprintf("arp_tables: Translated table\n");
1109 1116
1110 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1117 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1111 tmp.num_counters, tmp.counters); 1118 tmp.num_counters, tmp.counters);
1112 if (ret) 1119 if (ret)
1113 goto free_newinfo_untrans; 1120 goto free_newinfo_untrans;
1114 return 0; 1121 return 0;
1115 1122
1116 free_newinfo_untrans: 1123 free_newinfo_untrans:
1117 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1124 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1118 free_newinfo: 1125 free_newinfo:
1119 xt_free_table_info(newinfo); 1126 xt_free_table_info(newinfo);
1120 return ret; 1127 return ret;
1121 } 1128 }
1122 1129
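do_replace() expects the ARPT_SO_SET_REPLACE payload to be a struct arpt_replace header followed immediately by tmp.size bytes of entry data, which is why the second copy_from_user() starts at user + sizeof(tmp). A hedged userspace-side sketch of assembling such a blob follows; the helper name and the "filter" table are illustrative only, and a real caller must also fill valid_hooks, hook_entry[], underflow[] and the counters return area before issuing the setsockopt.

#include <stdlib.h>
#include <string.h>
#include <linux/netfilter_arp/arp_tables.h>

/* Illustrative helper, not a kernel or libarptc API. */
static struct arpt_replace *build_replace_blob(const void *entries,
					       unsigned int entries_size,
					       unsigned int num_entries)
{
	/* header + raw entry area, exactly the layout do_replace() reads */
	struct arpt_replace *repl = calloc(1, sizeof(*repl) + entries_size);

	if (repl == NULL)
		return NULL;
	strcpy(repl->name, "filter");
	repl->num_entries = num_entries;
	repl->size = entries_size;
	memcpy(repl->entries, entries, entries_size);
	/* pass with setsockopt(fd, SOL_IP, ARPT_SO_SET_REPLACE, repl,
	 *                      sizeof(*repl) + entries_size); */
	return repl;
}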
1123 /* We're lazy, and add to the first CPU; overflow works its fey magic 1130 /* We're lazy, and add to the first CPU; overflow works its fey magic
1124 * and everything is OK. */ 1131 * and everything is OK. */
1125 static int 1132 static int
1126 add_counter_to_entry(struct arpt_entry *e, 1133 add_counter_to_entry(struct arpt_entry *e,
1127 const struct xt_counters addme[], 1134 const struct xt_counters addme[],
1128 unsigned int *i) 1135 unsigned int *i)
1129 { 1136 {
1130 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1137 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1131 1138
1132 (*i)++; 1139 (*i)++;
1133 return 0; 1140 return 0;
1134 } 1141 }
1135 1142
1136 static int do_add_counters(struct net *net, void __user *user, unsigned int len, 1143 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1137 int compat) 1144 int compat)
1138 { 1145 {
1139 unsigned int i, curcpu; 1146 unsigned int i, curcpu;
1140 struct xt_counters_info tmp; 1147 struct xt_counters_info tmp;
1141 struct xt_counters *paddc; 1148 struct xt_counters *paddc;
1142 unsigned int num_counters; 1149 unsigned int num_counters;
1143 const char *name; 1150 const char *name;
1144 int size; 1151 int size;
1145 void *ptmp; 1152 void *ptmp;
1146 struct xt_table *t; 1153 struct xt_table *t;
1147 const struct xt_table_info *private; 1154 const struct xt_table_info *private;
1148 int ret = 0; 1155 int ret = 0;
1149 void *loc_cpu_entry; 1156 void *loc_cpu_entry;
1150 #ifdef CONFIG_COMPAT 1157 #ifdef CONFIG_COMPAT
1151 struct compat_xt_counters_info compat_tmp; 1158 struct compat_xt_counters_info compat_tmp;
1152 1159
1153 if (compat) { 1160 if (compat) {
1154 ptmp = &compat_tmp; 1161 ptmp = &compat_tmp;
1155 size = sizeof(struct compat_xt_counters_info); 1162 size = sizeof(struct compat_xt_counters_info);
1156 } else 1163 } else
1157 #endif 1164 #endif
1158 { 1165 {
1159 ptmp = &tmp; 1166 ptmp = &tmp;
1160 size = sizeof(struct xt_counters_info); 1167 size = sizeof(struct xt_counters_info);
1161 } 1168 }
1162 1169
1163 if (copy_from_user(ptmp, user, size) != 0) 1170 if (copy_from_user(ptmp, user, size) != 0)
1164 return -EFAULT; 1171 return -EFAULT;
1165 1172
1166 #ifdef CONFIG_COMPAT 1173 #ifdef CONFIG_COMPAT
1167 if (compat) { 1174 if (compat) {
1168 num_counters = compat_tmp.num_counters; 1175 num_counters = compat_tmp.num_counters;
1169 name = compat_tmp.name; 1176 name = compat_tmp.name;
1170 } else 1177 } else
1171 #endif 1178 #endif
1172 { 1179 {
1173 num_counters = tmp.num_counters; 1180 num_counters = tmp.num_counters;
1174 name = tmp.name; 1181 name = tmp.name;
1175 } 1182 }
1176 1183
1177 if (len != size + num_counters * sizeof(struct xt_counters)) 1184 if (len != size + num_counters * sizeof(struct xt_counters))
1178 return -EINVAL; 1185 return -EINVAL;
1179 1186
1180 paddc = vmalloc_node(len - size, numa_node_id()); 1187 paddc = vmalloc_node(len - size, numa_node_id());
1181 if (!paddc) 1188 if (!paddc)
1182 return -ENOMEM; 1189 return -ENOMEM;
1183 1190
1184 if (copy_from_user(paddc, user + size, len - size) != 0) { 1191 if (copy_from_user(paddc, user + size, len - size) != 0) {
1185 ret = -EFAULT; 1192 ret = -EFAULT;
1186 goto free; 1193 goto free;
1187 } 1194 }
1188 1195
1189 t = xt_find_table_lock(net, NFPROTO_ARP, name); 1196 t = xt_find_table_lock(net, NFPROTO_ARP, name);
1190 if (!t || IS_ERR(t)) { 1197 if (!t || IS_ERR(t)) {
1191 ret = t ? PTR_ERR(t) : -ENOENT; 1198 ret = t ? PTR_ERR(t) : -ENOENT;
1192 goto free; 1199 goto free;
1193 } 1200 }
1194 1201
1195 local_bh_disable(); 1202 local_bh_disable();
1196 private = t->private; 1203 private = t->private;
1197 if (private->number != num_counters) { 1204 if (private->number != num_counters) {
1198 ret = -EINVAL; 1205 ret = -EINVAL;
1199 goto unlock_up_free; 1206 goto unlock_up_free;
1200 } 1207 }
1201 1208
1202 i = 0; 1209 i = 0;
1203 /* Choose the copy that is on our node */ 1210 /* Choose the copy that is on our node */
1204 curcpu = smp_processor_id(); 1211 curcpu = smp_processor_id();
1205 loc_cpu_entry = private->entries[curcpu]; 1212 loc_cpu_entry = private->entries[curcpu];
1206 xt_info_wrlock(curcpu); 1213 xt_info_wrlock(curcpu);
1207 ARPT_ENTRY_ITERATE(loc_cpu_entry, 1214 ARPT_ENTRY_ITERATE(loc_cpu_entry,
1208 private->size, 1215 private->size,
1209 add_counter_to_entry, 1216 add_counter_to_entry,
1210 paddc, 1217 paddc,
1211 &i); 1218 &i);
1212 xt_info_wrunlock(curcpu); 1219 xt_info_wrunlock(curcpu);
1213 unlock_up_free: 1220 unlock_up_free:
1214 local_bh_enable(); 1221 local_bh_enable();
1215 xt_table_unlock(t); 1222 xt_table_unlock(t);
1216 module_put(t->me); 1223 module_put(t->me);
1217 free: 1224 free:
1218 vfree(paddc); 1225 vfree(paddc);
1219 1226
1220 return ret; 1227 return ret;
1221 } 1228 }
1222 1229
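The length check in do_add_counters() means an ARPT_SO_SET_ADD_COUNTERS payload is an xt_counters_info header followed by exactly num_counters xt_counters records, and num_counters must equal the rule count of the target table. A small sketch of building such a payload, with the helper name and table name purely illustrative:

#include <stdlib.h>
#include <string.h>
#include <linux/netfilter/x_tables.h>

/* Illustrative only: allocate a counters blob whose length satisfies
 * the check len == size + num_counters * sizeof(struct xt_counters). */
static struct xt_counters_info *build_add_counters(unsigned int num_counters)
{
	size_t len = sizeof(struct xt_counters_info) +
		     num_counters * sizeof(struct xt_counters);
	struct xt_counters_info *ci = calloc(1, len);

	if (ci == NULL)
		return NULL;
	strcpy(ci->name, "filter");
	ci->num_counters = num_counters;
	/* ci->counters[i].pcnt and .bcnt carry the per-rule packet/byte deltas */
	return ci;
}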
1223 #ifdef CONFIG_COMPAT 1230 #ifdef CONFIG_COMPAT
1224 static inline int 1231 static inline int
1225 compat_release_entry(struct compat_arpt_entry *e, unsigned int *i) 1232 compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
1226 { 1233 {
1227 struct arpt_entry_target *t; 1234 struct arpt_entry_target *t;
1228 1235
1229 if (i && (*i)-- == 0) 1236 if (i && (*i)-- == 0)
1230 return 1; 1237 return 1;
1231 1238
1232 t = compat_arpt_get_target(e); 1239 t = compat_arpt_get_target(e);
1233 module_put(t->u.kernel.target->me); 1240 module_put(t->u.kernel.target->me);
1234 return 0; 1241 return 0;
1235 } 1242 }
1236 1243
1237 static inline int 1244 static inline int
1238 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, 1245 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1239 struct xt_table_info *newinfo, 1246 struct xt_table_info *newinfo,
1240 unsigned int *size, 1247 unsigned int *size,
1241 unsigned char *base, 1248 unsigned char *base,
1242 unsigned char *limit, 1249 unsigned char *limit,
1243 unsigned int *hook_entries, 1250 unsigned int *hook_entries,
1244 unsigned int *underflows, 1251 unsigned int *underflows,
1245 unsigned int *i, 1252 unsigned int *i,
1246 const char *name) 1253 const char *name)
1247 { 1254 {
1248 struct arpt_entry_target *t; 1255 struct arpt_entry_target *t;
1249 struct xt_target *target; 1256 struct xt_target *target;
1250 unsigned int entry_offset; 1257 unsigned int entry_offset;
1251 int ret, off, h; 1258 int ret, off, h;
1252 1259
1253 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1260 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1254 if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || 1261 if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
1255 (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { 1262 (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
1256 duprintf("Bad offset %p, limit = %p\n", e, limit); 1263 duprintf("Bad offset %p, limit = %p\n", e, limit);
1257 return -EINVAL; 1264 return -EINVAL;
1258 } 1265 }
1259 1266
1260 if (e->next_offset < sizeof(struct compat_arpt_entry) + 1267 if (e->next_offset < sizeof(struct compat_arpt_entry) +
1261 sizeof(struct compat_xt_entry_target)) { 1268 sizeof(struct compat_xt_entry_target)) {
1262 duprintf("checking: element %p size %u\n", 1269 duprintf("checking: element %p size %u\n",
1263 e, e->next_offset); 1270 e, e->next_offset);
1264 return -EINVAL; 1271 return -EINVAL;
1265 } 1272 }
1266 1273
1267 /* For purposes of check_entry casting the compat entry is fine */ 1274 /* For purposes of check_entry casting the compat entry is fine */
1268 ret = check_entry((struct arpt_entry *)e, name); 1275 ret = check_entry((struct arpt_entry *)e, name);
1269 if (ret) 1276 if (ret)
1270 return ret; 1277 return ret;
1271 1278
1272 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1279 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1273 entry_offset = (void *)e - (void *)base; 1280 entry_offset = (void *)e - (void *)base;
1274 1281
1275 t = compat_arpt_get_target(e); 1282 t = compat_arpt_get_target(e);
1276 target = try_then_request_module(xt_find_target(NFPROTO_ARP, 1283 target = try_then_request_module(xt_find_target(NFPROTO_ARP,
1277 t->u.user.name, 1284 t->u.user.name,
1278 t->u.user.revision), 1285 t->u.user.revision),
1279 "arpt_%s", t->u.user.name); 1286 "arpt_%s", t->u.user.name);
1280 if (IS_ERR(target) || !target) { 1287 if (IS_ERR(target) || !target) {
1281 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", 1288 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1282 t->u.user.name); 1289 t->u.user.name);
1283 ret = target ? PTR_ERR(target) : -ENOENT; 1290 ret = target ? PTR_ERR(target) : -ENOENT;
1284 goto out; 1291 goto out;
1285 } 1292 }
1286 t->u.kernel.target = target; 1293 t->u.kernel.target = target;
1287 1294
1288 off += xt_compat_target_offset(target); 1295 off += xt_compat_target_offset(target);
1289 *size += off; 1296 *size += off;
1290 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 1297 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
1291 if (ret) 1298 if (ret)
1292 goto release_target; 1299 goto release_target;
1293 1300
1294 /* Check hooks & underflows */ 1301 /* Check hooks & underflows */
1295 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 1302 for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
1296 if ((unsigned char *)e - base == hook_entries[h]) 1303 if ((unsigned char *)e - base == hook_entries[h])
1297 newinfo->hook_entry[h] = hook_entries[h]; 1304 newinfo->hook_entry[h] = hook_entries[h];
1298 if ((unsigned char *)e - base == underflows[h]) 1305 if ((unsigned char *)e - base == underflows[h])
1299 newinfo->underflow[h] = underflows[h]; 1306 newinfo->underflow[h] = underflows[h];
1300 } 1307 }
1301 1308
1302 /* Clear counters and comefrom */ 1309 /* Clear counters and comefrom */
1303 memset(&e->counters, 0, sizeof(e->counters)); 1310 memset(&e->counters, 0, sizeof(e->counters));
1304 e->comefrom = 0; 1311 e->comefrom = 0;
1305 1312
1306 (*i)++; 1313 (*i)++;
1307 return 0; 1314 return 0;
1308 1315
1309 release_target: 1316 release_target:
1310 module_put(t->u.kernel.target->me); 1317 module_put(t->u.kernel.target->me);
1311 out: 1318 out:
1312 return ret; 1319 return ret;
1313 } 1320 }
1314 1321
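check_compat_entry_size_and_hooks() accumulates, per entry, how much the entry grows when a 32-bit layout is converted to the native one (the arpt_entry size delta plus the target's xt_compat_target_offset()), and xt_compat_add_offset() records that growth keyed by the entry's offset so hook_entry[] and underflow[] can be remapped later. The following standalone illustration of that bookkeeping uses made-up numbers and is not kernel code:

#include <stdio.h>

struct delta { unsigned int entry_offset; int growth; };

/* Shift a hook offset by the growth of every entry that precedes it. */
static unsigned int fixup_hook_offset(unsigned int hook_off,
				      const struct delta *d, unsigned int n)
{
	unsigned int i, adjusted = hook_off;

	for (i = 0; i < n; i++)
		if (d[i].entry_offset < hook_off)
			adjusted += d[i].growth;
	return adjusted;
}

int main(void)
{
	/* three compat entries of 100 bytes each, each growing by 8 bytes */
	struct delta d[] = { { 0, 8 }, { 100, 8 }, { 200, 8 } };

	printf("%u\n", fixup_hook_offset(200, d, 3));	/* prints 216 */
	return 0;
}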
1315 static int 1322 static int
1316 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, 1323 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
1317 unsigned int *size, const char *name, 1324 unsigned int *size, const char *name,
1318 struct xt_table_info *newinfo, unsigned char *base) 1325 struct xt_table_info *newinfo, unsigned char *base)
1319 { 1326 {
1320 struct arpt_entry_target *t; 1327 struct arpt_entry_target *t;
1321 struct xt_target *target; 1328 struct xt_target *target;
1322 struct arpt_entry *de; 1329 struct arpt_entry *de;
1323 unsigned int origsize; 1330 unsigned int origsize;
1324 int ret, h; 1331 int ret, h;
1325 1332
1326 ret = 0; 1333 ret = 0;
1327 origsize = *size; 1334 origsize = *size;
1328 de = (struct arpt_entry *)*dstptr; 1335 de = (struct arpt_entry *)*dstptr;
1329 memcpy(de, e, sizeof(struct arpt_entry)); 1336 memcpy(de, e, sizeof(struct arpt_entry));
1330 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1337 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1331 1338
1332 *dstptr += sizeof(struct arpt_entry); 1339 *dstptr += sizeof(struct arpt_entry);
1333 *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1340 *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1334 1341
1335 de->target_offset = e->target_offset - (origsize - *size); 1342 de->target_offset = e->target_offset - (origsize - *size);
1336 t = compat_arpt_get_target(e); 1343 t = compat_arpt_get_target(e);
1337 target = t->u.kernel.target; 1344 target = t->u.kernel.target;
1338 xt_compat_target_from_user(t, dstptr, size); 1345 xt_compat_target_from_user(t, dstptr, size);
1339 1346
1340 de->next_offset = e->next_offset - (origsize - *size); 1347 de->next_offset = e->next_offset - (origsize - *size);
1341 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 1348 for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
1342 if ((unsigned char *)de - base < newinfo->hook_entry[h]) 1349 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1343 newinfo->hook_entry[h] -= origsize - *size; 1350 newinfo->hook_entry[h] -= origsize - *size;
1344 if ((unsigned char *)de - base < newinfo->underflow[h]) 1351 if ((unsigned char *)de - base < newinfo->underflow[h])
1345 newinfo->underflow[h] -= origsize - *size; 1352 newinfo->underflow[h] -= origsize - *size;
1346 } 1353 }
1347 return ret; 1354 return ret;
1348 } 1355 }
1349 1356
1350 static inline int compat_check_entry(struct arpt_entry *e, const char *name, 1357 static inline int compat_check_entry(struct arpt_entry *e, const char *name,
1351 unsigned int *i) 1358 unsigned int *i)
1352 { 1359 {
1353 int ret; 1360 int ret;
1354 1361
1355 ret = check_target(e, name); 1362 ret = check_target(e, name);
1356 if (ret) 1363 if (ret)
1357 return ret; 1364 return ret;
1358 1365
1359 (*i)++; 1366 (*i)++;
1360 return 0; 1367 return 0;
1361 } 1368 }
1362 1369
1363 static int translate_compat_table(const char *name, 1370 static int translate_compat_table(const char *name,
1364 unsigned int valid_hooks, 1371 unsigned int valid_hooks,
1365 struct xt_table_info **pinfo, 1372 struct xt_table_info **pinfo,
1366 void **pentry0, 1373 void **pentry0,
1367 unsigned int total_size, 1374 unsigned int total_size,
1368 unsigned int number, 1375 unsigned int number,
1369 unsigned int *hook_entries, 1376 unsigned int *hook_entries,
1370 unsigned int *underflows) 1377 unsigned int *underflows)
1371 { 1378 {
1372 unsigned int i, j; 1379 unsigned int i, j;
1373 struct xt_table_info *newinfo, *info; 1380 struct xt_table_info *newinfo, *info;
1374 void *pos, *entry0, *entry1; 1381 void *pos, *entry0, *entry1;
1375 unsigned int size; 1382 unsigned int size;
1376 int ret; 1383 int ret;
1377 1384
1378 info = *pinfo; 1385 info = *pinfo;
1379 entry0 = *pentry0; 1386 entry0 = *pentry0;
1380 size = total_size; 1387 size = total_size;
1381 info->number = number; 1388 info->number = number;
1382 1389
1383 /* Init all hooks to impossible value. */ 1390 /* Init all hooks to impossible value. */
1384 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1391 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1385 info->hook_entry[i] = 0xFFFFFFFF; 1392 info->hook_entry[i] = 0xFFFFFFFF;
1386 info->underflow[i] = 0xFFFFFFFF; 1393 info->underflow[i] = 0xFFFFFFFF;
1387 } 1394 }
1388 1395
1389 duprintf("translate_compat_table: size %u\n", info->size); 1396 duprintf("translate_compat_table: size %u\n", info->size);
1390 j = 0; 1397 j = 0;
1391 xt_compat_lock(NFPROTO_ARP); 1398 xt_compat_lock(NFPROTO_ARP);
1392 /* Walk through entries, checking offsets. */ 1399 /* Walk through entries, checking offsets. */
1393 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1400 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
1394 check_compat_entry_size_and_hooks, 1401 check_compat_entry_size_and_hooks,
1395 info, &size, entry0, 1402 info, &size, entry0,
1396 entry0 + total_size, 1403 entry0 + total_size,
1397 hook_entries, underflows, &j, name); 1404 hook_entries, underflows, &j, name);
1398 if (ret != 0) 1405 if (ret != 0)
1399 goto out_unlock; 1406 goto out_unlock;
1400 1407
1401 ret = -EINVAL; 1408 ret = -EINVAL;
1402 if (j != number) { 1409 if (j != number) {
1403 duprintf("translate_compat_table: %u not %u entries\n", 1410 duprintf("translate_compat_table: %u not %u entries\n",
1404 j, number); 1411 j, number);
1405 goto out_unlock; 1412 goto out_unlock;
1406 } 1413 }
1407 1414
1408 /* Check hooks all assigned */ 1415 /* Check hooks all assigned */
1409 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1416 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1410 /* Only hooks which are valid */ 1417 /* Only hooks which are valid */
1411 if (!(valid_hooks & (1 << i))) 1418 if (!(valid_hooks & (1 << i)))
1412 continue; 1419 continue;
1413 if (info->hook_entry[i] == 0xFFFFFFFF) { 1420 if (info->hook_entry[i] == 0xFFFFFFFF) {
1414 duprintf("Invalid hook entry %u %u\n", 1421 duprintf("Invalid hook entry %u %u\n",
1415 i, hook_entries[i]); 1422 i, hook_entries[i]);
1416 goto out_unlock; 1423 goto out_unlock;
1417 } 1424 }
1418 if (info->underflow[i] == 0xFFFFFFFF) { 1425 if (info->underflow[i] == 0xFFFFFFFF) {
1419 duprintf("Invalid underflow %u %u\n", 1426 duprintf("Invalid underflow %u %u\n",
1420 i, underflows[i]); 1427 i, underflows[i]);
1421 goto out_unlock; 1428 goto out_unlock;
1422 } 1429 }
1423 } 1430 }
1424 1431
1425 ret = -ENOMEM; 1432 ret = -ENOMEM;
1426 newinfo = xt_alloc_table_info(size); 1433 newinfo = xt_alloc_table_info(size);
1427 if (!newinfo) 1434 if (!newinfo)
1428 goto out_unlock; 1435 goto out_unlock;
1429 1436
1430 newinfo->number = number; 1437 newinfo->number = number;
1431 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1438 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1432 newinfo->hook_entry[i] = info->hook_entry[i]; 1439 newinfo->hook_entry[i] = info->hook_entry[i];
1433 newinfo->underflow[i] = info->underflow[i]; 1440 newinfo->underflow[i] = info->underflow[i];
1434 } 1441 }
1435 entry1 = newinfo->entries[raw_smp_processor_id()]; 1442 entry1 = newinfo->entries[raw_smp_processor_id()];
1436 pos = entry1; 1443 pos = entry1;
1437 size = total_size; 1444 size = total_size;
1438 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1445 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
1439 compat_copy_entry_from_user, 1446 compat_copy_entry_from_user,
1440 &pos, &size, name, newinfo, entry1); 1447 &pos, &size, name, newinfo, entry1);
1441 xt_compat_flush_offsets(NFPROTO_ARP); 1448 xt_compat_flush_offsets(NFPROTO_ARP);
1442 xt_compat_unlock(NFPROTO_ARP); 1449 xt_compat_unlock(NFPROTO_ARP);
1443 if (ret) 1450 if (ret)
1444 goto free_newinfo; 1451 goto free_newinfo;
1445 1452
1446 ret = -ELOOP; 1453 ret = -ELOOP;
1447 if (!mark_source_chains(newinfo, valid_hooks, entry1)) 1454 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1448 goto free_newinfo; 1455 goto free_newinfo;
1449 1456
1450 i = 0; 1457 i = 0;
1451 ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1458 ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1452 name, &i); 1459 name, &i);
1453 if (ret) { 1460 if (ret) {
1454 j -= i; 1461 j -= i;
1455 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1462 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1456 compat_release_entry, &j); 1463 compat_release_entry, &j);
1457 ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1464 ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1458 xt_free_table_info(newinfo); 1465 xt_free_table_info(newinfo);
1459 return ret; 1466 return ret;
1460 } 1467 }
1461 1468
1462 /* And one copy for every other CPU */ 1469 /* And one copy for every other CPU */
1463 for_each_possible_cpu(i) 1470 for_each_possible_cpu(i)
1464 if (newinfo->entries[i] && newinfo->entries[i] != entry1) 1471 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1465 memcpy(newinfo->entries[i], entry1, newinfo->size); 1472 memcpy(newinfo->entries[i], entry1, newinfo->size);
1466 1473
1467 *pinfo = newinfo; 1474 *pinfo = newinfo;
1468 *pentry0 = entry1; 1475 *pentry0 = entry1;
1469 xt_free_table_info(info); 1476 xt_free_table_info(info);
1470 return 0; 1477 return 0;
1471 1478
1472 free_newinfo: 1479 free_newinfo:
1473 xt_free_table_info(newinfo); 1480 xt_free_table_info(newinfo);
1474 out: 1481 out:
1475 COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1482 COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1476 return ret; 1483 return ret;
1477 out_unlock: 1484 out_unlock:
1478 xt_compat_flush_offsets(NFPROTO_ARP); 1485 xt_compat_flush_offsets(NFPROTO_ARP);
1479 xt_compat_unlock(NFPROTO_ARP); 1486 xt_compat_unlock(NFPROTO_ARP);
1480 goto out; 1487 goto out;
1481 } 1488 }
1482 1489
1483 struct compat_arpt_replace { 1490 struct compat_arpt_replace {
1484 char name[ARPT_TABLE_MAXNAMELEN]; 1491 char name[ARPT_TABLE_MAXNAMELEN];
1485 u32 valid_hooks; 1492 u32 valid_hooks;
1486 u32 num_entries; 1493 u32 num_entries;
1487 u32 size; 1494 u32 size;
1488 u32 hook_entry[NF_ARP_NUMHOOKS]; 1495 u32 hook_entry[NF_ARP_NUMHOOKS];
1489 u32 underflow[NF_ARP_NUMHOOKS]; 1496 u32 underflow[NF_ARP_NUMHOOKS];
1490 u32 num_counters; 1497 u32 num_counters;
1491 compat_uptr_t counters; 1498 compat_uptr_t counters;
1492 struct compat_arpt_entry entries[0]; 1499 struct compat_arpt_entry entries[0];
1493 }; 1500 };
1494 1501
1495 static int compat_do_replace(struct net *net, void __user *user, 1502 static int compat_do_replace(struct net *net, void __user *user,
1496 unsigned int len) 1503 unsigned int len)
1497 { 1504 {
1498 int ret; 1505 int ret;
1499 struct compat_arpt_replace tmp; 1506 struct compat_arpt_replace tmp;
1500 struct xt_table_info *newinfo; 1507 struct xt_table_info *newinfo;
1501 void *loc_cpu_entry; 1508 void *loc_cpu_entry;
1502 1509
1503 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1510 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1504 return -EFAULT; 1511 return -EFAULT;
1505 1512
1506 /* overflow check */ 1513 /* overflow check */
1507 if (tmp.size >= INT_MAX / num_possible_cpus()) 1514 if (tmp.size >= INT_MAX / num_possible_cpus())
1508 return -ENOMEM; 1515 return -ENOMEM;
1509 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1516 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1510 return -ENOMEM; 1517 return -ENOMEM;
1511 1518
1512 newinfo = xt_alloc_table_info(tmp.size); 1519 newinfo = xt_alloc_table_info(tmp.size);
1513 if (!newinfo) 1520 if (!newinfo)
1514 return -ENOMEM; 1521 return -ENOMEM;
1515 1522
1516 /* choose the copy that is on our node/cpu */ 1523 /* choose the copy that is on our node/cpu */
1517 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1524 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1518 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { 1525 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
1519 ret = -EFAULT; 1526 ret = -EFAULT;
1520 goto free_newinfo; 1527 goto free_newinfo;
1521 } 1528 }
1522 1529
1523 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1530 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1524 &newinfo, &loc_cpu_entry, tmp.size, 1531 &newinfo, &loc_cpu_entry, tmp.size,
1525 tmp.num_entries, tmp.hook_entry, 1532 tmp.num_entries, tmp.hook_entry,
1526 tmp.underflow); 1533 tmp.underflow);
1527 if (ret != 0) 1534 if (ret != 0)
1528 goto free_newinfo; 1535 goto free_newinfo;
1529 1536
1530 duprintf("compat_do_replace: Translated table\n"); 1537 duprintf("compat_do_replace: Translated table\n");
1531 1538
1532 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1539 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1533 tmp.num_counters, compat_ptr(tmp.counters)); 1540 tmp.num_counters, compat_ptr(tmp.counters));
1534 if (ret) 1541 if (ret)
1535 goto free_newinfo_untrans; 1542 goto free_newinfo_untrans;
1536 return 0; 1543 return 0;
1537 1544
1538 free_newinfo_untrans: 1545 free_newinfo_untrans:
1539 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1546 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1540 free_newinfo: 1547 free_newinfo:
1541 xt_free_table_info(newinfo); 1548 xt_free_table_info(newinfo);
1542 return ret; 1549 return ret;
1543 } 1550 }
1544 1551
1545 static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, 1552 static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1546 unsigned int len) 1553 unsigned int len)
1547 { 1554 {
1548 int ret; 1555 int ret;
1549 1556
1550 if (!capable(CAP_NET_ADMIN)) 1557 if (!capable(CAP_NET_ADMIN))
1551 return -EPERM; 1558 return -EPERM;
1552 1559
1553 switch (cmd) { 1560 switch (cmd) {
1554 case ARPT_SO_SET_REPLACE: 1561 case ARPT_SO_SET_REPLACE:
1555 ret = compat_do_replace(sock_net(sk), user, len); 1562 ret = compat_do_replace(sock_net(sk), user, len);
1556 break; 1563 break;
1557 1564
1558 case ARPT_SO_SET_ADD_COUNTERS: 1565 case ARPT_SO_SET_ADD_COUNTERS:
1559 ret = do_add_counters(sock_net(sk), user, len, 1); 1566 ret = do_add_counters(sock_net(sk), user, len, 1);
1560 break; 1567 break;
1561 1568
1562 default: 1569 default:
1563 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); 1570 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
1564 ret = -EINVAL; 1571 ret = -EINVAL;
1565 } 1572 }
1566 1573
1567 return ret; 1574 return ret;
1568 } 1575 }
1569 1576
1570 static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, 1577 static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1571 compat_uint_t *size, 1578 compat_uint_t *size,
1572 struct xt_counters *counters, 1579 struct xt_counters *counters,
1573 unsigned int *i) 1580 unsigned int *i)
1574 { 1581 {
1575 struct arpt_entry_target *t; 1582 struct arpt_entry_target *t;
1576 struct compat_arpt_entry __user *ce; 1583 struct compat_arpt_entry __user *ce;
1577 u_int16_t target_offset, next_offset; 1584 u_int16_t target_offset, next_offset;
1578 compat_uint_t origsize; 1585 compat_uint_t origsize;
1579 int ret; 1586 int ret;
1580 1587
1581 ret = -EFAULT; 1588 ret = -EFAULT;
1582 origsize = *size; 1589 origsize = *size;
1583 ce = (struct compat_arpt_entry __user *)*dstptr; 1590 ce = (struct compat_arpt_entry __user *)*dstptr;
1584 if (copy_to_user(ce, e, sizeof(struct arpt_entry))) 1591 if (copy_to_user(ce, e, sizeof(struct arpt_entry)))
1585 goto out; 1592 goto out;
1586 1593
1587 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1594 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1588 goto out; 1595 goto out;
1589 1596
1590 *dstptr += sizeof(struct compat_arpt_entry); 1597 *dstptr += sizeof(struct compat_arpt_entry);
1591 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1598 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
1592 1599
1593 target_offset = e->target_offset - (origsize - *size); 1600 target_offset = e->target_offset - (origsize - *size);
1594 1601
1595 t = arpt_get_target(e); 1602 t = arpt_get_target(e);
1596 ret = xt_compat_target_to_user(t, dstptr, size); 1603 ret = xt_compat_target_to_user(t, dstptr, size);
1597 if (ret) 1604 if (ret)
1598 goto out; 1605 goto out;
1599 ret = -EFAULT; 1606 ret = -EFAULT;
1600 next_offset = e->next_offset - (origsize - *size); 1607 next_offset = e->next_offset - (origsize - *size);
1601 if (put_user(target_offset, &ce->target_offset)) 1608 if (put_user(target_offset, &ce->target_offset))
1602 goto out; 1609 goto out;
1603 if (put_user(next_offset, &ce->next_offset)) 1610 if (put_user(next_offset, &ce->next_offset))
1604 goto out; 1611 goto out;
1605 1612
1606 (*i)++; 1613 (*i)++;
1607 return 0; 1614 return 0;
1608 out: 1615 out:
1609 return ret; 1616 return ret;
1610 } 1617 }
1611 1618
1612 static int compat_copy_entries_to_user(unsigned int total_size, 1619 static int compat_copy_entries_to_user(unsigned int total_size,
1613 struct xt_table *table, 1620 struct xt_table *table,
1614 void __user *userptr) 1621 void __user *userptr)
1615 { 1622 {
1616 struct xt_counters *counters; 1623 struct xt_counters *counters;
1617 const struct xt_table_info *private = table->private; 1624 const struct xt_table_info *private = table->private;
1618 void __user *pos; 1625 void __user *pos;
1619 unsigned int size; 1626 unsigned int size;
1620 int ret = 0; 1627 int ret = 0;
1621 void *loc_cpu_entry; 1628 void *loc_cpu_entry;
1622 unsigned int i = 0; 1629 unsigned int i = 0;
1623 1630
1624 counters = alloc_counters(table); 1631 counters = alloc_counters(table);
1625 if (IS_ERR(counters)) 1632 if (IS_ERR(counters))
1626 return PTR_ERR(counters); 1633 return PTR_ERR(counters);
1627 1634
1628 /* choose the copy on our node/cpu */ 1635 /* choose the copy on our node/cpu */
1629 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1636 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1630 pos = userptr; 1637 pos = userptr;
1631 size = total_size; 1638 size = total_size;
1632 ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1639 ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1633 compat_copy_entry_to_user, 1640 compat_copy_entry_to_user,
1634 &pos, &size, counters, &i); 1641 &pos, &size, counters, &i);
1635 vfree(counters); 1642 vfree(counters);
1636 return ret; 1643 return ret;
1637 } 1644 }
1638 1645
1639 struct compat_arpt_get_entries { 1646 struct compat_arpt_get_entries {
1640 char name[ARPT_TABLE_MAXNAMELEN]; 1647 char name[ARPT_TABLE_MAXNAMELEN];
1641 compat_uint_t size; 1648 compat_uint_t size;
1642 struct compat_arpt_entry entrytable[0]; 1649 struct compat_arpt_entry entrytable[0];
1643 }; 1650 };
1644 1651
1645 static int compat_get_entries(struct net *net, 1652 static int compat_get_entries(struct net *net,
1646 struct compat_arpt_get_entries __user *uptr, 1653 struct compat_arpt_get_entries __user *uptr,
1647 int *len) 1654 int *len)
1648 { 1655 {
1649 int ret; 1656 int ret;
1650 struct compat_arpt_get_entries get; 1657 struct compat_arpt_get_entries get;
1651 struct xt_table *t; 1658 struct xt_table *t;
1652 1659
1653 if (*len < sizeof(get)) { 1660 if (*len < sizeof(get)) {
1654 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); 1661 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1655 return -EINVAL; 1662 return -EINVAL;
1656 } 1663 }
1657 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1664 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1658 return -EFAULT; 1665 return -EFAULT;
1659 if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { 1666 if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
1660 duprintf("compat_get_entries: %u != %zu\n", 1667 duprintf("compat_get_entries: %u != %zu\n",
1661 *len, sizeof(get) + get.size); 1668 *len, sizeof(get) + get.size);
1662 return -EINVAL; 1669 return -EINVAL;
1663 } 1670 }
1664 1671
1665 xt_compat_lock(NFPROTO_ARP); 1672 xt_compat_lock(NFPROTO_ARP);
1666 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 1673 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
1667 if (t && !IS_ERR(t)) { 1674 if (t && !IS_ERR(t)) {
1668 const struct xt_table_info *private = t->private; 1675 const struct xt_table_info *private = t->private;
1669 struct xt_table_info info; 1676 struct xt_table_info info;
1670 1677
1671 duprintf("t->private->number = %u\n", private->number); 1678 duprintf("t->private->number = %u\n", private->number);
1672 ret = compat_table_info(private, &info); 1679 ret = compat_table_info(private, &info);
1673 if (!ret && get.size == info.size) { 1680 if (!ret && get.size == info.size) {
1674 ret = compat_copy_entries_to_user(private->size, 1681 ret = compat_copy_entries_to_user(private->size,
1675 t, uptr->entrytable); 1682 t, uptr->entrytable);
1676 } else if (!ret) { 1683 } else if (!ret) {
1677 duprintf("compat_get_entries: I've got %u not %u!\n", 1684 duprintf("compat_get_entries: I've got %u not %u!\n",
1678 private->size, get.size); 1685 private->size, get.size);
1679 ret = -EAGAIN; 1686 ret = -EAGAIN;
1680 } 1687 }
1681 xt_compat_flush_offsets(NFPROTO_ARP); 1688 xt_compat_flush_offsets(NFPROTO_ARP);
1682 module_put(t->me); 1689 module_put(t->me);
1683 xt_table_unlock(t); 1690 xt_table_unlock(t);
1684 } else 1691 } else
1685 ret = t ? PTR_ERR(t) : -ENOENT; 1692 ret = t ? PTR_ERR(t) : -ENOENT;
1686 1693
1687 xt_compat_unlock(NFPROTO_ARP); 1694 xt_compat_unlock(NFPROTO_ARP);
1688 return ret; 1695 return ret;
1689 } 1696 }
1690 1697
1691 static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); 1698 static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);
1692 1699
1693 static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, 1700 static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
1694 int *len) 1701 int *len)
1695 { 1702 {
1696 int ret; 1703 int ret;
1697 1704
1698 if (!capable(CAP_NET_ADMIN)) 1705 if (!capable(CAP_NET_ADMIN))
1699 return -EPERM; 1706 return -EPERM;
1700 1707
1701 switch (cmd) { 1708 switch (cmd) {
1702 case ARPT_SO_GET_INFO: 1709 case ARPT_SO_GET_INFO:
1703 ret = get_info(sock_net(sk), user, len, 1); 1710 ret = get_info(sock_net(sk), user, len, 1);
1704 break; 1711 break;
1705 case ARPT_SO_GET_ENTRIES: 1712 case ARPT_SO_GET_ENTRIES:
1706 ret = compat_get_entries(sock_net(sk), user, len); 1713 ret = compat_get_entries(sock_net(sk), user, len);
1707 break; 1714 break;
1708 default: 1715 default:
1709 ret = do_arpt_get_ctl(sk, cmd, user, len); 1716 ret = do_arpt_get_ctl(sk, cmd, user, len);
1710 } 1717 }
1711 return ret; 1718 return ret;
1712 } 1719 }
1713 #endif 1720 #endif
1714 1721
1715 static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 1722 static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1716 { 1723 {
1717 int ret; 1724 int ret;
1718 1725
1719 if (!capable(CAP_NET_ADMIN)) 1726 if (!capable(CAP_NET_ADMIN))
1720 return -EPERM; 1727 return -EPERM;
1721 1728
1722 switch (cmd) { 1729 switch (cmd) {
1723 case ARPT_SO_SET_REPLACE: 1730 case ARPT_SO_SET_REPLACE:
1724 ret = do_replace(sock_net(sk), user, len); 1731 ret = do_replace(sock_net(sk), user, len);
1725 break; 1732 break;
1726 1733
1727 case ARPT_SO_SET_ADD_COUNTERS: 1734 case ARPT_SO_SET_ADD_COUNTERS:
1728 ret = do_add_counters(sock_net(sk), user, len, 0); 1735 ret = do_add_counters(sock_net(sk), user, len, 0);
1729 break; 1736 break;
1730 1737
1731 default: 1738 default:
1732 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); 1739 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
1733 ret = -EINVAL; 1740 ret = -EINVAL;
1734 } 1741 }
1735 1742
1736 return ret; 1743 return ret;
1737 } 1744 }
1738 1745
1739 static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 1746 static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1740 { 1747 {
1741 int ret; 1748 int ret;
1742 1749
1743 if (!capable(CAP_NET_ADMIN)) 1750 if (!capable(CAP_NET_ADMIN))
1744 return -EPERM; 1751 return -EPERM;
1745 1752
1746 switch (cmd) { 1753 switch (cmd) {
1747 case ARPT_SO_GET_INFO: 1754 case ARPT_SO_GET_INFO:
1748 ret = get_info(sock_net(sk), user, len, 0); 1755 ret = get_info(sock_net(sk), user, len, 0);
1749 break; 1756 break;
1750 1757
1751 case ARPT_SO_GET_ENTRIES: 1758 case ARPT_SO_GET_ENTRIES:
1752 ret = get_entries(sock_net(sk), user, len); 1759 ret = get_entries(sock_net(sk), user, len);
1753 break; 1760 break;
1754 1761
1755 case ARPT_SO_GET_REVISION_TARGET: { 1762 case ARPT_SO_GET_REVISION_TARGET: {
1756 struct xt_get_revision rev; 1763 struct xt_get_revision rev;
1757 1764
1758 if (*len != sizeof(rev)) { 1765 if (*len != sizeof(rev)) {
1759 ret = -EINVAL; 1766 ret = -EINVAL;
1760 break; 1767 break;
1761 } 1768 }
1762 if (copy_from_user(&rev, user, sizeof(rev)) != 0) { 1769 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1763 ret = -EFAULT; 1770 ret = -EFAULT;
1764 break; 1771 break;
1765 } 1772 }
1766 1773
1767 try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, 1774 try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
1768 rev.revision, 1, &ret), 1775 rev.revision, 1, &ret),
1769 "arpt_%s", rev.name); 1776 "arpt_%s", rev.name);
1770 break; 1777 break;
1771 } 1778 }
1772 1779
1773 default: 1780 default:
1774 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); 1781 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
1775 ret = -EINVAL; 1782 ret = -EINVAL;
1776 } 1783 }
1777 1784
1778 return ret; 1785 return ret;
1779 } 1786 }
1780 1787
1781 struct xt_table *arpt_register_table(struct net *net, 1788 struct xt_table *arpt_register_table(struct net *net,
1782 const struct xt_table *table, 1789 const struct xt_table *table,
1783 const struct arpt_replace *repl) 1790 const struct arpt_replace *repl)
1784 { 1791 {
1785 int ret; 1792 int ret;
1786 struct xt_table_info *newinfo; 1793 struct xt_table_info *newinfo;
1787 struct xt_table_info bootstrap 1794 struct xt_table_info bootstrap
1788 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1795 = { 0, 0, 0, { 0 }, { 0 }, { } };
1789 void *loc_cpu_entry; 1796 void *loc_cpu_entry;
1790 struct xt_table *new_table; 1797 struct xt_table *new_table;
1791 1798
1792 newinfo = xt_alloc_table_info(repl->size); 1799 newinfo = xt_alloc_table_info(repl->size);
1793 if (!newinfo) { 1800 if (!newinfo) {
1794 ret = -ENOMEM; 1801 ret = -ENOMEM;
1795 goto out; 1802 goto out;
1796 } 1803 }
1797 1804
1798 /* choose the copy on our node/cpu */ 1805 /* choose the copy on our node/cpu */
1799 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1806 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1800 memcpy(loc_cpu_entry, repl->entries, repl->size); 1807 memcpy(loc_cpu_entry, repl->entries, repl->size);
1801 1808
1802 ret = translate_table(table->name, table->valid_hooks, 1809 ret = translate_table(table->name, table->valid_hooks,
1803 newinfo, loc_cpu_entry, repl->size, 1810 newinfo, loc_cpu_entry, repl->size,
1804 repl->num_entries, 1811 repl->num_entries,
1805 repl->hook_entry, 1812 repl->hook_entry,
1806 repl->underflow); 1813 repl->underflow);
1807 1814
1808 duprintf("arpt_register_table: translate table gives %d\n", ret); 1815 duprintf("arpt_register_table: translate table gives %d\n", ret);
1809 if (ret != 0) 1816 if (ret != 0)
1810 goto out_free; 1817 goto out_free;
1811 1818
1812 new_table = xt_register_table(net, table, &bootstrap, newinfo); 1819 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1813 if (IS_ERR(new_table)) { 1820 if (IS_ERR(new_table)) {
1814 ret = PTR_ERR(new_table); 1821 ret = PTR_ERR(new_table);
1815 goto out_free; 1822 goto out_free;
1816 } 1823 }
1817 return new_table; 1824 return new_table;
1818 1825
1819 out_free: 1826 out_free:
1820 xt_free_table_info(newinfo); 1827 xt_free_table_info(newinfo);
1821 out: 1828 out:
1822 return ERR_PTR(ret); 1829 return ERR_PTR(ret);
1823 } 1830 }
1824 1831
1825 void arpt_unregister_table(struct xt_table *table) 1832 void arpt_unregister_table(struct xt_table *table)
1826 { 1833 {
1827 struct xt_table_info *private; 1834 struct xt_table_info *private;
1828 void *loc_cpu_entry; 1835 void *loc_cpu_entry;
1829 struct module *table_owner = table->me; 1836 struct module *table_owner = table->me;
1830 1837
1831 private = xt_unregister_table(table); 1838 private = xt_unregister_table(table);
1832 1839
1833 /* Decrease module usage counts and free resources */ 1840 /* Decrease module usage counts and free resources */
1834 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1841 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1835 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, 1842 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
1836 cleanup_entry, NULL); 1843 cleanup_entry, NULL);
1837 if (private->number > private->initial_entries) 1844 if (private->number > private->initial_entries)
1838 module_put(table_owner); 1845 module_put(table_owner);
1839 xt_free_table_info(private); 1846 xt_free_table_info(private);
1840 } 1847 }
1841 1848
1842 /* The built-in targets: standard (NULL) and error. */ 1849 /* The built-in targets: standard (NULL) and error. */
1843 static struct xt_target arpt_standard_target __read_mostly = { 1850 static struct xt_target arpt_standard_target __read_mostly = {
1844 .name = ARPT_STANDARD_TARGET, 1851 .name = ARPT_STANDARD_TARGET,
1845 .targetsize = sizeof(int), 1852 .targetsize = sizeof(int),
1846 .family = NFPROTO_ARP, 1853 .family = NFPROTO_ARP,
1847 #ifdef CONFIG_COMPAT 1854 #ifdef CONFIG_COMPAT
1848 .compatsize = sizeof(compat_int_t), 1855 .compatsize = sizeof(compat_int_t),
1849 .compat_from_user = compat_standard_from_user, 1856 .compat_from_user = compat_standard_from_user,
1850 .compat_to_user = compat_standard_to_user, 1857 .compat_to_user = compat_standard_to_user,
1851 #endif 1858 #endif
1852 }; 1859 };
1853 1860
1854 static struct xt_target arpt_error_target __read_mostly = { 1861 static struct xt_target arpt_error_target __read_mostly = {
1855 .name = ARPT_ERROR_TARGET, 1862 .name = ARPT_ERROR_TARGET,
1856 .target = arpt_error, 1863 .target = arpt_error,
1857 .targetsize = ARPT_FUNCTION_MAXNAMELEN, 1864 .targetsize = ARPT_FUNCTION_MAXNAMELEN,
1858 .family = NFPROTO_ARP, 1865 .family = NFPROTO_ARP,
1859 }; 1866 };
1860 1867
1861 static struct nf_sockopt_ops arpt_sockopts = { 1868 static struct nf_sockopt_ops arpt_sockopts = {
1862 .pf = PF_INET, 1869 .pf = PF_INET,
1863 .set_optmin = ARPT_BASE_CTL, 1870 .set_optmin = ARPT_BASE_CTL,
1864 .set_optmax = ARPT_SO_SET_MAX+1, 1871 .set_optmax = ARPT_SO_SET_MAX+1,
1865 .set = do_arpt_set_ctl, 1872 .set = do_arpt_set_ctl,
1866 #ifdef CONFIG_COMPAT 1873 #ifdef CONFIG_COMPAT
1867 .compat_set = compat_do_arpt_set_ctl, 1874 .compat_set = compat_do_arpt_set_ctl,
1868 #endif 1875 #endif
1869 .get_optmin = ARPT_BASE_CTL, 1876 .get_optmin = ARPT_BASE_CTL,
1870 .get_optmax = ARPT_SO_GET_MAX+1, 1877 .get_optmax = ARPT_SO_GET_MAX+1,
1871 .get = do_arpt_get_ctl, 1878 .get = do_arpt_get_ctl,
1872 #ifdef CONFIG_COMPAT 1879 #ifdef CONFIG_COMPAT
1873 .compat_get = compat_do_arpt_get_ctl, 1880 .compat_get = compat_do_arpt_get_ctl,
1874 #endif 1881 #endif
1875 .owner = THIS_MODULE, 1882 .owner = THIS_MODULE,
1876 }; 1883 };
1877 1884
1878 static int __net_init arp_tables_net_init(struct net *net) 1885 static int __net_init arp_tables_net_init(struct net *net)
1879 { 1886 {
1880 return xt_proto_init(net, NFPROTO_ARP); 1887 return xt_proto_init(net, NFPROTO_ARP);
1881 } 1888 }
1882 1889
1883 static void __net_exit arp_tables_net_exit(struct net *net) 1890 static void __net_exit arp_tables_net_exit(struct net *net)
1884 { 1891 {
1885 xt_proto_fini(net, NFPROTO_ARP); 1892 xt_proto_fini(net, NFPROTO_ARP);
1886 } 1893 }
1887 1894
1888 static struct pernet_operations arp_tables_net_ops = { 1895 static struct pernet_operations arp_tables_net_ops = {
1889 .init = arp_tables_net_init, 1896 .init = arp_tables_net_init,
1890 .exit = arp_tables_net_exit, 1897 .exit = arp_tables_net_exit,
1891 }; 1898 };
1892 1899
1893 static int __init arp_tables_init(void) 1900 static int __init arp_tables_init(void)
1894 { 1901 {
1895 int ret; 1902 int ret;
1896 1903
1897 ret = register_pernet_subsys(&arp_tables_net_ops); 1904 ret = register_pernet_subsys(&arp_tables_net_ops);
1898 if (ret < 0) 1905 if (ret < 0)
1899 goto err1; 1906 goto err1;
1900 1907
1901 /* Noone else will be downing sem now, so we won't sleep */ 1908 /* Noone else will be downing sem now, so we won't sleep */
1902 ret = xt_register_target(&arpt_standard_target); 1909 ret = xt_register_target(&arpt_standard_target);
1903 if (ret < 0) 1910 if (ret < 0)
1904 goto err2; 1911 goto err2;
1905 ret = xt_register_target(&arpt_error_target); 1912 ret = xt_register_target(&arpt_error_target);
1906 if (ret < 0) 1913 if (ret < 0)
1907 goto err3; 1914 goto err3;
1908 1915
1909 /* Register setsockopt */ 1916 /* Register setsockopt */
1910 ret = nf_register_sockopt(&arpt_sockopts); 1917 ret = nf_register_sockopt(&arpt_sockopts);
1911 if (ret < 0) 1918 if (ret < 0)
1912 goto err4; 1919 goto err4;
1913 1920
1914 printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); 1921 printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
1915 return 0; 1922 return 0;
1916 1923
1917 err4: 1924 err4:
1918 xt_unregister_target(&arpt_error_target); 1925 xt_unregister_target(&arpt_error_target);
1919 err3: 1926 err3:
1920 xt_unregister_target(&arpt_standard_target); 1927 xt_unregister_target(&arpt_standard_target);
1921 err2: 1928 err2:
1922 unregister_pernet_subsys(&arp_tables_net_ops); 1929 unregister_pernet_subsys(&arp_tables_net_ops);
1923 err1: 1930 err1:
1924 return ret; 1931 return ret;
1925 } 1932 }
1926 1933
1927 static void __exit arp_tables_fini(void) 1934 static void __exit arp_tables_fini(void)
1928 { 1935 {
1929 nf_unregister_sockopt(&arpt_sockopts); 1936 nf_unregister_sockopt(&arpt_sockopts);
1930 xt_unregister_target(&arpt_error_target); 1937 xt_unregister_target(&arpt_error_target);
1931 xt_unregister_target(&arpt_standard_target); 1938 xt_unregister_target(&arpt_standard_target);
1932 unregister_pernet_subsys(&arp_tables_net_ops); 1939 unregister_pernet_subsys(&arp_tables_net_ops);
1933 } 1940 }
1934 1941
1935 EXPORT_SYMBOL(arpt_register_table); 1942 EXPORT_SYMBOL(arpt_register_table);
1936 EXPORT_SYMBOL(arpt_unregister_table); 1943 EXPORT_SYMBOL(arpt_unregister_table);
1937 EXPORT_SYMBOL(arpt_do_table); 1944 EXPORT_SYMBOL(arpt_do_table);
1938 1945
1939 module_init(arp_tables_init); 1946 module_init(arp_tables_init);
1940 module_exit(arp_tables_fini); 1947 module_exit(arp_tables_fini);
1941 1948
net/ipv4/netfilter/arptable_filter.c
1 /* 1 /*
2 * Filtering ARP tables module. 2 * Filtering ARP tables module.
3 * 3 *
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5 * 5 *
6 */ 6 */
7 7
8 #include <linux/module.h> 8 #include <linux/module.h>
9 #include <linux/netfilter/x_tables.h>
9 #include <linux/netfilter_arp/arp_tables.h> 10 #include <linux/netfilter_arp/arp_tables.h>
10 11
11 MODULE_LICENSE("GPL"); 12 MODULE_LICENSE("GPL");
12 MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 13 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
13 MODULE_DESCRIPTION("arptables filter table"); 14 MODULE_DESCRIPTION("arptables filter table");
14 15
15 #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ 16 #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
16 (1 << NF_ARP_FORWARD)) 17 (1 << NF_ARP_FORWARD))
17 18
18 static const struct
19 {
20 struct arpt_replace repl;
21 struct arpt_standard entries[3];
22 struct arpt_error term;
23 } initial_table __net_initdata = {
24 .repl = {
25 .name = "filter",
26 .valid_hooks = FILTER_VALID_HOOKS,
27 .num_entries = 4,
28 .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
29 .hook_entry = {
30 [NF_ARP_IN] = 0,
31 [NF_ARP_OUT] = sizeof(struct arpt_standard),
32 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
33 },
34 .underflow = {
35 [NF_ARP_IN] = 0,
36 [NF_ARP_OUT] = sizeof(struct arpt_standard),
37 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
38 },
39 },
40 .entries = {
41 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
42 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
43 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
44 },
45 .term = ARPT_ERROR_INIT,
46 };
47
48 static const struct xt_table packet_filter = { 19 static const struct xt_table packet_filter = {
49 .name = "filter", 20 .name = "filter",
50 .valid_hooks = FILTER_VALID_HOOKS, 21 .valid_hooks = FILTER_VALID_HOOKS,
51 .me = THIS_MODULE, 22 .me = THIS_MODULE,
52 .af = NFPROTO_ARP, 23 .af = NFPROTO_ARP,
53 .priority = NF_IP_PRI_FILTER, 24 .priority = NF_IP_PRI_FILTER,
54 }; 25 };
55 26
56 /* The work comes in here from netfilter.c */ 27 /* The work comes in here from netfilter.c */
57 static unsigned int 28 static unsigned int
58 arptable_filter_hook(unsigned int hook, struct sk_buff *skb, 29 arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
59 const struct net_device *in, const struct net_device *out, 30 const struct net_device *in, const struct net_device *out,
60 int (*okfn)(struct sk_buff *)) 31 int (*okfn)(struct sk_buff *))
61 { 32 {
62 const struct net *net = dev_net((in != NULL) ? in : out); 33 const struct net *net = dev_net((in != NULL) ? in : out);
63 34
64 return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter); 35 return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
65 } 36 }
66 37
67 static struct nf_hook_ops *arpfilter_ops __read_mostly; 38 static struct nf_hook_ops *arpfilter_ops __read_mostly;
68 39
69 static int __net_init arptable_filter_net_init(struct net *net) 40 static int __net_init arptable_filter_net_init(struct net *net)
70 { 41 {
71 /* Register table */ 42 struct arpt_replace *repl;
43
44 repl = arpt_alloc_initial_table(&packet_filter);
45 if (repl == NULL)
46 return -ENOMEM;
72 net->ipv4.arptable_filter = 47 net->ipv4.arptable_filter =
73 arpt_register_table(net, &packet_filter, &initial_table.repl); 48 arpt_register_table(net, &packet_filter, repl);
49 kfree(repl);
74 if (IS_ERR(net->ipv4.arptable_filter)) 50 if (IS_ERR(net->ipv4.arptable_filter))
75 return PTR_ERR(net->ipv4.arptable_filter); 51 return PTR_ERR(net->ipv4.arptable_filter);
76 return 0; 52 return 0;
77 } 53 }
78 54
79 static void __net_exit arptable_filter_net_exit(struct net *net) 55 static void __net_exit arptable_filter_net_exit(struct net *net)
80 { 56 {
81 arpt_unregister_table(net->ipv4.arptable_filter); 57 arpt_unregister_table(net->ipv4.arptable_filter);
82 } 58 }
83 59
84 static struct pernet_operations arptable_filter_net_ops = { 60 static struct pernet_operations arptable_filter_net_ops = {
85 .init = arptable_filter_net_init, 61 .init = arptable_filter_net_init,
86 .exit = arptable_filter_net_exit, 62 .exit = arptable_filter_net_exit,
87 }; 63 };
88 64
89 static int __init arptable_filter_init(void) 65 static int __init arptable_filter_init(void)
90 { 66 {
91 int ret; 67 int ret;
92 68
93 ret = register_pernet_subsys(&arptable_filter_net_ops); 69 ret = register_pernet_subsys(&arptable_filter_net_ops);
94 if (ret < 0) 70 if (ret < 0)
95 return ret; 71 return ret;
96 72
97 arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook); 73 arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
98 if (IS_ERR(arpfilter_ops)) { 74 if (IS_ERR(arpfilter_ops)) {
99 ret = PTR_ERR(arpfilter_ops); 75 ret = PTR_ERR(arpfilter_ops);
100 goto cleanup_table; 76 goto cleanup_table;
101 } 77 }
102 return ret; 78 return ret;
103 79
104 cleanup_table: 80 cleanup_table:
105 unregister_pernet_subsys(&arptable_filter_net_ops); 81 unregister_pernet_subsys(&arptable_filter_net_ops);
106 return ret; 82 return ret;
107 } 83 }
108 84
109 static void __exit arptable_filter_fini(void) 85 static void __exit arptable_filter_fini(void)
110 { 86 {
111 xt_hook_unlink(&packet_filter, arpfilter_ops); 87 xt_hook_unlink(&packet_filter, arpfilter_ops);
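For reference, the deleted static initial_table above and the new arpt_alloc_initial_table() call describe the same data: an arpt_replace header followed by one accept-all standard entry per valid hook and a terminating error entry. The sketch below shows, under stated assumptions, what such an on-demand allocator has to assemble so that arpt_register_table() can copy it and the caller can kfree() it immediately; the helper name is hypothetical, and the real implementation is the shared xt_alloc_initial_table() macro in the new net/netfilter/xt_repldata.h (included by ip_tables.c further down).

/* Sketch only (hypothetical helper, assumes <linux/slab.h> for kzalloc):
 * builds the same layout as the deleted static initial_table, but in
 * kmalloc'd memory, so the blob can be freed as soon as
 * arpt_register_table() has copied it.
 */
static void *arpt_filter_alloc_initial_sketch(void)
{
	struct {
		struct arpt_replace repl;
		struct arpt_standard entries[3];
		struct arpt_error term;
	} *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);

	if (tbl == NULL)
		return NULL;

	strncpy(tbl->repl.name, "filter", sizeof(tbl->repl.name));
	tbl->repl.valid_hooks = FILTER_VALID_HOOKS;
	tbl->repl.num_entries = 4;	/* three standard entries + error */
	tbl->repl.size = sizeof(tbl->entries) + sizeof(tbl->term);

	/* Same offsets as the removed static table. */
	tbl->repl.hook_entry[NF_ARP_IN]      = 0;
	tbl->repl.hook_entry[NF_ARP_OUT]     = sizeof(struct arpt_standard);
	tbl->repl.hook_entry[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard);
	memcpy(tbl->repl.underflow, tbl->repl.hook_entry,
	       sizeof(tbl->repl.hook_entry));

	tbl->entries[0] = (struct arpt_standard)ARPT_STANDARD_INIT(NF_ACCEPT);
	tbl->entries[1] = (struct arpt_standard)ARPT_STANDARD_INIT(NF_ACCEPT);
	tbl->entries[2] = (struct arpt_standard)ARPT_STANDARD_INIT(NF_ACCEPT);
	tbl->term = (struct arpt_error)ARPT_ERROR_INIT;

	return tbl;	/* caller kfree()s after arpt_register_table() */
}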
net/ipv4/netfilter/ip_tables.c
1 /* 1 /*
2 * Packet matching code. 2 * Packet matching code.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/cache.h> 12 #include <linux/cache.h>
13 #include <linux/capability.h> 13 #include <linux/capability.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <linux/kmod.h> 15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h> 16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h> 17 #include <linux/netdevice.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/icmp.h> 19 #include <linux/icmp.h>
20 #include <net/ip.h> 20 #include <net/ip.h>
21 #include <net/compat.h> 21 #include <net/compat.h>
22 #include <asm/uaccess.h> 22 #include <asm/uaccess.h>
23 #include <linux/mutex.h> 23 #include <linux/mutex.h>
24 #include <linux/proc_fs.h> 24 #include <linux/proc_fs.h>
25 #include <linux/err.h> 25 #include <linux/err.h>
26 #include <linux/cpumask.h> 26 #include <linux/cpumask.h>
27 27
28 #include <linux/netfilter/x_tables.h> 28 #include <linux/netfilter/x_tables.h>
29 #include <linux/netfilter_ipv4/ip_tables.h> 29 #include <linux/netfilter_ipv4/ip_tables.h>
30 #include <net/netfilter/nf_log.h> 30 #include <net/netfilter/nf_log.h>
31 #include "../../netfilter/xt_repldata.h"
31 32
32 MODULE_LICENSE("GPL"); 33 MODULE_LICENSE("GPL");
33 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
34 MODULE_DESCRIPTION("IPv4 packet filter"); 35 MODULE_DESCRIPTION("IPv4 packet filter");
35 36
36 /*#define DEBUG_IP_FIREWALL*/ 37 /*#define DEBUG_IP_FIREWALL*/
37 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ 38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
38 /*#define DEBUG_IP_FIREWALL_USER*/ 39 /*#define DEBUG_IP_FIREWALL_USER*/
39 40
40 #ifdef DEBUG_IP_FIREWALL 41 #ifdef DEBUG_IP_FIREWALL
41 #define dprintf(format, args...) printk(format , ## args) 42 #define dprintf(format, args...) printk(format , ## args)
42 #else 43 #else
43 #define dprintf(format, args...) 44 #define dprintf(format, args...)
44 #endif 45 #endif
45 46
46 #ifdef DEBUG_IP_FIREWALL_USER 47 #ifdef DEBUG_IP_FIREWALL_USER
47 #define duprintf(format, args...) printk(format , ## args) 48 #define duprintf(format, args...) printk(format , ## args)
48 #else 49 #else
49 #define duprintf(format, args...) 50 #define duprintf(format, args...)
50 #endif 51 #endif
51 52
52 #ifdef CONFIG_NETFILTER_DEBUG 53 #ifdef CONFIG_NETFILTER_DEBUG
53 #define IP_NF_ASSERT(x) \ 54 #define IP_NF_ASSERT(x) \
54 do { \ 55 do { \
55 if (!(x)) \ 56 if (!(x)) \
56 printk("IP_NF_ASSERT: %s:%s:%u\n", \ 57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
57 __func__, __FILE__, __LINE__); \ 58 __func__, __FILE__, __LINE__); \
58 } while(0) 59 } while(0)
59 #else 60 #else
60 #define IP_NF_ASSERT(x) 61 #define IP_NF_ASSERT(x)
61 #endif 62 #endif
62 63
63 #if 0 64 #if 0
64 /* All the better to debug you with... */ 65 /* All the better to debug you with... */
65 #define static 66 #define static
66 #define inline 67 #define inline
67 #endif 68 #endif
69
70 void *ipt_alloc_initial_table(const struct xt_table *info)
71 {
72 return xt_alloc_initial_table(ipt, IPT);
73 }
74 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
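ipt_alloc_initial_table() above is the IPv4 wrapper for the xt_alloc_initial_table() macro pulled in through the new ../../netfilter/xt_repldata.h include, so individual table modules no longer need a static initial table of their own. A hedged sketch of how a table module's pernet init would consume it is shown below; it mirrors arptable_filter_net_init() from the previous file, and the packet_filter table plus the net->ipv4.iptable_filter pointer stand in for the corresponding iptable_filter.c hunk of this commit, which is not shown in this excerpt.

/* Sketch: pernet init of an iptables table module using the new helper.
 * Mirrors arptable_filter_net_init() above; packet_filter and
 * net->ipv4.iptable_filter are assumed names for the filter table module.
 */
static int __net_init iptable_filter_net_init_sketch(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.iptable_filter =
		ipt_register_table(net, &packet_filter, repl);
	/* ipt_register_table() copies the blob, so it is freed right away. */
	kfree(repl);
	if (IS_ERR(net->ipv4.iptable_filter))
		return PTR_ERR(net->ipv4.iptable_filter);
	return 0;
}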
68 75
69 /* 76 /*
70 We keep a set of rules for each CPU, so we can avoid write-locking 77 We keep a set of rules for each CPU, so we can avoid write-locking
71 them in the softirq when updating the counters and therefore 78 them in the softirq when updating the counters and therefore
72 only need to read-lock in the softirq; doing a write_lock_bh() in user 79 only need to read-lock in the softirq; doing a write_lock_bh() in user
73 context stops packets coming through and allows user context to read 80 context stops packets coming through and allows user context to read
74 the counters or update the rules. 81 the counters or update the rules.
75 82
76 Hence the start of any table is given by get_table() below. */ 83 Hence the start of any table is given by get_table() below. */
77 84
78 /* Returns whether matches rule or not. */ 85 /* Returns whether matches rule or not. */
79 /* Performance critical - called for every packet */ 86 /* Performance critical - called for every packet */
80 static inline bool 87 static inline bool
81 ip_packet_match(const struct iphdr *ip, 88 ip_packet_match(const struct iphdr *ip,
82 const char *indev, 89 const char *indev,
83 const char *outdev, 90 const char *outdev,
84 const struct ipt_ip *ipinfo, 91 const struct ipt_ip *ipinfo,
85 int isfrag) 92 int isfrag)
86 { 93 {
87 unsigned long ret; 94 unsigned long ret;
88 95
89 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) 96 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
90 97
91 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, 98 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
92 IPT_INV_SRCIP) || 99 IPT_INV_SRCIP) ||
93 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, 100 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
94 IPT_INV_DSTIP)) { 101 IPT_INV_DSTIP)) {
95 dprintf("Source or dest mismatch.\n"); 102 dprintf("Source or dest mismatch.\n");
96 103
97 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", 104 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
98 &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, 105 &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); 106 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", 107 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
101 &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, 108 &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
102 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); 109 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
103 return false; 110 return false;
104 } 111 }
105 112
106 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); 113 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
107 114
108 if (FWINV(ret != 0, IPT_INV_VIA_IN)) { 115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
109 dprintf("VIA in mismatch (%s vs %s).%s\n", 116 dprintf("VIA in mismatch (%s vs %s).%s\n",
110 indev, ipinfo->iniface, 117 indev, ipinfo->iniface,
111 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":""); 118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
112 return false; 119 return false;
113 } 120 }
114 121
115 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); 122 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
116 123
117 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { 124 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
118 dprintf("VIA out mismatch (%s vs %s).%s\n", 125 dprintf("VIA out mismatch (%s vs %s).%s\n",
119 outdev, ipinfo->outiface, 126 outdev, ipinfo->outiface,
120 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":""); 127 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
121 return false; 128 return false;
122 } 129 }
123 130
124 /* Check specific protocol */ 131 /* Check specific protocol */
125 if (ipinfo->proto && 132 if (ipinfo->proto &&
126 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { 133 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
127 dprintf("Packet protocol %hi does not match %hi.%s\n", 134 dprintf("Packet protocol %hi does not match %hi.%s\n",
128 ip->protocol, ipinfo->proto, 135 ip->protocol, ipinfo->proto,
129 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":""); 136 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
130 return false; 137 return false;
131 } 138 }
132 139
133 /* If we have a fragment rule but the packet is not a fragment 140 /* If we have a fragment rule but the packet is not a fragment
134 * then we return zero */ 141 * then we return zero */
135 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { 142 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
136 dprintf("Fragment rule but not fragment.%s\n", 143 dprintf("Fragment rule but not fragment.%s\n",
137 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : ""); 144 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
138 return false; 145 return false;
139 } 146 }
140 147
141 return true; 148 return true;
142 } 149 }
143 150
144 static bool 151 static bool
145 ip_checkentry(const struct ipt_ip *ip) 152 ip_checkentry(const struct ipt_ip *ip)
146 { 153 {
147 if (ip->flags & ~IPT_F_MASK) { 154 if (ip->flags & ~IPT_F_MASK) {
148 duprintf("Unknown flag bits set: %08X\n", 155 duprintf("Unknown flag bits set: %08X\n",
149 ip->flags & ~IPT_F_MASK); 156 ip->flags & ~IPT_F_MASK);
150 return false; 157 return false;
151 } 158 }
152 if (ip->invflags & ~IPT_INV_MASK) { 159 if (ip->invflags & ~IPT_INV_MASK) {
153 duprintf("Unknown invflag bits set: %08X\n", 160 duprintf("Unknown invflag bits set: %08X\n",
154 ip->invflags & ~IPT_INV_MASK); 161 ip->invflags & ~IPT_INV_MASK);
155 return false; 162 return false;
156 } 163 }
157 return true; 164 return true;
158 } 165 }
159 166
160 static unsigned int 167 static unsigned int
161 ipt_error(struct sk_buff *skb, const struct xt_target_param *par) 168 ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
162 { 169 {
163 if (net_ratelimit()) 170 if (net_ratelimit())
164 printk("ip_tables: error: `%s'\n", 171 printk("ip_tables: error: `%s'\n",
165 (const char *)par->targinfo); 172 (const char *)par->targinfo);
166 173
167 return NF_DROP; 174 return NF_DROP;
168 } 175 }
169 176
170 /* Performance critical - called for every packet */ 177 /* Performance critical - called for every packet */
171 static inline bool 178 static inline bool
172 do_match(struct ipt_entry_match *m, const struct sk_buff *skb, 179 do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
173 struct xt_match_param *par) 180 struct xt_match_param *par)
174 { 181 {
175 par->match = m->u.kernel.match; 182 par->match = m->u.kernel.match;
176 par->matchinfo = m->data; 183 par->matchinfo = m->data;
177 184
178 /* Stop iteration if it doesn't match */ 185 /* Stop iteration if it doesn't match */
179 if (!m->u.kernel.match->match(skb, par)) 186 if (!m->u.kernel.match->match(skb, par))
180 return true; 187 return true;
181 else 188 else
182 return false; 189 return false;
183 } 190 }
184 191
185 /* Performance critical */ 192 /* Performance critical */
186 static inline struct ipt_entry * 193 static inline struct ipt_entry *
187 get_entry(void *base, unsigned int offset) 194 get_entry(void *base, unsigned int offset)
188 { 195 {
189 return (struct ipt_entry *)(base + offset); 196 return (struct ipt_entry *)(base + offset);
190 } 197 }
191 198
192 /* All zeroes == unconditional rule. */ 199 /* All zeroes == unconditional rule. */
193 /* Mildly perf critical (only if packet tracing is on) */ 200 /* Mildly perf critical (only if packet tracing is on) */
194 static inline bool unconditional(const struct ipt_ip *ip) 201 static inline bool unconditional(const struct ipt_ip *ip)
195 { 202 {
196 static const struct ipt_ip uncond; 203 static const struct ipt_ip uncond;
197 204
198 return memcmp(ip, &uncond, sizeof(uncond)) == 0; 205 return memcmp(ip, &uncond, sizeof(uncond)) == 0;
199 #undef FWINV 206 #undef FWINV
200 } 207 }
201 208
202 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 209 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
203 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 210 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
204 static const char *const hooknames[] = { 211 static const char *const hooknames[] = {
205 [NF_INET_PRE_ROUTING] = "PREROUTING", 212 [NF_INET_PRE_ROUTING] = "PREROUTING",
206 [NF_INET_LOCAL_IN] = "INPUT", 213 [NF_INET_LOCAL_IN] = "INPUT",
207 [NF_INET_FORWARD] = "FORWARD", 214 [NF_INET_FORWARD] = "FORWARD",
208 [NF_INET_LOCAL_OUT] = "OUTPUT", 215 [NF_INET_LOCAL_OUT] = "OUTPUT",
209 [NF_INET_POST_ROUTING] = "POSTROUTING", 216 [NF_INET_POST_ROUTING] = "POSTROUTING",
210 }; 217 };
211 218
212 enum nf_ip_trace_comments { 219 enum nf_ip_trace_comments {
213 NF_IP_TRACE_COMMENT_RULE, 220 NF_IP_TRACE_COMMENT_RULE,
214 NF_IP_TRACE_COMMENT_RETURN, 221 NF_IP_TRACE_COMMENT_RETURN,
215 NF_IP_TRACE_COMMENT_POLICY, 222 NF_IP_TRACE_COMMENT_POLICY,
216 }; 223 };
217 224
218 static const char *const comments[] = { 225 static const char *const comments[] = {
219 [NF_IP_TRACE_COMMENT_RULE] = "rule", 226 [NF_IP_TRACE_COMMENT_RULE] = "rule",
220 [NF_IP_TRACE_COMMENT_RETURN] = "return", 227 [NF_IP_TRACE_COMMENT_RETURN] = "return",
221 [NF_IP_TRACE_COMMENT_POLICY] = "policy", 228 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
222 }; 229 };
223 230
224 static struct nf_loginfo trace_loginfo = { 231 static struct nf_loginfo trace_loginfo = {
225 .type = NF_LOG_TYPE_LOG, 232 .type = NF_LOG_TYPE_LOG,
226 .u = { 233 .u = {
227 .log = { 234 .log = {
228 .level = 4, 235 .level = 4,
229 .logflags = NF_LOG_MASK, 236 .logflags = NF_LOG_MASK,
230 }, 237 },
231 }, 238 },
232 }; 239 };
233 240
234 /* Mildly perf critical (only if packet tracing is on) */ 241 /* Mildly perf critical (only if packet tracing is on) */
235 static inline int 242 static inline int
236 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, 243 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
237 const char *hookname, const char **chainname, 244 const char *hookname, const char **chainname,
238 const char **comment, unsigned int *rulenum) 245 const char **comment, unsigned int *rulenum)
239 { 246 {
240 struct ipt_standard_target *t = (void *)ipt_get_target(s); 247 struct ipt_standard_target *t = (void *)ipt_get_target(s);
241 248
242 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { 249 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
243 /* Head of user chain: ERROR target with chainname */ 250 /* Head of user chain: ERROR target with chainname */
244 *chainname = t->target.data; 251 *chainname = t->target.data;
245 (*rulenum) = 0; 252 (*rulenum) = 0;
246 } else if (s == e) { 253 } else if (s == e) {
247 (*rulenum)++; 254 (*rulenum)++;
248 255
249 if (s->target_offset == sizeof(struct ipt_entry) && 256 if (s->target_offset == sizeof(struct ipt_entry) &&
250 strcmp(t->target.u.kernel.target->name, 257 strcmp(t->target.u.kernel.target->name,
251 IPT_STANDARD_TARGET) == 0 && 258 IPT_STANDARD_TARGET) == 0 &&
252 t->verdict < 0 && 259 t->verdict < 0 &&
253 unconditional(&s->ip)) { 260 unconditional(&s->ip)) {
254 /* Tail of chains: STANDARD target (return/policy) */ 261 /* Tail of chains: STANDARD target (return/policy) */
255 *comment = *chainname == hookname 262 *comment = *chainname == hookname
256 ? comments[NF_IP_TRACE_COMMENT_POLICY] 263 ? comments[NF_IP_TRACE_COMMENT_POLICY]
257 : comments[NF_IP_TRACE_COMMENT_RETURN]; 264 : comments[NF_IP_TRACE_COMMENT_RETURN];
258 } 265 }
259 return 1; 266 return 1;
260 } else 267 } else
261 (*rulenum)++; 268 (*rulenum)++;
262 269
263 return 0; 270 return 0;
264 } 271 }
265 272
266 static void trace_packet(struct sk_buff *skb, 273 static void trace_packet(struct sk_buff *skb,
267 unsigned int hook, 274 unsigned int hook,
268 const struct net_device *in, 275 const struct net_device *in,
269 const struct net_device *out, 276 const struct net_device *out,
270 const char *tablename, 277 const char *tablename,
271 struct xt_table_info *private, 278 struct xt_table_info *private,
272 struct ipt_entry *e) 279 struct ipt_entry *e)
273 { 280 {
274 void *table_base; 281 void *table_base;
275 const struct ipt_entry *root; 282 const struct ipt_entry *root;
276 const char *hookname, *chainname, *comment; 283 const char *hookname, *chainname, *comment;
277 unsigned int rulenum = 0; 284 unsigned int rulenum = 0;
278 285
279 table_base = private->entries[smp_processor_id()]; 286 table_base = private->entries[smp_processor_id()];
280 root = get_entry(table_base, private->hook_entry[hook]); 287 root = get_entry(table_base, private->hook_entry[hook]);
281 288
282 hookname = chainname = hooknames[hook]; 289 hookname = chainname = hooknames[hook];
283 comment = comments[NF_IP_TRACE_COMMENT_RULE]; 290 comment = comments[NF_IP_TRACE_COMMENT_RULE];
284 291
285 IPT_ENTRY_ITERATE(root, 292 IPT_ENTRY_ITERATE(root,
286 private->size - private->hook_entry[hook], 293 private->size - private->hook_entry[hook],
287 get_chainname_rulenum, 294 get_chainname_rulenum,
288 e, hookname, &chainname, &comment, &rulenum); 295 e, hookname, &chainname, &comment, &rulenum);
289 296
290 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, 297 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
291 "TRACE: %s:%s:%s:%u ", 298 "TRACE: %s:%s:%s:%u ",
292 tablename, chainname, comment, rulenum); 299 tablename, chainname, comment, rulenum);
293 } 300 }
294 #endif 301 #endif
295 302
296 static inline __pure 303 static inline __pure
297 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) 304 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
298 { 305 {
299 return (void *)entry + entry->next_offset; 306 return (void *)entry + entry->next_offset;
300 } 307 }
301 308
302 /* Returns one of the generic firewall policies, like NF_ACCEPT. */ 309 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
303 unsigned int 310 unsigned int
304 ipt_do_table(struct sk_buff *skb, 311 ipt_do_table(struct sk_buff *skb,
305 unsigned int hook, 312 unsigned int hook,
306 const struct net_device *in, 313 const struct net_device *in,
307 const struct net_device *out, 314 const struct net_device *out,
308 struct xt_table *table) 315 struct xt_table *table)
309 { 316 {
310 #define tb_comefrom ((struct ipt_entry *)table_base)->comefrom 317 #define tb_comefrom ((struct ipt_entry *)table_base)->comefrom
311 318
312 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 319 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
313 const struct iphdr *ip; 320 const struct iphdr *ip;
314 bool hotdrop = false; 321 bool hotdrop = false;
315 /* Initializing verdict to NF_DROP keeps gcc happy. */ 322 /* Initializing verdict to NF_DROP keeps gcc happy. */
316 unsigned int verdict = NF_DROP; 323 unsigned int verdict = NF_DROP;
317 const char *indev, *outdev; 324 const char *indev, *outdev;
318 void *table_base; 325 void *table_base;
319 struct ipt_entry *e, *back; 326 struct ipt_entry *e, *back;
320 struct xt_table_info *private; 327 struct xt_table_info *private;
321 struct xt_match_param mtpar; 328 struct xt_match_param mtpar;
322 struct xt_target_param tgpar; 329 struct xt_target_param tgpar;
323 330
324 /* Initialization */ 331 /* Initialization */
325 ip = ip_hdr(skb); 332 ip = ip_hdr(skb);
326 indev = in ? in->name : nulldevname; 333 indev = in ? in->name : nulldevname;
327 outdev = out ? out->name : nulldevname; 334 outdev = out ? out->name : nulldevname;
328 /* We handle fragments by dealing with the first fragment as 335 /* We handle fragments by dealing with the first fragment as
329 * if it was a normal packet. All other fragments are treated 336 * if it was a normal packet. All other fragments are treated
330 * normally, except that they will NEVER match rules that ask 337 * normally, except that they will NEVER match rules that ask
331 * things we don't know (i.e. tcp syn flag or ports). If the 338 * things we don't know (i.e. tcp syn flag or ports). If the
332 * rule is also a fragment-specific rule, non-fragments won't 339 * rule is also a fragment-specific rule, non-fragments won't
333 * match it. */ 340 * match it. */
334 mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; 341 mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
335 mtpar.thoff = ip_hdrlen(skb); 342 mtpar.thoff = ip_hdrlen(skb);
336 mtpar.hotdrop = &hotdrop; 343 mtpar.hotdrop = &hotdrop;
337 mtpar.in = tgpar.in = in; 344 mtpar.in = tgpar.in = in;
338 mtpar.out = tgpar.out = out; 345 mtpar.out = tgpar.out = out;
339 mtpar.family = tgpar.family = NFPROTO_IPV4; 346 mtpar.family = tgpar.family = NFPROTO_IPV4;
340 mtpar.hooknum = tgpar.hooknum = hook; 347 mtpar.hooknum = tgpar.hooknum = hook;
341 348
342 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 349 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
343 xt_info_rdlock_bh(); 350 xt_info_rdlock_bh();
344 private = table->private; 351 private = table->private;
345 table_base = private->entries[smp_processor_id()]; 352 table_base = private->entries[smp_processor_id()];
346 353
347 e = get_entry(table_base, private->hook_entry[hook]); 354 e = get_entry(table_base, private->hook_entry[hook]);
348 355
349 /* For return from builtin chain */ 356 /* For return from builtin chain */
350 back = get_entry(table_base, private->underflow[hook]); 357 back = get_entry(table_base, private->underflow[hook]);
351 358
352 do { 359 do {
353 struct ipt_entry_target *t; 360 struct ipt_entry_target *t;
354 361
355 IP_NF_ASSERT(e); 362 IP_NF_ASSERT(e);
356 IP_NF_ASSERT(back); 363 IP_NF_ASSERT(back);
357 if (!ip_packet_match(ip, indev, outdev, 364 if (!ip_packet_match(ip, indev, outdev,
358 &e->ip, mtpar.fragoff) || 365 &e->ip, mtpar.fragoff) ||
359 IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 366 IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
360 e = ipt_next_entry(e); 367 e = ipt_next_entry(e);
361 continue; 368 continue;
362 } 369 }
363 370
364 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); 371 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
365 372
366 t = ipt_get_target(e); 373 t = ipt_get_target(e);
367 IP_NF_ASSERT(t->u.kernel.target); 374 IP_NF_ASSERT(t->u.kernel.target);
368 375
369 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 376 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
370 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 377 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
371 /* The packet is traced: log it */ 378 /* The packet is traced: log it */
372 if (unlikely(skb->nf_trace)) 379 if (unlikely(skb->nf_trace))
373 trace_packet(skb, hook, in, out, 380 trace_packet(skb, hook, in, out,
374 table->name, private, e); 381 table->name, private, e);
375 #endif 382 #endif
376 /* Standard target? */ 383 /* Standard target? */
377 if (!t->u.kernel.target->target) { 384 if (!t->u.kernel.target->target) {
378 int v; 385 int v;
379 386
380 v = ((struct ipt_standard_target *)t)->verdict; 387 v = ((struct ipt_standard_target *)t)->verdict;
381 if (v < 0) { 388 if (v < 0) {
382 /* Pop from stack? */ 389 /* Pop from stack? */
383 if (v != IPT_RETURN) { 390 if (v != IPT_RETURN) {
384 verdict = (unsigned)(-v) - 1; 391 verdict = (unsigned)(-v) - 1;
385 break; 392 break;
386 } 393 }
387 e = back; 394 e = back;
388 back = get_entry(table_base, back->comefrom); 395 back = get_entry(table_base, back->comefrom);
389 continue; 396 continue;
390 } 397 }
391 if (table_base + v != ipt_next_entry(e) && 398 if (table_base + v != ipt_next_entry(e) &&
392 !(e->ip.flags & IPT_F_GOTO)) { 399 !(e->ip.flags & IPT_F_GOTO)) {
393 /* Save old back ptr in next entry */ 400 /* Save old back ptr in next entry */
394 struct ipt_entry *next = ipt_next_entry(e); 401 struct ipt_entry *next = ipt_next_entry(e);
395 next->comefrom = (void *)back - table_base; 402 next->comefrom = (void *)back - table_base;
396 /* set back pointer to next entry */ 403 /* set back pointer to next entry */
397 back = next; 404 back = next;
398 } 405 }
399 406
400 e = get_entry(table_base, v); 407 e = get_entry(table_base, v);
401 continue; 408 continue;
402 } 409 }
403 410
404 /* Targets which reenter must return 411 /* Targets which reenter must return
405 abs. verdicts */ 412 abs. verdicts */
406 tgpar.target = t->u.kernel.target; 413 tgpar.target = t->u.kernel.target;
407 tgpar.targinfo = t->data; 414 tgpar.targinfo = t->data;
408 415
409 416
410 #ifdef CONFIG_NETFILTER_DEBUG 417 #ifdef CONFIG_NETFILTER_DEBUG
411 tb_comefrom = 0xeeeeeeec; 418 tb_comefrom = 0xeeeeeeec;
412 #endif 419 #endif
413 verdict = t->u.kernel.target->target(skb, &tgpar); 420 verdict = t->u.kernel.target->target(skb, &tgpar);
414 #ifdef CONFIG_NETFILTER_DEBUG 421 #ifdef CONFIG_NETFILTER_DEBUG
415 if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) { 422 if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
416 printk("Target %s reentered!\n", 423 printk("Target %s reentered!\n",
417 t->u.kernel.target->name); 424 t->u.kernel.target->name);
418 verdict = NF_DROP; 425 verdict = NF_DROP;
419 } 426 }
420 tb_comefrom = 0x57acc001; 427 tb_comefrom = 0x57acc001;
421 #endif 428 #endif
422 /* Target might have changed stuff. */ 429 /* Target might have changed stuff. */
423 ip = ip_hdr(skb); 430 ip = ip_hdr(skb);
424 if (verdict == IPT_CONTINUE) 431 if (verdict == IPT_CONTINUE)
425 e = ipt_next_entry(e); 432 e = ipt_next_entry(e);
426 else 433 else
427 /* Verdict */ 434 /* Verdict */
428 break; 435 break;
429 } while (!hotdrop); 436 } while (!hotdrop);
430 xt_info_rdunlock_bh(); 437 xt_info_rdunlock_bh();
431 438
432 #ifdef DEBUG_ALLOW_ALL 439 #ifdef DEBUG_ALLOW_ALL
433 return NF_ACCEPT; 440 return NF_ACCEPT;
434 #else 441 #else
435 if (hotdrop) 442 if (hotdrop)
436 return NF_DROP; 443 return NF_DROP;
437 else return verdict; 444 else return verdict;
438 #endif 445 #endif
439 446
440 #undef tb_comefrom 447 #undef tb_comefrom
441 } 448 }
442 449
443 /* Figures out from what hook each rule can be called: returns 0 if 450 /* Figures out from what hook each rule can be called: returns 0 if
444 there are loops. Puts hook bitmask in comefrom. */ 451 there are loops. Puts hook bitmask in comefrom. */
445 static int 452 static int
446 mark_source_chains(struct xt_table_info *newinfo, 453 mark_source_chains(struct xt_table_info *newinfo,
447 unsigned int valid_hooks, void *entry0) 454 unsigned int valid_hooks, void *entry0)
448 { 455 {
449 unsigned int hook; 456 unsigned int hook;
450 457
451 /* No recursion; use packet counter to save back ptrs (reset 458 /* No recursion; use packet counter to save back ptrs (reset
452 to 0 as we leave), and comefrom to save source hook bitmask */ 459 to 0 as we leave), and comefrom to save source hook bitmask */
453 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { 460 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
454 unsigned int pos = newinfo->hook_entry[hook]; 461 unsigned int pos = newinfo->hook_entry[hook];
455 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos); 462 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
456 463
457 if (!(valid_hooks & (1 << hook))) 464 if (!(valid_hooks & (1 << hook)))
458 continue; 465 continue;
459 466
460 /* Set initial back pointer. */ 467 /* Set initial back pointer. */
461 e->counters.pcnt = pos; 468 e->counters.pcnt = pos;
462 469
463 for (;;) { 470 for (;;) {
464 struct ipt_standard_target *t 471 struct ipt_standard_target *t
465 = (void *)ipt_get_target(e); 472 = (void *)ipt_get_target(e);
466 int visited = e->comefrom & (1 << hook); 473 int visited = e->comefrom & (1 << hook);
467 474
468 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 475 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
469 printk("iptables: loop hook %u pos %u %08X.\n", 476 printk("iptables: loop hook %u pos %u %08X.\n",
470 hook, pos, e->comefrom); 477 hook, pos, e->comefrom);
471 return 0; 478 return 0;
472 } 479 }
473 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); 480 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
474 481
475 /* Unconditional return/END. */ 482 /* Unconditional return/END. */
476 if ((e->target_offset == sizeof(struct ipt_entry) && 483 if ((e->target_offset == sizeof(struct ipt_entry) &&
477 (strcmp(t->target.u.user.name, 484 (strcmp(t->target.u.user.name,
478 IPT_STANDARD_TARGET) == 0) && 485 IPT_STANDARD_TARGET) == 0) &&
479 t->verdict < 0 && unconditional(&e->ip)) || 486 t->verdict < 0 && unconditional(&e->ip)) ||
480 visited) { 487 visited) {
481 unsigned int oldpos, size; 488 unsigned int oldpos, size;
482 489
483 if ((strcmp(t->target.u.user.name, 490 if ((strcmp(t->target.u.user.name,
484 IPT_STANDARD_TARGET) == 0) && 491 IPT_STANDARD_TARGET) == 0) &&
485 t->verdict < -NF_MAX_VERDICT - 1) { 492 t->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("mark_source_chains: bad " 493 duprintf("mark_source_chains: bad "
487 "negative verdict (%i)\n", 494 "negative verdict (%i)\n",
488 t->verdict); 495 t->verdict);
489 return 0; 496 return 0;
490 } 497 }
491 498
492 /* Return: backtrack through the last 499 /* Return: backtrack through the last
493 big jump. */ 500 big jump. */
494 do { 501 do {
495 e->comefrom ^= (1<<NF_INET_NUMHOOKS); 502 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
496 #ifdef DEBUG_IP_FIREWALL_USER 503 #ifdef DEBUG_IP_FIREWALL_USER
497 if (e->comefrom 504 if (e->comefrom
498 & (1 << NF_INET_NUMHOOKS)) { 505 & (1 << NF_INET_NUMHOOKS)) {
499 duprintf("Back unset " 506 duprintf("Back unset "
500 "on hook %u " 507 "on hook %u "
501 "rule %u\n", 508 "rule %u\n",
502 hook, pos); 509 hook, pos);
503 } 510 }
504 #endif 511 #endif
505 oldpos = pos; 512 oldpos = pos;
506 pos = e->counters.pcnt; 513 pos = e->counters.pcnt;
507 e->counters.pcnt = 0; 514 e->counters.pcnt = 0;
508 515
509 /* We're at the start. */ 516 /* We're at the start. */
510 if (pos == oldpos) 517 if (pos == oldpos)
511 goto next; 518 goto next;
512 519
513 e = (struct ipt_entry *) 520 e = (struct ipt_entry *)
514 (entry0 + pos); 521 (entry0 + pos);
515 } while (oldpos == pos + e->next_offset); 522 } while (oldpos == pos + e->next_offset);
516 523
517 /* Move along one */ 524 /* Move along one */
518 size = e->next_offset; 525 size = e->next_offset;
519 e = (struct ipt_entry *) 526 e = (struct ipt_entry *)
520 (entry0 + pos + size); 527 (entry0 + pos + size);
521 e->counters.pcnt = pos; 528 e->counters.pcnt = pos;
522 pos += size; 529 pos += size;
523 } else { 530 } else {
524 int newpos = t->verdict; 531 int newpos = t->verdict;
525 532
526 if (strcmp(t->target.u.user.name, 533 if (strcmp(t->target.u.user.name,
527 IPT_STANDARD_TARGET) == 0 && 534 IPT_STANDARD_TARGET) == 0 &&
528 newpos >= 0) { 535 newpos >= 0) {
529 if (newpos > newinfo->size - 536 if (newpos > newinfo->size -
530 sizeof(struct ipt_entry)) { 537 sizeof(struct ipt_entry)) {
531 duprintf("mark_source_chains: " 538 duprintf("mark_source_chains: "
532 "bad verdict (%i)\n", 539 "bad verdict (%i)\n",
533 newpos); 540 newpos);
534 return 0; 541 return 0;
535 } 542 }
536 /* This is a jump; chase it. */ 543 /* This is a jump; chase it. */
537 duprintf("Jump rule %u -> %u\n", 544 duprintf("Jump rule %u -> %u\n",
538 pos, newpos); 545 pos, newpos);
539 } else { 546 } else {
540 /* ... this is a fallthru */ 547 /* ... this is a fallthru */
541 newpos = pos + e->next_offset; 548 newpos = pos + e->next_offset;
542 } 549 }
543 e = (struct ipt_entry *) 550 e = (struct ipt_entry *)
544 (entry0 + newpos); 551 (entry0 + newpos);
545 e->counters.pcnt = pos; 552 e->counters.pcnt = pos;
546 pos = newpos; 553 pos = newpos;
547 } 554 }
548 } 555 }
549 next: 556 next:
550 duprintf("Finished chain %u\n", hook); 557 duprintf("Finished chain %u\n", hook);
551 } 558 }
552 return 1; 559 return 1;
553 } 560 }
554 561
555 static int 562 static int
556 cleanup_match(struct ipt_entry_match *m, struct net *net, unsigned int *i) 563 cleanup_match(struct ipt_entry_match *m, struct net *net, unsigned int *i)
557 { 564 {
558 struct xt_mtdtor_param par; 565 struct xt_mtdtor_param par;
559 566
560 if (i && (*i)-- == 0) 567 if (i && (*i)-- == 0)
561 return 1; 568 return 1;
562 569
563 par.net = net; 570 par.net = net;
564 par.match = m->u.kernel.match; 571 par.match = m->u.kernel.match;
565 par.matchinfo = m->data; 572 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV4; 573 par.family = NFPROTO_IPV4;
567 if (par.match->destroy != NULL) 574 if (par.match->destroy != NULL)
568 par.match->destroy(&par); 575 par.match->destroy(&par);
569 module_put(par.match->me); 576 module_put(par.match->me);
570 return 0; 577 return 0;
571 } 578 }
572 579
573 static int 580 static int
574 check_entry(struct ipt_entry *e, const char *name) 581 check_entry(struct ipt_entry *e, const char *name)
575 { 582 {
576 struct ipt_entry_target *t; 583 struct ipt_entry_target *t;
577 584
578 if (!ip_checkentry(&e->ip)) { 585 if (!ip_checkentry(&e->ip)) {
579 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 586 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
580 return -EINVAL; 587 return -EINVAL;
581 } 588 }
582 589
583 if (e->target_offset + sizeof(struct ipt_entry_target) > 590 if (e->target_offset + sizeof(struct ipt_entry_target) >
584 e->next_offset) 591 e->next_offset)
585 return -EINVAL; 592 return -EINVAL;
586 593
587 t = ipt_get_target(e); 594 t = ipt_get_target(e);
588 if (e->target_offset + t->u.target_size > e->next_offset) 595 if (e->target_offset + t->u.target_size > e->next_offset)
589 return -EINVAL; 596 return -EINVAL;
590 597
591 return 0; 598 return 0;
592 } 599 }
593 600
594 static int 601 static int
595 check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 602 check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
596 unsigned int *i) 603 unsigned int *i)
597 { 604 {
598 const struct ipt_ip *ip = par->entryinfo; 605 const struct ipt_ip *ip = par->entryinfo;
599 int ret; 606 int ret;
600 607
601 par->match = m->u.kernel.match; 608 par->match = m->u.kernel.match;
602 par->matchinfo = m->data; 609 par->matchinfo = m->data;
603 610
604 ret = xt_check_match(par, m->u.match_size - sizeof(*m), 611 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
605 ip->proto, ip->invflags & IPT_INV_PROTO); 612 ip->proto, ip->invflags & IPT_INV_PROTO);
606 if (ret < 0) { 613 if (ret < 0) {
607 duprintf("ip_tables: check failed for `%s'.\n", 614 duprintf("ip_tables: check failed for `%s'.\n",
608 par->match->name); 615 par->match->name);
609 return ret; 616 return ret;
610 } 617 }
611 ++*i; 618 ++*i;
612 return 0; 619 return 0;
613 } 620 }
614 621
615 static int 622 static int
616 find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 623 find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
617 unsigned int *i) 624 unsigned int *i)
618 { 625 {
619 struct xt_match *match; 626 struct xt_match *match;
620 int ret; 627 int ret;
621 628
622 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 629 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
623 m->u.user.revision), 630 m->u.user.revision),
624 "ipt_%s", m->u.user.name); 631 "ipt_%s", m->u.user.name);
625 if (IS_ERR(match) || !match) { 632 if (IS_ERR(match) || !match) {
626 duprintf("find_check_match: `%s' not found\n", m->u.user.name); 633 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
627 return match ? PTR_ERR(match) : -ENOENT; 634 return match ? PTR_ERR(match) : -ENOENT;
628 } 635 }
629 m->u.kernel.match = match; 636 m->u.kernel.match = match;
630 637
631 ret = check_match(m, par, i); 638 ret = check_match(m, par, i);
632 if (ret) 639 if (ret)
633 goto err; 640 goto err;
634 641
635 return 0; 642 return 0;
636 err: 643 err:
637 module_put(m->u.kernel.match->me); 644 module_put(m->u.kernel.match->me);
638 return ret; 645 return ret;
639 } 646 }
640 647
641 static int check_target(struct ipt_entry *e, struct net *net, const char *name) 648 static int check_target(struct ipt_entry *e, struct net *net, const char *name)
642 { 649 {
643 struct ipt_entry_target *t = ipt_get_target(e); 650 struct ipt_entry_target *t = ipt_get_target(e);
644 struct xt_tgchk_param par = { 651 struct xt_tgchk_param par = {
645 .net = net, 652 .net = net,
646 .table = name, 653 .table = name,
647 .entryinfo = e, 654 .entryinfo = e,
648 .target = t->u.kernel.target, 655 .target = t->u.kernel.target,
649 .targinfo = t->data, 656 .targinfo = t->data,
650 .hook_mask = e->comefrom, 657 .hook_mask = e->comefrom,
651 .family = NFPROTO_IPV4, 658 .family = NFPROTO_IPV4,
652 }; 659 };
653 int ret; 660 int ret;
654 661
655 ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 662 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
656 e->ip.proto, e->ip.invflags & IPT_INV_PROTO); 663 e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
657 if (ret < 0) { 664 if (ret < 0) {
658 duprintf("ip_tables: check failed for `%s'.\n", 665 duprintf("ip_tables: check failed for `%s'.\n",
659 t->u.kernel.target->name); 666 t->u.kernel.target->name);
660 return ret; 667 return ret;
661 } 668 }
662 return 0; 669 return 0;
663 } 670 }
664 671
665 static int 672 static int
666 find_check_entry(struct ipt_entry *e, struct net *net, const char *name, 673 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
667 unsigned int size, unsigned int *i) 674 unsigned int size, unsigned int *i)
668 { 675 {
669 struct ipt_entry_target *t; 676 struct ipt_entry_target *t;
670 struct xt_target *target; 677 struct xt_target *target;
671 int ret; 678 int ret;
672 unsigned int j; 679 unsigned int j;
673 struct xt_mtchk_param mtpar; 680 struct xt_mtchk_param mtpar;
674 681
675 ret = check_entry(e, name); 682 ret = check_entry(e, name);
676 if (ret) 683 if (ret)
677 return ret; 684 return ret;
678 685
679 j = 0; 686 j = 0;
680 mtpar.net = net; 687 mtpar.net = net;
681 mtpar.table = name; 688 mtpar.table = name;
682 mtpar.entryinfo = &e->ip; 689 mtpar.entryinfo = &e->ip;
683 mtpar.hook_mask = e->comefrom; 690 mtpar.hook_mask = e->comefrom;
684 mtpar.family = NFPROTO_IPV4; 691 mtpar.family = NFPROTO_IPV4;
685 ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 692 ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
686 if (ret != 0) 693 if (ret != 0)
687 goto cleanup_matches; 694 goto cleanup_matches;
688 695
689 t = ipt_get_target(e); 696 t = ipt_get_target(e);
690 target = try_then_request_module(xt_find_target(AF_INET, 697 target = try_then_request_module(xt_find_target(AF_INET,
691 t->u.user.name, 698 t->u.user.name,
692 t->u.user.revision), 699 t->u.user.revision),
693 "ipt_%s", t->u.user.name); 700 "ipt_%s", t->u.user.name);
694 if (IS_ERR(target) || !target) { 701 if (IS_ERR(target) || !target) {
695 duprintf("find_check_entry: `%s' not found\n", t->u.user.name); 702 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
696 ret = target ? PTR_ERR(target) : -ENOENT; 703 ret = target ? PTR_ERR(target) : -ENOENT;
697 goto cleanup_matches; 704 goto cleanup_matches;
698 } 705 }
699 t->u.kernel.target = target; 706 t->u.kernel.target = target;
700 707
701 ret = check_target(e, net, name); 708 ret = check_target(e, net, name);
702 if (ret) 709 if (ret)
703 goto err; 710 goto err;
704 711
705 (*i)++; 712 (*i)++;
706 return 0; 713 return 0;
707 err: 714 err:
708 module_put(t->u.kernel.target->me); 715 module_put(t->u.kernel.target->me);
709 cleanup_matches: 716 cleanup_matches:
710 IPT_MATCH_ITERATE(e, cleanup_match, net, &j); 717 IPT_MATCH_ITERATE(e, cleanup_match, net, &j);
711 return ret; 718 return ret;
712 } 719 }
713 720
714 static bool check_underflow(struct ipt_entry *e) 721 static bool check_underflow(struct ipt_entry *e)
715 { 722 {
716 const struct ipt_entry_target *t; 723 const struct ipt_entry_target *t;
717 unsigned int verdict; 724 unsigned int verdict;
718 725
719 if (!unconditional(&e->ip)) 726 if (!unconditional(&e->ip))
720 return false; 727 return false;
721 t = ipt_get_target(e); 728 t = ipt_get_target(e);
722 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 729 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
723 return false; 730 return false;
724 verdict = ((struct ipt_standard_target *)t)->verdict; 731 verdict = ((struct ipt_standard_target *)t)->verdict;
725 verdict = -verdict - 1; 732 verdict = -verdict - 1;
726 return verdict == NF_DROP || verdict == NF_ACCEPT; 733 return verdict == NF_DROP || verdict == NF_ACCEPT;
727 } 734 }
728 735
729 static int 736 static int
730 check_entry_size_and_hooks(struct ipt_entry *e, 737 check_entry_size_and_hooks(struct ipt_entry *e,
731 struct xt_table_info *newinfo, 738 struct xt_table_info *newinfo,
732 unsigned char *base, 739 unsigned char *base,
733 unsigned char *limit, 740 unsigned char *limit,
734 const unsigned int *hook_entries, 741 const unsigned int *hook_entries,
735 const unsigned int *underflows, 742 const unsigned int *underflows,
736 unsigned int valid_hooks, 743 unsigned int valid_hooks,
737 unsigned int *i) 744 unsigned int *i)
738 { 745 {
739 unsigned int h; 746 unsigned int h;
740 747
741 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || 748 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
742 (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { 749 (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
743 duprintf("Bad offset %p\n", e); 750 duprintf("Bad offset %p\n", e);
744 return -EINVAL; 751 return -EINVAL;
745 } 752 }
746 753
747 if (e->next_offset 754 if (e->next_offset
748 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) { 755 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
749 duprintf("checking: element %p size %u\n", 756 duprintf("checking: element %p size %u\n",
750 e, e->next_offset); 757 e, e->next_offset);
751 return -EINVAL; 758 return -EINVAL;
752 } 759 }
753 760
754 /* Check hooks & underflows */ 761 /* Check hooks & underflows */
755 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 762 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
756 if (!(valid_hooks & (1 << h))) 763 if (!(valid_hooks & (1 << h)))
757 continue; 764 continue;
758 if ((unsigned char *)e - base == hook_entries[h]) 765 if ((unsigned char *)e - base == hook_entries[h])
759 newinfo->hook_entry[h] = hook_entries[h]; 766 newinfo->hook_entry[h] = hook_entries[h];
760 if ((unsigned char *)e - base == underflows[h]) { 767 if ((unsigned char *)e - base == underflows[h]) {
761 if (!check_underflow(e)) { 768 if (!check_underflow(e)) {
762 pr_err("Underflows must be unconditional and " 769 pr_err("Underflows must be unconditional and "
763 "use the STANDARD target with " 770 "use the STANDARD target with "
764 "ACCEPT/DROP\n"); 771 "ACCEPT/DROP\n");
765 return -EINVAL; 772 return -EINVAL;
766 } 773 }
767 newinfo->underflow[h] = underflows[h]; 774 newinfo->underflow[h] = underflows[h];
768 } 775 }
769 } 776 }
770 777
771 /* Clear counters and comefrom */ 778 /* Clear counters and comefrom */
772 e->counters = ((struct xt_counters) { 0, 0 }); 779 e->counters = ((struct xt_counters) { 0, 0 });
773 e->comefrom = 0; 780 e->comefrom = 0;
774 781
775 (*i)++; 782 (*i)++;
776 return 0; 783 return 0;
777 } 784 }
778 785
779 static int 786 static int
780 cleanup_entry(struct ipt_entry *e, struct net *net, unsigned int *i) 787 cleanup_entry(struct ipt_entry *e, struct net *net, unsigned int *i)
781 { 788 {
782 struct xt_tgdtor_param par; 789 struct xt_tgdtor_param par;
783 struct ipt_entry_target *t; 790 struct ipt_entry_target *t;
784 791
785 if (i && (*i)-- == 0) 792 if (i && (*i)-- == 0)
786 return 1; 793 return 1;
787 794
788 /* Cleanup all matches */ 795 /* Cleanup all matches */
789 IPT_MATCH_ITERATE(e, cleanup_match, net, NULL); 796 IPT_MATCH_ITERATE(e, cleanup_match, net, NULL);
790 t = ipt_get_target(e); 797 t = ipt_get_target(e);
791 798
792 par.net = net; 799 par.net = net;
793 par.target = t->u.kernel.target; 800 par.target = t->u.kernel.target;
794 par.targinfo = t->data; 801 par.targinfo = t->data;
795 par.family = NFPROTO_IPV4; 802 par.family = NFPROTO_IPV4;
796 if (par.target->destroy != NULL) 803 if (par.target->destroy != NULL)
797 par.target->destroy(&par); 804 par.target->destroy(&par);
798 module_put(par.target->me); 805 module_put(par.target->me);
799 return 0; 806 return 0;
800 } 807 }
801 808
802 /* Checks and translates the user-supplied table segment (held in 809 /* Checks and translates the user-supplied table segment (held in
803 newinfo) */ 810 newinfo) */
804 static int 811 static int
805 translate_table(struct net *net, 812 translate_table(struct net *net,
806 const char *name, 813 const char *name,
807 unsigned int valid_hooks, 814 unsigned int valid_hooks,
808 struct xt_table_info *newinfo, 815 struct xt_table_info *newinfo,
809 void *entry0, 816 void *entry0,
810 unsigned int size, 817 unsigned int size,
811 unsigned int number, 818 unsigned int number,
812 const unsigned int *hook_entries, 819 const unsigned int *hook_entries,
813 const unsigned int *underflows) 820 const unsigned int *underflows)
814 { 821 {
815 unsigned int i; 822 unsigned int i;
816 int ret; 823 int ret;
817 824
818 newinfo->size = size; 825 newinfo->size = size;
819 newinfo->number = number; 826 newinfo->number = number;
820 827
821 /* Init all hooks to impossible value. */ 828 /* Init all hooks to impossible value. */
822 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 829 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 newinfo->hook_entry[i] = 0xFFFFFFFF; 830 newinfo->hook_entry[i] = 0xFFFFFFFF;
824 newinfo->underflow[i] = 0xFFFFFFFF; 831 newinfo->underflow[i] = 0xFFFFFFFF;
825 } 832 }
826 833
827 duprintf("translate_table: size %u\n", newinfo->size); 834 duprintf("translate_table: size %u\n", newinfo->size);
828 i = 0; 835 i = 0;
829 /* Walk through entries, checking offsets. */ 836 /* Walk through entries, checking offsets. */
830 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 837 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
831 check_entry_size_and_hooks, 838 check_entry_size_and_hooks,
832 newinfo, 839 newinfo,
833 entry0, 840 entry0,
834 entry0 + size, 841 entry0 + size,
835 hook_entries, underflows, valid_hooks, &i); 842 hook_entries, underflows, valid_hooks, &i);
836 if (ret != 0) 843 if (ret != 0)
837 return ret; 844 return ret;
838 845
839 if (i != number) { 846 if (i != number) {
840 duprintf("translate_table: %u not %u entries\n", 847 duprintf("translate_table: %u not %u entries\n",
841 i, number); 848 i, number);
842 return -EINVAL; 849 return -EINVAL;
843 } 850 }
844 851
845 /* Check hooks all assigned */ 852 /* Check hooks all assigned */
846 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 853 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
847 /* Only hooks which are valid */ 854 /* Only hooks which are valid */
848 if (!(valid_hooks & (1 << i))) 855 if (!(valid_hooks & (1 << i)))
849 continue; 856 continue;
850 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 857 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
851 duprintf("Invalid hook entry %u %u\n", 858 duprintf("Invalid hook entry %u %u\n",
852 i, hook_entries[i]); 859 i, hook_entries[i]);
853 return -EINVAL; 860 return -EINVAL;
854 } 861 }
855 if (newinfo->underflow[i] == 0xFFFFFFFF) { 862 if (newinfo->underflow[i] == 0xFFFFFFFF) {
856 duprintf("Invalid underflow %u %u\n", 863 duprintf("Invalid underflow %u %u\n",
857 i, underflows[i]); 864 i, underflows[i]);
858 return -EINVAL; 865 return -EINVAL;
859 } 866 }
860 } 867 }
861 868
862 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 869 if (!mark_source_chains(newinfo, valid_hooks, entry0))
863 return -ELOOP; 870 return -ELOOP;
864 871
865 /* Finally, each sanity check must pass */ 872 /* Finally, each sanity check must pass */
866 i = 0; 873 i = 0;
867 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 874 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
868 find_check_entry, net, name, size, &i); 875 find_check_entry, net, name, size, &i);
869 876
870 if (ret != 0) { 877 if (ret != 0) {
871 IPT_ENTRY_ITERATE(entry0, newinfo->size, 878 IPT_ENTRY_ITERATE(entry0, newinfo->size,
872 cleanup_entry, net, &i); 879 cleanup_entry, net, &i);
873 return ret; 880 return ret;
874 } 881 }
875 882
876 /* And one copy for every other CPU */ 883 /* And one copy for every other CPU */
877 for_each_possible_cpu(i) { 884 for_each_possible_cpu(i) {
878 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 885 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
879 memcpy(newinfo->entries[i], entry0, newinfo->size); 886 memcpy(newinfo->entries[i], entry0, newinfo->size);
880 } 887 }
881 888
882 return ret; 889 return ret;
883 } 890 }
884 891
885 /* Gets counters. */ 892 /* Gets counters. */
886 static inline int 893 static inline int
887 add_entry_to_counter(const struct ipt_entry *e, 894 add_entry_to_counter(const struct ipt_entry *e,
888 struct xt_counters total[], 895 struct xt_counters total[],
889 unsigned int *i) 896 unsigned int *i)
890 { 897 {
891 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 898 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
892 899
893 (*i)++; 900 (*i)++;
894 return 0; 901 return 0;
895 } 902 }
896 903
897 static inline int 904 static inline int
898 set_entry_to_counter(const struct ipt_entry *e, 905 set_entry_to_counter(const struct ipt_entry *e,
899 struct ipt_counters total[], 906 struct ipt_counters total[],
900 unsigned int *i) 907 unsigned int *i)
901 { 908 {
902 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 909 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
903 910
904 (*i)++; 911 (*i)++;
905 return 0; 912 return 0;
906 } 913 }
907 914
908 static void 915 static void
909 get_counters(const struct xt_table_info *t, 916 get_counters(const struct xt_table_info *t,
910 struct xt_counters counters[]) 917 struct xt_counters counters[])
911 { 918 {
912 unsigned int cpu; 919 unsigned int cpu;
913 unsigned int i; 920 unsigned int i;
914 unsigned int curcpu; 921 unsigned int curcpu;
915 922
916 /* Instead of clearing (by a previous call to memset()) 923 /* Instead of clearing (by a previous call to memset())
917 * the counters and using adds, we set the counters 924 * the counters and using adds, we set the counters
918 * with data used by 'current' CPU. 925 * with data used by 'current' CPU.
919 * 926 *
920 * Bottom half has to be disabled to prevent deadlock 927 * Bottom half has to be disabled to prevent deadlock
921 * if new softirq were to run and call ipt_do_table 928 * if new softirq were to run and call ipt_do_table
922 */ 929 */
923 local_bh_disable(); 930 local_bh_disable();
924 curcpu = smp_processor_id(); 931 curcpu = smp_processor_id();
925 932
926 i = 0; 933 i = 0;
927 IPT_ENTRY_ITERATE(t->entries[curcpu], 934 IPT_ENTRY_ITERATE(t->entries[curcpu],
928 t->size, 935 t->size,
929 set_entry_to_counter, 936 set_entry_to_counter,
930 counters, 937 counters,
931 &i); 938 &i);
932 939
933 for_each_possible_cpu(cpu) { 940 for_each_possible_cpu(cpu) {
934 if (cpu == curcpu) 941 if (cpu == curcpu)
935 continue; 942 continue;
936 i = 0; 943 i = 0;
937 xt_info_wrlock(cpu); 944 xt_info_wrlock(cpu);
938 IPT_ENTRY_ITERATE(t->entries[cpu], 945 IPT_ENTRY_ITERATE(t->entries[cpu],
939 t->size, 946 t->size,
940 add_entry_to_counter, 947 add_entry_to_counter,
941 counters, 948 counters,
942 &i); 949 &i);
943 xt_info_wrunlock(cpu); 950 xt_info_wrunlock(cpu);
944 } 951 }
945 local_bh_enable(); 952 local_bh_enable();
946 } 953 }
947 954
948 static struct xt_counters * alloc_counters(struct xt_table *table) 955 static struct xt_counters * alloc_counters(struct xt_table *table)
949 { 956 {
950 unsigned int countersize; 957 unsigned int countersize;
951 struct xt_counters *counters; 958 struct xt_counters *counters;
952 struct xt_table_info *private = table->private; 959 struct xt_table_info *private = table->private;
953 960
954 /* We need atomic snapshot of counters: rest doesn't change 961 /* We need atomic snapshot of counters: rest doesn't change
955 (other than comefrom, which userspace doesn't care 962 (other than comefrom, which userspace doesn't care
956 about). */ 963 about). */
957 countersize = sizeof(struct xt_counters) * private->number; 964 countersize = sizeof(struct xt_counters) * private->number;
958 counters = vmalloc_node(countersize, numa_node_id()); 965 counters = vmalloc_node(countersize, numa_node_id());
959 966
960 if (counters == NULL) 967 if (counters == NULL)
961 return ERR_PTR(-ENOMEM); 968 return ERR_PTR(-ENOMEM);
962 969
963 get_counters(private, counters); 970 get_counters(private, counters);
964 971
965 return counters; 972 return counters;
966 } 973 }
967 974
968 static int 975 static int
969 copy_entries_to_user(unsigned int total_size, 976 copy_entries_to_user(unsigned int total_size,
970 struct xt_table *table, 977 struct xt_table *table,
971 void __user *userptr) 978 void __user *userptr)
972 { 979 {
973 unsigned int off, num; 980 unsigned int off, num;
974 struct ipt_entry *e; 981 struct ipt_entry *e;
975 struct xt_counters *counters; 982 struct xt_counters *counters;
976 const struct xt_table_info *private = table->private; 983 const struct xt_table_info *private = table->private;
977 int ret = 0; 984 int ret = 0;
978 const void *loc_cpu_entry; 985 const void *loc_cpu_entry;
979 986
980 counters = alloc_counters(table); 987 counters = alloc_counters(table);
981 if (IS_ERR(counters)) 988 if (IS_ERR(counters))
982 return PTR_ERR(counters); 989 return PTR_ERR(counters);
983 990
984 /* choose the copy that is on our node/cpu, ... 991 /* choose the copy that is on our node/cpu, ...
985 * This choice is lazy (because current thread is 992 * This choice is lazy (because current thread is
986 * allowed to migrate to another cpu) 993 * allowed to migrate to another cpu)
987 */ 994 */
988 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 995 loc_cpu_entry = private->entries[raw_smp_processor_id()];
989 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 996 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
990 ret = -EFAULT; 997 ret = -EFAULT;
991 goto free_counters; 998 goto free_counters;
992 } 999 }
993 1000
994 /* FIXME: use iterator macros --RR */ 1001 /* FIXME: use iterator macros --RR */
995 /* ... then go back and fix counters and names */ 1002 /* ... then go back and fix counters and names */
996 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 1003 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
997 unsigned int i; 1004 unsigned int i;
998 const struct ipt_entry_match *m; 1005 const struct ipt_entry_match *m;
999 const struct ipt_entry_target *t; 1006 const struct ipt_entry_target *t;
1000 1007
1001 e = (struct ipt_entry *)(loc_cpu_entry + off); 1008 e = (struct ipt_entry *)(loc_cpu_entry + off);
1002 if (copy_to_user(userptr + off 1009 if (copy_to_user(userptr + off
1003 + offsetof(struct ipt_entry, counters), 1010 + offsetof(struct ipt_entry, counters),
1004 &counters[num], 1011 &counters[num],
1005 sizeof(counters[num])) != 0) { 1012 sizeof(counters[num])) != 0) {
1006 ret = -EFAULT; 1013 ret = -EFAULT;
1007 goto free_counters; 1014 goto free_counters;
1008 } 1015 }
1009 1016
1010 for (i = sizeof(struct ipt_entry); 1017 for (i = sizeof(struct ipt_entry);
1011 i < e->target_offset; 1018 i < e->target_offset;
1012 i += m->u.match_size) { 1019 i += m->u.match_size) {
1013 m = (void *)e + i; 1020 m = (void *)e + i;
1014 1021
1015 if (copy_to_user(userptr + off + i 1022 if (copy_to_user(userptr + off + i
1016 + offsetof(struct ipt_entry_match, 1023 + offsetof(struct ipt_entry_match,
1017 u.user.name), 1024 u.user.name),
1018 m->u.kernel.match->name, 1025 m->u.kernel.match->name,
1019 strlen(m->u.kernel.match->name)+1) 1026 strlen(m->u.kernel.match->name)+1)
1020 != 0) { 1027 != 0) {
1021 ret = -EFAULT; 1028 ret = -EFAULT;
1022 goto free_counters; 1029 goto free_counters;
1023 } 1030 }
1024 } 1031 }
1025 1032
1026 t = ipt_get_target(e); 1033 t = ipt_get_target(e);
1027 if (copy_to_user(userptr + off + e->target_offset 1034 if (copy_to_user(userptr + off + e->target_offset
1028 + offsetof(struct ipt_entry_target, 1035 + offsetof(struct ipt_entry_target,
1029 u.user.name), 1036 u.user.name),
1030 t->u.kernel.target->name, 1037 t->u.kernel.target->name,
1031 strlen(t->u.kernel.target->name)+1) != 0) { 1038 strlen(t->u.kernel.target->name)+1) != 0) {
1032 ret = -EFAULT; 1039 ret = -EFAULT;
1033 goto free_counters; 1040 goto free_counters;
1034 } 1041 }
1035 } 1042 }
1036 1043
1037 free_counters: 1044 free_counters:
1038 vfree(counters); 1045 vfree(counters);
1039 return ret; 1046 return ret;
1040 } 1047 }
1041 1048
1042 #ifdef CONFIG_COMPAT 1049 #ifdef CONFIG_COMPAT
1043 static void compat_standard_from_user(void *dst, void *src) 1050 static void compat_standard_from_user(void *dst, void *src)
1044 { 1051 {
1045 int v = *(compat_int_t *)src; 1052 int v = *(compat_int_t *)src;
1046 1053
1047 if (v > 0) 1054 if (v > 0)
1048 v += xt_compat_calc_jump(AF_INET, v); 1055 v += xt_compat_calc_jump(AF_INET, v);
1049 memcpy(dst, &v, sizeof(v)); 1056 memcpy(dst, &v, sizeof(v));
1050 } 1057 }
1051 1058
1052 static int compat_standard_to_user(void __user *dst, void *src) 1059 static int compat_standard_to_user(void __user *dst, void *src)
1053 { 1060 {
1054 compat_int_t cv = *(int *)src; 1061 compat_int_t cv = *(int *)src;
1055 1062
1056 if (cv > 0) 1063 if (cv > 0)
1057 cv -= xt_compat_calc_jump(AF_INET, cv); 1064 cv -= xt_compat_calc_jump(AF_INET, cv);
1058 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1065 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1059 } 1066 }
1060 1067
1061 static inline int 1068 static inline int
1062 compat_calc_match(struct ipt_entry_match *m, int *size) 1069 compat_calc_match(struct ipt_entry_match *m, int *size)
1063 { 1070 {
1064 *size += xt_compat_match_offset(m->u.kernel.match); 1071 *size += xt_compat_match_offset(m->u.kernel.match);
1065 return 0; 1072 return 0;
1066 } 1073 }
1067 1074
1068 static int compat_calc_entry(struct ipt_entry *e, 1075 static int compat_calc_entry(struct ipt_entry *e,
1069 const struct xt_table_info *info, 1076 const struct xt_table_info *info,
1070 void *base, struct xt_table_info *newinfo) 1077 void *base, struct xt_table_info *newinfo)
1071 { 1078 {
1072 struct ipt_entry_target *t; 1079 struct ipt_entry_target *t;
1073 unsigned int entry_offset; 1080 unsigned int entry_offset;
1074 int off, i, ret; 1081 int off, i, ret;
1075 1082
1076 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1083 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1077 entry_offset = (void *)e - base; 1084 entry_offset = (void *)e - base;
1078 IPT_MATCH_ITERATE(e, compat_calc_match, &off); 1085 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1079 t = ipt_get_target(e); 1086 t = ipt_get_target(e);
1080 off += xt_compat_target_offset(t->u.kernel.target); 1087 off += xt_compat_target_offset(t->u.kernel.target);
1081 newinfo->size -= off; 1088 newinfo->size -= off;
1082 ret = xt_compat_add_offset(AF_INET, entry_offset, off); 1089 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1083 if (ret) 1090 if (ret)
1084 return ret; 1091 return ret;
1085 1092
1086 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1093 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1087 if (info->hook_entry[i] && 1094 if (info->hook_entry[i] &&
1088 (e < (struct ipt_entry *)(base + info->hook_entry[i]))) 1095 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1089 newinfo->hook_entry[i] -= off; 1096 newinfo->hook_entry[i] -= off;
1090 if (info->underflow[i] && 1097 if (info->underflow[i] &&
1091 (e < (struct ipt_entry *)(base + info->underflow[i]))) 1098 (e < (struct ipt_entry *)(base + info->underflow[i])))
1092 newinfo->underflow[i] -= off; 1099 newinfo->underflow[i] -= off;
1093 } 1100 }
1094 return 0; 1101 return 0;
1095 } 1102 }
1096 1103
1097 static int compat_table_info(const struct xt_table_info *info, 1104 static int compat_table_info(const struct xt_table_info *info,
1098 struct xt_table_info *newinfo) 1105 struct xt_table_info *newinfo)
1099 { 1106 {
1100 void *loc_cpu_entry; 1107 void *loc_cpu_entry;
1101 1108
1102 if (!newinfo || !info) 1109 if (!newinfo || !info)
1103 return -EINVAL; 1110 return -EINVAL;
1104 1111
1105 /* we dont care about newinfo->entries[] */ 1112 /* we dont care about newinfo->entries[] */
1106 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1113 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1107 newinfo->initial_entries = 0; 1114 newinfo->initial_entries = 0;
1108 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1115 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1109 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 1116 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1110 compat_calc_entry, info, loc_cpu_entry, 1117 compat_calc_entry, info, loc_cpu_entry,
1111 newinfo); 1118 newinfo);
1112 } 1119 }
1113 #endif 1120 #endif
1114 1121
1115 static int get_info(struct net *net, void __user *user, int *len, int compat) 1122 static int get_info(struct net *net, void __user *user, int *len, int compat)
1116 { 1123 {
1117 char name[IPT_TABLE_MAXNAMELEN]; 1124 char name[IPT_TABLE_MAXNAMELEN];
1118 struct xt_table *t; 1125 struct xt_table *t;
1119 int ret; 1126 int ret;
1120 1127
1121 if (*len != sizeof(struct ipt_getinfo)) { 1128 if (*len != sizeof(struct ipt_getinfo)) {
1122 duprintf("length %u != %zu\n", *len, 1129 duprintf("length %u != %zu\n", *len,
1123 sizeof(struct ipt_getinfo)); 1130 sizeof(struct ipt_getinfo));
1124 return -EINVAL; 1131 return -EINVAL;
1125 } 1132 }
1126 1133
1127 if (copy_from_user(name, user, sizeof(name)) != 0) 1134 if (copy_from_user(name, user, sizeof(name)) != 0)
1128 return -EFAULT; 1135 return -EFAULT;
1129 1136
1130 name[IPT_TABLE_MAXNAMELEN-1] = '\0'; 1137 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1131 #ifdef CONFIG_COMPAT 1138 #ifdef CONFIG_COMPAT
1132 if (compat) 1139 if (compat)
1133 xt_compat_lock(AF_INET); 1140 xt_compat_lock(AF_INET);
1134 #endif 1141 #endif
1135 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), 1142 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1136 "iptable_%s", name); 1143 "iptable_%s", name);
1137 if (t && !IS_ERR(t)) { 1144 if (t && !IS_ERR(t)) {
1138 struct ipt_getinfo info; 1145 struct ipt_getinfo info;
1139 const struct xt_table_info *private = t->private; 1146 const struct xt_table_info *private = t->private;
1140 #ifdef CONFIG_COMPAT 1147 #ifdef CONFIG_COMPAT
1141 struct xt_table_info tmp; 1148 struct xt_table_info tmp;
1142 1149
1143 if (compat) { 1150 if (compat) {
1144 ret = compat_table_info(private, &tmp); 1151 ret = compat_table_info(private, &tmp);
1145 xt_compat_flush_offsets(AF_INET); 1152 xt_compat_flush_offsets(AF_INET);
1146 private = &tmp; 1153 private = &tmp;
1147 } 1154 }
1148 #endif 1155 #endif
1149 info.valid_hooks = t->valid_hooks; 1156 info.valid_hooks = t->valid_hooks;
1150 memcpy(info.hook_entry, private->hook_entry, 1157 memcpy(info.hook_entry, private->hook_entry,
1151 sizeof(info.hook_entry)); 1158 sizeof(info.hook_entry));
1152 memcpy(info.underflow, private->underflow, 1159 memcpy(info.underflow, private->underflow,
1153 sizeof(info.underflow)); 1160 sizeof(info.underflow));
1154 info.num_entries = private->number; 1161 info.num_entries = private->number;
1155 info.size = private->size; 1162 info.size = private->size;
1156 strcpy(info.name, name); 1163 strcpy(info.name, name);
1157 1164
1158 if (copy_to_user(user, &info, *len) != 0) 1165 if (copy_to_user(user, &info, *len) != 0)
1159 ret = -EFAULT; 1166 ret = -EFAULT;
1160 else 1167 else
1161 ret = 0; 1168 ret = 0;
1162 1169
1163 xt_table_unlock(t); 1170 xt_table_unlock(t);
1164 module_put(t->me); 1171 module_put(t->me);
1165 } else 1172 } else
1166 ret = t ? PTR_ERR(t) : -ENOENT; 1173 ret = t ? PTR_ERR(t) : -ENOENT;
1167 #ifdef CONFIG_COMPAT 1174 #ifdef CONFIG_COMPAT
1168 if (compat) 1175 if (compat)
1169 xt_compat_unlock(AF_INET); 1176 xt_compat_unlock(AF_INET);
1170 #endif 1177 #endif
1171 return ret; 1178 return ret;
1172 } 1179 }
1173 1180
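Note for readers of this hunk: get_info() (and __do_replace() further down) resolve the table with try_then_request_module(..., "iptable_%s", name), so the first IPT_SO_GET_INFO or IPT_SO_SET_REPLACE naming a table auto-loads its module; with this commit that module's init then builds the initial table at runtime instead of copying a static blob. The helper is roughly the following (see <linux/kmod.h>; the exact definition may differ slightly):

        #define try_then_request_module(x, mod...) \
                ((x) ?: (request_module(mod), (x)))

i.e. evaluate the lookup once, and only if it comes back NULL request the module and re-evaluate the same lookup expression.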
1174 static int 1181 static int
1175 get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len) 1182 get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
1176 { 1183 {
1177 int ret; 1184 int ret;
1178 struct ipt_get_entries get; 1185 struct ipt_get_entries get;
1179 struct xt_table *t; 1186 struct xt_table *t;
1180 1187
1181 if (*len < sizeof(get)) { 1188 if (*len < sizeof(get)) {
1182 duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); 1189 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1183 return -EINVAL; 1190 return -EINVAL;
1184 } 1191 }
1185 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1192 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1186 return -EFAULT; 1193 return -EFAULT;
1187 if (*len != sizeof(struct ipt_get_entries) + get.size) { 1194 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1188 duprintf("get_entries: %u != %zu\n", 1195 duprintf("get_entries: %u != %zu\n",
1189 *len, sizeof(get) + get.size); 1196 *len, sizeof(get) + get.size);
1190 return -EINVAL; 1197 return -EINVAL;
1191 } 1198 }
1192 1199
1193 t = xt_find_table_lock(net, AF_INET, get.name); 1200 t = xt_find_table_lock(net, AF_INET, get.name);
1194 if (t && !IS_ERR(t)) { 1201 if (t && !IS_ERR(t)) {
1195 const struct xt_table_info *private = t->private; 1202 const struct xt_table_info *private = t->private;
1196 duprintf("t->private->number = %u\n", private->number); 1203 duprintf("t->private->number = %u\n", private->number);
1197 if (get.size == private->size) 1204 if (get.size == private->size)
1198 ret = copy_entries_to_user(private->size, 1205 ret = copy_entries_to_user(private->size,
1199 t, uptr->entrytable); 1206 t, uptr->entrytable);
1200 else { 1207 else {
1201 duprintf("get_entries: I've got %u not %u!\n", 1208 duprintf("get_entries: I've got %u not %u!\n",
1202 private->size, get.size); 1209 private->size, get.size);
1203 ret = -EAGAIN; 1210 ret = -EAGAIN;
1204 } 1211 }
1205 module_put(t->me); 1212 module_put(t->me);
1206 xt_table_unlock(t); 1213 xt_table_unlock(t);
1207 } else 1214 } else
1208 ret = t ? PTR_ERR(t) : -ENOENT; 1215 ret = t ? PTR_ERR(t) : -ENOENT;
1209 1216
1210 return ret; 1217 return ret;
1211 } 1218 }
1212 1219
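The userspace side of IPT_SO_GET_ENTRIES is a two-step exchange: query the ruleset size with IPT_SO_GET_INFO, then hand in a buffer of sizeof(struct ipt_get_entries) + info.size and walk the returned blob by next_offset, exactly as copy_entries_to_user() laid it out. A minimal self-contained sketch of the calling side (assuming the uapi definitions from <linux/netfilter_ipv4/ip_tables.h> and a process with CAP_NET_ADMIN; dump_table() and its trimmed error handling are illustrative, not kernel or iptables code):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/netfilter_ipv4/ip_tables.h>

        static int dump_table(const char *name)
        {
                struct ipt_getinfo info;
                struct ipt_get_entries *ent;
                socklen_t len = sizeof(info), elen;
                unsigned int off;
                int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

                if (fd < 0)
                        return -1;
                memset(&info, 0, sizeof(info));
                strncpy(info.name, name, sizeof(info.name) - 1);
                /* get_info() insists on *len == sizeof(struct ipt_getinfo) */
                if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len) < 0)
                        return -1;

                elen = sizeof(*ent) + info.size;
                ent = calloc(1, elen);
                if (ent == NULL)
                        return -1;
                strncpy(ent->name, name, sizeof(ent->name) - 1);
                ent->size = info.size;  /* must equal private->size, else -EAGAIN */
                if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_ENTRIES, ent, &elen) < 0)
                        return -1;

                /* walk the entries the same way the kernel wrote them out */
                for (off = 0; off < ent->size; off += 0) {
                        struct ipt_entry *e = (struct ipt_entry *)
                                        ((char *)ent->entrytable + off);
                        printf("entry @%u: %llu pkts, %llu bytes\n", off,
                               (unsigned long long)e->counters.pcnt,
                               (unsigned long long)e->counters.bcnt);
                        off += e->next_offset - 0;
                        if (e->next_offset == 0)
                                break;  /* malformed blob; avoid an endless loop */
                }
                free(ent);
                close(fd);
                return 0;
        }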
1213 static int 1220 static int
1214 __do_replace(struct net *net, const char *name, unsigned int valid_hooks, 1221 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *newinfo, unsigned int num_counters, 1222 struct xt_table_info *newinfo, unsigned int num_counters,
1216 void __user *counters_ptr) 1223 void __user *counters_ptr)
1217 { 1224 {
1218 int ret; 1225 int ret;
1219 struct xt_table *t; 1226 struct xt_table *t;
1220 struct xt_table_info *oldinfo; 1227 struct xt_table_info *oldinfo;
1221 struct xt_counters *counters; 1228 struct xt_counters *counters;
1222 void *loc_cpu_old_entry; 1229 void *loc_cpu_old_entry;
1223 1230
1224 ret = 0; 1231 ret = 0;
1225 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1232 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1226 if (!counters) { 1233 if (!counters) {
1227 ret = -ENOMEM; 1234 ret = -ENOMEM;
1228 goto out; 1235 goto out;
1229 } 1236 }
1230 1237
1231 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), 1238 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1232 "iptable_%s", name); 1239 "iptable_%s", name);
1233 if (!t || IS_ERR(t)) { 1240 if (!t || IS_ERR(t)) {
1234 ret = t ? PTR_ERR(t) : -ENOENT; 1241 ret = t ? PTR_ERR(t) : -ENOENT;
1235 goto free_newinfo_counters_untrans; 1242 goto free_newinfo_counters_untrans;
1236 } 1243 }
1237 1244
1238 /* You lied! */ 1245 /* You lied! */
1239 if (valid_hooks != t->valid_hooks) { 1246 if (valid_hooks != t->valid_hooks) {
1240 duprintf("Valid hook crap: %08X vs %08X\n", 1247 duprintf("Valid hook crap: %08X vs %08X\n",
1241 valid_hooks, t->valid_hooks); 1248 valid_hooks, t->valid_hooks);
1242 ret = -EINVAL; 1249 ret = -EINVAL;
1243 goto put_module; 1250 goto put_module;
1244 } 1251 }
1245 1252
1246 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); 1253 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1247 if (!oldinfo) 1254 if (!oldinfo)
1248 goto put_module; 1255 goto put_module;
1249 1256
1250 /* Update module usage count based on number of rules */ 1257 /* Update module usage count based on number of rules */
1251 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", 1258 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1252 oldinfo->number, oldinfo->initial_entries, newinfo->number); 1259 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1253 if ((oldinfo->number > oldinfo->initial_entries) || 1260 if ((oldinfo->number > oldinfo->initial_entries) ||
1254 (newinfo->number <= oldinfo->initial_entries)) 1261 (newinfo->number <= oldinfo->initial_entries))
1255 module_put(t->me); 1262 module_put(t->me);
1256 if ((oldinfo->number > oldinfo->initial_entries) && 1263 if ((oldinfo->number > oldinfo->initial_entries) &&
1257 (newinfo->number <= oldinfo->initial_entries)) 1264 (newinfo->number <= oldinfo->initial_entries))
1258 module_put(t->me); 1265 module_put(t->me);
1259 1266
1260 /* Get the old counters, and synchronize with replace */ 1267 /* Get the old counters, and synchronize with replace */
1261 get_counters(oldinfo, counters); 1268 get_counters(oldinfo, counters);
1262 1269
1263 /* Decrease module usage counts and free resource */ 1270 /* Decrease module usage counts and free resource */
1264 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1271 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1265 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1272 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1266 net, NULL); 1273 net, NULL);
1267 xt_free_table_info(oldinfo); 1274 xt_free_table_info(oldinfo);
1268 if (copy_to_user(counters_ptr, counters, 1275 if (copy_to_user(counters_ptr, counters,
1269 sizeof(struct xt_counters) * num_counters) != 0) 1276 sizeof(struct xt_counters) * num_counters) != 0)
1270 ret = -EFAULT; 1277 ret = -EFAULT;
1271 vfree(counters); 1278 vfree(counters);
1272 xt_table_unlock(t); 1279 xt_table_unlock(t);
1273 return ret; 1280 return ret;
1274 1281
1275 put_module: 1282 put_module:
1276 module_put(t->me); 1283 module_put(t->me);
1277 xt_table_unlock(t); 1284 xt_table_unlock(t);
1278 free_newinfo_counters_untrans: 1285 free_newinfo_counters_untrans:
1279 vfree(counters); 1286 vfree(counters);
1280 out: 1287 out:
1281 return ret; 1288 return ret;
1282 } 1289 }
1283 1290
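The two conditional module_put() calls in __do_replace() above encode one invariant: the table module keeps one extra reference for as long as the table holds more rules than its initial (built-in) set, balancing the reference taken by the xt_find_table_lock() lookup. A small standalone sketch of that decision logic (refs_to_drop() is illustrative, not a kernel symbol):

        #include <stdio.h>

        /* Mirror of the bookkeeping in __do_replace(): how many references to
         * drop when a table with old_num rules (old_init of them initial) is
         * replaced by one with new_num rules. */
        static int refs_to_drop(unsigned int old_num, unsigned int old_init,
                                unsigned int new_num)
        {
                int puts = 0;

                if (old_num > old_init || new_num <= old_init)
                        puts++;         /* first conditional module_put() */
                if (old_num > old_init && new_num <= old_init)
                        puts++;         /* second conditional module_put() */
                return puts;
        }

        int main(void)
        {
                printf("%d\n", refs_to_drop(4, 4, 5)); /* 0: gained a user rule, keep lookup ref */
                printf("%d\n", refs_to_drop(5, 4, 6)); /* 1: still has user rules, net unchanged */
                printf("%d\n", refs_to_drop(5, 4, 4)); /* 2: user rules gone, drop the held ref too */
                printf("%d\n", refs_to_drop(4, 4, 4)); /* 1: never had user rules, net unchanged */
                return 0;
        }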
1284 static int 1291 static int
1285 do_replace(struct net *net, void __user *user, unsigned int len) 1292 do_replace(struct net *net, void __user *user, unsigned int len)
1286 { 1293 {
1287 int ret; 1294 int ret;
1288 struct ipt_replace tmp; 1295 struct ipt_replace tmp;
1289 struct xt_table_info *newinfo; 1296 struct xt_table_info *newinfo;
1290 void *loc_cpu_entry; 1297 void *loc_cpu_entry;
1291 1298
1292 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1299 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1293 return -EFAULT; 1300 return -EFAULT;
1294 1301
1295 /* overflow check */ 1302 /* overflow check */
1296 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1303 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1297 return -ENOMEM; 1304 return -ENOMEM;
1298 1305
1299 newinfo = xt_alloc_table_info(tmp.size); 1306 newinfo = xt_alloc_table_info(tmp.size);
1300 if (!newinfo) 1307 if (!newinfo)
1301 return -ENOMEM; 1308 return -ENOMEM;
1302 1309
1303 /* choose the copy that is on our node/cpu */ 1310 /* choose the copy that is on our node/cpu */
1304 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1311 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1305 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), 1312 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1306 tmp.size) != 0) { 1313 tmp.size) != 0) {
1307 ret = -EFAULT; 1314 ret = -EFAULT;
1308 goto free_newinfo; 1315 goto free_newinfo;
1309 } 1316 }
1310 1317
1311 ret = translate_table(net, tmp.name, tmp.valid_hooks, 1318 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1312 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, 1319 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1313 tmp.hook_entry, tmp.underflow); 1320 tmp.hook_entry, tmp.underflow);
1314 if (ret != 0) 1321 if (ret != 0)
1315 goto free_newinfo; 1322 goto free_newinfo;
1316 1323
1317 duprintf("ip_tables: Translated table\n"); 1324 duprintf("ip_tables: Translated table\n");
1318 1325
1319 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1326 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1320 tmp.num_counters, tmp.counters); 1327 tmp.num_counters, tmp.counters);
1321 if (ret) 1328 if (ret)
1322 goto free_newinfo_untrans; 1329 goto free_newinfo_untrans;
1323 return 0; 1330 return 0;
1324 1331
1325 free_newinfo_untrans: 1332 free_newinfo_untrans:
1326 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); 1333 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1327 free_newinfo: 1334 free_newinfo:
1328 xt_free_table_info(newinfo); 1335 xt_free_table_info(newinfo);
1329 return ret; 1336 return ret;
1330 } 1337 }
1331 1338
1332 /* We're lazy, and add to the first CPU; overflow works its fey magic 1339 /* We're lazy, and add to the first CPU; overflow works its fey magic
1333 * and everything is OK. */ 1340 * and everything is OK. */
1334 static int 1341 static int
1335 add_counter_to_entry(struct ipt_entry *e, 1342 add_counter_to_entry(struct ipt_entry *e,
1336 const struct xt_counters addme[], 1343 const struct xt_counters addme[],
1337 unsigned int *i) 1344 unsigned int *i)
1338 { 1345 {
1339 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1346 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1340 1347
1341 (*i)++; 1348 (*i)++;
1342 return 0; 1349 return 0;
1343 } 1350 }
1344 1351
1345 static int 1352 static int
1346 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) 1353 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1347 { 1354 {
1348 unsigned int i, curcpu; 1355 unsigned int i, curcpu;
1349 struct xt_counters_info tmp; 1356 struct xt_counters_info tmp;
1350 struct xt_counters *paddc; 1357 struct xt_counters *paddc;
1351 unsigned int num_counters; 1358 unsigned int num_counters;
1352 const char *name; 1359 const char *name;
1353 int size; 1360 int size;
1354 void *ptmp; 1361 void *ptmp;
1355 struct xt_table *t; 1362 struct xt_table *t;
1356 const struct xt_table_info *private; 1363 const struct xt_table_info *private;
1357 int ret = 0; 1364 int ret = 0;
1358 void *loc_cpu_entry; 1365 void *loc_cpu_entry;
1359 #ifdef CONFIG_COMPAT 1366 #ifdef CONFIG_COMPAT
1360 struct compat_xt_counters_info compat_tmp; 1367 struct compat_xt_counters_info compat_tmp;
1361 1368
1362 if (compat) { 1369 if (compat) {
1363 ptmp = &compat_tmp; 1370 ptmp = &compat_tmp;
1364 size = sizeof(struct compat_xt_counters_info); 1371 size = sizeof(struct compat_xt_counters_info);
1365 } else 1372 } else
1366 #endif 1373 #endif
1367 { 1374 {
1368 ptmp = &tmp; 1375 ptmp = &tmp;
1369 size = sizeof(struct xt_counters_info); 1376 size = sizeof(struct xt_counters_info);
1370 } 1377 }
1371 1378
1372 if (copy_from_user(ptmp, user, size) != 0) 1379 if (copy_from_user(ptmp, user, size) != 0)
1373 return -EFAULT; 1380 return -EFAULT;
1374 1381
1375 #ifdef CONFIG_COMPAT 1382 #ifdef CONFIG_COMPAT
1376 if (compat) { 1383 if (compat) {
1377 num_counters = compat_tmp.num_counters; 1384 num_counters = compat_tmp.num_counters;
1378 name = compat_tmp.name; 1385 name = compat_tmp.name;
1379 } else 1386 } else
1380 #endif 1387 #endif
1381 { 1388 {
1382 num_counters = tmp.num_counters; 1389 num_counters = tmp.num_counters;
1383 name = tmp.name; 1390 name = tmp.name;
1384 } 1391 }
1385 1392
1386 if (len != size + num_counters * sizeof(struct xt_counters)) 1393 if (len != size + num_counters * sizeof(struct xt_counters))
1387 return -EINVAL; 1394 return -EINVAL;
1388 1395
1389 paddc = vmalloc_node(len - size, numa_node_id()); 1396 paddc = vmalloc_node(len - size, numa_node_id());
1390 if (!paddc) 1397 if (!paddc)
1391 return -ENOMEM; 1398 return -ENOMEM;
1392 1399
1393 if (copy_from_user(paddc, user + size, len - size) != 0) { 1400 if (copy_from_user(paddc, user + size, len - size) != 0) {
1394 ret = -EFAULT; 1401 ret = -EFAULT;
1395 goto free; 1402 goto free;
1396 } 1403 }
1397 1404
1398 t = xt_find_table_lock(net, AF_INET, name); 1405 t = xt_find_table_lock(net, AF_INET, name);
1399 if (!t || IS_ERR(t)) { 1406 if (!t || IS_ERR(t)) {
1400 ret = t ? PTR_ERR(t) : -ENOENT; 1407 ret = t ? PTR_ERR(t) : -ENOENT;
1401 goto free; 1408 goto free;
1402 } 1409 }
1403 1410
1404 local_bh_disable(); 1411 local_bh_disable();
1405 private = t->private; 1412 private = t->private;
1406 if (private->number != num_counters) { 1413 if (private->number != num_counters) {
1407 ret = -EINVAL; 1414 ret = -EINVAL;
1408 goto unlock_up_free; 1415 goto unlock_up_free;
1409 } 1416 }
1410 1417
1411 i = 0; 1418 i = 0;
1412 /* Choose the copy that is on our node */ 1419 /* Choose the copy that is on our node */
1413 curcpu = smp_processor_id(); 1420 curcpu = smp_processor_id();
1414 loc_cpu_entry = private->entries[curcpu]; 1421 loc_cpu_entry = private->entries[curcpu];
1415 xt_info_wrlock(curcpu); 1422 xt_info_wrlock(curcpu);
1416 IPT_ENTRY_ITERATE(loc_cpu_entry, 1423 IPT_ENTRY_ITERATE(loc_cpu_entry,
1417 private->size, 1424 private->size,
1418 add_counter_to_entry, 1425 add_counter_to_entry,
1419 paddc, 1426 paddc,
1420 &i); 1427 &i);
1421 xt_info_wrunlock(curcpu); 1428 xt_info_wrunlock(curcpu);
1422 unlock_up_free: 1429 unlock_up_free:
1423 local_bh_enable(); 1430 local_bh_enable();
1424 xt_table_unlock(t); 1431 xt_table_unlock(t);
1425 module_put(t->me); 1432 module_put(t->me);
1426 free: 1433 free:
1427 vfree(paddc); 1434 vfree(paddc);
1428 1435
1429 return ret; 1436 return ret;
1430 } 1437 }
1431 1438
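do_add_counters() expects its payload to be a struct xt_counters_info header followed by exactly num_counters struct xt_counters, one per rule in the table; that is what the len != size + num_counters * sizeof(struct xt_counters) check enforces. A hedged userspace sketch of building that blob (fd is a raw AF_INET socket as in the earlier sketch; add_counters() is illustrative, not a kernel or libiptc API):

        #include <stdlib.h>
        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/netfilter_ipv4/ip_tables.h>

        static int add_counters(int fd, const char *table,
                                const struct xt_counters *add, unsigned int n)
        {
                struct xt_counters_info *ci;
                size_t len = sizeof(*ci) + n * sizeof(struct xt_counters);
                int ret;

                ci = calloc(1, len);
                if (ci == NULL)
                        return -1;
                strncpy(ci->name, table, sizeof(ci->name) - 1);
                ci->num_counters = n;   /* must match private->number in the kernel */
                memcpy(ci->counters, add, n * sizeof(struct xt_counters));
                ret = setsockopt(fd, IPPROTO_IP, IPT_SO_SET_ADD_COUNTERS, ci, len);
                free(ci);
                return ret;
        }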
1432 #ifdef CONFIG_COMPAT 1439 #ifdef CONFIG_COMPAT
1433 struct compat_ipt_replace { 1440 struct compat_ipt_replace {
1434 char name[IPT_TABLE_MAXNAMELEN]; 1441 char name[IPT_TABLE_MAXNAMELEN];
1435 u32 valid_hooks; 1442 u32 valid_hooks;
1436 u32 num_entries; 1443 u32 num_entries;
1437 u32 size; 1444 u32 size;
1438 u32 hook_entry[NF_INET_NUMHOOKS]; 1445 u32 hook_entry[NF_INET_NUMHOOKS];
1439 u32 underflow[NF_INET_NUMHOOKS]; 1446 u32 underflow[NF_INET_NUMHOOKS];
1440 u32 num_counters; 1447 u32 num_counters;
1441 compat_uptr_t counters; /* struct ipt_counters * */ 1448 compat_uptr_t counters; /* struct ipt_counters * */
1442 struct compat_ipt_entry entries[0]; 1449 struct compat_ipt_entry entries[0];
1443 }; 1450 };
1444 1451
1445 static int 1452 static int
1446 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, 1453 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1447 unsigned int *size, struct xt_counters *counters, 1454 unsigned int *size, struct xt_counters *counters,
1448 unsigned int *i) 1455 unsigned int *i)
1449 { 1456 {
1450 struct ipt_entry_target *t; 1457 struct ipt_entry_target *t;
1451 struct compat_ipt_entry __user *ce; 1458 struct compat_ipt_entry __user *ce;
1452 u_int16_t target_offset, next_offset; 1459 u_int16_t target_offset, next_offset;
1453 compat_uint_t origsize; 1460 compat_uint_t origsize;
1454 int ret; 1461 int ret;
1455 1462
1456 ret = -EFAULT; 1463 ret = -EFAULT;
1457 origsize = *size; 1464 origsize = *size;
1458 ce = (struct compat_ipt_entry __user *)*dstptr; 1465 ce = (struct compat_ipt_entry __user *)*dstptr;
1459 if (copy_to_user(ce, e, sizeof(struct ipt_entry))) 1466 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1460 goto out; 1467 goto out;
1461 1468
1462 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1469 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1463 goto out; 1470 goto out;
1464 1471
1465 *dstptr += sizeof(struct compat_ipt_entry); 1472 *dstptr += sizeof(struct compat_ipt_entry);
1466 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1473 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1467 1474
1468 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1475 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1469 target_offset = e->target_offset - (origsize - *size); 1476 target_offset = e->target_offset - (origsize - *size);
1470 if (ret) 1477 if (ret)
1471 goto out; 1478 goto out;
1472 t = ipt_get_target(e); 1479 t = ipt_get_target(e);
1473 ret = xt_compat_target_to_user(t, dstptr, size); 1480 ret = xt_compat_target_to_user(t, dstptr, size);
1474 if (ret) 1481 if (ret)
1475 goto out; 1482 goto out;
1476 ret = -EFAULT; 1483 ret = -EFAULT;
1477 next_offset = e->next_offset - (origsize - *size); 1484 next_offset = e->next_offset - (origsize - *size);
1478 if (put_user(target_offset, &ce->target_offset)) 1485 if (put_user(target_offset, &ce->target_offset))
1479 goto out; 1486 goto out;
1480 if (put_user(next_offset, &ce->next_offset)) 1487 if (put_user(next_offset, &ce->next_offset))
1481 goto out; 1488 goto out;
1482 1489
1483 (*i)++; 1490 (*i)++;
1484 return 0; 1491 return 0;
1485 out: 1492 out:
1486 return ret; 1493 return ret;
1487 } 1494 }
1488 1495
1489 static int 1496 static int
1490 compat_find_calc_match(struct ipt_entry_match *m, 1497 compat_find_calc_match(struct ipt_entry_match *m,
1491 const char *name, 1498 const char *name,
1492 const struct ipt_ip *ip, 1499 const struct ipt_ip *ip,
1493 unsigned int hookmask, 1500 unsigned int hookmask,
1494 int *size, unsigned int *i) 1501 int *size, unsigned int *i)
1495 { 1502 {
1496 struct xt_match *match; 1503 struct xt_match *match;
1497 1504
1498 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 1505 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1499 m->u.user.revision), 1506 m->u.user.revision),
1500 "ipt_%s", m->u.user.name); 1507 "ipt_%s", m->u.user.name);
1501 if (IS_ERR(match) || !match) { 1508 if (IS_ERR(match) || !match) {
1502 duprintf("compat_check_calc_match: `%s' not found\n", 1509 duprintf("compat_check_calc_match: `%s' not found\n",
1503 m->u.user.name); 1510 m->u.user.name);
1504 return match ? PTR_ERR(match) : -ENOENT; 1511 return match ? PTR_ERR(match) : -ENOENT;
1505 } 1512 }
1506 m->u.kernel.match = match; 1513 m->u.kernel.match = match;
1507 *size += xt_compat_match_offset(match); 1514 *size += xt_compat_match_offset(match);
1508 1515
1509 (*i)++; 1516 (*i)++;
1510 return 0; 1517 return 0;
1511 } 1518 }
1512 1519
1513 static int 1520 static int
1514 compat_release_match(struct ipt_entry_match *m, unsigned int *i) 1521 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1515 { 1522 {
1516 if (i && (*i)-- == 0) 1523 if (i && (*i)-- == 0)
1517 return 1; 1524 return 1;
1518 1525
1519 module_put(m->u.kernel.match->me); 1526 module_put(m->u.kernel.match->me);
1520 return 0; 1527 return 0;
1521 } 1528 }
1522 1529
1523 static int 1530 static int
1524 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i) 1531 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1525 { 1532 {
1526 struct ipt_entry_target *t; 1533 struct ipt_entry_target *t;
1527 1534
1528 if (i && (*i)-- == 0) 1535 if (i && (*i)-- == 0)
1529 return 1; 1536 return 1;
1530 1537
1531 /* Cleanup all matches */ 1538 /* Cleanup all matches */
1532 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL); 1539 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1533 t = compat_ipt_get_target(e); 1540 t = compat_ipt_get_target(e);
1534 module_put(t->u.kernel.target->me); 1541 module_put(t->u.kernel.target->me);
1535 return 0; 1542 return 0;
1536 } 1543 }
1537 1544
1538 static int 1545 static int
1539 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, 1546 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1540 struct xt_table_info *newinfo, 1547 struct xt_table_info *newinfo,
1541 unsigned int *size, 1548 unsigned int *size,
1542 unsigned char *base, 1549 unsigned char *base,
1543 unsigned char *limit, 1550 unsigned char *limit,
1544 unsigned int *hook_entries, 1551 unsigned int *hook_entries,
1545 unsigned int *underflows, 1552 unsigned int *underflows,
1546 unsigned int *i, 1553 unsigned int *i,
1547 const char *name) 1554 const char *name)
1548 { 1555 {
1549 struct ipt_entry_target *t; 1556 struct ipt_entry_target *t;
1550 struct xt_target *target; 1557 struct xt_target *target;
1551 unsigned int entry_offset; 1558 unsigned int entry_offset;
1552 unsigned int j; 1559 unsigned int j;
1553 int ret, off, h; 1560 int ret, off, h;
1554 1561
1555 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1562 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1556 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || 1563 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1557 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { 1564 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1558 duprintf("Bad offset %p, limit = %p\n", e, limit); 1565 duprintf("Bad offset %p, limit = %p\n", e, limit);
1559 return -EINVAL; 1566 return -EINVAL;
1560 } 1567 }
1561 1568
1562 if (e->next_offset < sizeof(struct compat_ipt_entry) + 1569 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1563 sizeof(struct compat_xt_entry_target)) { 1570 sizeof(struct compat_xt_entry_target)) {
1564 duprintf("checking: element %p size %u\n", 1571 duprintf("checking: element %p size %u\n",
1565 e, e->next_offset); 1572 e, e->next_offset);
1566 return -EINVAL; 1573 return -EINVAL;
1567 } 1574 }
1568 1575
1569 /* For purposes of check_entry casting the compat entry is fine */ 1576 /* For purposes of check_entry casting the compat entry is fine */
1570 ret = check_entry((struct ipt_entry *)e, name); 1577 ret = check_entry((struct ipt_entry *)e, name);
1571 if (ret) 1578 if (ret)
1572 return ret; 1579 return ret;
1573 1580
1574 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1581 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1575 entry_offset = (void *)e - (void *)base; 1582 entry_offset = (void *)e - (void *)base;
1576 j = 0; 1583 j = 0;
1577 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name, 1584 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1578 &e->ip, e->comefrom, &off, &j); 1585 &e->ip, e->comefrom, &off, &j);
1579 if (ret != 0) 1586 if (ret != 0)
1580 goto release_matches; 1587 goto release_matches;
1581 1588
1582 t = compat_ipt_get_target(e); 1589 t = compat_ipt_get_target(e);
1583 target = try_then_request_module(xt_find_target(AF_INET, 1590 target = try_then_request_module(xt_find_target(AF_INET,
1584 t->u.user.name, 1591 t->u.user.name,
1585 t->u.user.revision), 1592 t->u.user.revision),
1586 "ipt_%s", t->u.user.name); 1593 "ipt_%s", t->u.user.name);
1587 if (IS_ERR(target) || !target) { 1594 if (IS_ERR(target) || !target) {
1588 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", 1595 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1589 t->u.user.name); 1596 t->u.user.name);
1590 ret = target ? PTR_ERR(target) : -ENOENT; 1597 ret = target ? PTR_ERR(target) : -ENOENT;
1591 goto release_matches; 1598 goto release_matches;
1592 } 1599 }
1593 t->u.kernel.target = target; 1600 t->u.kernel.target = target;
1594 1601
1595 off += xt_compat_target_offset(target); 1602 off += xt_compat_target_offset(target);
1596 *size += off; 1603 *size += off;
1597 ret = xt_compat_add_offset(AF_INET, entry_offset, off); 1604 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1598 if (ret) 1605 if (ret)
1599 goto out; 1606 goto out;
1600 1607
1601 /* Check hooks & underflows */ 1608 /* Check hooks & underflows */
1602 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 1609 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1603 if ((unsigned char *)e - base == hook_entries[h]) 1610 if ((unsigned char *)e - base == hook_entries[h])
1604 newinfo->hook_entry[h] = hook_entries[h]; 1611 newinfo->hook_entry[h] = hook_entries[h];
1605 if ((unsigned char *)e - base == underflows[h]) 1612 if ((unsigned char *)e - base == underflows[h])
1606 newinfo->underflow[h] = underflows[h]; 1613 newinfo->underflow[h] = underflows[h];
1607 } 1614 }
1608 1615
1609 /* Clear counters and comefrom */ 1616 /* Clear counters and comefrom */
1610 memset(&e->counters, 0, sizeof(e->counters)); 1617 memset(&e->counters, 0, sizeof(e->counters));
1611 e->comefrom = 0; 1618 e->comefrom = 0;
1612 1619
1613 (*i)++; 1620 (*i)++;
1614 return 0; 1621 return 0;
1615 1622
1616 out: 1623 out:
1617 module_put(t->u.kernel.target->me); 1624 module_put(t->u.kernel.target->me);
1618 release_matches: 1625 release_matches:
1619 IPT_MATCH_ITERATE(e, compat_release_match, &j); 1626 IPT_MATCH_ITERATE(e, compat_release_match, &j);
1620 return ret; 1627 return ret;
1621 } 1628 }
1622 1629
1623 static int 1630 static int
1624 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, 1631 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1625 unsigned int *size, const char *name, 1632 unsigned int *size, const char *name,
1626 struct xt_table_info *newinfo, unsigned char *base) 1633 struct xt_table_info *newinfo, unsigned char *base)
1627 { 1634 {
1628 struct ipt_entry_target *t; 1635 struct ipt_entry_target *t;
1629 struct xt_target *target; 1636 struct xt_target *target;
1630 struct ipt_entry *de; 1637 struct ipt_entry *de;
1631 unsigned int origsize; 1638 unsigned int origsize;
1632 int ret, h; 1639 int ret, h;
1633 1640
1634 ret = 0; 1641 ret = 0;
1635 origsize = *size; 1642 origsize = *size;
1636 de = (struct ipt_entry *)*dstptr; 1643 de = (struct ipt_entry *)*dstptr;
1637 memcpy(de, e, sizeof(struct ipt_entry)); 1644 memcpy(de, e, sizeof(struct ipt_entry));
1638 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1645 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1639 1646
1640 *dstptr += sizeof(struct ipt_entry); 1647 *dstptr += sizeof(struct ipt_entry);
1641 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1648 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1642 1649
1643 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user, 1650 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
1644 dstptr, size); 1651 dstptr, size);
1645 if (ret) 1652 if (ret)
1646 return ret; 1653 return ret;
1647 de->target_offset = e->target_offset - (origsize - *size); 1654 de->target_offset = e->target_offset - (origsize - *size);
1648 t = compat_ipt_get_target(e); 1655 t = compat_ipt_get_target(e);
1649 target = t->u.kernel.target; 1656 target = t->u.kernel.target;
1650 xt_compat_target_from_user(t, dstptr, size); 1657 xt_compat_target_from_user(t, dstptr, size);
1651 1658
1652 de->next_offset = e->next_offset - (origsize - *size); 1659 de->next_offset = e->next_offset - (origsize - *size);
1653 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 1660 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1654 if ((unsigned char *)de - base < newinfo->hook_entry[h]) 1661 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1655 newinfo->hook_entry[h] -= origsize - *size; 1662 newinfo->hook_entry[h] -= origsize - *size;
1656 if ((unsigned char *)de - base < newinfo->underflow[h]) 1663 if ((unsigned char *)de - base < newinfo->underflow[h])
1657 newinfo->underflow[h] -= origsize - *size; 1664 newinfo->underflow[h] -= origsize - *size;
1658 } 1665 }
1659 return ret; 1666 return ret;
1660 } 1667 }
1661 1668
1662 static int 1669 static int
1663 compat_check_entry(struct ipt_entry *e, struct net *net, const char *name, 1670 compat_check_entry(struct ipt_entry *e, struct net *net, const char *name,
1664 unsigned int *i) 1671 unsigned int *i)
1665 { 1672 {
1666 struct xt_mtchk_param mtpar; 1673 struct xt_mtchk_param mtpar;
1667 unsigned int j; 1674 unsigned int j;
1668 int ret; 1675 int ret;
1669 1676
1670 j = 0; 1677 j = 0;
1671 mtpar.net = net; 1678 mtpar.net = net;
1672 mtpar.table = name; 1679 mtpar.table = name;
1673 mtpar.entryinfo = &e->ip; 1680 mtpar.entryinfo = &e->ip;
1674 mtpar.hook_mask = e->comefrom; 1681 mtpar.hook_mask = e->comefrom;
1675 mtpar.family = NFPROTO_IPV4; 1682 mtpar.family = NFPROTO_IPV4;
1676 ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j); 1683 ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
1677 if (ret) 1684 if (ret)
1678 goto cleanup_matches; 1685 goto cleanup_matches;
1679 1686
1680 ret = check_target(e, net, name); 1687 ret = check_target(e, net, name);
1681 if (ret) 1688 if (ret)
1682 goto cleanup_matches; 1689 goto cleanup_matches;
1683 1690
1684 (*i)++; 1691 (*i)++;
1685 return 0; 1692 return 0;
1686 1693
1687 cleanup_matches: 1694 cleanup_matches:
1688 IPT_MATCH_ITERATE(e, cleanup_match, net, &j); 1695 IPT_MATCH_ITERATE(e, cleanup_match, net, &j);
1689 return ret; 1696 return ret;
1690 } 1697 }
1691 1698
1692 static int 1699 static int
1693 translate_compat_table(struct net *net, 1700 translate_compat_table(struct net *net,
1694 const char *name, 1701 const char *name,
1695 unsigned int valid_hooks, 1702 unsigned int valid_hooks,
1696 struct xt_table_info **pinfo, 1703 struct xt_table_info **pinfo,
1697 void **pentry0, 1704 void **pentry0,
1698 unsigned int total_size, 1705 unsigned int total_size,
1699 unsigned int number, 1706 unsigned int number,
1700 unsigned int *hook_entries, 1707 unsigned int *hook_entries,
1701 unsigned int *underflows) 1708 unsigned int *underflows)
1702 { 1709 {
1703 unsigned int i, j; 1710 unsigned int i, j;
1704 struct xt_table_info *newinfo, *info; 1711 struct xt_table_info *newinfo, *info;
1705 void *pos, *entry0, *entry1; 1712 void *pos, *entry0, *entry1;
1706 unsigned int size; 1713 unsigned int size;
1707 int ret; 1714 int ret;
1708 1715
1709 info = *pinfo; 1716 info = *pinfo;
1710 entry0 = *pentry0; 1717 entry0 = *pentry0;
1711 size = total_size; 1718 size = total_size;
1712 info->number = number; 1719 info->number = number;
1713 1720
1714 /* Init all hooks to impossible value. */ 1721 /* Init all hooks to impossible value. */
1715 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1722 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1716 info->hook_entry[i] = 0xFFFFFFFF; 1723 info->hook_entry[i] = 0xFFFFFFFF;
1717 info->underflow[i] = 0xFFFFFFFF; 1724 info->underflow[i] = 0xFFFFFFFF;
1718 } 1725 }
1719 1726
1720 duprintf("translate_compat_table: size %u\n", info->size); 1727 duprintf("translate_compat_table: size %u\n", info->size);
1721 j = 0; 1728 j = 0;
1722 xt_compat_lock(AF_INET); 1729 xt_compat_lock(AF_INET);
1723 /* Walk through entries, checking offsets. */ 1730 /* Walk through entries, checking offsets. */
1724 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1731 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1725 check_compat_entry_size_and_hooks, 1732 check_compat_entry_size_and_hooks,
1726 info, &size, entry0, 1733 info, &size, entry0,
1727 entry0 + total_size, 1734 entry0 + total_size,
1728 hook_entries, underflows, &j, name); 1735 hook_entries, underflows, &j, name);
1729 if (ret != 0) 1736 if (ret != 0)
1730 goto out_unlock; 1737 goto out_unlock;
1731 1738
1732 ret = -EINVAL; 1739 ret = -EINVAL;
1733 if (j != number) { 1740 if (j != number) {
1734 duprintf("translate_compat_table: %u not %u entries\n", 1741 duprintf("translate_compat_table: %u not %u entries\n",
1735 j, number); 1742 j, number);
1736 goto out_unlock; 1743 goto out_unlock;
1737 } 1744 }
1738 1745
1739 /* Check hooks all assigned */ 1746 /* Check hooks all assigned */
1740 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1747 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1741 /* Only hooks which are valid */ 1748 /* Only hooks which are valid */
1742 if (!(valid_hooks & (1 << i))) 1749 if (!(valid_hooks & (1 << i)))
1743 continue; 1750 continue;
1744 if (info->hook_entry[i] == 0xFFFFFFFF) { 1751 if (info->hook_entry[i] == 0xFFFFFFFF) {
1745 duprintf("Invalid hook entry %u %u\n", 1752 duprintf("Invalid hook entry %u %u\n",
1746 i, hook_entries[i]); 1753 i, hook_entries[i]);
1747 goto out_unlock; 1754 goto out_unlock;
1748 } 1755 }
1749 if (info->underflow[i] == 0xFFFFFFFF) { 1756 if (info->underflow[i] == 0xFFFFFFFF) {
1750 duprintf("Invalid underflow %u %u\n", 1757 duprintf("Invalid underflow %u %u\n",
1751 i, underflows[i]); 1758 i, underflows[i]);
1752 goto out_unlock; 1759 goto out_unlock;
1753 } 1760 }
1754 } 1761 }
1755 1762
1756 ret = -ENOMEM; 1763 ret = -ENOMEM;
1757 newinfo = xt_alloc_table_info(size); 1764 newinfo = xt_alloc_table_info(size);
1758 if (!newinfo) 1765 if (!newinfo)
1759 goto out_unlock; 1766 goto out_unlock;
1760 1767
1761 newinfo->number = number; 1768 newinfo->number = number;
1762 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1769 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1763 newinfo->hook_entry[i] = info->hook_entry[i]; 1770 newinfo->hook_entry[i] = info->hook_entry[i];
1764 newinfo->underflow[i] = info->underflow[i]; 1771 newinfo->underflow[i] = info->underflow[i];
1765 } 1772 }
1766 entry1 = newinfo->entries[raw_smp_processor_id()]; 1773 entry1 = newinfo->entries[raw_smp_processor_id()];
1767 pos = entry1; 1774 pos = entry1;
1768 size = total_size; 1775 size = total_size;
1769 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1776 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1770 compat_copy_entry_from_user, 1777 compat_copy_entry_from_user,
1771 &pos, &size, name, newinfo, entry1); 1778 &pos, &size, name, newinfo, entry1);
1772 xt_compat_flush_offsets(AF_INET); 1779 xt_compat_flush_offsets(AF_INET);
1773 xt_compat_unlock(AF_INET); 1780 xt_compat_unlock(AF_INET);
1774 if (ret) 1781 if (ret)
1775 goto free_newinfo; 1782 goto free_newinfo;
1776 1783
1777 ret = -ELOOP; 1784 ret = -ELOOP;
1778 if (!mark_source_chains(newinfo, valid_hooks, entry1)) 1785 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1779 goto free_newinfo; 1786 goto free_newinfo;
1780 1787
1781 i = 0; 1788 i = 0;
1782 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1789 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1783 net, name, &i); 1790 net, name, &i);
1784 if (ret) { 1791 if (ret) {
1785 j -= i; 1792 j -= i;
1786 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1793 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1787 compat_release_entry, &j); 1794 compat_release_entry, &j);
1788 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i); 1795 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1789 xt_free_table_info(newinfo); 1796 xt_free_table_info(newinfo);
1790 return ret; 1797 return ret;
1791 } 1798 }
1792 1799
1793 /* And one copy for every other CPU */ 1800 /* And one copy for every other CPU */
1794 for_each_possible_cpu(i) 1801 for_each_possible_cpu(i)
1795 if (newinfo->entries[i] && newinfo->entries[i] != entry1) 1802 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1796 memcpy(newinfo->entries[i], entry1, newinfo->size); 1803 memcpy(newinfo->entries[i], entry1, newinfo->size);
1797 1804
1798 *pinfo = newinfo; 1805 *pinfo = newinfo;
1799 *pentry0 = entry1; 1806 *pentry0 = entry1;
1800 xt_free_table_info(info); 1807 xt_free_table_info(info);
1801 return 0; 1808 return 0;
1802 1809
1803 free_newinfo: 1810 free_newinfo:
1804 xt_free_table_info(newinfo); 1811 xt_free_table_info(newinfo);
1805 out: 1812 out:
1806 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1813 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1807 return ret; 1814 return ret;
1808 out_unlock: 1815 out_unlock:
1809 xt_compat_flush_offsets(AF_INET); 1816 xt_compat_flush_offsets(AF_INET);
1810 xt_compat_unlock(AF_INET); 1817 xt_compat_unlock(AF_INET);
1811 goto out; 1818 goto out;
1812 } 1819 }
1813 1820
1814 static int 1821 static int
1815 compat_do_replace(struct net *net, void __user *user, unsigned int len) 1822 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1816 { 1823 {
1817 int ret; 1824 int ret;
1818 struct compat_ipt_replace tmp; 1825 struct compat_ipt_replace tmp;
1819 struct xt_table_info *newinfo; 1826 struct xt_table_info *newinfo;
1820 void *loc_cpu_entry; 1827 void *loc_cpu_entry;
1821 1828
1822 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1829 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1823 return -EFAULT; 1830 return -EFAULT;
1824 1831
1825 /* overflow check */ 1832 /* overflow check */
1826 if (tmp.size >= INT_MAX / num_possible_cpus()) 1833 if (tmp.size >= INT_MAX / num_possible_cpus())
1827 return -ENOMEM; 1834 return -ENOMEM;
1828 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1835 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1829 return -ENOMEM; 1836 return -ENOMEM;
1830 1837
1831 newinfo = xt_alloc_table_info(tmp.size); 1838 newinfo = xt_alloc_table_info(tmp.size);
1832 if (!newinfo) 1839 if (!newinfo)
1833 return -ENOMEM; 1840 return -ENOMEM;
1834 1841
1835 /* choose the copy that is on our node/cpu */ 1842 /* choose the copy that is on our node/cpu */
1836 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1843 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1837 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), 1844 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1838 tmp.size) != 0) { 1845 tmp.size) != 0) {
1839 ret = -EFAULT; 1846 ret = -EFAULT;
1840 goto free_newinfo; 1847 goto free_newinfo;
1841 } 1848 }
1842 1849
1843 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, 1850 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1844 &newinfo, &loc_cpu_entry, tmp.size, 1851 &newinfo, &loc_cpu_entry, tmp.size,
1845 tmp.num_entries, tmp.hook_entry, 1852 tmp.num_entries, tmp.hook_entry,
1846 tmp.underflow); 1853 tmp.underflow);
1847 if (ret != 0) 1854 if (ret != 0)
1848 goto free_newinfo; 1855 goto free_newinfo;
1849 1856
1850 duprintf("compat_do_replace: Translated table\n"); 1857 duprintf("compat_do_replace: Translated table\n");
1851 1858
1852 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1859 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1853 tmp.num_counters, compat_ptr(tmp.counters)); 1860 tmp.num_counters, compat_ptr(tmp.counters));
1854 if (ret) 1861 if (ret)
1855 goto free_newinfo_untrans; 1862 goto free_newinfo_untrans;
1856 return 0; 1863 return 0;
1857 1864
1858 free_newinfo_untrans: 1865 free_newinfo_untrans:
1859 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); 1866 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1860 free_newinfo: 1867 free_newinfo:
1861 xt_free_table_info(newinfo); 1868 xt_free_table_info(newinfo);
1862 return ret; 1869 return ret;
1863 } 1870 }
1864 1871
1865 static int 1872 static int
1866 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, 1873 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1867 unsigned int len) 1874 unsigned int len)
1868 { 1875 {
1869 int ret; 1876 int ret;
1870 1877
1871 if (!capable(CAP_NET_ADMIN)) 1878 if (!capable(CAP_NET_ADMIN))
1872 return -EPERM; 1879 return -EPERM;
1873 1880
1874 switch (cmd) { 1881 switch (cmd) {
1875 case IPT_SO_SET_REPLACE: 1882 case IPT_SO_SET_REPLACE:
1876 ret = compat_do_replace(sock_net(sk), user, len); 1883 ret = compat_do_replace(sock_net(sk), user, len);
1877 break; 1884 break;
1878 1885
1879 case IPT_SO_SET_ADD_COUNTERS: 1886 case IPT_SO_SET_ADD_COUNTERS:
1880 ret = do_add_counters(sock_net(sk), user, len, 1); 1887 ret = do_add_counters(sock_net(sk), user, len, 1);
1881 break; 1888 break;
1882 1889
1883 default: 1890 default:
1884 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); 1891 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1885 ret = -EINVAL; 1892 ret = -EINVAL;
1886 } 1893 }
1887 1894
1888 return ret; 1895 return ret;
1889 } 1896 }
1890 1897
1891 struct compat_ipt_get_entries { 1898 struct compat_ipt_get_entries {
1892 char name[IPT_TABLE_MAXNAMELEN]; 1899 char name[IPT_TABLE_MAXNAMELEN];
1893 compat_uint_t size; 1900 compat_uint_t size;
1894 struct compat_ipt_entry entrytable[0]; 1901 struct compat_ipt_entry entrytable[0];
1895 }; 1902 };
1896 1903
1897 static int 1904 static int
1898 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, 1905 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1899 void __user *userptr) 1906 void __user *userptr)
1900 { 1907 {
1901 struct xt_counters *counters; 1908 struct xt_counters *counters;
1902 const struct xt_table_info *private = table->private; 1909 const struct xt_table_info *private = table->private;
1903 void __user *pos; 1910 void __user *pos;
1904 unsigned int size; 1911 unsigned int size;
1905 int ret = 0; 1912 int ret = 0;
1906 const void *loc_cpu_entry; 1913 const void *loc_cpu_entry;
1907 unsigned int i = 0; 1914 unsigned int i = 0;
1908 1915
1909 counters = alloc_counters(table); 1916 counters = alloc_counters(table);
1910 if (IS_ERR(counters)) 1917 if (IS_ERR(counters))
1911 return PTR_ERR(counters); 1918 return PTR_ERR(counters);
1912 1919
1913 /* choose the copy that is on our node/cpu, ... 1920 /* choose the copy that is on our node/cpu, ...
1914 * This choice is lazy (because current thread is 1921 * This choice is lazy (because current thread is
1915 * allowed to migrate to another cpu) 1922 * allowed to migrate to another cpu)
1916 */ 1923 */
1917 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1924 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1918 pos = userptr; 1925 pos = userptr;
1919 size = total_size; 1926 size = total_size;
1920 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1927 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1921 compat_copy_entry_to_user, 1928 compat_copy_entry_to_user,
1922 &pos, &size, counters, &i); 1929 &pos, &size, counters, &i);
1923 1930
1924 vfree(counters); 1931 vfree(counters);
1925 return ret; 1932 return ret;
1926 } 1933 }
1927 1934
1928 static int 1935 static int
1929 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, 1936 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1930 int *len) 1937 int *len)
1931 { 1938 {
1932 int ret; 1939 int ret;
1933 struct compat_ipt_get_entries get; 1940 struct compat_ipt_get_entries get;
1934 struct xt_table *t; 1941 struct xt_table *t;
1935 1942
1936 if (*len < sizeof(get)) { 1943 if (*len < sizeof(get)) {
1937 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); 1944 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1938 return -EINVAL; 1945 return -EINVAL;
1939 } 1946 }
1940 1947
1941 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1948 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1942 return -EFAULT; 1949 return -EFAULT;
1943 1950
1944 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { 1951 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1945 duprintf("compat_get_entries: %u != %zu\n", 1952 duprintf("compat_get_entries: %u != %zu\n",
1946 *len, sizeof(get) + get.size); 1953 *len, sizeof(get) + get.size);
1947 return -EINVAL; 1954 return -EINVAL;
1948 } 1955 }
1949 1956
1950 xt_compat_lock(AF_INET); 1957 xt_compat_lock(AF_INET);
1951 t = xt_find_table_lock(net, AF_INET, get.name); 1958 t = xt_find_table_lock(net, AF_INET, get.name);
1952 if (t && !IS_ERR(t)) { 1959 if (t && !IS_ERR(t)) {
1953 const struct xt_table_info *private = t->private; 1960 const struct xt_table_info *private = t->private;
1954 struct xt_table_info info; 1961 struct xt_table_info info;
1955 duprintf("t->private->number = %u\n", private->number); 1962 duprintf("t->private->number = %u\n", private->number);
1956 ret = compat_table_info(private, &info); 1963 ret = compat_table_info(private, &info);
1957 if (!ret && get.size == info.size) { 1964 if (!ret && get.size == info.size) {
1958 ret = compat_copy_entries_to_user(private->size, 1965 ret = compat_copy_entries_to_user(private->size,
1959 t, uptr->entrytable); 1966 t, uptr->entrytable);
1960 } else if (!ret) { 1967 } else if (!ret) {
1961 duprintf("compat_get_entries: I've got %u not %u!\n", 1968 duprintf("compat_get_entries: I've got %u not %u!\n",
1962 private->size, get.size); 1969 private->size, get.size);
1963 ret = -EAGAIN; 1970 ret = -EAGAIN;
1964 } 1971 }
1965 xt_compat_flush_offsets(AF_INET); 1972 xt_compat_flush_offsets(AF_INET);
1966 module_put(t->me); 1973 module_put(t->me);
1967 xt_table_unlock(t); 1974 xt_table_unlock(t);
1968 } else 1975 } else
1969 ret = t ? PTR_ERR(t) : -ENOENT; 1976 ret = t ? PTR_ERR(t) : -ENOENT;
1970 1977
1971 xt_compat_unlock(AF_INET); 1978 xt_compat_unlock(AF_INET);
1972 return ret; 1979 return ret;
1973 } 1980 }
1974 1981
1975 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); 1982 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1976 1983
1977 static int 1984 static int
1978 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 1985 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1979 { 1986 {
1980 int ret; 1987 int ret;
1981 1988
1982 if (!capable(CAP_NET_ADMIN)) 1989 if (!capable(CAP_NET_ADMIN))
1983 return -EPERM; 1990 return -EPERM;
1984 1991
1985 switch (cmd) { 1992 switch (cmd) {
1986 case IPT_SO_GET_INFO: 1993 case IPT_SO_GET_INFO:
1987 ret = get_info(sock_net(sk), user, len, 1); 1994 ret = get_info(sock_net(sk), user, len, 1);
1988 break; 1995 break;
1989 case IPT_SO_GET_ENTRIES: 1996 case IPT_SO_GET_ENTRIES:
1990 ret = compat_get_entries(sock_net(sk), user, len); 1997 ret = compat_get_entries(sock_net(sk), user, len);
1991 break; 1998 break;
1992 default: 1999 default:
1993 ret = do_ipt_get_ctl(sk, cmd, user, len); 2000 ret = do_ipt_get_ctl(sk, cmd, user, len);
1994 } 2001 }
1995 return ret; 2002 return ret;
1996 } 2003 }
1997 #endif 2004 #endif
1998 2005
1999 static int 2006 static int
2000 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 2007 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2001 { 2008 {
2002 int ret; 2009 int ret;
2003 2010
2004 if (!capable(CAP_NET_ADMIN)) 2011 if (!capable(CAP_NET_ADMIN))
2005 return -EPERM; 2012 return -EPERM;
2006 2013
2007 switch (cmd) { 2014 switch (cmd) {
2008 case IPT_SO_SET_REPLACE: 2015 case IPT_SO_SET_REPLACE:
2009 ret = do_replace(sock_net(sk), user, len); 2016 ret = do_replace(sock_net(sk), user, len);
2010 break; 2017 break;
2011 2018
2012 case IPT_SO_SET_ADD_COUNTERS: 2019 case IPT_SO_SET_ADD_COUNTERS:
2013 ret = do_add_counters(sock_net(sk), user, len, 0); 2020 ret = do_add_counters(sock_net(sk), user, len, 0);
2014 break; 2021 break;
2015 2022
2016 default: 2023 default:
2017 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); 2024 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2018 ret = -EINVAL; 2025 ret = -EINVAL;
2019 } 2026 }
2020 2027
2021 return ret; 2028 return ret;
2022 } 2029 }
2023 2030
2024 static int 2031 static int
2025 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 2032 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2026 { 2033 {
2027 int ret; 2034 int ret;
2028 2035
2029 if (!capable(CAP_NET_ADMIN)) 2036 if (!capable(CAP_NET_ADMIN))
2030 return -EPERM; 2037 return -EPERM;
2031 2038
2032 switch (cmd) { 2039 switch (cmd) {
2033 case IPT_SO_GET_INFO: 2040 case IPT_SO_GET_INFO:
2034 ret = get_info(sock_net(sk), user, len, 0); 2041 ret = get_info(sock_net(sk), user, len, 0);
2035 break; 2042 break;
2036 2043
2037 case IPT_SO_GET_ENTRIES: 2044 case IPT_SO_GET_ENTRIES:
2038 ret = get_entries(sock_net(sk), user, len); 2045 ret = get_entries(sock_net(sk), user, len);
2039 break; 2046 break;
2040 2047
2041 case IPT_SO_GET_REVISION_MATCH: 2048 case IPT_SO_GET_REVISION_MATCH:
2042 case IPT_SO_GET_REVISION_TARGET: { 2049 case IPT_SO_GET_REVISION_TARGET: {
2043 struct ipt_get_revision rev; 2050 struct ipt_get_revision rev;
2044 int target; 2051 int target;
2045 2052
2046 if (*len != sizeof(rev)) { 2053 if (*len != sizeof(rev)) {
2047 ret = -EINVAL; 2054 ret = -EINVAL;
2048 break; 2055 break;
2049 } 2056 }
2050 if (copy_from_user(&rev, user, sizeof(rev)) != 0) { 2057 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2051 ret = -EFAULT; 2058 ret = -EFAULT;
2052 break; 2059 break;
2053 } 2060 }
2054 2061
2055 if (cmd == IPT_SO_GET_REVISION_TARGET) 2062 if (cmd == IPT_SO_GET_REVISION_TARGET)
2056 target = 1; 2063 target = 1;
2057 else 2064 else
2058 target = 0; 2065 target = 0;
2059 2066
2060 try_then_request_module(xt_find_revision(AF_INET, rev.name, 2067 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2061 rev.revision, 2068 rev.revision,
2062 target, &ret), 2069 target, &ret),
2063 "ipt_%s", rev.name); 2070 "ipt_%s", rev.name);
2064 break; 2071 break;
2065 } 2072 }
2066 2073
2067 default: 2074 default:
2068 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); 2075 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2069 ret = -EINVAL; 2076 ret = -EINVAL;
2070 } 2077 }
2071 2078
2072 return ret; 2079 return ret;
2073 } 2080 }
2074 2081
2075 struct xt_table *ipt_register_table(struct net *net, 2082 struct xt_table *ipt_register_table(struct net *net,
2076 const struct xt_table *table, 2083 const struct xt_table *table,
2077 const struct ipt_replace *repl) 2084 const struct ipt_replace *repl)
2078 { 2085 {
2079 int ret; 2086 int ret;
2080 struct xt_table_info *newinfo; 2087 struct xt_table_info *newinfo;
2081 struct xt_table_info bootstrap 2088 struct xt_table_info bootstrap
2082 = { 0, 0, 0, { 0 }, { 0 }, { } }; 2089 = { 0, 0, 0, { 0 }, { 0 }, { } };
2083 void *loc_cpu_entry; 2090 void *loc_cpu_entry;
2084 struct xt_table *new_table; 2091 struct xt_table *new_table;
2085 2092
2086 newinfo = xt_alloc_table_info(repl->size); 2093 newinfo = xt_alloc_table_info(repl->size);
2087 if (!newinfo) { 2094 if (!newinfo) {
2088 ret = -ENOMEM; 2095 ret = -ENOMEM;
2089 goto out; 2096 goto out;
2090 } 2097 }
2091 2098
2092 /* choose the copy on our node/cpu, but don't care about preemption */ 2099 /* choose the copy on our node/cpu, but don't care about preemption */
2093 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2100 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2094 memcpy(loc_cpu_entry, repl->entries, repl->size); 2101 memcpy(loc_cpu_entry, repl->entries, repl->size);
2095 2102
2096 ret = translate_table(net, table->name, table->valid_hooks, 2103 ret = translate_table(net, table->name, table->valid_hooks,
2097 newinfo, loc_cpu_entry, repl->size, 2104 newinfo, loc_cpu_entry, repl->size,
2098 repl->num_entries, 2105 repl->num_entries,
2099 repl->hook_entry, 2106 repl->hook_entry,
2100 repl->underflow); 2107 repl->underflow);
2101 if (ret != 0) 2108 if (ret != 0)
2102 goto out_free; 2109 goto out_free;
2103 2110
2104 new_table = xt_register_table(net, table, &bootstrap, newinfo); 2111 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2105 if (IS_ERR(new_table)) { 2112 if (IS_ERR(new_table)) {
2106 ret = PTR_ERR(new_table); 2113 ret = PTR_ERR(new_table);
2107 goto out_free; 2114 goto out_free;
2108 } 2115 }
2109 2116
2110 return new_table; 2117 return new_table;
2111 2118
2112 out_free: 2119 out_free:
2113 xt_free_table_info(newinfo); 2120 xt_free_table_info(newinfo);
2114 out: 2121 out:
2115 return ERR_PTR(ret); 2122 return ERR_PTR(ret);
2116 } 2123 }
2117 2124
2118 void ipt_unregister_table(struct net *net, struct xt_table *table) 2125 void ipt_unregister_table(struct net *net, struct xt_table *table)
2119 { 2126 {
2120 struct xt_table_info *private; 2127 struct xt_table_info *private;
2121 void *loc_cpu_entry; 2128 void *loc_cpu_entry;
2122 struct module *table_owner = table->me; 2129 struct module *table_owner = table->me;
2123 2130
2124 private = xt_unregister_table(table); 2131 private = xt_unregister_table(table);
2125 2132
2126 /* Decrease module usage counts and free resources */ 2133 /* Decrease module usage counts and free resources */
2127 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2134 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2128 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL); 2135 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
2129 if (private->number > private->initial_entries) 2136 if (private->number > private->initial_entries)
2130 module_put(table_owner); 2137 module_put(table_owner);
2131 xt_free_table_info(private); 2138 xt_free_table_info(private);
2132 } 2139 }
2133 2140
2134 /* Returns 1 if the type and code is matched by the range, 0 otherwise */ 2141 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2135 static inline bool 2142 static inline bool
2136 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, 2143 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2137 u_int8_t type, u_int8_t code, 2144 u_int8_t type, u_int8_t code,
2138 bool invert) 2145 bool invert)
2139 { 2146 {
2140 return ((test_type == 0xFF) || 2147 return ((test_type == 0xFF) ||
2141 (type == test_type && code >= min_code && code <= max_code)) 2148 (type == test_type && code >= min_code && code <= max_code))
2142 ^ invert; 2149 ^ invert;
2143 } 2150 }
2144 2151
2145 static bool 2152 static bool
2146 icmp_match(const struct sk_buff *skb, const struct xt_match_param *par) 2153 icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
2147 { 2154 {
2148 const struct icmphdr *ic; 2155 const struct icmphdr *ic;
2149 struct icmphdr _icmph; 2156 struct icmphdr _icmph;
2150 const struct ipt_icmp *icmpinfo = par->matchinfo; 2157 const struct ipt_icmp *icmpinfo = par->matchinfo;
2151 2158
2152 /* Must not be a fragment. */ 2159 /* Must not be a fragment. */
2153 if (par->fragoff != 0) 2160 if (par->fragoff != 0)
2154 return false; 2161 return false;
2155 2162
2156 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); 2163 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2157 if (ic == NULL) { 2164 if (ic == NULL) {
2158 /* We've been asked to examine this packet, and we 2165 /* We've been asked to examine this packet, and we
2159 * can't. Hence, no choice but to drop. 2166 * can't. Hence, no choice but to drop.
2160 */ 2167 */
2161 duprintf("Dropping evil ICMP tinygram.\n"); 2168 duprintf("Dropping evil ICMP tinygram.\n");
2162 *par->hotdrop = true; 2169 *par->hotdrop = true;
2163 return false; 2170 return false;
2164 } 2171 }
2165 2172
2166 return icmp_type_code_match(icmpinfo->type, 2173 return icmp_type_code_match(icmpinfo->type,
2167 icmpinfo->code[0], 2174 icmpinfo->code[0],
2168 icmpinfo->code[1], 2175 icmpinfo->code[1],
2169 ic->type, ic->code, 2176 ic->type, ic->code,
2170 !!(icmpinfo->invflags&IPT_ICMP_INV)); 2177 !!(icmpinfo->invflags&IPT_ICMP_INV));
2171 } 2178 }
2172 2179
2173 static bool icmp_checkentry(const struct xt_mtchk_param *par) 2180 static bool icmp_checkentry(const struct xt_mtchk_param *par)
2174 { 2181 {
2175 const struct ipt_icmp *icmpinfo = par->matchinfo; 2182 const struct ipt_icmp *icmpinfo = par->matchinfo;
2176 2183
2177 /* Must specify no unknown invflags */ 2184 /* Must specify no unknown invflags */
2178 return !(icmpinfo->invflags & ~IPT_ICMP_INV); 2185 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2179 } 2186 }
2180 2187
2181 /* The built-in targets: standard (NULL) and error. */ 2188 /* The built-in targets: standard (NULL) and error. */
2182 static struct xt_target ipt_standard_target __read_mostly = { 2189 static struct xt_target ipt_standard_target __read_mostly = {
2183 .name = IPT_STANDARD_TARGET, 2190 .name = IPT_STANDARD_TARGET,
2184 .targetsize = sizeof(int), 2191 .targetsize = sizeof(int),
2185 .family = NFPROTO_IPV4, 2192 .family = NFPROTO_IPV4,
2186 #ifdef CONFIG_COMPAT 2193 #ifdef CONFIG_COMPAT
2187 .compatsize = sizeof(compat_int_t), 2194 .compatsize = sizeof(compat_int_t),
2188 .compat_from_user = compat_standard_from_user, 2195 .compat_from_user = compat_standard_from_user,
2189 .compat_to_user = compat_standard_to_user, 2196 .compat_to_user = compat_standard_to_user,
2190 #endif 2197 #endif
2191 }; 2198 };
2192 2199
2193 static struct xt_target ipt_error_target __read_mostly = { 2200 static struct xt_target ipt_error_target __read_mostly = {
2194 .name = IPT_ERROR_TARGET, 2201 .name = IPT_ERROR_TARGET,
2195 .target = ipt_error, 2202 .target = ipt_error,
2196 .targetsize = IPT_FUNCTION_MAXNAMELEN, 2203 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2197 .family = NFPROTO_IPV4, 2204 .family = NFPROTO_IPV4,
2198 }; 2205 };
2199 2206
2200 static struct nf_sockopt_ops ipt_sockopts = { 2207 static struct nf_sockopt_ops ipt_sockopts = {
2201 .pf = PF_INET, 2208 .pf = PF_INET,
2202 .set_optmin = IPT_BASE_CTL, 2209 .set_optmin = IPT_BASE_CTL,
2203 .set_optmax = IPT_SO_SET_MAX+1, 2210 .set_optmax = IPT_SO_SET_MAX+1,
2204 .set = do_ipt_set_ctl, 2211 .set = do_ipt_set_ctl,
2205 #ifdef CONFIG_COMPAT 2212 #ifdef CONFIG_COMPAT
2206 .compat_set = compat_do_ipt_set_ctl, 2213 .compat_set = compat_do_ipt_set_ctl,
2207 #endif 2214 #endif
2208 .get_optmin = IPT_BASE_CTL, 2215 .get_optmin = IPT_BASE_CTL,
2209 .get_optmax = IPT_SO_GET_MAX+1, 2216 .get_optmax = IPT_SO_GET_MAX+1,
2210 .get = do_ipt_get_ctl, 2217 .get = do_ipt_get_ctl,
2211 #ifdef CONFIG_COMPAT 2218 #ifdef CONFIG_COMPAT
2212 .compat_get = compat_do_ipt_get_ctl, 2219 .compat_get = compat_do_ipt_get_ctl,
2213 #endif 2220 #endif
2214 .owner = THIS_MODULE, 2221 .owner = THIS_MODULE,
2215 }; 2222 };
2216 2223
2217 static struct xt_match icmp_matchstruct __read_mostly = { 2224 static struct xt_match icmp_matchstruct __read_mostly = {
2218 .name = "icmp", 2225 .name = "icmp",
2219 .match = icmp_match, 2226 .match = icmp_match,
2220 .matchsize = sizeof(struct ipt_icmp), 2227 .matchsize = sizeof(struct ipt_icmp),
2221 .checkentry = icmp_checkentry, 2228 .checkentry = icmp_checkentry,
2222 .proto = IPPROTO_ICMP, 2229 .proto = IPPROTO_ICMP,
2223 .family = NFPROTO_IPV4, 2230 .family = NFPROTO_IPV4,
2224 }; 2231 };
2225 2232
2226 static int __net_init ip_tables_net_init(struct net *net) 2233 static int __net_init ip_tables_net_init(struct net *net)
2227 { 2234 {
2228 return xt_proto_init(net, NFPROTO_IPV4); 2235 return xt_proto_init(net, NFPROTO_IPV4);
2229 } 2236 }
2230 2237
2231 static void __net_exit ip_tables_net_exit(struct net *net) 2238 static void __net_exit ip_tables_net_exit(struct net *net)
2232 { 2239 {
2233 xt_proto_fini(net, NFPROTO_IPV4); 2240 xt_proto_fini(net, NFPROTO_IPV4);
2234 } 2241 }
2235 2242
2236 static struct pernet_operations ip_tables_net_ops = { 2243 static struct pernet_operations ip_tables_net_ops = {
2237 .init = ip_tables_net_init, 2244 .init = ip_tables_net_init,
2238 .exit = ip_tables_net_exit, 2245 .exit = ip_tables_net_exit,
2239 }; 2246 };
2240 2247
2241 static int __init ip_tables_init(void) 2248 static int __init ip_tables_init(void)
2242 { 2249 {
2243 int ret; 2250 int ret;
2244 2251
2245 ret = register_pernet_subsys(&ip_tables_net_ops); 2252 ret = register_pernet_subsys(&ip_tables_net_ops);
2246 if (ret < 0) 2253 if (ret < 0)
2247 goto err1; 2254 goto err1;
2248 2255
2249 /* No one else will be downing sem now, so we won't sleep */ 2256 /* No one else will be downing sem now, so we won't sleep */
2250 ret = xt_register_target(&ipt_standard_target); 2257 ret = xt_register_target(&ipt_standard_target);
2251 if (ret < 0) 2258 if (ret < 0)
2252 goto err2; 2259 goto err2;
2253 ret = xt_register_target(&ipt_error_target); 2260 ret = xt_register_target(&ipt_error_target);
2254 if (ret < 0) 2261 if (ret < 0)
2255 goto err3; 2262 goto err3;
2256 ret = xt_register_match(&icmp_matchstruct); 2263 ret = xt_register_match(&icmp_matchstruct);
2257 if (ret < 0) 2264 if (ret < 0)
2258 goto err4; 2265 goto err4;
2259 2266
2260 /* Register setsockopt */ 2267 /* Register setsockopt */
2261 ret = nf_register_sockopt(&ipt_sockopts); 2268 ret = nf_register_sockopt(&ipt_sockopts);
2262 if (ret < 0) 2269 if (ret < 0)
2263 goto err5; 2270 goto err5;
2264 2271
2265 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n"); 2272 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2266 return 0; 2273 return 0;
2267 2274
2268 err5: 2275 err5:
2269 xt_unregister_match(&icmp_matchstruct); 2276 xt_unregister_match(&icmp_matchstruct);
2270 err4: 2277 err4:
2271 xt_unregister_target(&ipt_error_target); 2278 xt_unregister_target(&ipt_error_target);
2272 err3: 2279 err3:
2273 xt_unregister_target(&ipt_standard_target); 2280 xt_unregister_target(&ipt_standard_target);
2274 err2: 2281 err2:
2275 unregister_pernet_subsys(&ip_tables_net_ops); 2282 unregister_pernet_subsys(&ip_tables_net_ops);
2276 err1: 2283 err1:
2277 return ret; 2284 return ret;
2278 } 2285 }
2279 2286
2280 static void __exit ip_tables_fini(void) 2287 static void __exit ip_tables_fini(void)
2281 { 2288 {
2282 nf_unregister_sockopt(&ipt_sockopts); 2289 nf_unregister_sockopt(&ipt_sockopts);
2283 2290
2284 xt_unregister_match(&icmp_matchstruct); 2291 xt_unregister_match(&icmp_matchstruct);
2285 xt_unregister_target(&ipt_error_target); 2292 xt_unregister_target(&ipt_error_target);
2286 xt_unregister_target(&ipt_standard_target); 2293 xt_unregister_target(&ipt_standard_target);
2287 2294
2288 unregister_pernet_subsys(&ip_tables_net_ops); 2295 unregister_pernet_subsys(&ip_tables_net_ops);
2289 } 2296 }
2290 2297
2291 EXPORT_SYMBOL(ipt_register_table); 2298 EXPORT_SYMBOL(ipt_register_table);
2292 EXPORT_SYMBOL(ipt_unregister_table); 2299 EXPORT_SYMBOL(ipt_unregister_table);
2293 EXPORT_SYMBOL(ipt_do_table); 2300 EXPORT_SYMBOL(ipt_do_table);
2294 module_init(ip_tables_init); 2301 module_init(ip_tables_init);
2295 module_exit(ip_tables_fini); 2302 module_exit(ip_tables_fini);
2296 2303
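A note on the registration flow that closes ip_tables.c above: ipt_register_table() copies the caller's ipt_replace blob into freshly allocated per-CPU memory and never keeps a reference to the blob itself, which is what allows this commit to swap the static __net_initdata tables for a buffer built at runtime and freed right after registration. The per-family generator, ipt_alloc_initial_table(), is defined earlier in this file via the shared xt_repldata.h template. The following is a minimal sketch of what such a generator has to produce, mirroring the static initializers removed in the files below; the helper name sketch_alloc_initial_table and the open-coded layout are illustrative only, not the kernel's actual implementation.

/* Minimal sketch, assuming it mirrors what ipt_alloc_initial_table() builds:
 * one all-ACCEPT standard entry per hook bit in ->valid_hooks, followed by
 * the mandatory ERROR terminator, with hook_entry[]/underflow[] holding the
 * per-hook byte offsets.  Needs <linux/slab.h> and <linux/bitops.h>.
 */
static struct ipt_replace *sketch_alloc_initial_table(const struct xt_table *info)
{
	unsigned int hook_mask = info->valid_hooks;
	unsigned int nhooks = hweight32(hook_mask);
	unsigned int bytes = 0, hooknum, i = 0;
	struct ipt_replace *repl;
	struct ipt_standard *ent;
	struct ipt_error *term;

	repl = kzalloc(sizeof(*repl) + nhooks * sizeof(*ent) + sizeof(*term),
		       GFP_KERNEL);
	if (repl == NULL)
		return NULL;

	strncpy(repl->name, info->name, sizeof(repl->name));
	repl->valid_hooks = info->valid_hooks;
	repl->num_entries = nhooks + 1;		/* standard entries + ERROR */
	repl->size = nhooks * sizeof(*ent) + sizeof(*term);

	ent = (struct ipt_standard *)repl->entries;
	for (hooknum = 0; hook_mask != 0; hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		repl->hook_entry[hooknum] = bytes;
		repl->underflow[hooknum]  = bytes;
		ent[i++] = (struct ipt_standard)IPT_STANDARD_INIT(NF_ACCEPT);
		bytes += sizeof(*ent);
	}
	term = (struct ipt_error *)&ent[i];
	*term = (struct ipt_error)IPT_ERROR_INIT;
	return repl;
}

Because ipt_register_table() copies the blob, the per-table modules below can kfree() the result immediately after a successful (or failed) registration.
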
net/ipv4/netfilter/iptable_filter.c
1 /* 1 /*
2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. 2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/moduleparam.h> 14 #include <linux/moduleparam.h>
15 #include <linux/netfilter_ipv4/ip_tables.h> 15 #include <linux/netfilter_ipv4/ip_tables.h>
16 #include <net/ip.h> 16 #include <net/ip.h>
17 17
18 MODULE_LICENSE("GPL"); 18 MODULE_LICENSE("GPL");
19 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 19 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
20 MODULE_DESCRIPTION("iptables filter table"); 20 MODULE_DESCRIPTION("iptables filter table");
21 21
22 #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ 22 #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \
23 (1 << NF_INET_FORWARD) | \ 23 (1 << NF_INET_FORWARD) | \
24 (1 << NF_INET_LOCAL_OUT)) 24 (1 << NF_INET_LOCAL_OUT))
25 25
26 static struct
27 {
28 struct ipt_replace repl;
29 struct ipt_standard entries[3];
30 struct ipt_error term;
31 } initial_table __net_initdata = {
32 .repl = {
33 .name = "filter",
34 .valid_hooks = FILTER_VALID_HOOKS,
35 .num_entries = 4,
36 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
37 .hook_entry = {
38 [NF_INET_LOCAL_IN] = 0,
39 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
40 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
41 },
42 .underflow = {
43 [NF_INET_LOCAL_IN] = 0,
44 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
46 },
47 },
48 .entries = {
49 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
50 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
51 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
52 },
53 .term = IPT_ERROR_INIT, /* ERROR */
54 };
55
56 static const struct xt_table packet_filter = { 26 static const struct xt_table packet_filter = {
57 .name = "filter", 27 .name = "filter",
58 .valid_hooks = FILTER_VALID_HOOKS, 28 .valid_hooks = FILTER_VALID_HOOKS,
59 .me = THIS_MODULE, 29 .me = THIS_MODULE,
60 .af = NFPROTO_IPV4, 30 .af = NFPROTO_IPV4,
61 .priority = NF_IP_PRI_FILTER, 31 .priority = NF_IP_PRI_FILTER,
62 }; 32 };
63 33
64 static unsigned int 34 static unsigned int
65 iptable_filter_hook(unsigned int hook, struct sk_buff *skb, 35 iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
66 const struct net_device *in, const struct net_device *out, 36 const struct net_device *in, const struct net_device *out,
67 int (*okfn)(struct sk_buff *)) 37 int (*okfn)(struct sk_buff *))
68 { 38 {
69 const struct net *net; 39 const struct net *net;
70 40
71 if (hook == NF_INET_LOCAL_OUT && 41 if (hook == NF_INET_LOCAL_OUT &&
72 (skb->len < sizeof(struct iphdr) || 42 (skb->len < sizeof(struct iphdr) ||
73 ip_hdrlen(skb) < sizeof(struct iphdr))) 43 ip_hdrlen(skb) < sizeof(struct iphdr)))
74 /* root is playing with raw sockets. */ 44 /* root is playing with raw sockets. */
75 return NF_ACCEPT; 45 return NF_ACCEPT;
76 46
77 net = dev_net((in != NULL) ? in : out); 47 net = dev_net((in != NULL) ? in : out);
78 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter); 48 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
79 } 49 }
80 50
81 static struct nf_hook_ops *filter_ops __read_mostly; 51 static struct nf_hook_ops *filter_ops __read_mostly;
82 52
83 /* Default to forward because I got too much mail already. */ 53 /* Default to forward because I got too much mail already. */
84 static int forward = NF_ACCEPT; 54 static int forward = NF_ACCEPT;
85 module_param(forward, bool, 0000); 55 module_param(forward, bool, 0000);
86 56
87 static int __net_init iptable_filter_net_init(struct net *net) 57 static int __net_init iptable_filter_net_init(struct net *net)
88 { 58 {
89 /* Register table */ 59 struct ipt_replace *repl;
60
61 repl = ipt_alloc_initial_table(&packet_filter);
62 if (repl == NULL)
63 return -ENOMEM;
64 /* Entry 1 is the FORWARD hook */
65 ((struct ipt_standard *)repl->entries)[1].target.verdict =
66 -forward - 1;
67
90 net->ipv4.iptable_filter = 68 net->ipv4.iptable_filter =
91 ipt_register_table(net, &packet_filter, &initial_table.repl); 69 ipt_register_table(net, &packet_filter, repl);
70 kfree(repl);
92 if (IS_ERR(net->ipv4.iptable_filter)) 71 if (IS_ERR(net->ipv4.iptable_filter))
93 return PTR_ERR(net->ipv4.iptable_filter); 72 return PTR_ERR(net->ipv4.iptable_filter);
94 return 0; 73 return 0;
95 } 74 }
96 75
97 static void __net_exit iptable_filter_net_exit(struct net *net) 76 static void __net_exit iptable_filter_net_exit(struct net *net)
98 { 77 {
99 ipt_unregister_table(net, net->ipv4.iptable_filter); 78 ipt_unregister_table(net, net->ipv4.iptable_filter);
100 } 79 }
101 80
102 static struct pernet_operations iptable_filter_net_ops = { 81 static struct pernet_operations iptable_filter_net_ops = {
103 .init = iptable_filter_net_init, 82 .init = iptable_filter_net_init,
104 .exit = iptable_filter_net_exit, 83 .exit = iptable_filter_net_exit,
105 }; 84 };
106 85
107 static int __init iptable_filter_init(void) 86 static int __init iptable_filter_init(void)
108 { 87 {
109 int ret; 88 int ret;
110 89
111 if (forward < 0 || forward > NF_MAX_VERDICT) { 90 if (forward < 0 || forward > NF_MAX_VERDICT) {
112 printk("iptables forward must be 0 or 1\n"); 91 printk("iptables forward must be 0 or 1\n");
113 return -EINVAL; 92 return -EINVAL;
114 } 93 }
115
116 /* Entry 1 is the FORWARD hook */
117 initial_table.entries[1].target.verdict = -forward - 1;
118 94
119 ret = register_pernet_subsys(&iptable_filter_net_ops); 95 ret = register_pernet_subsys(&iptable_filter_net_ops);
120 if (ret < 0) 96 if (ret < 0)
121 return ret; 97 return ret;
122 98
123 /* Register hooks */ 99 /* Register hooks */
124 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); 100 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
125 if (IS_ERR(filter_ops)) { 101 if (IS_ERR(filter_ops)) {
126 ret = PTR_ERR(filter_ops); 102 ret = PTR_ERR(filter_ops);
127 goto cleanup_table; 103 goto cleanup_table;
128 } 104 }
129 105
130 return ret; 106 return ret;
131 107
132 cleanup_table: 108 cleanup_table:
133 unregister_pernet_subsys(&iptable_filter_net_ops); 109 unregister_pernet_subsys(&iptable_filter_net_ops);
134 return ret; 110 return ret;
135 } 111 }
136 112
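The only table-specific tweak iptable_filter still needs is the one visible in its net_init above: entry 1 of the freshly generated ruleset is the FORWARD hook (entries are emitted in hook-bit order, LOCAL_IN, FORWARD, LOCAL_OUT), and its standard-target verdict is overwritten with -forward - 1. The arithmetic comes from the standard target's verdict encoding: built-in netfilter verdicts are stored as negative values so that non-negative values remain usable as jump offsets into the blob. A pair of helpers makes the mapping explicit; the names are illustrative, not kernel API.

/* Illustrative helpers for the standard-target verdict encoding assumed by
 * "-forward - 1" above: NF_DROP (0) becomes -1, NF_ACCEPT (1) becomes -2,
 * and so on; any value >= 0 is instead treated as a jump offset.
 */
static inline int nf_verdict_to_standard(unsigned int nf_verdict)
{
	return -(int)nf_verdict - 1;
}

static inline unsigned int standard_to_nf_verdict(int std_verdict)
{
	return (unsigned int)(-std_verdict - 1);	/* only valid for std_verdict < 0 */
}

With forward left at its NF_ACCEPT default the FORWARD policy therefore becomes -2 (accept); loading the module with forward=0 yields -1 (drop), matching the 0-or-1 range the init routine enforces.
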
net/ipv4/netfilter/iptable_mangle.c
1 /* 1 /*
2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. 2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/netfilter_ipv4/ip_tables.h> 12 #include <linux/netfilter_ipv4/ip_tables.h>
13 #include <linux/netdevice.h> 13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <net/sock.h> 15 #include <net/sock.h>
16 #include <net/route.h> 16 #include <net/route.h>
17 #include <linux/ip.h> 17 #include <linux/ip.h>
18 #include <net/ip.h> 18 #include <net/ip.h>
19 19
20 MODULE_LICENSE("GPL"); 20 MODULE_LICENSE("GPL");
21 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 21 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
22 MODULE_DESCRIPTION("iptables mangle table"); 22 MODULE_DESCRIPTION("iptables mangle table");
23 23
24 #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ 24 #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
25 (1 << NF_INET_LOCAL_IN) | \ 25 (1 << NF_INET_LOCAL_IN) | \
26 (1 << NF_INET_FORWARD) | \ 26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT) | \ 27 (1 << NF_INET_LOCAL_OUT) | \
28 (1 << NF_INET_POST_ROUTING)) 28 (1 << NF_INET_POST_ROUTING))
29 29
30 /* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
31 static const struct
32 {
33 struct ipt_replace repl;
34 struct ipt_standard entries[5];
35 struct ipt_error term;
36 } initial_table __net_initdata = {
37 .repl = {
38 .name = "mangle",
39 .valid_hooks = MANGLE_VALID_HOOKS,
40 .num_entries = 6,
41 .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
45 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
48 },
49 .underflow = {
50 [NF_INET_PRE_ROUTING] = 0,
51 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
52 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
53 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
54 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
55 },
56 },
57 .entries = {
58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
59 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
60 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
61 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
62 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
63 },
64 .term = IPT_ERROR_INIT, /* ERROR */
65 };
66
67 static const struct xt_table packet_mangler = { 30 static const struct xt_table packet_mangler = {
68 .name = "mangle", 31 .name = "mangle",
69 .valid_hooks = MANGLE_VALID_HOOKS, 32 .valid_hooks = MANGLE_VALID_HOOKS,
70 .me = THIS_MODULE, 33 .me = THIS_MODULE,
71 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
72 .priority = NF_IP_PRI_MANGLE, 35 .priority = NF_IP_PRI_MANGLE,
73 }; 36 };
74 37
75 static unsigned int 38 static unsigned int
76 ipt_local_hook(unsigned int hook, 39 ipt_local_hook(unsigned int hook,
77 struct sk_buff *skb, 40 struct sk_buff *skb,
78 const struct net_device *in, 41 const struct net_device *in,
79 const struct net_device *out, 42 const struct net_device *out,
80 int (*okfn)(struct sk_buff *)) 43 int (*okfn)(struct sk_buff *))
81 { 44 {
82 unsigned int ret; 45 unsigned int ret;
83 const struct iphdr *iph; 46 const struct iphdr *iph;
84 u_int8_t tos; 47 u_int8_t tos;
85 __be32 saddr, daddr; 48 __be32 saddr, daddr;
86 u_int32_t mark; 49 u_int32_t mark;
87 50
88 /* root is playing with raw sockets. */ 51 /* root is playing with raw sockets. */
89 if (skb->len < sizeof(struct iphdr) || 52 if (skb->len < sizeof(struct iphdr) ||
90 ip_hdrlen(skb) < sizeof(struct iphdr)) 53 ip_hdrlen(skb) < sizeof(struct iphdr))
91 return NF_ACCEPT; 54 return NF_ACCEPT;
92 55
93 /* Save things which could affect route */ 56 /* Save things which could affect route */
94 mark = skb->mark; 57 mark = skb->mark;
95 iph = ip_hdr(skb); 58 iph = ip_hdr(skb);
96 saddr = iph->saddr; 59 saddr = iph->saddr;
97 daddr = iph->daddr; 60 daddr = iph->daddr;
98 tos = iph->tos; 61 tos = iph->tos;
99 62
100 ret = ipt_do_table(skb, hook, in, out, 63 ret = ipt_do_table(skb, hook, in, out,
101 dev_net(out)->ipv4.iptable_mangle); 64 dev_net(out)->ipv4.iptable_mangle);
102 /* Reroute for ANY change. */ 65 /* Reroute for ANY change. */
103 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 66 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
104 iph = ip_hdr(skb); 67 iph = ip_hdr(skb);
105 68
106 if (iph->saddr != saddr || 69 if (iph->saddr != saddr ||
107 iph->daddr != daddr || 70 iph->daddr != daddr ||
108 skb->mark != mark || 71 skb->mark != mark ||
109 iph->tos != tos) 72 iph->tos != tos)
110 if (ip_route_me_harder(skb, RTN_UNSPEC)) 73 if (ip_route_me_harder(skb, RTN_UNSPEC))
111 ret = NF_DROP; 74 ret = NF_DROP;
112 } 75 }
113 76
114 return ret; 77 return ret;
115 } 78 }
116 79
117 /* The work comes in here from netfilter.c. */ 80 /* The work comes in here from netfilter.c. */
118 static unsigned int 81 static unsigned int
119 iptable_mangle_hook(unsigned int hook, 82 iptable_mangle_hook(unsigned int hook,
120 struct sk_buff *skb, 83 struct sk_buff *skb,
121 const struct net_device *in, 84 const struct net_device *in,
122 const struct net_device *out, 85 const struct net_device *out,
123 int (*okfn)(struct sk_buff *)) 86 int (*okfn)(struct sk_buff *))
124 { 87 {
125 if (hook == NF_INET_LOCAL_OUT) 88 if (hook == NF_INET_LOCAL_OUT)
126 return ipt_local_hook(hook, skb, in, out, okfn); 89 return ipt_local_hook(hook, skb, in, out, okfn);
127 90
128 /* PREROUTING/INPUT/FORWARD: */ 91 /* PREROUTING/INPUT/FORWARD: */
129 return ipt_do_table(skb, hook, in, out, 92 return ipt_do_table(skb, hook, in, out,
130 dev_net(in)->ipv4.iptable_mangle); 93 dev_net(in)->ipv4.iptable_mangle);
131 } 94 }
132 95
133 static struct nf_hook_ops *mangle_ops __read_mostly; 96 static struct nf_hook_ops *mangle_ops __read_mostly;
134 97
135 static int __net_init iptable_mangle_net_init(struct net *net) 98 static int __net_init iptable_mangle_net_init(struct net *net)
136 { 99 {
137 /* Register table */ 100 struct ipt_replace *repl;
101
102 repl = ipt_alloc_initial_table(&packet_mangler);
103 if (repl == NULL)
104 return -ENOMEM;
138 net->ipv4.iptable_mangle = 105 net->ipv4.iptable_mangle =
139 ipt_register_table(net, &packet_mangler, &initial_table.repl); 106 ipt_register_table(net, &packet_mangler, repl);
107 kfree(repl);
140 if (IS_ERR(net->ipv4.iptable_mangle)) 108 if (IS_ERR(net->ipv4.iptable_mangle))
141 return PTR_ERR(net->ipv4.iptable_mangle); 109 return PTR_ERR(net->ipv4.iptable_mangle);
142 return 0; 110 return 0;
143 } 111 }
144 112
145 static void __net_exit iptable_mangle_net_exit(struct net *net) 113 static void __net_exit iptable_mangle_net_exit(struct net *net)
146 { 114 {
147 ipt_unregister_table(net, net->ipv4.iptable_mangle); 115 ipt_unregister_table(net, net->ipv4.iptable_mangle);
148 } 116 }
149 117
150 static struct pernet_operations iptable_mangle_net_ops = { 118 static struct pernet_operations iptable_mangle_net_ops = {
151 .init = iptable_mangle_net_init, 119 .init = iptable_mangle_net_init,
152 .exit = iptable_mangle_net_exit, 120 .exit = iptable_mangle_net_exit,
153 }; 121 };
154 122
155 static int __init iptable_mangle_init(void) 123 static int __init iptable_mangle_init(void)
156 { 124 {
157 int ret; 125 int ret;
158 126
159 ret = register_pernet_subsys(&iptable_mangle_net_ops); 127 ret = register_pernet_subsys(&iptable_mangle_net_ops);
160 if (ret < 0) 128 if (ret < 0)
161 return ret; 129 return ret;
162 130
163 /* Register hooks */ 131 /* Register hooks */
164 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook); 132 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
165 if (IS_ERR(mangle_ops)) { 133 if (IS_ERR(mangle_ops)) {
166 ret = PTR_ERR(mangle_ops); 134 ret = PTR_ERR(mangle_ops);
167 goto cleanup_table; 135 goto cleanup_table;
168 } 136 }
169 137
170 return ret; 138 return ret;
171 139
172 cleanup_table: 140 cleanup_table:
173 unregister_pernet_subsys(&iptable_mangle_net_ops); 141 unregister_pernet_subsys(&iptable_mangle_net_ops);
174 return ret; 142 return ret;
175 } 143 }
176 144
177 static void __exit iptable_mangle_fini(void) 145 static void __exit iptable_mangle_fini(void)
178 { 146 {
179 xt_hook_unlink(&packet_mangler, mangle_ops); 147 xt_hook_unlink(&packet_mangler, mangle_ops);
180 unregister_pernet_subsys(&iptable_mangle_net_ops); 148 unregister_pernet_subsys(&iptable_mangle_net_ops);
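iptable_mangle keeps its special-cased LOCAL_OUT path: because mangle rules may rewrite fields that feed the routing decision, ipt_local_hook() snapshots them before running the table and forces a route re-lookup if any of them changed. Pulled out as a standalone helper purely for illustration (the code above inlines this check), the idiom looks like the following.

/* Illustrative refactoring of the check ipt_local_hook() performs inline:
 * any change to the source/destination address, TOS or skb mark invalidates
 * the routing decision already made for a locally generated packet.
 */
static int reroute_if_key_fields_changed(struct sk_buff *skb,
					 __be32 saddr, __be32 daddr,
					 u_int8_t tos, u_int32_t mark)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->saddr != saddr || iph->daddr != daddr ||
	    skb->mark != mark || iph->tos != tos)
		return ip_route_me_harder(skb, RTN_UNSPEC);	/* non-zero => drop */
	return 0;
}
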
net/ipv4/netfilter/iptable_raw.c
1 /* 1 /*
2 * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT. 2 * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT.
3 * 3 *
4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5 */ 5 */
6 #include <linux/module.h> 6 #include <linux/module.h>
7 #include <linux/netfilter_ipv4/ip_tables.h> 7 #include <linux/netfilter_ipv4/ip_tables.h>
8 #include <net/ip.h> 8 #include <net/ip.h>
9 9
10 #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 10 #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
11 11
12 static const struct
13 {
14 struct ipt_replace repl;
15 struct ipt_standard entries[2];
16 struct ipt_error term;
17 } initial_table __net_initdata = {
18 .repl = {
19 .name = "raw",
20 .valid_hooks = RAW_VALID_HOOKS,
21 .num_entries = 3,
22 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
23 .hook_entry = {
24 [NF_INET_PRE_ROUTING] = 0,
25 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
26 },
27 .underflow = {
28 [NF_INET_PRE_ROUTING] = 0,
29 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
30 },
31 },
32 .entries = {
33 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
34 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
35 },
36 .term = IPT_ERROR_INIT, /* ERROR */
37 };
38
39 static const struct xt_table packet_raw = { 12 static const struct xt_table packet_raw = {
40 .name = "raw", 13 .name = "raw",
41 .valid_hooks = RAW_VALID_HOOKS, 14 .valid_hooks = RAW_VALID_HOOKS,
42 .me = THIS_MODULE, 15 .me = THIS_MODULE,
43 .af = NFPROTO_IPV4, 16 .af = NFPROTO_IPV4,
44 .priority = NF_IP_PRI_RAW, 17 .priority = NF_IP_PRI_RAW,
45 }; 18 };
46 19
47 /* The work comes in here from netfilter.c. */ 20 /* The work comes in here from netfilter.c. */
48 static unsigned int 21 static unsigned int
49 iptable_raw_hook(unsigned int hook, struct sk_buff *skb, 22 iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
50 const struct net_device *in, const struct net_device *out, 23 const struct net_device *in, const struct net_device *out,
51 int (*okfn)(struct sk_buff *)) 24 int (*okfn)(struct sk_buff *))
52 { 25 {
53 const struct net *net; 26 const struct net *net;
54 27
55 if (hook == NF_INET_LOCAL_OUT && 28 if (hook == NF_INET_LOCAL_OUT &&
56 (skb->len < sizeof(struct iphdr) || 29 (skb->len < sizeof(struct iphdr) ||
57 ip_hdrlen(skb) < sizeof(struct iphdr))) 30 ip_hdrlen(skb) < sizeof(struct iphdr)))
58 /* root is playing with raw sockets. */ 31 /* root is playing with raw sockets. */
59 return NF_ACCEPT; 32 return NF_ACCEPT;
60 33
61 net = dev_net((in != NULL) ? in : out); 34 net = dev_net((in != NULL) ? in : out);
62 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw); 35 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
63 } 36 }
64 37
65 static struct nf_hook_ops *rawtable_ops __read_mostly; 38 static struct nf_hook_ops *rawtable_ops __read_mostly;
66 39
67 static int __net_init iptable_raw_net_init(struct net *net) 40 static int __net_init iptable_raw_net_init(struct net *net)
68 { 41 {
69 /* Register table */ 42 struct ipt_replace *repl;
43
44 repl = ipt_alloc_initial_table(&packet_raw);
45 if (repl == NULL)
46 return -ENOMEM;
70 net->ipv4.iptable_raw = 47 net->ipv4.iptable_raw =
71 ipt_register_table(net, &packet_raw, &initial_table.repl); 48 ipt_register_table(net, &packet_raw, repl);
49 kfree(repl);
72 if (IS_ERR(net->ipv4.iptable_raw)) 50 if (IS_ERR(net->ipv4.iptable_raw))
73 return PTR_ERR(net->ipv4.iptable_raw); 51 return PTR_ERR(net->ipv4.iptable_raw);
74 return 0; 52 return 0;
75 } 53 }
76 54
77 static void __net_exit iptable_raw_net_exit(struct net *net) 55 static void __net_exit iptable_raw_net_exit(struct net *net)
78 { 56 {
79 ipt_unregister_table(net, net->ipv4.iptable_raw); 57 ipt_unregister_table(net, net->ipv4.iptable_raw);
80 } 58 }
81 59
82 static struct pernet_operations iptable_raw_net_ops = { 60 static struct pernet_operations iptable_raw_net_ops = {
83 .init = iptable_raw_net_init, 61 .init = iptable_raw_net_init,
84 .exit = iptable_raw_net_exit, 62 .exit = iptable_raw_net_exit,
85 }; 63 };
86 64
87 static int __init iptable_raw_init(void) 65 static int __init iptable_raw_init(void)
88 { 66 {
89 int ret; 67 int ret;
90 68
91 ret = register_pernet_subsys(&iptable_raw_net_ops); 69 ret = register_pernet_subsys(&iptable_raw_net_ops);
92 if (ret < 0) 70 if (ret < 0)
93 return ret; 71 return ret;
94 72
95 /* Register hooks */ 73 /* Register hooks */
96 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook); 74 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
97 if (IS_ERR(rawtable_ops)) { 75 if (IS_ERR(rawtable_ops)) {
98 ret = PTR_ERR(rawtable_ops); 76 ret = PTR_ERR(rawtable_ops);
99 goto cleanup_table; 77 goto cleanup_table;
100 } 78 }
101 79
102 return ret; 80 return ret;
103 81
104 cleanup_table: 82 cleanup_table:
105 unregister_pernet_subsys(&iptable_raw_net_ops); 83 unregister_pernet_subsys(&iptable_raw_net_ops);
106 return ret; 84 return ret;
107 } 85 }
108 86
109 static void __exit iptable_raw_fini(void) 87 static void __exit iptable_raw_fini(void)
110 { 88 {
111 xt_hook_unlink(&packet_raw, rawtable_ops); 89 xt_hook_unlink(&packet_raw, rawtable_ops);
112 unregister_pernet_subsys(&iptable_raw_net_ops); 90 unregister_pernet_subsys(&iptable_raw_net_ops);
113 } 91 }
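For a two-hook table like raw, the blob handed back by ipt_alloc_initial_table() has exactly the shape the removed static initializer spelled out by hand. A worked example of the generated layout, using the sizes the old .repl initializer above declared:

/* Generated layout for the two-hook "raw" table:
 *
 *   offset 0                                PRE_ROUTING standard entry (ACCEPT)
 *   offset sizeof(struct ipt_standard)      LOCAL_OUT   standard entry (ACCEPT)
 *   offset 2 * sizeof(struct ipt_standard)  ERROR terminator
 *
 * hence repl->num_entries == 3 and
 *       repl->size == 2 * sizeof(struct ipt_standard) + sizeof(struct ipt_error),
 * matching the hook_entry[]/underflow[] offsets of the removed initializer.
 */
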
net/ipv4/netfilter/iptable_security.c
1 /* 1 /*
2 * "security" table 2 * "security" table
3 * 3 *
4 * This is for use by Mandatory Access Control (MAC) security models, 4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context 5 * which need to be able to manage security policy in separate context
6 * to DAC. 6 * to DAC.
7 * 7 *
8 * Based on iptable_mangle.c 8 * Based on iptable_mangle.c
9 * 9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> 11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> 12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as 15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
17 */ 17 */
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/netfilter_ipv4/ip_tables.h> 19 #include <linux/netfilter_ipv4/ip_tables.h>
20 #include <net/ip.h> 20 #include <net/ip.h>
21 21
22 MODULE_LICENSE("GPL"); 22 MODULE_LICENSE("GPL");
23 MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); 23 MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
24 MODULE_DESCRIPTION("iptables security table, for MAC rules"); 24 MODULE_DESCRIPTION("iptables security table, for MAC rules");
25 25
26 #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ 26 #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
27 (1 << NF_INET_FORWARD) | \ 27 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT) 28 (1 << NF_INET_LOCAL_OUT)
29 29
30 static const struct
31 {
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35 } initial_table __net_initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58 };
59
60 static const struct xt_table security_table = { 30 static const struct xt_table security_table = {
61 .name = "security", 31 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS, 32 .valid_hooks = SECURITY_VALID_HOOKS,
63 .me = THIS_MODULE, 33 .me = THIS_MODULE,
64 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
65 .priority = NF_IP_PRI_SECURITY, 35 .priority = NF_IP_PRI_SECURITY,
66 }; 36 };
67 37
68 static unsigned int 38 static unsigned int
69 iptable_security_hook(unsigned int hook, struct sk_buff *skb, 39 iptable_security_hook(unsigned int hook, struct sk_buff *skb,
70 const struct net_device *in, 40 const struct net_device *in,
71 const struct net_device *out, 41 const struct net_device *out,
72 int (*okfn)(struct sk_buff *)) 42 int (*okfn)(struct sk_buff *))
73 { 43 {
74 const struct net *net; 44 const struct net *net;
75 45
76 if (hook == NF_INET_LOCAL_OUT && 46 if (hook == NF_INET_LOCAL_OUT &&
77 (skb->len < sizeof(struct iphdr) || 47 (skb->len < sizeof(struct iphdr) ||
78 ip_hdrlen(skb) < sizeof(struct iphdr))) 48 ip_hdrlen(skb) < sizeof(struct iphdr)))
79 /* Somebody is playing with raw sockets. */ 49 /* Somebody is playing with raw sockets. */
80 return NF_ACCEPT; 50 return NF_ACCEPT;
81 51
82 net = dev_net((in != NULL) ? in : out); 52 net = dev_net((in != NULL) ? in : out);
83 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security); 53 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
84 } 54 }
85 55
86 static struct nf_hook_ops *sectbl_ops __read_mostly; 56 static struct nf_hook_ops *sectbl_ops __read_mostly;
87 57
88 static int __net_init iptable_security_net_init(struct net *net) 58 static int __net_init iptable_security_net_init(struct net *net)
89 { 59 {
90 net->ipv4.iptable_security = 60 struct ipt_replace *repl;
91 ipt_register_table(net, &security_table, &initial_table.repl);
92 61
62 repl = ipt_alloc_initial_table(&security_table);
63 if (repl == NULL)
64 return -ENOMEM;
65 net->ipv4.iptable_security =
66 ipt_register_table(net, &security_table, repl);
67 kfree(repl);
93 if (IS_ERR(net->ipv4.iptable_security)) 68 if (IS_ERR(net->ipv4.iptable_security))
94 return PTR_ERR(net->ipv4.iptable_security); 69 return PTR_ERR(net->ipv4.iptable_security);
95 70
96 return 0; 71 return 0;
97 } 72 }
98 73
99 static void __net_exit iptable_security_net_exit(struct net *net) 74 static void __net_exit iptable_security_net_exit(struct net *net)
100 { 75 {
101 ipt_unregister_table(net, net->ipv4.iptable_security); 76 ipt_unregister_table(net, net->ipv4.iptable_security);
102 } 77 }
103 78
104 static struct pernet_operations iptable_security_net_ops = { 79 static struct pernet_operations iptable_security_net_ops = {
105 .init = iptable_security_net_init, 80 .init = iptable_security_net_init,
106 .exit = iptable_security_net_exit, 81 .exit = iptable_security_net_exit,
107 }; 82 };
108 83
109 static int __init iptable_security_init(void) 84 static int __init iptable_security_init(void)
110 { 85 {
111 int ret; 86 int ret;
112 87
113 ret = register_pernet_subsys(&iptable_security_net_ops); 88 ret = register_pernet_subsys(&iptable_security_net_ops);
114 if (ret < 0) 89 if (ret < 0)
115 return ret; 90 return ret;
116 91
117 sectbl_ops = xt_hook_link(&security_table, iptable_security_hook); 92 sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
118 if (IS_ERR(sectbl_ops)) { 93 if (IS_ERR(sectbl_ops)) {
119 ret = PTR_ERR(sectbl_ops); 94 ret = PTR_ERR(sectbl_ops);
120 goto cleanup_table; 95 goto cleanup_table;
121 } 96 }
122 97
123 return ret; 98 return ret;
124 99
125 cleanup_table: 100 cleanup_table:
126 unregister_pernet_subsys(&iptable_security_net_ops); 101 unregister_pernet_subsys(&iptable_security_net_ops);
127 return ret; 102 return ret;
128 } 103 }
129 104
130 static void __exit iptable_security_fini(void) 105 static void __exit iptable_security_fini(void)
131 { 106 {
132 xt_hook_unlink(&security_table, sectbl_ops); 107 xt_hook_unlink(&security_table, sectbl_ops);
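iptable_security follows the same on-demand pattern as filter; what distinguishes it is only where in the hook chain it runs. Each table module pins a priority via its xt_table .priority field, and that ordering is what keeps raw, mangle, NAT, filter and security processing deterministic. For orientation, the NF_IP_PRI_* values these modules reference (as defined in linux/netfilter_ipv4.h of this era; listed here only as a reminder, not redefined by this commit):

/* Hook priority ordering assumed by the .priority fields above
 * (lower values run first):
 *
 *   NF_IP_PRI_RAW       = -300   iptable_raw
 *   NF_IP_PRI_MANGLE    = -150   iptable_mangle
 *   NF_IP_PRI_NAT_DST   = -100   nat (DNAT side)
 *   NF_IP_PRI_FILTER    =    0   iptable_filter
 *   NF_IP_PRI_SECURITY  =   50   iptable_security
 *   NF_IP_PRI_NAT_SRC   =  100   nat (SNAT side)
 */
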
net/ipv4/netfilter/nf_nat_rule.c
1 /* (C) 1999-2001 Paul `Rusty' Russell 1 /* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9 /* Everything about the rules for NAT. */ 9 /* Everything about the rules for NAT. */
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/ip.h> 11 #include <linux/ip.h>
12 #include <linux/netfilter.h> 12 #include <linux/netfilter.h>
13 #include <linux/netfilter_ipv4.h> 13 #include <linux/netfilter_ipv4.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/kmod.h> 15 #include <linux/kmod.h>
16 #include <linux/skbuff.h> 16 #include <linux/skbuff.h>
17 #include <linux/proc_fs.h> 17 #include <linux/proc_fs.h>
18 #include <net/checksum.h> 18 #include <net/checksum.h>
19 #include <net/route.h> 19 #include <net/route.h>
20 #include <linux/bitops.h> 20 #include <linux/bitops.h>
21 21
22 #include <linux/netfilter_ipv4/ip_tables.h> 22 #include <linux/netfilter_ipv4/ip_tables.h>
23 #include <net/netfilter/nf_nat.h> 23 #include <net/netfilter/nf_nat.h>
24 #include <net/netfilter/nf_nat_core.h> 24 #include <net/netfilter/nf_nat_core.h>
25 #include <net/netfilter/nf_nat_rule.h> 25 #include <net/netfilter/nf_nat_rule.h>
26 26
27 #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ 27 #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
28 (1 << NF_INET_POST_ROUTING) | \ 28 (1 << NF_INET_POST_ROUTING) | \
29 (1 << NF_INET_LOCAL_OUT)) 29 (1 << NF_INET_LOCAL_OUT))
30 30
31 static const struct
32 {
33 struct ipt_replace repl;
34 struct ipt_standard entries[3];
35 struct ipt_error term;
36 } nat_initial_table __net_initdata = {
37 .repl = {
38 .name = "nat",
39 .valid_hooks = NAT_VALID_HOOKS,
40 .num_entries = 4,
41 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
46 },
47 .underflow = {
48 [NF_INET_PRE_ROUTING] = 0,
49 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
50 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
51 },
52 },
53 .entries = {
54 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
57 },
58 .term = IPT_ERROR_INIT, /* ERROR */
59 };
60
61 static const struct xt_table nat_table = { 31 static const struct xt_table nat_table = {
62 .name = "nat", 32 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 33 .valid_hooks = NAT_VALID_HOOKS,
64 .me = THIS_MODULE, 34 .me = THIS_MODULE,
65 .af = NFPROTO_IPV4, 35 .af = NFPROTO_IPV4,
66 }; 36 };
67 37
68 /* Source NAT */ 38 /* Source NAT */
69 static unsigned int 39 static unsigned int
70 ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par) 40 ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par)
71 { 41 {
72 struct nf_conn *ct; 42 struct nf_conn *ct;
73 enum ip_conntrack_info ctinfo; 43 enum ip_conntrack_info ctinfo;
74 const struct nf_nat_multi_range_compat *mr = par->targinfo; 44 const struct nf_nat_multi_range_compat *mr = par->targinfo;
75 45
76 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); 46 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
77 47
78 ct = nf_ct_get(skb, &ctinfo); 48 ct = nf_ct_get(skb, &ctinfo);
79 49
80 /* Connection must be valid and new. */ 50 /* Connection must be valid and new. */
81 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 51 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
82 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 52 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
83 NF_CT_ASSERT(par->out != NULL); 53 NF_CT_ASSERT(par->out != NULL);
84 54
85 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); 55 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
86 } 56 }
87 57
88 static unsigned int 58 static unsigned int
89 ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par) 59 ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
90 { 60 {
91 struct nf_conn *ct; 61 struct nf_conn *ct;
92 enum ip_conntrack_info ctinfo; 62 enum ip_conntrack_info ctinfo;
93 const struct nf_nat_multi_range_compat *mr = par->targinfo; 63 const struct nf_nat_multi_range_compat *mr = par->targinfo;
94 64
95 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || 65 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
96 par->hooknum == NF_INET_LOCAL_OUT); 66 par->hooknum == NF_INET_LOCAL_OUT);
97 67
98 ct = nf_ct_get(skb, &ctinfo); 68 ct = nf_ct_get(skb, &ctinfo);
99 69
100 /* Connection must be valid and new. */ 70 /* Connection must be valid and new. */
101 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); 71 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
102 72
103 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST); 73 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
104 } 74 }
105 75
106 static bool ipt_snat_checkentry(const struct xt_tgchk_param *par) 76 static bool ipt_snat_checkentry(const struct xt_tgchk_param *par)
107 { 77 {
108 const struct nf_nat_multi_range_compat *mr = par->targinfo; 78 const struct nf_nat_multi_range_compat *mr = par->targinfo;
109 79
110 /* Must be a valid range */ 80 /* Must be a valid range */
111 if (mr->rangesize != 1) { 81 if (mr->rangesize != 1) {
112 printk("SNAT: multiple ranges no longer supported\n"); 82 printk("SNAT: multiple ranges no longer supported\n");
113 return false; 83 return false;
114 } 84 }
115 return true; 85 return true;
116 } 86 }
117 87
118 static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par) 88 static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par)
119 { 89 {
120 const struct nf_nat_multi_range_compat *mr = par->targinfo; 90 const struct nf_nat_multi_range_compat *mr = par->targinfo;
121 91
122 /* Must be a valid range */ 92 /* Must be a valid range */
123 if (mr->rangesize != 1) { 93 if (mr->rangesize != 1) {
124 printk("DNAT: multiple ranges no longer supported\n"); 94 printk("DNAT: multiple ranges no longer supported\n");
125 return false; 95 return false;
126 } 96 }
127 return true; 97 return true;
128 } 98 }
129 99
130 unsigned int 100 unsigned int
131 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 101 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
132 { 102 {
133 /* Force range to this IP; let proto decide mapping for 103 /* Force range to this IP; let proto decide mapping for
134 per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 104 per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
135 Use reply in case it's already been mangled (eg local packet). 105 Use reply in case it's already been mangled (eg local packet).
136 */ 106 */
137 __be32 ip 107 __be32 ip
138 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC 108 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
139 ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip 109 ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
140 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); 110 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
141 struct nf_nat_range range 111 struct nf_nat_range range
142 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; 112 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
143 113
144 pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, &ip); 114 pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, &ip);
145 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); 115 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
146 } 116 }
147 117
148 int nf_nat_rule_find(struct sk_buff *skb, 118 int nf_nat_rule_find(struct sk_buff *skb,
149 unsigned int hooknum, 119 unsigned int hooknum,
150 const struct net_device *in, 120 const struct net_device *in,
151 const struct net_device *out, 121 const struct net_device *out,
152 struct nf_conn *ct) 122 struct nf_conn *ct)
153 { 123 {
154 struct net *net = nf_ct_net(ct); 124 struct net *net = nf_ct_net(ct);
155 int ret; 125 int ret;
156 126
157 ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table); 127 ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
158 128
159 if (ret == NF_ACCEPT) { 129 if (ret == NF_ACCEPT) {
160 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) 130 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
161 /* NUL mapping */ 131 /* NUL mapping */
162 ret = alloc_null_binding(ct, hooknum); 132 ret = alloc_null_binding(ct, hooknum);
163 } 133 }
164 return ret; 134 return ret;
165 } 135 }
166 136
167 static struct xt_target ipt_snat_reg __read_mostly = { 137 static struct xt_target ipt_snat_reg __read_mostly = {
168 .name = "SNAT", 138 .name = "SNAT",
169 .target = ipt_snat_target, 139 .target = ipt_snat_target,
170 .targetsize = sizeof(struct nf_nat_multi_range_compat), 140 .targetsize = sizeof(struct nf_nat_multi_range_compat),
171 .table = "nat", 141 .table = "nat",
172 .hooks = 1 << NF_INET_POST_ROUTING, 142 .hooks = 1 << NF_INET_POST_ROUTING,
173 .checkentry = ipt_snat_checkentry, 143 .checkentry = ipt_snat_checkentry,
174 .family = AF_INET, 144 .family = AF_INET,
175 }; 145 };
176 146
177 static struct xt_target ipt_dnat_reg __read_mostly = { 147 static struct xt_target ipt_dnat_reg __read_mostly = {
178 .name = "DNAT", 148 .name = "DNAT",
179 .target = ipt_dnat_target, 149 .target = ipt_dnat_target,
180 .targetsize = sizeof(struct nf_nat_multi_range_compat), 150 .targetsize = sizeof(struct nf_nat_multi_range_compat),
181 .table = "nat", 151 .table = "nat",
182 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), 152 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
183 .checkentry = ipt_dnat_checkentry, 153 .checkentry = ipt_dnat_checkentry,
184 .family = AF_INET, 154 .family = AF_INET,
185 }; 155 };
186 156
187 static int __net_init nf_nat_rule_net_init(struct net *net) 157 static int __net_init nf_nat_rule_net_init(struct net *net)
188 { 158 {
189 net->ipv4.nat_table = ipt_register_table(net, &nat_table, 159 struct ipt_replace *repl;
190 &nat_initial_table.repl); 160
161 repl = ipt_alloc_initial_table(&nat_table);
162 if (repl == NULL)
163 return -ENOMEM;
164 net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
165 kfree(repl);
191 if (IS_ERR(net->ipv4.nat_table)) 166 if (IS_ERR(net->ipv4.nat_table))
192 return PTR_ERR(net->ipv4.nat_table); 167 return PTR_ERR(net->ipv4.nat_table);
193 return 0; 168 return 0;
194 } 169 }
195 170
196 static void __net_exit nf_nat_rule_net_exit(struct net *net) 171 static void __net_exit nf_nat_rule_net_exit(struct net *net)
197 { 172 {
198 ipt_unregister_table(net, net->ipv4.nat_table); 173 ipt_unregister_table(net, net->ipv4.nat_table);
199 } 174 }
200 175
201 static struct pernet_operations nf_nat_rule_net_ops = { 176 static struct pernet_operations nf_nat_rule_net_ops = {
202 .init = nf_nat_rule_net_init, 177 .init = nf_nat_rule_net_init,
203 .exit = nf_nat_rule_net_exit, 178 .exit = nf_nat_rule_net_exit,
204 }; 179 };
205 180
206 int __init nf_nat_rule_init(void) 181 int __init nf_nat_rule_init(void)
207 { 182 {
208 int ret; 183 int ret;
209 184
210 ret = register_pernet_subsys(&nf_nat_rule_net_ops); 185 ret = register_pernet_subsys(&nf_nat_rule_net_ops);
211 if (ret != 0) 186 if (ret != 0)
212 goto out; 187 goto out;
213 ret = xt_register_target(&ipt_snat_reg); 188 ret = xt_register_target(&ipt_snat_reg);
214 if (ret != 0) 189 if (ret != 0)
215 goto unregister_table; 190 goto unregister_table;
216 191
217 ret = xt_register_target(&ipt_dnat_reg); 192 ret = xt_register_target(&ipt_dnat_reg);
218 if (ret != 0) 193 if (ret != 0)
219 goto unregister_snat; 194 goto unregister_snat;
220 195
221 return ret; 196 return ret;
222 197
223 unregister_snat: 198 unregister_snat:
224 xt_unregister_target(&ipt_snat_reg); 199 xt_unregister_target(&ipt_snat_reg);
225 unregister_table: 200 unregister_table:
226 unregister_pernet_subsys(&nf_nat_rule_net_ops); 201 unregister_pernet_subsys(&nf_nat_rule_net_ops);
227 out: 202 out:
228 return ret; 203 return ret;
229 } 204 }
230 205
231 void nf_nat_rule_cleanup(void) 206 void nf_nat_rule_cleanup(void)
232 { 207 {
net/ipv6/netfilter/ip6_tables.c
1 /* 1 /*
2 * Packet matching code. 2 * Packet matching code.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h> 12 #include <linux/capability.h>
13 #include <linux/in.h> 13 #include <linux/in.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <linux/kmod.h> 15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h> 16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h> 17 #include <linux/netdevice.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/poison.h> 19 #include <linux/poison.h>
20 #include <linux/icmpv6.h> 20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h> 21 #include <net/ipv6.h>
22 #include <net/compat.h> 22 #include <net/compat.h>
23 #include <asm/uaccess.h> 23 #include <asm/uaccess.h>
24 #include <linux/mutex.h> 24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h> 25 #include <linux/proc_fs.h>
26 #include <linux/err.h> 26 #include <linux/err.h>
27 #include <linux/cpumask.h> 27 #include <linux/cpumask.h>
28 28
29 #include <linux/netfilter_ipv6/ip6_tables.h> 29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h> 30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h> 31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
32 33
33 MODULE_LICENSE("GPL"); 34 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter"); 36 MODULE_DESCRIPTION("IPv6 packet filter");
36 37
37 /*#define DEBUG_IP_FIREWALL*/ 38 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ 39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/ 40 /*#define DEBUG_IP_FIREWALL_USER*/
40 41
41 #ifdef DEBUG_IP_FIREWALL 42 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args) 43 #define dprintf(format, args...) printk(format , ## args)
43 #else 44 #else
44 #define dprintf(format, args...) 45 #define dprintf(format, args...)
45 #endif 46 #endif
46 47
47 #ifdef DEBUG_IP_FIREWALL_USER 48 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args) 49 #define duprintf(format, args...) printk(format , ## args)
49 #else 50 #else
50 #define duprintf(format, args...) 51 #define duprintf(format, args...)
51 #endif 52 #endif
52 53
53 #ifdef CONFIG_NETFILTER_DEBUG 54 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \ 55 #define IP_NF_ASSERT(x) \
55 do { \ 56 do { \
56 if (!(x)) \ 57 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \ 58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \ 59 __func__, __FILE__, __LINE__); \
59 } while(0) 60 } while(0)
60 #else 61 #else
61 #define IP_NF_ASSERT(x) 62 #define IP_NF_ASSERT(x)
62 #endif 63 #endif
63 64
64 #if 0 65 #if 0
65 /* All the better to debug you with... */ 66 /* All the better to debug you with... */
66 #define static 67 #define static
67 #define inline 68 #define inline
68 #endif 69 #endif
70
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 {
73 return xt_alloc_initial_table(ip6t, IP6T);
74 }
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
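
ip6t_alloc_initial_table() is a one-line wrapper around the xt_alloc_initial_table(ip6t, IP6T) macro pulled in via the new "../../netfilter/xt_repldata.h" include above. Conceptually it builds at runtime what the deleted static tables used to provide: a struct ip6t_replace header followed by one unconditional standard (policy) entry per valid hook and a terminating error entry. The sketch below illustrates that layout under those assumptions; it paraphrases the idea rather than quoting the header.

/* Sketch: what ip6t_alloc_initial_table() conceptually returns.  The real
 * work is done by the xt_alloc_initial_table(ip6t, IP6T) macro; the layout
 * below is an assumption made for illustration.
 */
static void *sketch_alloc_initial_table(const struct xt_table *info)
{
        unsigned int hooks = hweight32(info->valid_hooks);
        struct ip6t_replace *repl;
        size_t size;

        size = sizeof(*repl) +
               hooks * sizeof(struct ip6t_standard) +  /* per-hook policy entry */
               sizeof(struct ip6t_error);              /* terminating ERROR entry */
        repl = kzalloc(size, GFP_KERNEL);
        if (repl == NULL)
                return NULL;
        strcpy(repl->name, info->name);
        repl->valid_hooks = info->valid_hooks;
        repl->num_entries = hooks + 1;
        repl->size        = size - sizeof(*repl);
        /* ... fill hook_entry[]/underflow[] and the entries themselves ... */
        return repl;
}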
69 76
70 /* 77 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking 78 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore 79 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user 80 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read 81 context stops packets coming through and allows user context to read
75 the counters or update the rules. 82 the counters or update the rules.
76 83
77 Hence the start of any table is given by get_table() below. */ 84 Hence the start of any table is given by get_table() below. */
78 85
79 /* Check for an extension */ 86 /* Check for an extension */
80 int 87 int
81 ip6t_ext_hdr(u8 nexthdr) 88 ip6t_ext_hdr(u8 nexthdr)
82 { 89 {
83 return ( (nexthdr == IPPROTO_HOPOPTS) || 90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) || 91 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) || 92 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) || 93 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) || 94 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) || 95 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) ); 96 (nexthdr == IPPROTO_DSTOPTS) );
90 } 97 }
91 98
92 /* Returns whether the packet matches the rule or not. */ 99 /* Returns whether the packet matches the rule or not. */
93 /* Performance critical - called for every packet */ 100 /* Performance critical - called for every packet */
94 static inline bool 101 static inline bool
95 ip6_packet_match(const struct sk_buff *skb, 102 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev, 103 const char *indev,
97 const char *outdev, 104 const char *outdev,
98 const struct ip6t_ip6 *ip6info, 105 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff, 106 unsigned int *protoff,
100 int *fragoff, bool *hotdrop) 107 int *fragoff, bool *hotdrop)
101 { 108 {
102 unsigned long ret; 109 unsigned long ret;
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb); 110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
104 111
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) 112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
106 113
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, 114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP) || 115 &ip6info->src), IP6T_INV_SRCIP) ||
109 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, 116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) { 117 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n"); 118 dprintf("Source or dest mismatch.\n");
112 /* 119 /*
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, 120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr, 121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); 122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, 123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, 124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ 125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
119 return false; 126 return false;
120 } 127 }
121 128
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); 129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
123 130
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { 131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n", 132 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface, 133 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":""); 134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
128 return false; 135 return false;
129 } 136 }
130 137
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); 138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
132 139
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { 140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n", 141 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface, 142 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":""); 143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
137 return false; 144 return false;
138 } 145 }
139 146
140 /* ... might want to do something with class and flowlabel here ... */ 147 /* ... might want to do something with class and flowlabel here ... */
141 148
142 /* look for the desired protocol header */ 149 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) { 150 if((ip6info->flags & IP6T_F_PROTO)) {
144 int protohdr; 151 int protohdr;
145 unsigned short _frag_off; 152 unsigned short _frag_off;
146 153
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); 154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
148 if (protohdr < 0) { 155 if (protohdr < 0) {
149 if (_frag_off == 0) 156 if (_frag_off == 0)
150 *hotdrop = true; 157 *hotdrop = true;
151 return false; 158 return false;
152 } 159 }
153 *fragoff = _frag_off; 160 *fragoff = _frag_off;
154 161
155 dprintf("Packet protocol %hi ?= %s%hi.\n", 162 dprintf("Packet protocol %hi ?= %s%hi.\n",
156 protohdr, 163 protohdr,
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"", 164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
158 ip6info->proto); 165 ip6info->proto);
159 166
160 if (ip6info->proto == protohdr) { 167 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) { 168 if(ip6info->invflags & IP6T_INV_PROTO) {
162 return false; 169 return false;
163 } 170 }
164 return true; 171 return true;
165 } 172 }
166 173
167 /* We need a match for '-p all', too! */ 174 /* We need a match for '-p all', too! */
168 if ((ip6info->proto != 0) && 175 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO)) 176 !(ip6info->invflags & IP6T_INV_PROTO))
170 return false; 177 return false;
171 } 178 }
172 return true; 179 return true;
173 } 180 }
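
The FWINV() macro defined just above ip6_packet_match() is the usual x_tables invert idiom: the boolean result of a comparison is XORed with the (normalised) IP6T_INV_* bit, so setting the invert flag flips what counts as a mismatch. A tiny userspace illustration of the same idiom follows; this is not kernel code and the flag value is made up for the example.

#include <stdbool.h>
#include <stdio.h>

#define INV_SRCIP 0x01  /* illustrative invert-flag bit */

/* Same shape as FWINV(bool, invflg): XOR the raw test with the invert bit. */
static bool fwinv(bool addr_differs, unsigned int invflags, unsigned int flag)
{
        return addr_differs ^ !!(invflags & flag);
}

int main(void)
{
        /* source differs, no "!" given: the field rules the packet out -> 1 */
        printf("%d\n", fwinv(true, 0, INV_SRCIP));
        /* source differs, "!" (invert) given: it no longer rules it out -> 0 */
        printf("%d\n", fwinv(true, INV_SRCIP, INV_SRCIP));
        return 0;
}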
174 181
175 /* should be ip6 safe */ 182 /* should be ip6 safe */
176 static bool 183 static bool
177 ip6_checkentry(const struct ip6t_ip6 *ipv6) 184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
178 { 185 {
179 if (ipv6->flags & ~IP6T_F_MASK) { 186 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n", 187 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK); 188 ipv6->flags & ~IP6T_F_MASK);
182 return false; 189 return false;
183 } 190 }
184 if (ipv6->invflags & ~IP6T_INV_MASK) { 191 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n", 192 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK); 193 ipv6->invflags & ~IP6T_INV_MASK);
187 return false; 194 return false;
188 } 195 }
189 return true; 196 return true;
190 } 197 }
191 198
192 static unsigned int 199 static unsigned int
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par) 200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
194 { 201 {
195 if (net_ratelimit()) 202 if (net_ratelimit())
196 printk("ip6_tables: error: `%s'\n", 203 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo); 204 (const char *)par->targinfo);
198 205
199 return NF_DROP; 206 return NF_DROP;
200 } 207 }
201 208
202 /* Performance critical - called for every packet */ 209 /* Performance critical - called for every packet */
203 static inline bool 210 static inline bool
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, 211 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par) 212 struct xt_match_param *par)
206 { 213 {
207 par->match = m->u.kernel.match; 214 par->match = m->u.kernel.match;
208 par->matchinfo = m->data; 215 par->matchinfo = m->data;
209 216
210 /* Stop iteration if it doesn't match */ 217 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par)) 218 if (!m->u.kernel.match->match(skb, par))
212 return true; 219 return true;
213 else 220 else
214 return false; 221 return false;
215 } 222 }
216 223
217 static inline struct ip6t_entry * 224 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset) 225 get_entry(void *base, unsigned int offset)
219 { 226 {
220 return (struct ip6t_entry *)(base + offset); 227 return (struct ip6t_entry *)(base + offset);
221 } 228 }
222 229
223 /* All zeroes == unconditional rule. */ 230 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */ 231 /* Mildly perf critical (only if packet tracing is on) */
225 static inline bool unconditional(const struct ip6t_ip6 *ipv6) 232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
226 { 233 {
227 static const struct ip6t_ip6 uncond; 234 static const struct ip6t_ip6 uncond;
228 235
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; 236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
230 } 237 }
231 238
232 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 239 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 240 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234 /* This cries for unification! */ 241 /* This cries for unification! */
235 static const char *const hooknames[] = { 242 static const char *const hooknames[] = {
236 [NF_INET_PRE_ROUTING] = "PREROUTING", 243 [NF_INET_PRE_ROUTING] = "PREROUTING",
237 [NF_INET_LOCAL_IN] = "INPUT", 244 [NF_INET_LOCAL_IN] = "INPUT",
238 [NF_INET_FORWARD] = "FORWARD", 245 [NF_INET_FORWARD] = "FORWARD",
239 [NF_INET_LOCAL_OUT] = "OUTPUT", 246 [NF_INET_LOCAL_OUT] = "OUTPUT",
240 [NF_INET_POST_ROUTING] = "POSTROUTING", 247 [NF_INET_POST_ROUTING] = "POSTROUTING",
241 }; 248 };
242 249
243 enum nf_ip_trace_comments { 250 enum nf_ip_trace_comments {
244 NF_IP6_TRACE_COMMENT_RULE, 251 NF_IP6_TRACE_COMMENT_RULE,
245 NF_IP6_TRACE_COMMENT_RETURN, 252 NF_IP6_TRACE_COMMENT_RETURN,
246 NF_IP6_TRACE_COMMENT_POLICY, 253 NF_IP6_TRACE_COMMENT_POLICY,
247 }; 254 };
248 255
249 static const char *const comments[] = { 256 static const char *const comments[] = {
250 [NF_IP6_TRACE_COMMENT_RULE] = "rule", 257 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
251 [NF_IP6_TRACE_COMMENT_RETURN] = "return", 258 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
252 [NF_IP6_TRACE_COMMENT_POLICY] = "policy", 259 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
253 }; 260 };
254 261
255 static struct nf_loginfo trace_loginfo = { 262 static struct nf_loginfo trace_loginfo = {
256 .type = NF_LOG_TYPE_LOG, 263 .type = NF_LOG_TYPE_LOG,
257 .u = { 264 .u = {
258 .log = { 265 .log = {
259 .level = 4, 266 .level = 4,
260 .logflags = NF_LOG_MASK, 267 .logflags = NF_LOG_MASK,
261 }, 268 },
262 }, 269 },
263 }; 270 };
264 271
265 /* Mildly perf critical (only if packet tracing is on) */ 272 /* Mildly perf critical (only if packet tracing is on) */
266 static inline int 273 static inline int
267 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, 274 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
268 const char *hookname, const char **chainname, 275 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum) 276 const char **comment, unsigned int *rulenum)
270 { 277 {
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s); 278 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
272 279
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) { 280 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */ 281 /* Head of user chain: ERROR target with chainname */
275 *chainname = t->target.data; 282 *chainname = t->target.data;
276 (*rulenum) = 0; 283 (*rulenum) = 0;
277 } else if (s == e) { 284 } else if (s == e) {
278 (*rulenum)++; 285 (*rulenum)++;
279 286
280 if (s->target_offset == sizeof(struct ip6t_entry) && 287 if (s->target_offset == sizeof(struct ip6t_entry) &&
281 strcmp(t->target.u.kernel.target->name, 288 strcmp(t->target.u.kernel.target->name,
282 IP6T_STANDARD_TARGET) == 0 && 289 IP6T_STANDARD_TARGET) == 0 &&
283 t->verdict < 0 && 290 t->verdict < 0 &&
284 unconditional(&s->ipv6)) { 291 unconditional(&s->ipv6)) {
285 /* Tail of chains: STANDARD target (return/policy) */ 292 /* Tail of chains: STANDARD target (return/policy) */
286 *comment = *chainname == hookname 293 *comment = *chainname == hookname
287 ? comments[NF_IP6_TRACE_COMMENT_POLICY] 294 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
288 : comments[NF_IP6_TRACE_COMMENT_RETURN]; 295 : comments[NF_IP6_TRACE_COMMENT_RETURN];
289 } 296 }
290 return 1; 297 return 1;
291 } else 298 } else
292 (*rulenum)++; 299 (*rulenum)++;
293 300
294 return 0; 301 return 0;
295 } 302 }
296 303
297 static void trace_packet(struct sk_buff *skb, 304 static void trace_packet(struct sk_buff *skb,
298 unsigned int hook, 305 unsigned int hook,
299 const struct net_device *in, 306 const struct net_device *in,
300 const struct net_device *out, 307 const struct net_device *out,
301 const char *tablename, 308 const char *tablename,
302 struct xt_table_info *private, 309 struct xt_table_info *private,
303 struct ip6t_entry *e) 310 struct ip6t_entry *e)
304 { 311 {
305 void *table_base; 312 void *table_base;
306 const struct ip6t_entry *root; 313 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment; 314 const char *hookname, *chainname, *comment;
308 unsigned int rulenum = 0; 315 unsigned int rulenum = 0;
309 316
310 table_base = private->entries[smp_processor_id()]; 317 table_base = private->entries[smp_processor_id()];
311 root = get_entry(table_base, private->hook_entry[hook]); 318 root = get_entry(table_base, private->hook_entry[hook]);
312 319
313 hookname = chainname = hooknames[hook]; 320 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE]; 321 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
315 322
316 IP6T_ENTRY_ITERATE(root, 323 IP6T_ENTRY_ITERATE(root,
317 private->size - private->hook_entry[hook], 324 private->size - private->hook_entry[hook],
318 get_chainname_rulenum, 325 get_chainname_rulenum,
319 e, hookname, &chainname, &comment, &rulenum); 326 e, hookname, &chainname, &comment, &rulenum);
320 327
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, 328 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ", 329 "TRACE: %s:%s:%s:%u ",
323 tablename, chainname, comment, rulenum); 330 tablename, chainname, comment, rulenum);
324 } 331 }
325 #endif 332 #endif
326 333
327 static inline __pure struct ip6t_entry * 334 static inline __pure struct ip6t_entry *
328 ip6t_next_entry(const struct ip6t_entry *entry) 335 ip6t_next_entry(const struct ip6t_entry *entry)
329 { 336 {
330 return (void *)entry + entry->next_offset; 337 return (void *)entry + entry->next_offset;
331 } 338 }
332 339
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */ 340 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
334 unsigned int 341 unsigned int
335 ip6t_do_table(struct sk_buff *skb, 342 ip6t_do_table(struct sk_buff *skb,
336 unsigned int hook, 343 unsigned int hook,
337 const struct net_device *in, 344 const struct net_device *in,
338 const struct net_device *out, 345 const struct net_device *out,
339 struct xt_table *table) 346 struct xt_table *table)
340 { 347 {
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom 348 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
342 349
343 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 350 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
344 bool hotdrop = false; 351 bool hotdrop = false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */ 352 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP; 353 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev; 354 const char *indev, *outdev;
348 void *table_base; 355 void *table_base;
349 struct ip6t_entry *e, *back; 356 struct ip6t_entry *e, *back;
350 struct xt_table_info *private; 357 struct xt_table_info *private;
351 struct xt_match_param mtpar; 358 struct xt_match_param mtpar;
352 struct xt_target_param tgpar; 359 struct xt_target_param tgpar;
353 360
354 /* Initialization */ 361 /* Initialization */
355 indev = in ? in->name : nulldevname; 362 indev = in ? in->name : nulldevname;
356 outdev = out ? out->name : nulldevname; 363 outdev = out ? out->name : nulldevname;
357 /* We handle fragments by dealing with the first fragment as 364 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated 365 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask 366 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the 367 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't 368 * rule is also a fragment-specific rule, non-fragments won't
362 * match it. */ 369 * match it. */
363 mtpar.hotdrop = &hotdrop; 370 mtpar.hotdrop = &hotdrop;
364 mtpar.in = tgpar.in = in; 371 mtpar.in = tgpar.in = in;
365 mtpar.out = tgpar.out = out; 372 mtpar.out = tgpar.out = out;
366 mtpar.family = tgpar.family = NFPROTO_IPV6; 373 mtpar.family = tgpar.family = NFPROTO_IPV6;
367 mtpar.hooknum = tgpar.hooknum = hook; 374 mtpar.hooknum = tgpar.hooknum = hook;
368 375
369 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 376 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
370 377
371 xt_info_rdlock_bh(); 378 xt_info_rdlock_bh();
372 private = table->private; 379 private = table->private;
373 table_base = private->entries[smp_processor_id()]; 380 table_base = private->entries[smp_processor_id()];
374 381
375 e = get_entry(table_base, private->hook_entry[hook]); 382 e = get_entry(table_base, private->hook_entry[hook]);
376 383
377 /* For return from builtin chain */ 384 /* For return from builtin chain */
378 back = get_entry(table_base, private->underflow[hook]); 385 back = get_entry(table_base, private->underflow[hook]);
379 386
380 do { 387 do {
381 struct ip6t_entry_target *t; 388 struct ip6t_entry_target *t;
382 389
383 IP_NF_ASSERT(e); 390 IP_NF_ASSERT(e);
384 IP_NF_ASSERT(back); 391 IP_NF_ASSERT(back);
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, 392 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) || 393 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 394 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
388 e = ip6t_next_entry(e); 395 e = ip6t_next_entry(e);
389 continue; 396 continue;
390 } 397 }
391 398
392 ADD_COUNTER(e->counters, 399 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) + 400 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1); 401 sizeof(struct ipv6hdr), 1);
395 402
396 t = ip6t_get_target(e); 403 t = ip6t_get_target(e);
397 IP_NF_ASSERT(t->u.kernel.target); 404 IP_NF_ASSERT(t->u.kernel.target);
398 405
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 406 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 407 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */ 408 /* The packet is traced: log it */
402 if (unlikely(skb->nf_trace)) 409 if (unlikely(skb->nf_trace))
403 trace_packet(skb, hook, in, out, 410 trace_packet(skb, hook, in, out,
404 table->name, private, e); 411 table->name, private, e);
405 #endif 412 #endif
406 /* Standard target? */ 413 /* Standard target? */
407 if (!t->u.kernel.target->target) { 414 if (!t->u.kernel.target->target) {
408 int v; 415 int v;
409 416
410 v = ((struct ip6t_standard_target *)t)->verdict; 417 v = ((struct ip6t_standard_target *)t)->verdict;
411 if (v < 0) { 418 if (v < 0) {
412 /* Pop from stack? */ 419 /* Pop from stack? */
413 if (v != IP6T_RETURN) { 420 if (v != IP6T_RETURN) {
414 verdict = (unsigned)(-v) - 1; 421 verdict = (unsigned)(-v) - 1;
415 break; 422 break;
416 } 423 }
417 e = back; 424 e = back;
418 back = get_entry(table_base, back->comefrom); 425 back = get_entry(table_base, back->comefrom);
419 continue; 426 continue;
420 } 427 }
421 if (table_base + v != ip6t_next_entry(e) && 428 if (table_base + v != ip6t_next_entry(e) &&
422 !(e->ipv6.flags & IP6T_F_GOTO)) { 429 !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */ 430 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e); 431 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base; 432 next->comefrom = (void *)back - table_base;
426 /* set back pointer to next entry */ 433 /* set back pointer to next entry */
427 back = next; 434 back = next;
428 } 435 }
429 436
430 e = get_entry(table_base, v); 437 e = get_entry(table_base, v);
431 continue; 438 continue;
432 } 439 }
433 440
434 /* Targets which reenter must return 441 /* Targets which reenter must return
435 abs. verdicts */ 442 abs. verdicts */
436 tgpar.target = t->u.kernel.target; 443 tgpar.target = t->u.kernel.target;
437 tgpar.targinfo = t->data; 444 tgpar.targinfo = t->data;
438 445
439 #ifdef CONFIG_NETFILTER_DEBUG 446 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom = 0xeeeeeeec; 447 tb_comefrom = 0xeeeeeeec;
441 #endif 448 #endif
442 verdict = t->u.kernel.target->target(skb, &tgpar); 449 verdict = t->u.kernel.target->target(skb, &tgpar);
443 450
444 #ifdef CONFIG_NETFILTER_DEBUG 451 #ifdef CONFIG_NETFILTER_DEBUG
445 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) { 452 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n", 453 printk("Target %s reentered!\n",
447 t->u.kernel.target->name); 454 t->u.kernel.target->name);
448 verdict = NF_DROP; 455 verdict = NF_DROP;
449 } 456 }
450 tb_comefrom = 0x57acc001; 457 tb_comefrom = 0x57acc001;
451 #endif 458 #endif
452 if (verdict == IP6T_CONTINUE) 459 if (verdict == IP6T_CONTINUE)
453 e = ip6t_next_entry(e); 460 e = ip6t_next_entry(e);
454 else 461 else
455 /* Verdict */ 462 /* Verdict */
456 break; 463 break;
457 } while (!hotdrop); 464 } while (!hotdrop);
458 465
459 #ifdef CONFIG_NETFILTER_DEBUG 466 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom = NETFILTER_LINK_POISON; 467 tb_comefrom = NETFILTER_LINK_POISON;
461 #endif 468 #endif
462 xt_info_rdunlock_bh(); 469 xt_info_rdunlock_bh();
463 470
464 #ifdef DEBUG_ALLOW_ALL 471 #ifdef DEBUG_ALLOW_ALL
465 return NF_ACCEPT; 472 return NF_ACCEPT;
466 #else 473 #else
467 if (hotdrop) 474 if (hotdrop)
468 return NF_DROP; 475 return NF_DROP;
469 else return verdict; 476 else return verdict;
470 #endif 477 #endif
471 478
472 #undef tb_comefrom 479 #undef tb_comefrom
473 } 480 }
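
The traversal loop above leans on the standard target's verdict encoding: a non-negative verdict is a byte offset into the table (a jump or fall-through handled via get_entry()), IP6T_RETURN pops the saved back pointer, and any other negative value is an absolute netfilter verdict stored as -(NF_xxx) - 1, which is why the loop recovers it with (unsigned)(-v) - 1. A small decode helper expressing that convention is sketched below; only the relationships already visible in the loop are assumed.

/* Sketch: classify a standard-target verdict the way ip6t_do_table() does. */
static bool decode_std_verdict(int v, unsigned int *nf_verdict)
{
        if (v >= 0)
                return false;           /* jump: v is an offset into the table */
        if (v == IP6T_RETURN)
                return false;           /* return: pop the saved back pointer */
        *nf_verdict = (unsigned int)(-v) - 1;   /* e.g. -NF_ACCEPT - 1 -> NF_ACCEPT */
        return true;                    /* absolute verdict: stop traversal */
}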
474 481
475 /* Figures out from what hook each rule can be called: returns 0 if 482 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */ 483 there are loops. Puts hook bitmask in comefrom. */
477 static int 484 static int
478 mark_source_chains(struct xt_table_info *newinfo, 485 mark_source_chains(struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0) 486 unsigned int valid_hooks, void *entry0)
480 { 487 {
481 unsigned int hook; 488 unsigned int hook;
482 489
483 /* No recursion; use packet counter to save back ptrs (reset 490 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */ 491 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { 492 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
486 unsigned int pos = newinfo->hook_entry[hook]; 493 unsigned int pos = newinfo->hook_entry[hook];
487 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); 494 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
488 495
489 if (!(valid_hooks & (1 << hook))) 496 if (!(valid_hooks & (1 << hook)))
490 continue; 497 continue;
491 498
492 /* Set initial back pointer. */ 499 /* Set initial back pointer. */
493 e->counters.pcnt = pos; 500 e->counters.pcnt = pos;
494 501
495 for (;;) { 502 for (;;) {
496 struct ip6t_standard_target *t 503 struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e); 504 = (void *)ip6t_get_target(e);
498 int visited = e->comefrom & (1 << hook); 505 int visited = e->comefrom & (1 << hook);
499 506
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 507 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
501 printk("iptables: loop hook %u pos %u %08X.\n", 508 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook, pos, e->comefrom); 509 hook, pos, e->comefrom);
503 return 0; 510 return 0;
504 } 511 }
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); 512 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
506 513
507 /* Unconditional return/END. */ 514 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry) && 515 if ((e->target_offset == sizeof(struct ip6t_entry) &&
509 (strcmp(t->target.u.user.name, 516 (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0) && 517 IP6T_STANDARD_TARGET) == 0) &&
511 t->verdict < 0 && 518 t->verdict < 0 &&
512 unconditional(&e->ipv6)) || visited) { 519 unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size; 520 unsigned int oldpos, size;
514 521
515 if ((strcmp(t->target.u.user.name, 522 if ((strcmp(t->target.u.user.name,
516 IP6T_STANDARD_TARGET) == 0) && 523 IP6T_STANDARD_TARGET) == 0) &&
517 t->verdict < -NF_MAX_VERDICT - 1) { 524 t->verdict < -NF_MAX_VERDICT - 1) {
518 duprintf("mark_source_chains: bad " 525 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n", 526 "negative verdict (%i)\n",
520 t->verdict); 527 t->verdict);
521 return 0; 528 return 0;
522 } 529 }
523 530
524 /* Return: backtrack through the last 531 /* Return: backtrack through the last
525 big jump. */ 532 big jump. */
526 do { 533 do {
527 e->comefrom ^= (1<<NF_INET_NUMHOOKS); 534 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
528 #ifdef DEBUG_IP_FIREWALL_USER 535 #ifdef DEBUG_IP_FIREWALL_USER
529 if (e->comefrom 536 if (e->comefrom
530 & (1 << NF_INET_NUMHOOKS)) { 537 & (1 << NF_INET_NUMHOOKS)) {
531 duprintf("Back unset " 538 duprintf("Back unset "
532 "on hook %u " 539 "on hook %u "
533 "rule %u\n", 540 "rule %u\n",
534 hook, pos); 541 hook, pos);
535 } 542 }
536 #endif 543 #endif
537 oldpos = pos; 544 oldpos = pos;
538 pos = e->counters.pcnt; 545 pos = e->counters.pcnt;
539 e->counters.pcnt = 0; 546 e->counters.pcnt = 0;
540 547
541 /* We're at the start. */ 548 /* We're at the start. */
542 if (pos == oldpos) 549 if (pos == oldpos)
543 goto next; 550 goto next;
544 551
545 e = (struct ip6t_entry *) 552 e = (struct ip6t_entry *)
546 (entry0 + pos); 553 (entry0 + pos);
547 } while (oldpos == pos + e->next_offset); 554 } while (oldpos == pos + e->next_offset);
548 555
549 /* Move along one */ 556 /* Move along one */
550 size = e->next_offset; 557 size = e->next_offset;
551 e = (struct ip6t_entry *) 558 e = (struct ip6t_entry *)
552 (entry0 + pos + size); 559 (entry0 + pos + size);
553 e->counters.pcnt = pos; 560 e->counters.pcnt = pos;
554 pos += size; 561 pos += size;
555 } else { 562 } else {
556 int newpos = t->verdict; 563 int newpos = t->verdict;
557 564
558 if (strcmp(t->target.u.user.name, 565 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0 && 566 IP6T_STANDARD_TARGET) == 0 &&
560 newpos >= 0) { 567 newpos >= 0) {
561 if (newpos > newinfo->size - 568 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) { 569 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: " 570 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n", 571 "bad verdict (%i)\n",
565 newpos); 572 newpos);
566 return 0; 573 return 0;
567 } 574 }
568 /* This is a jump; chase it. */ 575 /* This is a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n", 576 duprintf("Jump rule %u -> %u\n",
570 pos, newpos); 577 pos, newpos);
571 } else { 578 } else {
572 /* ... this is a fallthru */ 579 /* ... this is a fallthru */
573 newpos = pos + e->next_offset; 580 newpos = pos + e->next_offset;
574 } 581 }
575 e = (struct ip6t_entry *) 582 e = (struct ip6t_entry *)
576 (entry0 + newpos); 583 (entry0 + newpos);
577 e->counters.pcnt = pos; 584 e->counters.pcnt = pos;
578 pos = newpos; 585 pos = newpos;
579 } 586 }
580 } 587 }
581 next: 588 next:
582 duprintf("Finished chain %u\n", hook); 589 duprintf("Finished chain %u\n", hook);
583 } 590 }
584 return 1; 591 return 1;
585 } 592 }
586 593
587 static int 594 static int
588 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i) 595 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
589 { 596 {
590 struct xt_mtdtor_param par; 597 struct xt_mtdtor_param par;
591 598
592 if (i && (*i)-- == 0) 599 if (i && (*i)-- == 0)
593 return 1; 600 return 1;
594 601
595 par.net = net; 602 par.net = net;
596 par.match = m->u.kernel.match; 603 par.match = m->u.kernel.match;
597 par.matchinfo = m->data; 604 par.matchinfo = m->data;
598 par.family = NFPROTO_IPV6; 605 par.family = NFPROTO_IPV6;
599 if (par.match->destroy != NULL) 606 if (par.match->destroy != NULL)
600 par.match->destroy(&par); 607 par.match->destroy(&par);
601 module_put(par.match->me); 608 module_put(par.match->me);
602 return 0; 609 return 0;
603 } 610 }
604 611
605 static int 612 static int
606 check_entry(struct ip6t_entry *e, const char *name) 613 check_entry(struct ip6t_entry *e, const char *name)
607 { 614 {
608 struct ip6t_entry_target *t; 615 struct ip6t_entry_target *t;
609 616
610 if (!ip6_checkentry(&e->ipv6)) { 617 if (!ip6_checkentry(&e->ipv6)) {
611 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 618 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
612 return -EINVAL; 619 return -EINVAL;
613 } 620 }
614 621
615 if (e->target_offset + sizeof(struct ip6t_entry_target) > 622 if (e->target_offset + sizeof(struct ip6t_entry_target) >
616 e->next_offset) 623 e->next_offset)
617 return -EINVAL; 624 return -EINVAL;
618 625
619 t = ip6t_get_target(e); 626 t = ip6t_get_target(e);
620 if (e->target_offset + t->u.target_size > e->next_offset) 627 if (e->target_offset + t->u.target_size > e->next_offset)
621 return -EINVAL; 628 return -EINVAL;
622 629
623 return 0; 630 return 0;
624 } 631 }
625 632
626 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 633 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
627 unsigned int *i) 634 unsigned int *i)
628 { 635 {
629 const struct ip6t_ip6 *ipv6 = par->entryinfo; 636 const struct ip6t_ip6 *ipv6 = par->entryinfo;
630 int ret; 637 int ret;
631 638
632 par->match = m->u.kernel.match; 639 par->match = m->u.kernel.match;
633 par->matchinfo = m->data; 640 par->matchinfo = m->data;
634 641
635 ret = xt_check_match(par, m->u.match_size - sizeof(*m), 642 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
636 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); 643 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
637 if (ret < 0) { 644 if (ret < 0) {
638 duprintf("ip_tables: check failed for `%s'.\n", 645 duprintf("ip_tables: check failed for `%s'.\n",
639 par->match->name); 646 par->match->name);
640 return ret; 647 return ret;
641 } 648 }
642 ++*i; 649 ++*i;
643 return 0; 650 return 0;
644 } 651 }
645 652
646 static int 653 static int
647 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 654 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
648 unsigned int *i) 655 unsigned int *i)
649 { 656 {
650 struct xt_match *match; 657 struct xt_match *match;
651 int ret; 658 int ret;
652 659
653 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, 660 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
654 m->u.user.revision), 661 m->u.user.revision),
655 "ip6t_%s", m->u.user.name); 662 "ip6t_%s", m->u.user.name);
656 if (IS_ERR(match) || !match) { 663 if (IS_ERR(match) || !match) {
657 duprintf("find_check_match: `%s' not found\n", m->u.user.name); 664 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
658 return match ? PTR_ERR(match) : -ENOENT; 665 return match ? PTR_ERR(match) : -ENOENT;
659 } 666 }
660 m->u.kernel.match = match; 667 m->u.kernel.match = match;
661 668
662 ret = check_match(m, par, i); 669 ret = check_match(m, par, i);
663 if (ret) 670 if (ret)
664 goto err; 671 goto err;
665 672
666 return 0; 673 return 0;
667 err: 674 err:
668 module_put(m->u.kernel.match->me); 675 module_put(m->u.kernel.match->me);
669 return ret; 676 return ret;
670 } 677 }
671 678
672 static int check_target(struct ip6t_entry *e, struct net *net, const char *name) 679 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
673 { 680 {
674 struct ip6t_entry_target *t = ip6t_get_target(e); 681 struct ip6t_entry_target *t = ip6t_get_target(e);
675 struct xt_tgchk_param par = { 682 struct xt_tgchk_param par = {
676 .net = net, 683 .net = net,
677 .table = name, 684 .table = name,
678 .entryinfo = e, 685 .entryinfo = e,
679 .target = t->u.kernel.target, 686 .target = t->u.kernel.target,
680 .targinfo = t->data, 687 .targinfo = t->data,
681 .hook_mask = e->comefrom, 688 .hook_mask = e->comefrom,
682 .family = NFPROTO_IPV6, 689 .family = NFPROTO_IPV6,
683 }; 690 };
684 int ret; 691 int ret;
685 692
686 t = ip6t_get_target(e); 693 t = ip6t_get_target(e);
687 ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 694 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
688 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); 695 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
689 if (ret < 0) { 696 if (ret < 0) {
690 duprintf("ip_tables: check failed for `%s'.\n", 697 duprintf("ip_tables: check failed for `%s'.\n",
691 t->u.kernel.target->name); 698 t->u.kernel.target->name);
692 return ret; 699 return ret;
693 } 700 }
694 return 0; 701 return 0;
695 } 702 }
696 703
697 static int 704 static int
698 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, 705 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
699 unsigned int size, unsigned int *i) 706 unsigned int size, unsigned int *i)
700 { 707 {
701 struct ip6t_entry_target *t; 708 struct ip6t_entry_target *t;
702 struct xt_target *target; 709 struct xt_target *target;
703 int ret; 710 int ret;
704 unsigned int j; 711 unsigned int j;
705 struct xt_mtchk_param mtpar; 712 struct xt_mtchk_param mtpar;
706 713
707 ret = check_entry(e, name); 714 ret = check_entry(e, name);
708 if (ret) 715 if (ret)
709 return ret; 716 return ret;
710 717
711 j = 0; 718 j = 0;
712 mtpar.net = net; 719 mtpar.net = net;
713 mtpar.table = name; 720 mtpar.table = name;
714 mtpar.entryinfo = &e->ipv6; 721 mtpar.entryinfo = &e->ipv6;
715 mtpar.hook_mask = e->comefrom; 722 mtpar.hook_mask = e->comefrom;
716 mtpar.family = NFPROTO_IPV6; 723 mtpar.family = NFPROTO_IPV6;
717 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 724 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
718 if (ret != 0) 725 if (ret != 0)
719 goto cleanup_matches; 726 goto cleanup_matches;
720 727
721 t = ip6t_get_target(e); 728 t = ip6t_get_target(e);
722 target = try_then_request_module(xt_find_target(AF_INET6, 729 target = try_then_request_module(xt_find_target(AF_INET6,
723 t->u.user.name, 730 t->u.user.name,
724 t->u.user.revision), 731 t->u.user.revision),
725 "ip6t_%s", t->u.user.name); 732 "ip6t_%s", t->u.user.name);
726 if (IS_ERR(target) || !target) { 733 if (IS_ERR(target) || !target) {
727 duprintf("find_check_entry: `%s' not found\n", t->u.user.name); 734 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
728 ret = target ? PTR_ERR(target) : -ENOENT; 735 ret = target ? PTR_ERR(target) : -ENOENT;
729 goto cleanup_matches; 736 goto cleanup_matches;
730 } 737 }
731 t->u.kernel.target = target; 738 t->u.kernel.target = target;
732 739
733 ret = check_target(e, net, name); 740 ret = check_target(e, net, name);
734 if (ret) 741 if (ret)
735 goto err; 742 goto err;
736 743
737 (*i)++; 744 (*i)++;
738 return 0; 745 return 0;
739 err: 746 err:
740 module_put(t->u.kernel.target->me); 747 module_put(t->u.kernel.target->me);
741 cleanup_matches: 748 cleanup_matches:
742 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j); 749 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
743 return ret; 750 return ret;
744 } 751 }
745 752
746 static bool check_underflow(struct ip6t_entry *e) 753 static bool check_underflow(struct ip6t_entry *e)
747 { 754 {
748 const struct ip6t_entry_target *t; 755 const struct ip6t_entry_target *t;
749 unsigned int verdict; 756 unsigned int verdict;
750 757
751 if (!unconditional(&e->ipv6)) 758 if (!unconditional(&e->ipv6))
752 return false; 759 return false;
753 t = ip6t_get_target(e); 760 t = ip6t_get_target(e);
754 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 761 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
755 return false; 762 return false;
756 verdict = ((struct ip6t_standard_target *)t)->verdict; 763 verdict = ((struct ip6t_standard_target *)t)->verdict;
757 verdict = -verdict - 1; 764 verdict = -verdict - 1;
758 return verdict == NF_DROP || verdict == NF_ACCEPT; 765 return verdict == NF_DROP || verdict == NF_ACCEPT;
759 } 766 }
760 767
761 static int 768 static int
762 check_entry_size_and_hooks(struct ip6t_entry *e, 769 check_entry_size_and_hooks(struct ip6t_entry *e,
763 struct xt_table_info *newinfo, 770 struct xt_table_info *newinfo,
764 unsigned char *base, 771 unsigned char *base,
765 unsigned char *limit, 772 unsigned char *limit,
766 const unsigned int *hook_entries, 773 const unsigned int *hook_entries,
767 const unsigned int *underflows, 774 const unsigned int *underflows,
768 unsigned int valid_hooks, 775 unsigned int valid_hooks,
769 unsigned int *i) 776 unsigned int *i)
770 { 777 {
771 unsigned int h; 778 unsigned int h;
772 779
773 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || 780 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
774 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { 781 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
775 duprintf("Bad offset %p\n", e); 782 duprintf("Bad offset %p\n", e);
776 return -EINVAL; 783 return -EINVAL;
777 } 784 }
778 785
779 if (e->next_offset 786 if (e->next_offset
780 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) { 787 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
781 duprintf("checking: element %p size %u\n", 788 duprintf("checking: element %p size %u\n",
782 e, e->next_offset); 789 e, e->next_offset);
783 return -EINVAL; 790 return -EINVAL;
784 } 791 }
785 792
786 /* Check hooks & underflows */ 793 /* Check hooks & underflows */
787 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 794 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
788 if (!(valid_hooks & (1 << h))) 795 if (!(valid_hooks & (1 << h)))
789 continue; 796 continue;
790 if ((unsigned char *)e - base == hook_entries[h]) 797 if ((unsigned char *)e - base == hook_entries[h])
791 newinfo->hook_entry[h] = hook_entries[h]; 798 newinfo->hook_entry[h] = hook_entries[h];
792 if ((unsigned char *)e - base == underflows[h]) { 799 if ((unsigned char *)e - base == underflows[h]) {
793 if (!check_underflow(e)) { 800 if (!check_underflow(e)) {
794 pr_err("Underflows must be unconditional and " 801 pr_err("Underflows must be unconditional and "
795 "use the STANDARD target with " 802 "use the STANDARD target with "
796 "ACCEPT/DROP\n"); 803 "ACCEPT/DROP\n");
797 return -EINVAL; 804 return -EINVAL;
798 } 805 }
799 newinfo->underflow[h] = underflows[h]; 806 newinfo->underflow[h] = underflows[h];
800 } 807 }
801 } 808 }
802 809
803 /* Clear counters and comefrom */ 810 /* Clear counters and comefrom */
804 e->counters = ((struct xt_counters) { 0, 0 }); 811 e->counters = ((struct xt_counters) { 0, 0 });
805 e->comefrom = 0; 812 e->comefrom = 0;
806 813
807 (*i)++; 814 (*i)++;
808 return 0; 815 return 0;
809 } 816 }
810 817
811 static int 818 static int
812 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i) 819 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
813 { 820 {
814 struct xt_tgdtor_param par; 821 struct xt_tgdtor_param par;
815 struct ip6t_entry_target *t; 822 struct ip6t_entry_target *t;
816 823
817 if (i && (*i)-- == 0) 824 if (i && (*i)-- == 0)
818 return 1; 825 return 1;
819 826
820 /* Cleanup all matches */ 827 /* Cleanup all matches */
821 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL); 828 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
822 t = ip6t_get_target(e); 829 t = ip6t_get_target(e);
823 830
824 par.net = net; 831 par.net = net;
825 par.target = t->u.kernel.target; 832 par.target = t->u.kernel.target;
826 par.targinfo = t->data; 833 par.targinfo = t->data;
827 par.family = NFPROTO_IPV6; 834 par.family = NFPROTO_IPV6;
828 if (par.target->destroy != NULL) 835 if (par.target->destroy != NULL)
829 par.target->destroy(&par); 836 par.target->destroy(&par);
830 module_put(par.target->me); 837 module_put(par.target->me);
831 return 0; 838 return 0;
832 } 839 }
833 840
834 /* Checks and translates the user-supplied table segment (held in 841 /* Checks and translates the user-supplied table segment (held in
835 newinfo) */ 842 newinfo) */
836 static int 843 static int
837 translate_table(struct net *net, 844 translate_table(struct net *net,
838 const char *name, 845 const char *name,
839 unsigned int valid_hooks, 846 unsigned int valid_hooks,
840 struct xt_table_info *newinfo, 847 struct xt_table_info *newinfo,
841 void *entry0, 848 void *entry0,
842 unsigned int size, 849 unsigned int size,
843 unsigned int number, 850 unsigned int number,
844 const unsigned int *hook_entries, 851 const unsigned int *hook_entries,
845 const unsigned int *underflows) 852 const unsigned int *underflows)
846 { 853 {
847 unsigned int i; 854 unsigned int i;
848 int ret; 855 int ret;
849 856
850 newinfo->size = size; 857 newinfo->size = size;
851 newinfo->number = number; 858 newinfo->number = number;
852 859
853 /* Init all hooks to impossible value. */ 860 /* Init all hooks to impossible value. */
854 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 861 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
855 newinfo->hook_entry[i] = 0xFFFFFFFF; 862 newinfo->hook_entry[i] = 0xFFFFFFFF;
856 newinfo->underflow[i] = 0xFFFFFFFF; 863 newinfo->underflow[i] = 0xFFFFFFFF;
857 } 864 }
858 865
859 duprintf("translate_table: size %u\n", newinfo->size); 866 duprintf("translate_table: size %u\n", newinfo->size);
860 i = 0; 867 i = 0;
861 /* Walk through entries, checking offsets. */ 868 /* Walk through entries, checking offsets. */
862 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 869 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
863 check_entry_size_and_hooks, 870 check_entry_size_and_hooks,
864 newinfo, 871 newinfo,
865 entry0, 872 entry0,
866 entry0 + size, 873 entry0 + size,
867 hook_entries, underflows, valid_hooks, &i); 874 hook_entries, underflows, valid_hooks, &i);
868 if (ret != 0) 875 if (ret != 0)
869 return ret; 876 return ret;
870 877
871 if (i != number) { 878 if (i != number) {
872 duprintf("translate_table: %u not %u entries\n", 879 duprintf("translate_table: %u not %u entries\n",
873 i, number); 880 i, number);
874 return -EINVAL; 881 return -EINVAL;
875 } 882 }
876 883
877 /* Check hooks all assigned */ 884 /* Check hooks all assigned */
878 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 885 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
879 /* Only hooks which are valid */ 886 /* Only hooks which are valid */
880 if (!(valid_hooks & (1 << i))) 887 if (!(valid_hooks & (1 << i)))
881 continue; 888 continue;
882 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 889 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
883 duprintf("Invalid hook entry %u %u\n", 890 duprintf("Invalid hook entry %u %u\n",
884 i, hook_entries[i]); 891 i, hook_entries[i]);
885 return -EINVAL; 892 return -EINVAL;
886 } 893 }
887 if (newinfo->underflow[i] == 0xFFFFFFFF) { 894 if (newinfo->underflow[i] == 0xFFFFFFFF) {
888 duprintf("Invalid underflow %u %u\n", 895 duprintf("Invalid underflow %u %u\n",
889 i, underflows[i]); 896 i, underflows[i]);
890 return -EINVAL; 897 return -EINVAL;
891 } 898 }
892 } 899 }
893 900
894 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 901 if (!mark_source_chains(newinfo, valid_hooks, entry0))
895 return -ELOOP; 902 return -ELOOP;
896 903
897 /* Finally, each sanity check must pass */ 904 /* Finally, each sanity check must pass */
898 i = 0; 905 i = 0;
899 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 906 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
900 find_check_entry, net, name, size, &i); 907 find_check_entry, net, name, size, &i);
901 908
902 if (ret != 0) { 909 if (ret != 0) {
903 IP6T_ENTRY_ITERATE(entry0, newinfo->size, 910 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
904 cleanup_entry, net, &i); 911 cleanup_entry, net, &i);
905 return ret; 912 return ret;
906 } 913 }
907 914
908 /* And one copy for every other CPU */ 915 /* And one copy for every other CPU */
909 for_each_possible_cpu(i) { 916 for_each_possible_cpu(i) {
910 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 917 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
911 memcpy(newinfo->entries[i], entry0, newinfo->size); 918 memcpy(newinfo->entries[i], entry0, newinfo->size);
912 } 919 }
913 920
914 return ret; 921 return ret;
915 } 922 }
916 923
917 /* Gets counters. */ 924 /* Gets counters. */
918 static inline int 925 static inline int
919 add_entry_to_counter(const struct ip6t_entry *e, 926 add_entry_to_counter(const struct ip6t_entry *e,
920 struct xt_counters total[], 927 struct xt_counters total[],
921 unsigned int *i) 928 unsigned int *i)
922 { 929 {
923 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 930 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
924 931
925 (*i)++; 932 (*i)++;
926 return 0; 933 return 0;
927 } 934 }
928 935
929 static inline int 936 static inline int
930 set_entry_to_counter(const struct ip6t_entry *e, 937 set_entry_to_counter(const struct ip6t_entry *e,
931 struct ip6t_counters total[], 938 struct ip6t_counters total[],
932 unsigned int *i) 939 unsigned int *i)
933 { 940 {
934 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 941 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
935 942
936 (*i)++; 943 (*i)++;
937 return 0; 944 return 0;
938 } 945 }
939 946
940 static void 947 static void
941 get_counters(const struct xt_table_info *t, 948 get_counters(const struct xt_table_info *t,
942 struct xt_counters counters[]) 949 struct xt_counters counters[])
943 { 950 {
944 unsigned int cpu; 951 unsigned int cpu;
945 unsigned int i; 952 unsigned int i;
946 unsigned int curcpu; 953 unsigned int curcpu;
947 954
948 /* Instead of clearing (by a previous call to memset()) 955 /* Instead of clearing (by a previous call to memset())
949 * the counters and using adds, we set the counters 956 * the counters and using adds, we set the counters
950 * with data used by 'current' CPU 957 * with data used by 'current' CPU
951 * 958 *
952 * Bottom half has to be disabled to prevent deadlock 959 * Bottom half has to be disabled to prevent deadlock
953 * if new softirq were to run and call ipt_do_table 960 * if new softirq were to run and call ipt_do_table
954 */ 961 */
955 local_bh_disable(); 962 local_bh_disable();
956 curcpu = smp_processor_id(); 963 curcpu = smp_processor_id();
957 964
958 i = 0; 965 i = 0;
959 IP6T_ENTRY_ITERATE(t->entries[curcpu], 966 IP6T_ENTRY_ITERATE(t->entries[curcpu],
960 t->size, 967 t->size,
961 set_entry_to_counter, 968 set_entry_to_counter,
962 counters, 969 counters,
963 &i); 970 &i);
964 971
965 for_each_possible_cpu(cpu) { 972 for_each_possible_cpu(cpu) {
966 if (cpu == curcpu) 973 if (cpu == curcpu)
967 continue; 974 continue;
968 i = 0; 975 i = 0;
969 xt_info_wrlock(cpu); 976 xt_info_wrlock(cpu);
970 IP6T_ENTRY_ITERATE(t->entries[cpu], 977 IP6T_ENTRY_ITERATE(t->entries[cpu],
971 t->size, 978 t->size,
972 add_entry_to_counter, 979 add_entry_to_counter,
973 counters, 980 counters,
974 &i); 981 &i);
975 xt_info_wrunlock(cpu); 982 xt_info_wrunlock(cpu);
976 } 983 }
977 local_bh_enable(); 984 local_bh_enable();
978 } 985 }
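
get_counters() is the code for the comment above it: every CPU owns a private copy of the entries and hence of the counters, so a user-visible snapshot is built by seeding the totals from the current CPU with SET_COUNTER() and folding in every other CPU's copy with ADD_COUNTER() under that CPU's xt_info write lock. A minimal sketch of what the two counter helpers amount to follows; the macro bodies are paraphrased from their use here, not quoted, with bcnt/pcnt being the byte and packet fields of struct xt_counters.

/* Sketch of the counter helpers used above. */
static inline void set_counter(struct xt_counters *c, u64 bytes, u64 pkts)
{
        c->bcnt = bytes;        /* SET_COUNTER(): seed from the current CPU */
        c->pcnt = pkts;
}

static inline void add_counter(struct xt_counters *c, u64 bytes, u64 pkts)
{
        c->bcnt += bytes;       /* ADD_COUNTER(): fold in another CPU's copy */
        c->pcnt += pkts;
}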
979 986
980 static struct xt_counters *alloc_counters(struct xt_table *table) 987 static struct xt_counters *alloc_counters(struct xt_table *table)
981 { 988 {
982 unsigned int countersize; 989 unsigned int countersize;
983 struct xt_counters *counters; 990 struct xt_counters *counters;
984 struct xt_table_info *private = table->private; 991 struct xt_table_info *private = table->private;
985 992
986 /* We need atomic snapshot of counters: rest doesn't change 993 /* We need atomic snapshot of counters: rest doesn't change
987 (other than comefrom, which userspace doesn't care 994 (other than comefrom, which userspace doesn't care
988 about). */ 995 about). */
989 countersize = sizeof(struct xt_counters) * private->number; 996 countersize = sizeof(struct xt_counters) * private->number;
990 counters = vmalloc_node(countersize, numa_node_id()); 997 counters = vmalloc_node(countersize, numa_node_id());
991 998
992 if (counters == NULL) 999 if (counters == NULL)
993 return ERR_PTR(-ENOMEM); 1000 return ERR_PTR(-ENOMEM);
994 1001
995 get_counters(private, counters); 1002 get_counters(private, counters);
996 1003
997 return counters; 1004 return counters;
998 } 1005 }
999 1006
1000 static int 1007 static int
1001 copy_entries_to_user(unsigned int total_size, 1008 copy_entries_to_user(unsigned int total_size,
1002 struct xt_table *table, 1009 struct xt_table *table,
1003 void __user *userptr) 1010 void __user *userptr)
1004 { 1011 {
1005 unsigned int off, num; 1012 unsigned int off, num;
1006 struct ip6t_entry *e; 1013 struct ip6t_entry *e;
1007 struct xt_counters *counters; 1014 struct xt_counters *counters;
1008 const struct xt_table_info *private = table->private; 1015 const struct xt_table_info *private = table->private;
1009 int ret = 0; 1016 int ret = 0;
1010 const void *loc_cpu_entry; 1017 const void *loc_cpu_entry;
1011 1018
1012 counters = alloc_counters(table); 1019 counters = alloc_counters(table);
1013 if (IS_ERR(counters)) 1020 if (IS_ERR(counters))
1014 return PTR_ERR(counters); 1021 return PTR_ERR(counters);
1015 1022
1016 /* choose the copy that is on our node/cpu, ... 1023 /* choose the copy that is on our node/cpu, ...
1017 * This choice is lazy (because current thread is 1024 * This choice is lazy (because current thread is
1018 * allowed to migrate to another cpu) 1025 * allowed to migrate to another cpu)
1019 */ 1026 */
1020 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1027 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1021 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 1028 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1022 ret = -EFAULT; 1029 ret = -EFAULT;
1023 goto free_counters; 1030 goto free_counters;
1024 } 1031 }
1025 1032
1026 /* FIXME: use iterator macros --RR */ 1033 /* FIXME: use iterator macros --RR */
1027 /* ... then go back and fix counters and names */ 1034 /* ... then go back and fix counters and names */
1028 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 1035 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1029 unsigned int i; 1036 unsigned int i;
1030 const struct ip6t_entry_match *m; 1037 const struct ip6t_entry_match *m;
1031 const struct ip6t_entry_target *t; 1038 const struct ip6t_entry_target *t;
1032 1039
1033 e = (struct ip6t_entry *)(loc_cpu_entry + off); 1040 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1034 if (copy_to_user(userptr + off 1041 if (copy_to_user(userptr + off
1035 + offsetof(struct ip6t_entry, counters), 1042 + offsetof(struct ip6t_entry, counters),
1036 &counters[num], 1043 &counters[num],
1037 sizeof(counters[num])) != 0) { 1044 sizeof(counters[num])) != 0) {
1038 ret = -EFAULT; 1045 ret = -EFAULT;
1039 goto free_counters; 1046 goto free_counters;
1040 } 1047 }
1041 1048
1042 for (i = sizeof(struct ip6t_entry); 1049 for (i = sizeof(struct ip6t_entry);
1043 i < e->target_offset; 1050 i < e->target_offset;
1044 i += m->u.match_size) { 1051 i += m->u.match_size) {
1045 m = (void *)e + i; 1052 m = (void *)e + i;
1046 1053
1047 if (copy_to_user(userptr + off + i 1054 if (copy_to_user(userptr + off + i
1048 + offsetof(struct ip6t_entry_match, 1055 + offsetof(struct ip6t_entry_match,
1049 u.user.name), 1056 u.user.name),
1050 m->u.kernel.match->name, 1057 m->u.kernel.match->name,
1051 strlen(m->u.kernel.match->name)+1) 1058 strlen(m->u.kernel.match->name)+1)
1052 != 0) { 1059 != 0) {
1053 ret = -EFAULT; 1060 ret = -EFAULT;
1054 goto free_counters; 1061 goto free_counters;
1055 } 1062 }
1056 } 1063 }
1057 1064
1058 t = ip6t_get_target(e); 1065 t = ip6t_get_target(e);
1059 if (copy_to_user(userptr + off + e->target_offset 1066 if (copy_to_user(userptr + off + e->target_offset
1060 + offsetof(struct ip6t_entry_target, 1067 + offsetof(struct ip6t_entry_target,
1061 u.user.name), 1068 u.user.name),
1062 t->u.kernel.target->name, 1069 t->u.kernel.target->name,
1063 strlen(t->u.kernel.target->name)+1) != 0) { 1070 strlen(t->u.kernel.target->name)+1) != 0) {
1064 ret = -EFAULT; 1071 ret = -EFAULT;
1065 goto free_counters; 1072 goto free_counters;
1066 } 1073 }
1067 } 1074 }
1068 1075
1069 free_counters: 1076 free_counters:
1070 vfree(counters); 1077 vfree(counters);
1071 return ret; 1078 return ret;
1072 } 1079 }
1073 1080
1074 #ifdef CONFIG_COMPAT 1081 #ifdef CONFIG_COMPAT
1075 static void compat_standard_from_user(void *dst, void *src) 1082 static void compat_standard_from_user(void *dst, void *src)
1076 { 1083 {
1077 int v = *(compat_int_t *)src; 1084 int v = *(compat_int_t *)src;
1078 1085
1079 if (v > 0) 1086 if (v > 0)
1080 v += xt_compat_calc_jump(AF_INET6, v); 1087 v += xt_compat_calc_jump(AF_INET6, v);
1081 memcpy(dst, &v, sizeof(v)); 1088 memcpy(dst, &v, sizeof(v));
1082 } 1089 }
1083 1090
1084 static int compat_standard_to_user(void __user *dst, void *src) 1091 static int compat_standard_to_user(void __user *dst, void *src)
1085 { 1092 {
1086 compat_int_t cv = *(int *)src; 1093 compat_int_t cv = *(int *)src;
1087 1094
1088 if (cv > 0) 1095 if (cv > 0)
1089 cv -= xt_compat_calc_jump(AF_INET6, cv); 1096 cv -= xt_compat_calc_jump(AF_INET6, cv);
1090 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1097 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1091 } 1098 }
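
The two helpers above only adjust strictly positive verdicts: built-in verdicts are encoded as negative numbers and pass through unchanged, while a positive value is an absolute byte offset into the rule blob and must absorb the size difference of every entry sitting in front of the jump target once entries change size between the compat and native layouts. Below is a minimal userspace sketch of that idea, assuming a hypothetical per-entry delta table in place of the kernel's xt_compat_add_offset()/xt_compat_calc_jump() bookkeeping.

/* Sketch only: illustrates the jump-delta idea behind
 * compat_standard_{from,to}_user(), not the kernel's implementation.
 * The (offset, delta) pairs below are made up. */
#include <stdio.h>

struct delta { unsigned int entry_offset; int delta; };

/* one (entry offset, native-minus-compat growth) record per entry */
static const struct delta tab[] = {
	{ 0, 8 }, { 112, 8 }, { 240, 16 },
};

static int calc_jump(unsigned int target)
{
	unsigned int i;
	int adj = 0;

	/* a jump landing past an entry must absorb that entry's growth */
	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		if (tab[i].entry_offset < target)
			adj += tab[i].delta;
	return adj;
}

int main(void)
{
	unsigned int compat_verdict = 240;	/* absolute offset in the 32-bit blob */

	/* from_user direction: native offset = compat offset + growth so far */
	printf("native verdict: %u\n", compat_verdict + calc_jump(compat_verdict));
	return 0;
}
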
1092 1099
1093 static inline int 1100 static inline int
1094 compat_calc_match(struct ip6t_entry_match *m, int *size) 1101 compat_calc_match(struct ip6t_entry_match *m, int *size)
1095 { 1102 {
1096 *size += xt_compat_match_offset(m->u.kernel.match); 1103 *size += xt_compat_match_offset(m->u.kernel.match);
1097 return 0; 1104 return 0;
1098 } 1105 }
1099 1106
1100 static int compat_calc_entry(struct ip6t_entry *e, 1107 static int compat_calc_entry(struct ip6t_entry *e,
1101 const struct xt_table_info *info, 1108 const struct xt_table_info *info,
1102 void *base, struct xt_table_info *newinfo) 1109 void *base, struct xt_table_info *newinfo)
1103 { 1110 {
1104 struct ip6t_entry_target *t; 1111 struct ip6t_entry_target *t;
1105 unsigned int entry_offset; 1112 unsigned int entry_offset;
1106 int off, i, ret; 1113 int off, i, ret;
1107 1114
1108 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1115 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1109 entry_offset = (void *)e - base; 1116 entry_offset = (void *)e - base;
1110 IP6T_MATCH_ITERATE(e, compat_calc_match, &off); 1117 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1111 t = ip6t_get_target(e); 1118 t = ip6t_get_target(e);
1112 off += xt_compat_target_offset(t->u.kernel.target); 1119 off += xt_compat_target_offset(t->u.kernel.target);
1113 newinfo->size -= off; 1120 newinfo->size -= off;
1114 ret = xt_compat_add_offset(AF_INET6, entry_offset, off); 1121 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1115 if (ret) 1122 if (ret)
1116 return ret; 1123 return ret;
1117 1124
1118 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1125 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1119 if (info->hook_entry[i] && 1126 if (info->hook_entry[i] &&
1120 (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) 1127 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1121 newinfo->hook_entry[i] -= off; 1128 newinfo->hook_entry[i] -= off;
1122 if (info->underflow[i] && 1129 if (info->underflow[i] &&
1123 (e < (struct ip6t_entry *)(base + info->underflow[i]))) 1130 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1124 newinfo->underflow[i] -= off; 1131 newinfo->underflow[i] -= off;
1125 } 1132 }
1126 return 0; 1133 return 0;
1127 } 1134 }
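
compat_calc_entry() sizes a native table for its compat representation: each entry shrinks by the fixed ip6t_entry/compat_ip6t_entry difference plus whatever its matches and target lose, and every hook_entry/underflow offset that lies beyond a shrinking entry is pulled back by the accumulated shrinkage. A worked sketch of that fixup with made-up numbers:

/* Offsets and deltas are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned int hook_entry = 320;		/* native offset of a hook's first rule */
	unsigned int entry_off[] = { 0, 160 };	/* entries located before that rule */
	unsigned int shrink[]    = { 8, 8 };	/* per-entry native-minus-compat delta */
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (entry_off[i] < hook_entry)
			hook_entry -= shrink[i];

	printf("compat hook_entry = %u\n", hook_entry);	/* 320 - 16 = 304 */
	return 0;
}
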
1128 1135
1129 static int compat_table_info(const struct xt_table_info *info, 1136 static int compat_table_info(const struct xt_table_info *info,
1130 struct xt_table_info *newinfo) 1137 struct xt_table_info *newinfo)
1131 { 1138 {
1132 void *loc_cpu_entry; 1139 void *loc_cpu_entry;
1133 1140
1134 if (!newinfo || !info) 1141 if (!newinfo || !info)
1135 return -EINVAL; 1142 return -EINVAL;
1136 1143
1137 /* we dont care about newinfo->entries[] */ 1144 /* we dont care about newinfo->entries[] */
1138 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1145 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1139 newinfo->initial_entries = 0; 1146 newinfo->initial_entries = 0;
1140 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1147 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1141 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size, 1148 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1142 compat_calc_entry, info, loc_cpu_entry, 1149 compat_calc_entry, info, loc_cpu_entry,
1143 newinfo); 1150 newinfo);
1144 } 1151 }
1145 #endif 1152 #endif
1146 1153
1147 static int get_info(struct net *net, void __user *user, int *len, int compat) 1154 static int get_info(struct net *net, void __user *user, int *len, int compat)
1148 { 1155 {
1149 char name[IP6T_TABLE_MAXNAMELEN]; 1156 char name[IP6T_TABLE_MAXNAMELEN];
1150 struct xt_table *t; 1157 struct xt_table *t;
1151 int ret; 1158 int ret;
1152 1159
1153 if (*len != sizeof(struct ip6t_getinfo)) { 1160 if (*len != sizeof(struct ip6t_getinfo)) {
1154 duprintf("length %u != %zu\n", *len, 1161 duprintf("length %u != %zu\n", *len,
1155 sizeof(struct ip6t_getinfo)); 1162 sizeof(struct ip6t_getinfo));
1156 return -EINVAL; 1163 return -EINVAL;
1157 } 1164 }
1158 1165
1159 if (copy_from_user(name, user, sizeof(name)) != 0) 1166 if (copy_from_user(name, user, sizeof(name)) != 0)
1160 return -EFAULT; 1167 return -EFAULT;
1161 1168
1162 name[IP6T_TABLE_MAXNAMELEN-1] = '\0'; 1169 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1163 #ifdef CONFIG_COMPAT 1170 #ifdef CONFIG_COMPAT
1164 if (compat) 1171 if (compat)
1165 xt_compat_lock(AF_INET6); 1172 xt_compat_lock(AF_INET6);
1166 #endif 1173 #endif
1167 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 1174 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1168 "ip6table_%s", name); 1175 "ip6table_%s", name);
1169 if (t && !IS_ERR(t)) { 1176 if (t && !IS_ERR(t)) {
1170 struct ip6t_getinfo info; 1177 struct ip6t_getinfo info;
1171 const struct xt_table_info *private = t->private; 1178 const struct xt_table_info *private = t->private;
1172 #ifdef CONFIG_COMPAT 1179 #ifdef CONFIG_COMPAT
1173 struct xt_table_info tmp; 1180 struct xt_table_info tmp;
1174 1181
1175 if (compat) { 1182 if (compat) {
1176 ret = compat_table_info(private, &tmp); 1183 ret = compat_table_info(private, &tmp);
1177 xt_compat_flush_offsets(AF_INET6); 1184 xt_compat_flush_offsets(AF_INET6);
1178 private = &tmp; 1185 private = &tmp;
1179 } 1186 }
1180 #endif 1187 #endif
1181 info.valid_hooks = t->valid_hooks; 1188 info.valid_hooks = t->valid_hooks;
1182 memcpy(info.hook_entry, private->hook_entry, 1189 memcpy(info.hook_entry, private->hook_entry,
1183 sizeof(info.hook_entry)); 1190 sizeof(info.hook_entry));
1184 memcpy(info.underflow, private->underflow, 1191 memcpy(info.underflow, private->underflow,
1185 sizeof(info.underflow)); 1192 sizeof(info.underflow));
1186 info.num_entries = private->number; 1193 info.num_entries = private->number;
1187 info.size = private->size; 1194 info.size = private->size;
1188 strcpy(info.name, name); 1195 strcpy(info.name, name);
1189 1196
1190 if (copy_to_user(user, &info, *len) != 0) 1197 if (copy_to_user(user, &info, *len) != 0)
1191 ret = -EFAULT; 1198 ret = -EFAULT;
1192 else 1199 else
1193 ret = 0; 1200 ret = 0;
1194 1201
1195 xt_table_unlock(t); 1202 xt_table_unlock(t);
1196 module_put(t->me); 1203 module_put(t->me);
1197 } else 1204 } else
1198 ret = t ? PTR_ERR(t) : -ENOENT; 1205 ret = t ? PTR_ERR(t) : -ENOENT;
1199 #ifdef CONFIG_COMPAT 1206 #ifdef CONFIG_COMPAT
1200 if (compat) 1207 if (compat)
1201 xt_compat_unlock(AF_INET6); 1208 xt_compat_unlock(AF_INET6);
1202 #endif 1209 #endif
1203 return ret; 1210 return ret;
1204 } 1211 }
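
get_info() is also where this commit's on-demand behaviour shows up from userspace: the try_then_request_module() call above modprobes ip6table_<name> the first time a table is asked for, and the freshly loaded module builds its initial ruleset at runtime. A hedged userspace sketch of the corresponding IP6T_SO_GET_INFO query follows; it needs CAP_NET_ADMIN and assumes a userland whose word size matches the kernel (i.e. the non-compat path).

/* Minimal sketch, not production code: error handling is deliberately thin. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

int main(void)
{
	struct ip6t_getinfo info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0) {
		perror("socket");	/* typically needs root/CAP_NET_ADMIN */
		return 1;
	}

	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");	/* loads ip6table_filter on demand */

	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO, &info, &len) < 0) {
		perror("getsockopt(IP6T_SO_GET_INFO)");
		close(fd);
		return 1;
	}

	printf("table %s: %u entries, %u bytes, valid_hooks=%#x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	close(fd);
	return 0;
}
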
1205 1212
1206 static int 1213 static int
1207 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len) 1214 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1208 { 1215 {
1209 int ret; 1216 int ret;
1210 struct ip6t_get_entries get; 1217 struct ip6t_get_entries get;
1211 struct xt_table *t; 1218 struct xt_table *t;
1212 1219
1213 if (*len < sizeof(get)) { 1220 if (*len < sizeof(get)) {
1214 duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); 1221 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1215 return -EINVAL; 1222 return -EINVAL;
1216 } 1223 }
1217 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1224 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1218 return -EFAULT; 1225 return -EFAULT;
1219 if (*len != sizeof(struct ip6t_get_entries) + get.size) { 1226 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1220 duprintf("get_entries: %u != %zu\n", 1227 duprintf("get_entries: %u != %zu\n",
1221 *len, sizeof(get) + get.size); 1228 *len, sizeof(get) + get.size);
1222 return -EINVAL; 1229 return -EINVAL;
1223 } 1230 }
1224 1231
1225 t = xt_find_table_lock(net, AF_INET6, get.name); 1232 t = xt_find_table_lock(net, AF_INET6, get.name);
1226 if (t && !IS_ERR(t)) { 1233 if (t && !IS_ERR(t)) {
1227 struct xt_table_info *private = t->private; 1234 struct xt_table_info *private = t->private;
1228 duprintf("t->private->number = %u\n", private->number); 1235 duprintf("t->private->number = %u\n", private->number);
1229 if (get.size == private->size) 1236 if (get.size == private->size)
1230 ret = copy_entries_to_user(private->size, 1237 ret = copy_entries_to_user(private->size,
1231 t, uptr->entrytable); 1238 t, uptr->entrytable);
1232 else { 1239 else {
1233 duprintf("get_entries: I've got %u not %u!\n", 1240 duprintf("get_entries: I've got %u not %u!\n",
1234 private->size, get.size); 1241 private->size, get.size);
1235 ret = -EAGAIN; 1242 ret = -EAGAIN;
1236 } 1243 }
1237 module_put(t->me); 1244 module_put(t->me);
1238 xt_table_unlock(t); 1245 xt_table_unlock(t);
1239 } else 1246 } else
1240 ret = t ? PTR_ERR(t) : -ENOENT; 1247 ret = t ? PTR_ERR(t) : -ENOENT;
1241 1248
1242 return ret; 1249 return ret;
1243 } 1250 }
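
Fetching the rules themselves is a two-step dance: GET_INFO reports the blob size, then GET_ENTRIES must be called with a buffer of exactly sizeof(struct ip6t_get_entries) + size, or get_entries() rejects it (-EINVAL for a bad length, -EAGAIN if the table changed size in between). A sketch continuing from the get-info example above; dump_entries() is a hypothetical helper name, and fd/info come from that example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static struct ip6t_get_entries *dump_entries(int fd, const struct ip6t_getinfo *info)
{
	socklen_t len = sizeof(struct ip6t_get_entries) + info->size;
	struct ip6t_get_entries *e = calloc(1, len);

	if (!e)
		return NULL;
	strcpy(e->name, info->name);
	e->size = info->size;		/* must match the kernel's private->size */
	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_ENTRIES, e, &len) < 0) {
		perror("getsockopt(IP6T_SO_GET_ENTRIES)");
		free(e);
		return NULL;
	}
	return e;			/* caller walks e->entrytable and frees it */
}
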
1244 1251
1245 static int 1252 static int
1246 __do_replace(struct net *net, const char *name, unsigned int valid_hooks, 1253 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *newinfo, unsigned int num_counters, 1254 struct xt_table_info *newinfo, unsigned int num_counters,
1248 void __user *counters_ptr) 1255 void __user *counters_ptr)
1249 { 1256 {
1250 int ret; 1257 int ret;
1251 struct xt_table *t; 1258 struct xt_table *t;
1252 struct xt_table_info *oldinfo; 1259 struct xt_table_info *oldinfo;
1253 struct xt_counters *counters; 1260 struct xt_counters *counters;
1254 const void *loc_cpu_old_entry; 1261 const void *loc_cpu_old_entry;
1255 1262
1256 ret = 0; 1263 ret = 0;
1257 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1264 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1258 numa_node_id()); 1265 numa_node_id());
1259 if (!counters) { 1266 if (!counters) {
1260 ret = -ENOMEM; 1267 ret = -ENOMEM;
1261 goto out; 1268 goto out;
1262 } 1269 }
1263 1270
1264 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 1271 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1265 "ip6table_%s", name); 1272 "ip6table_%s", name);
1266 if (!t || IS_ERR(t)) { 1273 if (!t || IS_ERR(t)) {
1267 ret = t ? PTR_ERR(t) : -ENOENT; 1274 ret = t ? PTR_ERR(t) : -ENOENT;
1268 goto free_newinfo_counters_untrans; 1275 goto free_newinfo_counters_untrans;
1269 } 1276 }
1270 1277
1271 /* You lied! */ 1278 /* You lied! */
1272 if (valid_hooks != t->valid_hooks) { 1279 if (valid_hooks != t->valid_hooks) {
1273 duprintf("Valid hook crap: %08X vs %08X\n", 1280 duprintf("Valid hook crap: %08X vs %08X\n",
1274 valid_hooks, t->valid_hooks); 1281 valid_hooks, t->valid_hooks);
1275 ret = -EINVAL; 1282 ret = -EINVAL;
1276 goto put_module; 1283 goto put_module;
1277 } 1284 }
1278 1285
1279 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); 1286 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1280 if (!oldinfo) 1287 if (!oldinfo)
1281 goto put_module; 1288 goto put_module;
1282 1289
1283 /* Update module usage count based on number of rules */ 1290 /* Update module usage count based on number of rules */
1284 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", 1291 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1285 oldinfo->number, oldinfo->initial_entries, newinfo->number); 1292 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1286 if ((oldinfo->number > oldinfo->initial_entries) || 1293 if ((oldinfo->number > oldinfo->initial_entries) ||
1287 (newinfo->number <= oldinfo->initial_entries)) 1294 (newinfo->number <= oldinfo->initial_entries))
1288 module_put(t->me); 1295 module_put(t->me);
1289 if ((oldinfo->number > oldinfo->initial_entries) && 1296 if ((oldinfo->number > oldinfo->initial_entries) &&
1290 (newinfo->number <= oldinfo->initial_entries)) 1297 (newinfo->number <= oldinfo->initial_entries))
1291 module_put(t->me); 1298 module_put(t->me);
1292 1299
1293 /* Get the old counters, and synchronize with replace */ 1300 /* Get the old counters, and synchronize with replace */
1294 get_counters(oldinfo, counters); 1301 get_counters(oldinfo, counters);
1295 1302
1296 /* Decrease module usage counts and free resource */ 1303 /* Decrease module usage counts and free resource */
1297 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1304 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1298 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1305 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1299 net, NULL); 1306 net, NULL);
1300 xt_free_table_info(oldinfo); 1307 xt_free_table_info(oldinfo);
1301 if (copy_to_user(counters_ptr, counters, 1308 if (copy_to_user(counters_ptr, counters,
1302 sizeof(struct xt_counters) * num_counters) != 0) 1309 sizeof(struct xt_counters) * num_counters) != 0)
1303 ret = -EFAULT; 1310 ret = -EFAULT;
1304 vfree(counters); 1311 vfree(counters);
1305 xt_table_unlock(t); 1312 xt_table_unlock(t);
1306 return ret; 1313 return ret;
1307 1314
1308 put_module: 1315 put_module:
1309 module_put(t->me); 1316 module_put(t->me);
1310 xt_table_unlock(t); 1317 xt_table_unlock(t);
1311 free_newinfo_counters_untrans: 1318 free_newinfo_counters_untrans:
1312 vfree(counters); 1319 vfree(counters);
1313 out: 1320 out:
1314 return ret; 1321 return ret;
1315 } 1322 }
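
__do_replace() is the common tail for the native and compat replace paths: look up (and if needed modprobe) the table, check valid_hooks, swap the xt_table_info via xt_replace_table(), harvest the retired set's counters, clean up and free its entries, and only then copy the counters back to userspace. The paired module_put() tests keep the table module pinned exactly while user-defined rules beyond the initial set are loaded. Below is a minimal sketch of the central pointer swap only, with locking deliberately elided; the real path runs under the table mutex taken by xt_find_table_lock().

/* Toy model of the swap, not xt_replace_table() itself. */
#include <stdio.h>

struct table_info { unsigned int number; /* ... entries, counters ... */ };
struct table { struct table_info *private; };

static struct table_info *replace_table(struct table *t, struct table_info *newinfo)
{
	struct table_info *oldinfo = t->private;

	t->private = newinfo;		/* new rules visible from here on */
	return oldinfo;			/* caller harvests counters, then frees */
}

int main(void)
{
	struct table_info a = { .number = 4 }, b = { .number = 9 };
	struct table t = { .private = &a };
	struct table_info *old = replace_table(&t, &b);

	printf("old set had %u rules, now serving %u\n",
	       old->number, t.private->number);
	return 0;
}
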
1316 1323
1317 static int 1324 static int
1318 do_replace(struct net *net, void __user *user, unsigned int len) 1325 do_replace(struct net *net, void __user *user, unsigned int len)
1319 { 1326 {
1320 int ret; 1327 int ret;
1321 struct ip6t_replace tmp; 1328 struct ip6t_replace tmp;
1322 struct xt_table_info *newinfo; 1329 struct xt_table_info *newinfo;
1323 void *loc_cpu_entry; 1330 void *loc_cpu_entry;
1324 1331
1325 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1332 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1326 return -EFAULT; 1333 return -EFAULT;
1327 1334
1328 /* overflow check */ 1335 /* overflow check */
1329 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1336 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1330 return -ENOMEM; 1337 return -ENOMEM;
1331 1338
1332 newinfo = xt_alloc_table_info(tmp.size); 1339 newinfo = xt_alloc_table_info(tmp.size);
1333 if (!newinfo) 1340 if (!newinfo)
1334 return -ENOMEM; 1341 return -ENOMEM;
1335 1342
1336 /* choose the copy that is on our node/cpu */ 1343 /* choose the copy that is on our node/cpu */
1337 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1344 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1338 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), 1345 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1339 tmp.size) != 0) { 1346 tmp.size) != 0) {
1340 ret = -EFAULT; 1347 ret = -EFAULT;
1341 goto free_newinfo; 1348 goto free_newinfo;
1342 } 1349 }
1343 1350
1344 ret = translate_table(net, tmp.name, tmp.valid_hooks, 1351 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1345 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, 1352 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1346 tmp.hook_entry, tmp.underflow); 1353 tmp.hook_entry, tmp.underflow);
1347 if (ret != 0) 1354 if (ret != 0)
1348 goto free_newinfo; 1355 goto free_newinfo;
1349 1356
1350 duprintf("ip_tables: Translated table\n"); 1357 duprintf("ip_tables: Translated table\n");
1351 1358
1352 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1359 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1353 tmp.num_counters, tmp.counters); 1360 tmp.num_counters, tmp.counters);
1354 if (ret) 1361 if (ret)
1355 goto free_newinfo_untrans; 1362 goto free_newinfo_untrans;
1356 return 0; 1363 return 0;
1357 1364
1358 free_newinfo_untrans: 1365 free_newinfo_untrans:
1359 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); 1366 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1360 free_newinfo: 1367 free_newinfo:
1361 xt_free_table_info(newinfo); 1368 xt_free_table_info(newinfo);
1362 return ret; 1369 return ret;
1363 } 1370 }
1364 1371
1365 /* We're lazy, and add to the first CPU; overflow works its fey magic 1372 /* We're lazy, and add to the first CPU; overflow works its fey magic
1366 * and everything is OK. */ 1373 * and everything is OK. */
1367 static int 1374 static int
1368 add_counter_to_entry(struct ip6t_entry *e, 1375 add_counter_to_entry(struct ip6t_entry *e,
1369 const struct xt_counters addme[], 1376 const struct xt_counters addme[],
1370 unsigned int *i) 1377 unsigned int *i)
1371 { 1378 {
1372 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1379 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1373 1380
1374 (*i)++; 1381 (*i)++;
1375 return 0; 1382 return 0;
1376 } 1383 }
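
add_counter_to_entry() is the per-rule callback for the counter walk below: ADD_COUNTER() accumulates the byte and packet counts supplied by userspace into the rule's own xt_counters pair on the current CPU's copy, with the write side of the per-CPU xt_info lock held by the caller. A standalone sketch of that accumulation; field names mirror struct xt_counters.

#include <stdio.h>
#include <stdint.h>

struct counters { uint64_t pcnt, bcnt; };	/* packets, bytes */

static void add_counter(struct counters *c, uint64_t bytes, uint64_t pkts)
{
	c->bcnt += bytes;
	c->pcnt += pkts;
}

int main(void)
{
	struct counters rule = { 0, 0 };

	add_counter(&rule, 1500, 1);	/* one full-size packet */
	add_counter(&rule, 40, 1);	/* one bare ACK */
	printf("pkts=%llu bytes=%llu\n",
	       (unsigned long long)rule.pcnt, (unsigned long long)rule.bcnt);
	return 0;
}
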
1377 1384
1378 static int 1385 static int
1379 do_add_counters(struct net *net, void __user *user, unsigned int len, 1386 do_add_counters(struct net *net, void __user *user, unsigned int len,
1380 int compat) 1387 int compat)
1381 { 1388 {
1382 unsigned int i, curcpu; 1389 unsigned int i, curcpu;
1383 struct xt_counters_info tmp; 1390 struct xt_counters_info tmp;
1384 struct xt_counters *paddc; 1391 struct xt_counters *paddc;
1385 unsigned int num_counters; 1392 unsigned int num_counters;
1386 char *name; 1393 char *name;
1387 int size; 1394 int size;
1388 void *ptmp; 1395 void *ptmp;
1389 struct xt_table *t; 1396 struct xt_table *t;
1390 const struct xt_table_info *private; 1397 const struct xt_table_info *private;
1391 int ret = 0; 1398 int ret = 0;
1392 const void *loc_cpu_entry; 1399 const void *loc_cpu_entry;
1393 #ifdef CONFIG_COMPAT 1400 #ifdef CONFIG_COMPAT
1394 struct compat_xt_counters_info compat_tmp; 1401 struct compat_xt_counters_info compat_tmp;
1395 1402
1396 if (compat) { 1403 if (compat) {
1397 ptmp = &compat_tmp; 1404 ptmp = &compat_tmp;
1398 size = sizeof(struct compat_xt_counters_info); 1405 size = sizeof(struct compat_xt_counters_info);
1399 } else 1406 } else
1400 #endif 1407 #endif
1401 { 1408 {
1402 ptmp = &tmp; 1409 ptmp = &tmp;
1403 size = sizeof(struct xt_counters_info); 1410 size = sizeof(struct xt_counters_info);
1404 } 1411 }
1405 1412
1406 if (copy_from_user(ptmp, user, size) != 0) 1413 if (copy_from_user(ptmp, user, size) != 0)
1407 return -EFAULT; 1414 return -EFAULT;
1408 1415
1409 #ifdef CONFIG_COMPAT 1416 #ifdef CONFIG_COMPAT
1410 if (compat) { 1417 if (compat) {
1411 num_counters = compat_tmp.num_counters; 1418 num_counters = compat_tmp.num_counters;
1412 name = compat_tmp.name; 1419 name = compat_tmp.name;
1413 } else 1420 } else
1414 #endif 1421 #endif
1415 { 1422 {
1416 num_counters = tmp.num_counters; 1423 num_counters = tmp.num_counters;
1417 name = tmp.name; 1424 name = tmp.name;
1418 } 1425 }
1419 1426
1420 if (len != size + num_counters * sizeof(struct xt_counters)) 1427 if (len != size + num_counters * sizeof(struct xt_counters))
1421 return -EINVAL; 1428 return -EINVAL;
1422 1429
1423 paddc = vmalloc_node(len - size, numa_node_id()); 1430 paddc = vmalloc_node(len - size, numa_node_id());
1424 if (!paddc) 1431 if (!paddc)
1425 return -ENOMEM; 1432 return -ENOMEM;
1426 1433
1427 if (copy_from_user(paddc, user + size, len - size) != 0) { 1434 if (copy_from_user(paddc, user + size, len - size) != 0) {
1428 ret = -EFAULT; 1435 ret = -EFAULT;
1429 goto free; 1436 goto free;
1430 } 1437 }
1431 1438
1432 t = xt_find_table_lock(net, AF_INET6, name); 1439 t = xt_find_table_lock(net, AF_INET6, name);
1433 if (!t || IS_ERR(t)) { 1440 if (!t || IS_ERR(t)) {
1434 ret = t ? PTR_ERR(t) : -ENOENT; 1441 ret = t ? PTR_ERR(t) : -ENOENT;
1435 goto free; 1442 goto free;
1436 } 1443 }
1437 1444
1438 1445
1439 local_bh_disable(); 1446 local_bh_disable();
1440 private = t->private; 1447 private = t->private;
1441 if (private->number != num_counters) { 1448 if (private->number != num_counters) {
1442 ret = -EINVAL; 1449 ret = -EINVAL;
1443 goto unlock_up_free; 1450 goto unlock_up_free;
1444 } 1451 }
1445 1452
1446 i = 0; 1453 i = 0;
1447 /* Choose the copy that is on our node */ 1454 /* Choose the copy that is on our node */
1448 curcpu = smp_processor_id(); 1455 curcpu = smp_processor_id();
1449 xt_info_wrlock(curcpu); 1456 xt_info_wrlock(curcpu);
1450 loc_cpu_entry = private->entries[curcpu]; 1457 loc_cpu_entry = private->entries[curcpu];
1451 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1458 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1452 private->size, 1459 private->size,
1453 add_counter_to_entry, 1460 add_counter_to_entry,
1454 paddc, 1461 paddc,
1455 &i); 1462 &i);
1456 xt_info_wrunlock(curcpu); 1463 xt_info_wrunlock(curcpu);
1457 1464
1458 unlock_up_free: 1465 unlock_up_free:
1459 local_bh_enable(); 1466 local_bh_enable();
1460 xt_table_unlock(t); 1467 xt_table_unlock(t);
1461 module_put(t->me); 1468 module_put(t->me);
1462 free: 1469 free:
1463 vfree(paddc); 1470 vfree(paddc);
1464 1471
1465 return ret; 1472 return ret;
1466 } 1473 }
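
The blob do_add_counters() parses is an xt_counters_info header (table name plus num_counters) immediately followed by one struct xt_counters per rule; len must match that layout exactly and num_counters must equal the table's current rule count, otherwise the call fails with -EINVAL. A hedged userspace sketch of building such a blob: add_counters() is a hypothetical helper, nrules would come from a prior IP6T_SO_GET_INFO, and the all-zero counters make it a harmless no-op add.

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int add_counters(int fd, const char *table, unsigned int nrules)
{
	size_t len = sizeof(struct xt_counters_info) +
		     nrules * sizeof(struct xt_counters);
	struct xt_counters_info *ci = calloc(1, len);
	int ret;

	if (!ci)
		return -1;
	strcpy(ci->name, table);
	ci->num_counters = nrules;	/* must match the table's rule count */
	ret = setsockopt(fd, IPPROTO_IPV6, IP6T_SO_SET_ADD_COUNTERS, ci, len);
	free(ci);
	return ret;
}
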
1467 1474
1468 #ifdef CONFIG_COMPAT 1475 #ifdef CONFIG_COMPAT
1469 struct compat_ip6t_replace { 1476 struct compat_ip6t_replace {
1470 char name[IP6T_TABLE_MAXNAMELEN]; 1477 char name[IP6T_TABLE_MAXNAMELEN];
1471 u32 valid_hooks; 1478 u32 valid_hooks;
1472 u32 num_entries; 1479 u32 num_entries;
1473 u32 size; 1480 u32 size;
1474 u32 hook_entry[NF_INET_NUMHOOKS]; 1481 u32 hook_entry[NF_INET_NUMHOOKS];
1475 u32 underflow[NF_INET_NUMHOOKS]; 1482 u32 underflow[NF_INET_NUMHOOKS];
1476 u32 num_counters; 1483 u32 num_counters;
1477 compat_uptr_t counters; /* struct ip6t_counters * */ 1484 compat_uptr_t counters; /* struct ip6t_counters * */
1478 struct compat_ip6t_entry entries[0]; 1485 struct compat_ip6t_entry entries[0];
1479 }; 1486 };
1480 1487
1481 static int 1488 static int
1482 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, 1489 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1483 unsigned int *size, struct xt_counters *counters, 1490 unsigned int *size, struct xt_counters *counters,
1484 unsigned int *i) 1491 unsigned int *i)
1485 { 1492 {
1486 struct ip6t_entry_target *t; 1493 struct ip6t_entry_target *t;
1487 struct compat_ip6t_entry __user *ce; 1494 struct compat_ip6t_entry __user *ce;
1488 u_int16_t target_offset, next_offset; 1495 u_int16_t target_offset, next_offset;
1489 compat_uint_t origsize; 1496 compat_uint_t origsize;
1490 int ret; 1497 int ret;
1491 1498
1492 ret = -EFAULT; 1499 ret = -EFAULT;
1493 origsize = *size; 1500 origsize = *size;
1494 ce = (struct compat_ip6t_entry __user *)*dstptr; 1501 ce = (struct compat_ip6t_entry __user *)*dstptr;
1495 if (copy_to_user(ce, e, sizeof(struct ip6t_entry))) 1502 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1496 goto out; 1503 goto out;
1497 1504
1498 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1505 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1499 goto out; 1506 goto out;
1500 1507
1501 *dstptr += sizeof(struct compat_ip6t_entry); 1508 *dstptr += sizeof(struct compat_ip6t_entry);
1502 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1509 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1503 1510
1504 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1511 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1505 target_offset = e->target_offset - (origsize - *size); 1512 target_offset = e->target_offset - (origsize - *size);
1506 if (ret) 1513 if (ret)
1507 goto out; 1514 goto out;
1508 t = ip6t_get_target(e); 1515 t = ip6t_get_target(e);
1509 ret = xt_compat_target_to_user(t, dstptr, size); 1516 ret = xt_compat_target_to_user(t, dstptr, size);
1510 if (ret) 1517 if (ret)
1511 goto out; 1518 goto out;
1512 ret = -EFAULT; 1519 ret = -EFAULT;
1513 next_offset = e->next_offset - (origsize - *size); 1520 next_offset = e->next_offset - (origsize - *size);
1514 if (put_user(target_offset, &ce->target_offset)) 1521 if (put_user(target_offset, &ce->target_offset))
1515 goto out; 1522 goto out;
1516 if (put_user(next_offset, &ce->next_offset)) 1523 if (put_user(next_offset, &ce->next_offset))
1517 goto out; 1524 goto out;
1518 1525
1519 (*i)++; 1526 (*i)++;
1520 return 0; 1527 return 0;
1521 out: 1528 out:
1522 return ret; 1529 return ret;
1523 } 1530 }
1524 1531
1525 static int 1532 static int
1526 compat_find_calc_match(struct ip6t_entry_match *m, 1533 compat_find_calc_match(struct ip6t_entry_match *m,
1527 const char *name, 1534 const char *name,
1528 const struct ip6t_ip6 *ipv6, 1535 const struct ip6t_ip6 *ipv6,
1529 unsigned int hookmask, 1536 unsigned int hookmask,
1530 int *size, unsigned int *i) 1537 int *size, unsigned int *i)
1531 { 1538 {
1532 struct xt_match *match; 1539 struct xt_match *match;
1533 1540
1534 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, 1541 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1535 m->u.user.revision), 1542 m->u.user.revision),
1536 "ip6t_%s", m->u.user.name); 1543 "ip6t_%s", m->u.user.name);
1537 if (IS_ERR(match) || !match) { 1544 if (IS_ERR(match) || !match) {
1538 duprintf("compat_check_calc_match: `%s' not found\n", 1545 duprintf("compat_check_calc_match: `%s' not found\n",
1539 m->u.user.name); 1546 m->u.user.name);
1540 return match ? PTR_ERR(match) : -ENOENT; 1547 return match ? PTR_ERR(match) : -ENOENT;
1541 } 1548 }
1542 m->u.kernel.match = match; 1549 m->u.kernel.match = match;
1543 *size += xt_compat_match_offset(match); 1550 *size += xt_compat_match_offset(match);
1544 1551
1545 (*i)++; 1552 (*i)++;
1546 return 0; 1553 return 0;
1547 } 1554 }
1548 1555
1549 static int 1556 static int
1550 compat_release_match(struct ip6t_entry_match *m, unsigned int *i) 1557 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1551 { 1558 {
1552 if (i && (*i)-- == 0) 1559 if (i && (*i)-- == 0)
1553 return 1; 1560 return 1;
1554 1561
1555 module_put(m->u.kernel.match->me); 1562 module_put(m->u.kernel.match->me);
1556 return 0; 1563 return 0;
1557 } 1564 }
1558 1565
1559 static int 1566 static int
1560 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i) 1567 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1561 { 1568 {
1562 struct ip6t_entry_target *t; 1569 struct ip6t_entry_target *t;
1563 1570
1564 if (i && (*i)-- == 0) 1571 if (i && (*i)-- == 0)
1565 return 1; 1572 return 1;
1566 1573
1567 /* Cleanup all matches */ 1574 /* Cleanup all matches */
1568 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL); 1575 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1569 t = compat_ip6t_get_target(e); 1576 t = compat_ip6t_get_target(e);
1570 module_put(t->u.kernel.target->me); 1577 module_put(t->u.kernel.target->me);
1571 return 0; 1578 return 0;
1572 } 1579 }
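
The release helpers above lean on the iterator-macro convention used throughout this file: IP6T_MATCH_ITERATE() and friends invoke the callback once per item with the extra arguments appended, and a nonzero return aborts the walk and is handed back to the caller. The "if (i && (*i)-- == 0) return 1;" idiom therefore releases exactly the first *i items, i.e. the ones a failed setup pass actually grabbed references on. A self-contained sketch of the pattern:

/* Generic illustration of the iterate-with-early-stop convention. */
#include <stdio.h>

static int release_one(int item, unsigned int *limit)
{
	if (limit && (*limit)-- == 0)
		return 1;		/* past the last initialised item: stop */
	printf("releasing item %d\n", item);
	return 0;
}

static int iterate(const int *items, unsigned int n,
		   int (*fn)(int, unsigned int *), unsigned int *arg)
{
	unsigned int k;
	int ret;

	for (k = 0; k < n; k++) {
		ret = fn(items[k], arg);
		if (ret != 0)
			return ret;	/* early stop propagates to the caller */
	}
	return 0;
}

int main(void)
{
	int items[] = { 10, 11, 12, 13 };
	unsigned int grabbed = 2;	/* pretend setup failed after two items */

	iterate(items, 4, release_one, &grabbed);	/* releases 10 and 11 only */
	return 0;
}
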
1573 1580
1574 static int 1581 static int
1575 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, 1582 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1576 struct xt_table_info *newinfo, 1583 struct xt_table_info *newinfo,
1577 unsigned int *size, 1584 unsigned int *size,
1578 unsigned char *base, 1585 unsigned char *base,
1579 unsigned char *limit, 1586 unsigned char *limit,
1580 unsigned int *hook_entries, 1587 unsigned int *hook_entries,
1581 unsigned int *underflows, 1588 unsigned int *underflows,
1582 unsigned int *i, 1589 unsigned int *i,
1583 const char *name) 1590 const char *name)
1584 { 1591 {
1585 struct ip6t_entry_target *t; 1592 struct ip6t_entry_target *t;
1586 struct xt_target *target; 1593 struct xt_target *target;
1587 unsigned int entry_offset; 1594 unsigned int entry_offset;
1588 unsigned int j; 1595 unsigned int j;
1589 int ret, off, h; 1596 int ret, off, h;
1590 1597
1591 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1598 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1592 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || 1599 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1593 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { 1600 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1594 duprintf("Bad offset %p, limit = %p\n", e, limit); 1601 duprintf("Bad offset %p, limit = %p\n", e, limit);
1595 return -EINVAL; 1602 return -EINVAL;
1596 } 1603 }
1597 1604
1598 if (e->next_offset < sizeof(struct compat_ip6t_entry) + 1605 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1599 sizeof(struct compat_xt_entry_target)) { 1606 sizeof(struct compat_xt_entry_target)) {
1600 duprintf("checking: element %p size %u\n", 1607 duprintf("checking: element %p size %u\n",
1601 e, e->next_offset); 1608 e, e->next_offset);
1602 return -EINVAL; 1609 return -EINVAL;
1603 } 1610 }
1604 1611
1605 /* For purposes of check_entry casting the compat entry is fine */ 1612 /* For purposes of check_entry casting the compat entry is fine */
1606 ret = check_entry((struct ip6t_entry *)e, name); 1613 ret = check_entry((struct ip6t_entry *)e, name);
1607 if (ret) 1614 if (ret)
1608 return ret; 1615 return ret;
1609 1616
1610 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1617 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1611 entry_offset = (void *)e - (void *)base; 1618 entry_offset = (void *)e - (void *)base;
1612 j = 0; 1619 j = 0;
1613 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name, 1620 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1614 &e->ipv6, e->comefrom, &off, &j); 1621 &e->ipv6, e->comefrom, &off, &j);
1615 if (ret != 0) 1622 if (ret != 0)
1616 goto release_matches; 1623 goto release_matches;
1617 1624
1618 t = compat_ip6t_get_target(e); 1625 t = compat_ip6t_get_target(e);
1619 target = try_then_request_module(xt_find_target(AF_INET6, 1626 target = try_then_request_module(xt_find_target(AF_INET6,
1620 t->u.user.name, 1627 t->u.user.name,
1621 t->u.user.revision), 1628 t->u.user.revision),
1622 "ip6t_%s", t->u.user.name); 1629 "ip6t_%s", t->u.user.name);
1623 if (IS_ERR(target) || !target) { 1630 if (IS_ERR(target) || !target) {
1624 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", 1631 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1625 t->u.user.name); 1632 t->u.user.name);
1626 ret = target ? PTR_ERR(target) : -ENOENT; 1633 ret = target ? PTR_ERR(target) : -ENOENT;
1627 goto release_matches; 1634 goto release_matches;
1628 } 1635 }
1629 t->u.kernel.target = target; 1636 t->u.kernel.target = target;
1630 1637
1631 off += xt_compat_target_offset(target); 1638 off += xt_compat_target_offset(target);
1632 *size += off; 1639 *size += off;
1633 ret = xt_compat_add_offset(AF_INET6, entry_offset, off); 1640 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1634 if (ret) 1641 if (ret)
1635 goto out; 1642 goto out;
1636 1643
1637 /* Check hooks & underflows */ 1644 /* Check hooks & underflows */
1638 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 1645 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1639 if ((unsigned char *)e - base == hook_entries[h]) 1646 if ((unsigned char *)e - base == hook_entries[h])
1640 newinfo->hook_entry[h] = hook_entries[h]; 1647 newinfo->hook_entry[h] = hook_entries[h];
1641 if ((unsigned char *)e - base == underflows[h]) 1648 if ((unsigned char *)e - base == underflows[h])
1642 newinfo->underflow[h] = underflows[h]; 1649 newinfo->underflow[h] = underflows[h];
1643 } 1650 }
1644 1651
1645 /* Clear counters and comefrom */ 1652 /* Clear counters and comefrom */
1646 memset(&e->counters, 0, sizeof(e->counters)); 1653 memset(&e->counters, 0, sizeof(e->counters));
1647 e->comefrom = 0; 1654 e->comefrom = 0;
1648 1655
1649 (*i)++; 1656 (*i)++;
1650 return 0; 1657 return 0;
1651 1658
1652 out: 1659 out:
1653 module_put(t->u.kernel.target->me); 1660 module_put(t->u.kernel.target->me);
1654 release_matches: 1661 release_matches:
1655 IP6T_MATCH_ITERATE(e, compat_release_match, &j); 1662 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1656 return ret; 1663 return ret;
1657 } 1664 }
1658 1665
1659 static int 1666 static int
1660 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, 1667 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1661 unsigned int *size, const char *name, 1668 unsigned int *size, const char *name,
1662 struct xt_table_info *newinfo, unsigned char *base) 1669 struct xt_table_info *newinfo, unsigned char *base)
1663 { 1670 {
1664 struct ip6t_entry_target *t; 1671 struct ip6t_entry_target *t;
1665 struct xt_target *target; 1672 struct xt_target *target;
1666 struct ip6t_entry *de; 1673 struct ip6t_entry *de;
1667 unsigned int origsize; 1674 unsigned int origsize;
1668 int ret, h; 1675 int ret, h;
1669 1676
1670 ret = 0; 1677 ret = 0;
1671 origsize = *size; 1678 origsize = *size;
1672 de = (struct ip6t_entry *)*dstptr; 1679 de = (struct ip6t_entry *)*dstptr;
1673 memcpy(de, e, sizeof(struct ip6t_entry)); 1680 memcpy(de, e, sizeof(struct ip6t_entry));
1674 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1681 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1675 1682
1676 *dstptr += sizeof(struct ip6t_entry); 1683 *dstptr += sizeof(struct ip6t_entry);
1677 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1684 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1678 1685
1679 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user, 1686 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1680 dstptr, size); 1687 dstptr, size);
1681 if (ret) 1688 if (ret)
1682 return ret; 1689 return ret;
1683 de->target_offset = e->target_offset - (origsize - *size); 1690 de->target_offset = e->target_offset - (origsize - *size);
1684 t = compat_ip6t_get_target(e); 1691 t = compat_ip6t_get_target(e);
1685 target = t->u.kernel.target; 1692 target = t->u.kernel.target;
1686 xt_compat_target_from_user(t, dstptr, size); 1693 xt_compat_target_from_user(t, dstptr, size);
1687 1694
1688 de->next_offset = e->next_offset - (origsize - *size); 1695 de->next_offset = e->next_offset - (origsize - *size);
1689 for (h = 0; h < NF_INET_NUMHOOKS; h++) { 1696 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1690 if ((unsigned char *)de - base < newinfo->hook_entry[h]) 1697 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1691 newinfo->hook_entry[h] -= origsize - *size; 1698 newinfo->hook_entry[h] -= origsize - *size;
1692 if ((unsigned char *)de - base < newinfo->underflow[h]) 1699 if ((unsigned char *)de - base < newinfo->underflow[h])
1693 newinfo->underflow[h] -= origsize - *size; 1700 newinfo->underflow[h] -= origsize - *size;
1694 } 1701 }
1695 return ret; 1702 return ret;
1696 } 1703 }
1697 1704
1698 static int compat_check_entry(struct ip6t_entry *e, struct net *net, 1705 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1699 const char *name, unsigned int *i) 1706 const char *name, unsigned int *i)
1700 { 1707 {
1701 unsigned int j; 1708 unsigned int j;
1702 int ret; 1709 int ret;
1703 struct xt_mtchk_param mtpar; 1710 struct xt_mtchk_param mtpar;
1704 1711
1705 j = 0; 1712 j = 0;
1706 mtpar.net = net; 1713 mtpar.net = net;
1707 mtpar.table = name; 1714 mtpar.table = name;
1708 mtpar.entryinfo = &e->ipv6; 1715 mtpar.entryinfo = &e->ipv6;
1709 mtpar.hook_mask = e->comefrom; 1716 mtpar.hook_mask = e->comefrom;
1710 mtpar.family = NFPROTO_IPV6; 1717 mtpar.family = NFPROTO_IPV6;
1711 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j); 1718 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1712 if (ret) 1719 if (ret)
1713 goto cleanup_matches; 1720 goto cleanup_matches;
1714 1721
1715 ret = check_target(e, net, name); 1722 ret = check_target(e, net, name);
1716 if (ret) 1723 if (ret)
1717 goto cleanup_matches; 1724 goto cleanup_matches;
1718 1725
1719 (*i)++; 1726 (*i)++;
1720 return 0; 1727 return 0;
1721 1728
1722 cleanup_matches: 1729 cleanup_matches:
1723 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j); 1730 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
1724 return ret; 1731 return ret;
1725 } 1732 }
1726 1733
1727 static int 1734 static int
1728 translate_compat_table(struct net *net, 1735 translate_compat_table(struct net *net,
1729 const char *name, 1736 const char *name,
1730 unsigned int valid_hooks, 1737 unsigned int valid_hooks,
1731 struct xt_table_info **pinfo, 1738 struct xt_table_info **pinfo,
1732 void **pentry0, 1739 void **pentry0,
1733 unsigned int total_size, 1740 unsigned int total_size,
1734 unsigned int number, 1741 unsigned int number,
1735 unsigned int *hook_entries, 1742 unsigned int *hook_entries,
1736 unsigned int *underflows) 1743 unsigned int *underflows)
1737 { 1744 {
1738 unsigned int i, j; 1745 unsigned int i, j;
1739 struct xt_table_info *newinfo, *info; 1746 struct xt_table_info *newinfo, *info;
1740 void *pos, *entry0, *entry1; 1747 void *pos, *entry0, *entry1;
1741 unsigned int size; 1748 unsigned int size;
1742 int ret; 1749 int ret;
1743 1750
1744 info = *pinfo; 1751 info = *pinfo;
1745 entry0 = *pentry0; 1752 entry0 = *pentry0;
1746 size = total_size; 1753 size = total_size;
1747 info->number = number; 1754 info->number = number;
1748 1755
1749 /* Init all hooks to impossible value. */ 1756 /* Init all hooks to impossible value. */
1750 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1757 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1751 info->hook_entry[i] = 0xFFFFFFFF; 1758 info->hook_entry[i] = 0xFFFFFFFF;
1752 info->underflow[i] = 0xFFFFFFFF; 1759 info->underflow[i] = 0xFFFFFFFF;
1753 } 1760 }
1754 1761
1755 duprintf("translate_compat_table: size %u\n", info->size); 1762 duprintf("translate_compat_table: size %u\n", info->size);
1756 j = 0; 1763 j = 0;
1757 xt_compat_lock(AF_INET6); 1764 xt_compat_lock(AF_INET6);
1758 /* Walk through entries, checking offsets. */ 1765 /* Walk through entries, checking offsets. */
1759 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1766 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1760 check_compat_entry_size_and_hooks, 1767 check_compat_entry_size_and_hooks,
1761 info, &size, entry0, 1768 info, &size, entry0,
1762 entry0 + total_size, 1769 entry0 + total_size,
1763 hook_entries, underflows, &j, name); 1770 hook_entries, underflows, &j, name);
1764 if (ret != 0) 1771 if (ret != 0)
1765 goto out_unlock; 1772 goto out_unlock;
1766 1773
1767 ret = -EINVAL; 1774 ret = -EINVAL;
1768 if (j != number) { 1775 if (j != number) {
1769 duprintf("translate_compat_table: %u not %u entries\n", 1776 duprintf("translate_compat_table: %u not %u entries\n",
1770 j, number); 1777 j, number);
1771 goto out_unlock; 1778 goto out_unlock;
1772 } 1779 }
1773 1780
1774 /* Check hooks all assigned */ 1781 /* Check hooks all assigned */
1775 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1782 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1776 /* Only hooks which are valid */ 1783 /* Only hooks which are valid */
1777 if (!(valid_hooks & (1 << i))) 1784 if (!(valid_hooks & (1 << i)))
1778 continue; 1785 continue;
1779 if (info->hook_entry[i] == 0xFFFFFFFF) { 1786 if (info->hook_entry[i] == 0xFFFFFFFF) {
1780 duprintf("Invalid hook entry %u %u\n", 1787 duprintf("Invalid hook entry %u %u\n",
1781 i, hook_entries[i]); 1788 i, hook_entries[i]);
1782 goto out_unlock; 1789 goto out_unlock;
1783 } 1790 }
1784 if (info->underflow[i] == 0xFFFFFFFF) { 1791 if (info->underflow[i] == 0xFFFFFFFF) {
1785 duprintf("Invalid underflow %u %u\n", 1792 duprintf("Invalid underflow %u %u\n",
1786 i, underflows[i]); 1793 i, underflows[i]);
1787 goto out_unlock; 1794 goto out_unlock;
1788 } 1795 }
1789 } 1796 }
1790 1797
1791 ret = -ENOMEM; 1798 ret = -ENOMEM;
1792 newinfo = xt_alloc_table_info(size); 1799 newinfo = xt_alloc_table_info(size);
1793 if (!newinfo) 1800 if (!newinfo)
1794 goto out_unlock; 1801 goto out_unlock;
1795 1802
1796 newinfo->number = number; 1803 newinfo->number = number;
1797 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 1804 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1798 newinfo->hook_entry[i] = info->hook_entry[i]; 1805 newinfo->hook_entry[i] = info->hook_entry[i];
1799 newinfo->underflow[i] = info->underflow[i]; 1806 newinfo->underflow[i] = info->underflow[i];
1800 } 1807 }
1801 entry1 = newinfo->entries[raw_smp_processor_id()]; 1808 entry1 = newinfo->entries[raw_smp_processor_id()];
1802 pos = entry1; 1809 pos = entry1;
1803 size = total_size; 1810 size = total_size;
1804 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1811 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1805 compat_copy_entry_from_user, 1812 compat_copy_entry_from_user,
1806 &pos, &size, name, newinfo, entry1); 1813 &pos, &size, name, newinfo, entry1);
1807 xt_compat_flush_offsets(AF_INET6); 1814 xt_compat_flush_offsets(AF_INET6);
1808 xt_compat_unlock(AF_INET6); 1815 xt_compat_unlock(AF_INET6);
1809 if (ret) 1816 if (ret)
1810 goto free_newinfo; 1817 goto free_newinfo;
1811 1818
1812 ret = -ELOOP; 1819 ret = -ELOOP;
1813 if (!mark_source_chains(newinfo, valid_hooks, entry1)) 1820 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1814 goto free_newinfo; 1821 goto free_newinfo;
1815 1822
1816 i = 0; 1823 i = 0;
1817 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1824 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1818 net, name, &i); 1825 net, name, &i);
1819 if (ret) { 1826 if (ret) {
1820 j -= i; 1827 j -= i;
1821 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1828 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1822 compat_release_entry, &j); 1829 compat_release_entry, &j);
1823 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i); 1830 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1824 xt_free_table_info(newinfo); 1831 xt_free_table_info(newinfo);
1825 return ret; 1832 return ret;
1826 } 1833 }
1827 1834
1828 /* And one copy for every other CPU */ 1835 /* And one copy for every other CPU */
1829 for_each_possible_cpu(i) 1836 for_each_possible_cpu(i)
1830 if (newinfo->entries[i] && newinfo->entries[i] != entry1) 1837 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1831 memcpy(newinfo->entries[i], entry1, newinfo->size); 1838 memcpy(newinfo->entries[i], entry1, newinfo->size);
1832 1839
1833 *pinfo = newinfo; 1840 *pinfo = newinfo;
1834 *pentry0 = entry1; 1841 *pentry0 = entry1;
1835 xt_free_table_info(info); 1842 xt_free_table_info(info);
1836 return 0; 1843 return 0;
1837 1844
1838 free_newinfo: 1845 free_newinfo:
1839 xt_free_table_info(newinfo); 1846 xt_free_table_info(newinfo);
1840 out: 1847 out:
1841 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1848 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1842 return ret; 1849 return ret;
1843 out_unlock: 1850 out_unlock:
1844 xt_compat_flush_offsets(AF_INET6); 1851 xt_compat_flush_offsets(AF_INET6);
1845 xt_compat_unlock(AF_INET6); 1852 xt_compat_unlock(AF_INET6);
1846 goto out; 1853 goto out;
1847 } 1854 }
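
translate_compat_table() is a two-pass conversion: pass one, under the compat lock, validates each 32-bit entry, measures how much it grows in the native layout and records that delta for later jump fixups; pass two allocates the native blob, expands every entry into place and then runs the usual mark_source_chains()/compat_check_entry() validation on the result. Below is a deliberately generic sketch of the two-pass grow-and-copy pattern; the records are plain byte strings, not ip6t entries.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rec { unsigned int compat_len, native_len; const char *payload; };

int main(void)
{
	struct rec recs[] = {
		{ 8, 16, "entry-0" },
		{ 12, 20, "entry-1" },
	};
	unsigned int i, native_size = 0, pos = 0;
	char *blob;

	/* pass 1: compute the total native size (the kernel also records
	 * per-entry offsets here for later jump fixups) */
	for (i = 0; i < 2; i++)
		native_size += recs[i].native_len;

	/* pass 2: allocate once, expand each record into its native slot */
	blob = calloc(1, native_size);
	if (!blob)
		return 1;
	for (i = 0; i < 2; i++) {
		memcpy(blob + pos, recs[i].payload, strlen(recs[i].payload) + 1);
		pos += recs[i].native_len;	/* the padding models the growth */
	}

	printf("compat %u bytes -> native %u bytes\n",
	       recs[0].compat_len + recs[1].compat_len, native_size);
	free(blob);
	return 0;
}
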
1848 1855
1849 static int 1856 static int
1850 compat_do_replace(struct net *net, void __user *user, unsigned int len) 1857 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1851 { 1858 {
1852 int ret; 1859 int ret;
1853 struct compat_ip6t_replace tmp; 1860 struct compat_ip6t_replace tmp;
1854 struct xt_table_info *newinfo; 1861 struct xt_table_info *newinfo;
1855 void *loc_cpu_entry; 1862 void *loc_cpu_entry;
1856 1863
1857 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1864 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1858 return -EFAULT; 1865 return -EFAULT;
1859 1866
1860 /* overflow check */ 1867 /* overflow check */
1861 if (tmp.size >= INT_MAX / num_possible_cpus()) 1868 if (tmp.size >= INT_MAX / num_possible_cpus())
1862 return -ENOMEM; 1869 return -ENOMEM;
1863 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1870 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1864 return -ENOMEM; 1871 return -ENOMEM;
1865 1872
1866 newinfo = xt_alloc_table_info(tmp.size); 1873 newinfo = xt_alloc_table_info(tmp.size);
1867 if (!newinfo) 1874 if (!newinfo)
1868 return -ENOMEM; 1875 return -ENOMEM;
1869 1876
1870 /* choose the copy that is on our node/cpu */ 1877 /* choose the copy that is on our node/cpu */
1871 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1878 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1872 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), 1879 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1873 tmp.size) != 0) { 1880 tmp.size) != 0) {
1874 ret = -EFAULT; 1881 ret = -EFAULT;
1875 goto free_newinfo; 1882 goto free_newinfo;
1876 } 1883 }
1877 1884
1878 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, 1885 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1879 &newinfo, &loc_cpu_entry, tmp.size, 1886 &newinfo, &loc_cpu_entry, tmp.size,
1880 tmp.num_entries, tmp.hook_entry, 1887 tmp.num_entries, tmp.hook_entry,
1881 tmp.underflow); 1888 tmp.underflow);
1882 if (ret != 0) 1889 if (ret != 0)
1883 goto free_newinfo; 1890 goto free_newinfo;
1884 1891
1885 duprintf("compat_do_replace: Translated table\n"); 1892 duprintf("compat_do_replace: Translated table\n");
1886 1893
1887 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1894 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1888 tmp.num_counters, compat_ptr(tmp.counters)); 1895 tmp.num_counters, compat_ptr(tmp.counters));
1889 if (ret) 1896 if (ret)
1890 goto free_newinfo_untrans; 1897 goto free_newinfo_untrans;
1891 return 0; 1898 return 0;
1892 1899
1893 free_newinfo_untrans: 1900 free_newinfo_untrans:
1894 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); 1901 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1895 free_newinfo: 1902 free_newinfo:
1896 xt_free_table_info(newinfo); 1903 xt_free_table_info(newinfo);
1897 return ret; 1904 return ret;
1898 } 1905 }
1899 1906
1900 static int 1907 static int
1901 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, 1908 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1902 unsigned int len) 1909 unsigned int len)
1903 { 1910 {
1904 int ret; 1911 int ret;
1905 1912
1906 if (!capable(CAP_NET_ADMIN)) 1913 if (!capable(CAP_NET_ADMIN))
1907 return -EPERM; 1914 return -EPERM;
1908 1915
1909 switch (cmd) { 1916 switch (cmd) {
1910 case IP6T_SO_SET_REPLACE: 1917 case IP6T_SO_SET_REPLACE:
1911 ret = compat_do_replace(sock_net(sk), user, len); 1918 ret = compat_do_replace(sock_net(sk), user, len);
1912 break; 1919 break;
1913 1920
1914 case IP6T_SO_SET_ADD_COUNTERS: 1921 case IP6T_SO_SET_ADD_COUNTERS:
1915 ret = do_add_counters(sock_net(sk), user, len, 1); 1922 ret = do_add_counters(sock_net(sk), user, len, 1);
1916 break; 1923 break;
1917 1924
1918 default: 1925 default:
1919 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); 1926 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1920 ret = -EINVAL; 1927 ret = -EINVAL;
1921 } 1928 }
1922 1929
1923 return ret; 1930 return ret;
1924 } 1931 }
1925 1932
1926 struct compat_ip6t_get_entries { 1933 struct compat_ip6t_get_entries {
1927 char name[IP6T_TABLE_MAXNAMELEN]; 1934 char name[IP6T_TABLE_MAXNAMELEN];
1928 compat_uint_t size; 1935 compat_uint_t size;
1929 struct compat_ip6t_entry entrytable[0]; 1936 struct compat_ip6t_entry entrytable[0];
1930 }; 1937 };
1931 1938
1932 static int 1939 static int
1933 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, 1940 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1934 void __user *userptr) 1941 void __user *userptr)
1935 { 1942 {
1936 struct xt_counters *counters; 1943 struct xt_counters *counters;
1937 const struct xt_table_info *private = table->private; 1944 const struct xt_table_info *private = table->private;
1938 void __user *pos; 1945 void __user *pos;
1939 unsigned int size; 1946 unsigned int size;
1940 int ret = 0; 1947 int ret = 0;
1941 const void *loc_cpu_entry; 1948 const void *loc_cpu_entry;
1942 unsigned int i = 0; 1949 unsigned int i = 0;
1943 1950
1944 counters = alloc_counters(table); 1951 counters = alloc_counters(table);
1945 if (IS_ERR(counters)) 1952 if (IS_ERR(counters))
1946 return PTR_ERR(counters); 1953 return PTR_ERR(counters);
1947 1954
1948 /* choose the copy that is on our node/cpu, ... 1955 /* choose the copy that is on our node/cpu, ...
1949 * This choice is lazy (because current thread is 1956 * This choice is lazy (because current thread is
1950 * allowed to migrate to another cpu) 1957 * allowed to migrate to another cpu)
1951 */ 1958 */
1952 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1959 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1953 pos = userptr; 1960 pos = userptr;
1954 size = total_size; 1961 size = total_size;
1955 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size, 1962 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1956 compat_copy_entry_to_user, 1963 compat_copy_entry_to_user,
1957 &pos, &size, counters, &i); 1964 &pos, &size, counters, &i);
1958 1965
1959 vfree(counters); 1966 vfree(counters);
1960 return ret; 1967 return ret;
1961 } 1968 }
1962 1969
1963 static int 1970 static int
1964 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, 1971 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1965 int *len) 1972 int *len)
1966 { 1973 {
1967 int ret; 1974 int ret;
1968 struct compat_ip6t_get_entries get; 1975 struct compat_ip6t_get_entries get;
1969 struct xt_table *t; 1976 struct xt_table *t;
1970 1977
1971 if (*len < sizeof(get)) { 1978 if (*len < sizeof(get)) {
1972 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); 1979 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1973 return -EINVAL; 1980 return -EINVAL;
1974 } 1981 }
1975 1982
1976 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1983 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1977 return -EFAULT; 1984 return -EFAULT;
1978 1985
1979 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { 1986 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1980 duprintf("compat_get_entries: %u != %zu\n", 1987 duprintf("compat_get_entries: %u != %zu\n",
1981 *len, sizeof(get) + get.size); 1988 *len, sizeof(get) + get.size);
1982 return -EINVAL; 1989 return -EINVAL;
1983 } 1990 }
1984 1991
1985 xt_compat_lock(AF_INET6); 1992 xt_compat_lock(AF_INET6);
1986 t = xt_find_table_lock(net, AF_INET6, get.name); 1993 t = xt_find_table_lock(net, AF_INET6, get.name);
1987 if (t && !IS_ERR(t)) { 1994 if (t && !IS_ERR(t)) {
1988 const struct xt_table_info *private = t->private; 1995 const struct xt_table_info *private = t->private;
1989 struct xt_table_info info; 1996 struct xt_table_info info;
1990 duprintf("t->private->number = %u\n", private->number); 1997 duprintf("t->private->number = %u\n", private->number);
1991 ret = compat_table_info(private, &info); 1998 ret = compat_table_info(private, &info);
1992 if (!ret && get.size == info.size) { 1999 if (!ret && get.size == info.size) {
1993 ret = compat_copy_entries_to_user(private->size, 2000 ret = compat_copy_entries_to_user(private->size,
1994 t, uptr->entrytable); 2001 t, uptr->entrytable);
1995 } else if (!ret) { 2002 } else if (!ret) {
1996 duprintf("compat_get_entries: I've got %u not %u!\n", 2003 duprintf("compat_get_entries: I've got %u not %u!\n",
1997 private->size, get.size); 2004 private->size, get.size);
1998 ret = -EAGAIN; 2005 ret = -EAGAIN;
1999 } 2006 }
2000 xt_compat_flush_offsets(AF_INET6); 2007 xt_compat_flush_offsets(AF_INET6);
2001 module_put(t->me); 2008 module_put(t->me);
2002 xt_table_unlock(t); 2009 xt_table_unlock(t);
2003 } else 2010 } else
2004 ret = t ? PTR_ERR(t) : -ENOENT; 2011 ret = t ? PTR_ERR(t) : -ENOENT;
2005 2012
2006 xt_compat_unlock(AF_INET6); 2013 xt_compat_unlock(AF_INET6);
2007 return ret; 2014 return ret;
2008 } 2015 }
2009 2016
2010 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); 2017 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2011 2018
2012 static int 2019 static int
2013 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 2020 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2014 { 2021 {
2015 int ret; 2022 int ret;
2016 2023
2017 if (!capable(CAP_NET_ADMIN)) 2024 if (!capable(CAP_NET_ADMIN))
2018 return -EPERM; 2025 return -EPERM;
2019 2026
2020 switch (cmd) { 2027 switch (cmd) {
2021 case IP6T_SO_GET_INFO: 2028 case IP6T_SO_GET_INFO:
2022 ret = get_info(sock_net(sk), user, len, 1); 2029 ret = get_info(sock_net(sk), user, len, 1);
2023 break; 2030 break;
2024 case IP6T_SO_GET_ENTRIES: 2031 case IP6T_SO_GET_ENTRIES:
2025 ret = compat_get_entries(sock_net(sk), user, len); 2032 ret = compat_get_entries(sock_net(sk), user, len);
2026 break; 2033 break;
2027 default: 2034 default:
2028 ret = do_ip6t_get_ctl(sk, cmd, user, len); 2035 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2029 } 2036 }
2030 return ret; 2037 return ret;
2031 } 2038 }
2032 #endif 2039 #endif
2033 2040
2034 static int 2041 static int
2035 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 2042 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2036 { 2043 {
2037 int ret; 2044 int ret;
2038 2045
2039 if (!capable(CAP_NET_ADMIN)) 2046 if (!capable(CAP_NET_ADMIN))
2040 return -EPERM; 2047 return -EPERM;
2041 2048
2042 switch (cmd) { 2049 switch (cmd) {
2043 case IP6T_SO_SET_REPLACE: 2050 case IP6T_SO_SET_REPLACE:
2044 ret = do_replace(sock_net(sk), user, len); 2051 ret = do_replace(sock_net(sk), user, len);
2045 break; 2052 break;
2046 2053
2047 case IP6T_SO_SET_ADD_COUNTERS: 2054 case IP6T_SO_SET_ADD_COUNTERS:
2048 ret = do_add_counters(sock_net(sk), user, len, 0); 2055 ret = do_add_counters(sock_net(sk), user, len, 0);
2049 break; 2056 break;
2050 2057
2051 default: 2058 default:
2052 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); 2059 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2053 ret = -EINVAL; 2060 ret = -EINVAL;
2054 } 2061 }
2055 2062
2056 return ret; 2063 return ret;
2057 } 2064 }
2058 2065
2059 static int 2066 static int
2060 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 2067 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2061 { 2068 {
2062 int ret; 2069 int ret;
2063 2070
2064 if (!capable(CAP_NET_ADMIN)) 2071 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM; 2072 return -EPERM;
2066 2073
2067 switch (cmd) { 2074 switch (cmd) {
2068 case IP6T_SO_GET_INFO: 2075 case IP6T_SO_GET_INFO:
2069 ret = get_info(sock_net(sk), user, len, 0); 2076 ret = get_info(sock_net(sk), user, len, 0);
2070 break; 2077 break;
2071 2078
2072 case IP6T_SO_GET_ENTRIES: 2079 case IP6T_SO_GET_ENTRIES:
2073 ret = get_entries(sock_net(sk), user, len); 2080 ret = get_entries(sock_net(sk), user, len);
2074 break; 2081 break;
2075 2082
2076 case IP6T_SO_GET_REVISION_MATCH: 2083 case IP6T_SO_GET_REVISION_MATCH:
2077 case IP6T_SO_GET_REVISION_TARGET: { 2084 case IP6T_SO_GET_REVISION_TARGET: {
2078 struct ip6t_get_revision rev; 2085 struct ip6t_get_revision rev;
2079 int target; 2086 int target;
2080 2087
2081 if (*len != sizeof(rev)) { 2088 if (*len != sizeof(rev)) {
2082 ret = -EINVAL; 2089 ret = -EINVAL;
2083 break; 2090 break;
2084 } 2091 }
2085 if (copy_from_user(&rev, user, sizeof(rev)) != 0) { 2092 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2086 ret = -EFAULT; 2093 ret = -EFAULT;
2087 break; 2094 break;
2088 } 2095 }
2089 2096
2090 if (cmd == IP6T_SO_GET_REVISION_TARGET) 2097 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2091 target = 1; 2098 target = 1;
2092 else 2099 else
2093 target = 0; 2100 target = 0;
2094 2101
2095 try_then_request_module(xt_find_revision(AF_INET6, rev.name, 2102 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2096 rev.revision, 2103 rev.revision,
2097 target, &ret), 2104 target, &ret),
2098 "ip6t_%s", rev.name); 2105 "ip6t_%s", rev.name);
2099 break; 2106 break;
2100 } 2107 }
2101 2108
2102 default: 2109 default:
2103 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); 2110 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2104 ret = -EINVAL; 2111 ret = -EINVAL;
2105 } 2112 }
2106 2113
2107 return ret; 2114 return ret;
2108 } 2115 }
2109 2116
2110 struct xt_table *ip6t_register_table(struct net *net, 2117 struct xt_table *ip6t_register_table(struct net *net,
2111 const struct xt_table *table, 2118 const struct xt_table *table,
2112 const struct ip6t_replace *repl) 2119 const struct ip6t_replace *repl)
2113 { 2120 {
2114 int ret; 2121 int ret;
2115 struct xt_table_info *newinfo; 2122 struct xt_table_info *newinfo;
2116 struct xt_table_info bootstrap 2123 struct xt_table_info bootstrap
2117 = { 0, 0, 0, { 0 }, { 0 }, { } }; 2124 = { 0, 0, 0, { 0 }, { 0 }, { } };
2118 void *loc_cpu_entry; 2125 void *loc_cpu_entry;
2119 struct xt_table *new_table; 2126 struct xt_table *new_table;
2120 2127
2121 newinfo = xt_alloc_table_info(repl->size); 2128 newinfo = xt_alloc_table_info(repl->size);
2122 if (!newinfo) { 2129 if (!newinfo) {
2123 ret = -ENOMEM; 2130 ret = -ENOMEM;
2124 goto out; 2131 goto out;
2125 } 2132 }
2126 2133
2127 /* choose the copy on our node/cpu, but don't care about preemption */ 2134 /* choose the copy on our node/cpu, but don't care about preemption */
2128 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2135 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2129 memcpy(loc_cpu_entry, repl->entries, repl->size); 2136 memcpy(loc_cpu_entry, repl->entries, repl->size);
2130 2137
2131 ret = translate_table(net, table->name, table->valid_hooks, 2138 ret = translate_table(net, table->name, table->valid_hooks,
2132 newinfo, loc_cpu_entry, repl->size, 2139 newinfo, loc_cpu_entry, repl->size,
2133 repl->num_entries, 2140 repl->num_entries,
2134 repl->hook_entry, 2141 repl->hook_entry,
2135 repl->underflow); 2142 repl->underflow);
2136 if (ret != 0) 2143 if (ret != 0)
2137 goto out_free; 2144 goto out_free;
2138 2145
2139 new_table = xt_register_table(net, table, &bootstrap, newinfo); 2146 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2140 if (IS_ERR(new_table)) { 2147 if (IS_ERR(new_table)) {
2141 ret = PTR_ERR(new_table); 2148 ret = PTR_ERR(new_table);
2142 goto out_free; 2149 goto out_free;
2143 } 2150 }
2144 return new_table; 2151 return new_table;
2145 2152
2146 out_free: 2153 out_free:
2147 xt_free_table_info(newinfo); 2154 xt_free_table_info(newinfo);
2148 out: 2155 out:
2149 return ERR_PTR(ret); 2156 return ERR_PTR(ret);
2150 } 2157 }
2151 2158
2152 void ip6t_unregister_table(struct net *net, struct xt_table *table) 2159 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2153 { 2160 {
2154 struct xt_table_info *private; 2161 struct xt_table_info *private;
2155 void *loc_cpu_entry; 2162 void *loc_cpu_entry;
2156 struct module *table_owner = table->me; 2163 struct module *table_owner = table->me;
2157 2164
2158 private = xt_unregister_table(table); 2165 private = xt_unregister_table(table);
2159 2166
2160 /* Decrease module usage counts and free resources */ 2167 /* Decrease module usage counts and free resources */
2161 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2168 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2162 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL); 2169 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
2163 if (private->number > private->initial_entries) 2170 if (private->number > private->initial_entries)
2164 module_put(table_owner); 2171 module_put(table_owner);
2165 xt_free_table_info(private); 2172 xt_free_table_info(private);
2166 } 2173 }
2167 2174
2168 /* Returns 1 if the type and code is matched by the range, 0 otherwise */ 2175 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2169 static inline bool 2176 static inline bool
2170 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, 2177 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2171 u_int8_t type, u_int8_t code, 2178 u_int8_t type, u_int8_t code,
2172 bool invert) 2179 bool invert)
2173 { 2180 {
2174 return (type == test_type && code >= min_code && code <= max_code) 2181 return (type == test_type && code >= min_code && code <= max_code)
2175 ^ invert; 2182 ^ invert;
2176 } 2183 }
2177 2184
2178 static bool 2185 static bool
2179 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par) 2186 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2180 { 2187 {
2181 const struct icmp6hdr *ic; 2188 const struct icmp6hdr *ic;
2182 struct icmp6hdr _icmph; 2189 struct icmp6hdr _icmph;
2183 const struct ip6t_icmp *icmpinfo = par->matchinfo; 2190 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2184 2191
2185 /* Must not be a fragment. */ 2192 /* Must not be a fragment. */
2186 if (par->fragoff != 0) 2193 if (par->fragoff != 0)
2187 return false; 2194 return false;
2188 2195
2189 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); 2196 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2190 if (ic == NULL) { 2197 if (ic == NULL) {
2191 /* We've been asked to examine this packet, and we 2198 /* We've been asked to examine this packet, and we
2192 * can't. Hence, no choice but to drop. 2199 * can't. Hence, no choice but to drop.
2193 */ 2200 */
2194 duprintf("Dropping evil ICMP tinygram.\n"); 2201 duprintf("Dropping evil ICMP tinygram.\n");
2195 *par->hotdrop = true; 2202 *par->hotdrop = true;
2196 return false; 2203 return false;
2197 } 2204 }
2198 2205
2199 return icmp6_type_code_match(icmpinfo->type, 2206 return icmp6_type_code_match(icmpinfo->type,
2200 icmpinfo->code[0], 2207 icmpinfo->code[0],
2201 icmpinfo->code[1], 2208 icmpinfo->code[1],
2202 ic->icmp6_type, ic->icmp6_code, 2209 ic->icmp6_type, ic->icmp6_code,
2203 !!(icmpinfo->invflags&IP6T_ICMP_INV)); 2210 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2204 } 2211 }
2205 2212
2206 /* Called when user tries to insert an entry of this type. */ 2213 /* Called when user tries to insert an entry of this type. */
2207 static bool icmp6_checkentry(const struct xt_mtchk_param *par) 2214 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2208 { 2215 {
2209 const struct ip6t_icmp *icmpinfo = par->matchinfo; 2216 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2210 2217
2211 /* Must specify no unknown invflags */ 2218 /* Must specify no unknown invflags */
2212 return !(icmpinfo->invflags & ~IP6T_ICMP_INV); 2219 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2213 } 2220 }
2214 2221
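Editor's note (not part of the commit): icmp6_type_code_match() above treats code[0]..code[1] as an inclusive range and XORs the outcome with the invert flag. A hedged, stand-alone restatement of that check; the helper name is invented, while struct ip6t_icmp and IP6T_ICMP_INV come from the ip6_tables header.

#include <linux/types.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

/* Hypothetical helper mirroring the match semantics above: true when the
 * given ICMPv6 type/code pair satisfies the rule, honouring IP6T_ICMP_INV. */
static bool example_icmp6_hit(__u8 type, __u8 code,
			      const struct ip6t_icmp *info)
{
	bool in_range = type == info->type &&
			code >= info->code[0] && code <= info->code[1];

	return in_range ^ !!(info->invflags & IP6T_ICMP_INV);
}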
2215 /* The built-in targets: standard (NULL) and error. */ 2222 /* The built-in targets: standard (NULL) and error. */
2216 static struct xt_target ip6t_standard_target __read_mostly = { 2223 static struct xt_target ip6t_standard_target __read_mostly = {
2217 .name = IP6T_STANDARD_TARGET, 2224 .name = IP6T_STANDARD_TARGET,
2218 .targetsize = sizeof(int), 2225 .targetsize = sizeof(int),
2219 .family = NFPROTO_IPV6, 2226 .family = NFPROTO_IPV6,
2220 #ifdef CONFIG_COMPAT 2227 #ifdef CONFIG_COMPAT
2221 .compatsize = sizeof(compat_int_t), 2228 .compatsize = sizeof(compat_int_t),
2222 .compat_from_user = compat_standard_from_user, 2229 .compat_from_user = compat_standard_from_user,
2223 .compat_to_user = compat_standard_to_user, 2230 .compat_to_user = compat_standard_to_user,
2224 #endif 2231 #endif
2225 }; 2232 };
2226 2233
2227 static struct xt_target ip6t_error_target __read_mostly = { 2234 static struct xt_target ip6t_error_target __read_mostly = {
2228 .name = IP6T_ERROR_TARGET, 2235 .name = IP6T_ERROR_TARGET,
2229 .target = ip6t_error, 2236 .target = ip6t_error,
2230 .targetsize = IP6T_FUNCTION_MAXNAMELEN, 2237 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2231 .family = NFPROTO_IPV6, 2238 .family = NFPROTO_IPV6,
2232 }; 2239 };
2233 2240
2234 static struct nf_sockopt_ops ip6t_sockopts = { 2241 static struct nf_sockopt_ops ip6t_sockopts = {
2235 .pf = PF_INET6, 2242 .pf = PF_INET6,
2236 .set_optmin = IP6T_BASE_CTL, 2243 .set_optmin = IP6T_BASE_CTL,
2237 .set_optmax = IP6T_SO_SET_MAX+1, 2244 .set_optmax = IP6T_SO_SET_MAX+1,
2238 .set = do_ip6t_set_ctl, 2245 .set = do_ip6t_set_ctl,
2239 #ifdef CONFIG_COMPAT 2246 #ifdef CONFIG_COMPAT
2240 .compat_set = compat_do_ip6t_set_ctl, 2247 .compat_set = compat_do_ip6t_set_ctl,
2241 #endif 2248 #endif
2242 .get_optmin = IP6T_BASE_CTL, 2249 .get_optmin = IP6T_BASE_CTL,
2243 .get_optmax = IP6T_SO_GET_MAX+1, 2250 .get_optmax = IP6T_SO_GET_MAX+1,
2244 .get = do_ip6t_get_ctl, 2251 .get = do_ip6t_get_ctl,
2245 #ifdef CONFIG_COMPAT 2252 #ifdef CONFIG_COMPAT
2246 .compat_get = compat_do_ip6t_get_ctl, 2253 .compat_get = compat_do_ip6t_get_ctl,
2247 #endif 2254 #endif
2248 .owner = THIS_MODULE, 2255 .owner = THIS_MODULE,
2249 }; 2256 };
2250 2257
2251 static struct xt_match icmp6_matchstruct __read_mostly = { 2258 static struct xt_match icmp6_matchstruct __read_mostly = {
2252 .name = "icmp6", 2259 .name = "icmp6",
2253 .match = icmp6_match, 2260 .match = icmp6_match,
2254 .matchsize = sizeof(struct ip6t_icmp), 2261 .matchsize = sizeof(struct ip6t_icmp),
2255 .checkentry = icmp6_checkentry, 2262 .checkentry = icmp6_checkentry,
2256 .proto = IPPROTO_ICMPV6, 2263 .proto = IPPROTO_ICMPV6,
2257 .family = NFPROTO_IPV6, 2264 .family = NFPROTO_IPV6,
2258 }; 2265 };
2259 2266
2260 static int __net_init ip6_tables_net_init(struct net *net) 2267 static int __net_init ip6_tables_net_init(struct net *net)
2261 { 2268 {
2262 return xt_proto_init(net, NFPROTO_IPV6); 2269 return xt_proto_init(net, NFPROTO_IPV6);
2263 } 2270 }
2264 2271
2265 static void __net_exit ip6_tables_net_exit(struct net *net) 2272 static void __net_exit ip6_tables_net_exit(struct net *net)
2266 { 2273 {
2267 xt_proto_fini(net, NFPROTO_IPV6); 2274 xt_proto_fini(net, NFPROTO_IPV6);
2268 } 2275 }
2269 2276
2270 static struct pernet_operations ip6_tables_net_ops = { 2277 static struct pernet_operations ip6_tables_net_ops = {
2271 .init = ip6_tables_net_init, 2278 .init = ip6_tables_net_init,
2272 .exit = ip6_tables_net_exit, 2279 .exit = ip6_tables_net_exit,
2273 }; 2280 };
2274 2281
2275 static int __init ip6_tables_init(void) 2282 static int __init ip6_tables_init(void)
2276 { 2283 {
2277 int ret; 2284 int ret;
2278 2285
2279 ret = register_pernet_subsys(&ip6_tables_net_ops); 2286 ret = register_pernet_subsys(&ip6_tables_net_ops);
2280 if (ret < 0) 2287 if (ret < 0)
2281 goto err1; 2288 goto err1;
2282 2289
2283 /* No one else will be downing sem now, so we won't sleep */ 2290 /* No one else will be downing sem now, so we won't sleep */
2284 ret = xt_register_target(&ip6t_standard_target); 2291 ret = xt_register_target(&ip6t_standard_target);
2285 if (ret < 0) 2292 if (ret < 0)
2286 goto err2; 2293 goto err2;
2287 ret = xt_register_target(&ip6t_error_target); 2294 ret = xt_register_target(&ip6t_error_target);
2288 if (ret < 0) 2295 if (ret < 0)
2289 goto err3; 2296 goto err3;
2290 ret = xt_register_match(&icmp6_matchstruct); 2297 ret = xt_register_match(&icmp6_matchstruct);
2291 if (ret < 0) 2298 if (ret < 0)
2292 goto err4; 2299 goto err4;
2293 2300
2294 /* Register setsockopt */ 2301 /* Register setsockopt */
2295 ret = nf_register_sockopt(&ip6t_sockopts); 2302 ret = nf_register_sockopt(&ip6t_sockopts);
2296 if (ret < 0) 2303 if (ret < 0)
2297 goto err5; 2304 goto err5;
2298 2305
2299 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); 2306 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2300 return 0; 2307 return 0;
2301 2308
2302 err5: 2309 err5:
2303 xt_unregister_match(&icmp6_matchstruct); 2310 xt_unregister_match(&icmp6_matchstruct);
2304 err4: 2311 err4:
2305 xt_unregister_target(&ip6t_error_target); 2312 xt_unregister_target(&ip6t_error_target);
2306 err3: 2313 err3:
2307 xt_unregister_target(&ip6t_standard_target); 2314 xt_unregister_target(&ip6t_standard_target);
2308 err2: 2315 err2:
2309 unregister_pernet_subsys(&ip6_tables_net_ops); 2316 unregister_pernet_subsys(&ip6_tables_net_ops);
2310 err1: 2317 err1:
2311 return ret; 2318 return ret;
2312 } 2319 }
2313 2320
2314 static void __exit ip6_tables_fini(void) 2321 static void __exit ip6_tables_fini(void)
2315 { 2322 {
2316 nf_unregister_sockopt(&ip6t_sockopts); 2323 nf_unregister_sockopt(&ip6t_sockopts);
2317 2324
2318 xt_unregister_match(&icmp6_matchstruct); 2325 xt_unregister_match(&icmp6_matchstruct);
2319 xt_unregister_target(&ip6t_error_target); 2326 xt_unregister_target(&ip6t_error_target);
2320 xt_unregister_target(&ip6t_standard_target); 2327 xt_unregister_target(&ip6t_standard_target);
2321 2328
2322 unregister_pernet_subsys(&ip6_tables_net_ops); 2329 unregister_pernet_subsys(&ip6_tables_net_ops);
2323 } 2330 }
2324 2331
2325 /* 2332 /*
2326 * Find the offset to the specified header or the protocol number of the last 2333 * Find the offset to the specified header or the protocol number of the last
2327 * header if target < 0. The "last header" is the transport protocol header, 2334 * header if target < 0. The "last header" is the transport protocol header,
2328 * ESP, or "No next header". 2335 * ESP, or "No next header".
2329 * 2336 *
2330 * If the target header is found, its offset is set in *offset and the 2337 * If the target header is found, its offset is set in *offset and the
2331 * protocol number is returned. Otherwise, -1 is returned. 2338 * protocol number is returned. Otherwise, -1 is returned.
2332 * 2339 *
2333 * If the first fragment doesn't contain the final protocol header or 2340 * If the first fragment doesn't contain the final protocol header or
2334 * NEXTHDR_NONE, it is considered invalid. 2341 * NEXTHDR_NONE, it is considered invalid.
2335 * 2342 *
2336 * Note that a non-first fragment is a special case in which "the protocol 2343 * Note that a non-first fragment is a special case in which "the protocol
2337 * number of the last header" is the "next header" field in the Fragment 2344 * number of the last header" is the "next header" field in the Fragment
2338 * header. In this case, *offset is meaningless and the fragment offset is 2345 * header. In this case, *offset is meaningless and the fragment offset is
2339 * stored in *fragoff if fragoff isn't NULL. 2346 * stored in *fragoff if fragoff isn't NULL.
2340 * 2347 *
2341 */ 2348 */
2342 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 2349 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2343 int target, unsigned short *fragoff) 2350 int target, unsigned short *fragoff)
2344 { 2351 {
2345 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); 2352 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2346 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 2353 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2347 unsigned int len = skb->len - start; 2354 unsigned int len = skb->len - start;
2348 2355
2349 if (fragoff) 2356 if (fragoff)
2350 *fragoff = 0; 2357 *fragoff = 0;
2351 2358
2352 while (nexthdr != target) { 2359 while (nexthdr != target) {
2353 struct ipv6_opt_hdr _hdr, *hp; 2360 struct ipv6_opt_hdr _hdr, *hp;
2354 unsigned int hdrlen; 2361 unsigned int hdrlen;
2355 2362
2356 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { 2363 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2357 if (target < 0) 2364 if (target < 0)
2358 break; 2365 break;
2359 return -ENOENT; 2366 return -ENOENT;
2360 } 2367 }
2361 2368
2362 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); 2369 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2363 if (hp == NULL) 2370 if (hp == NULL)
2364 return -EBADMSG; 2371 return -EBADMSG;
2365 if (nexthdr == NEXTHDR_FRAGMENT) { 2372 if (nexthdr == NEXTHDR_FRAGMENT) {
2366 unsigned short _frag_off; 2373 unsigned short _frag_off;
2367 __be16 *fp; 2374 __be16 *fp;
2368 fp = skb_header_pointer(skb, 2375 fp = skb_header_pointer(skb,
2369 start+offsetof(struct frag_hdr, 2376 start+offsetof(struct frag_hdr,
2370 frag_off), 2377 frag_off),
2371 sizeof(_frag_off), 2378 sizeof(_frag_off),
2372 &_frag_off); 2379 &_frag_off);
2373 if (fp == NULL) 2380 if (fp == NULL)
2374 return -EBADMSG; 2381 return -EBADMSG;
2375 2382
2376 _frag_off = ntohs(*fp) & ~0x7; 2383 _frag_off = ntohs(*fp) & ~0x7;
2377 if (_frag_off) { 2384 if (_frag_off) {
2378 if (target < 0 && 2385 if (target < 0 &&
2379 ((!ipv6_ext_hdr(hp->nexthdr)) || 2386 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2380 hp->nexthdr == NEXTHDR_NONE)) { 2387 hp->nexthdr == NEXTHDR_NONE)) {
2381 if (fragoff) 2388 if (fragoff)
2382 *fragoff = _frag_off; 2389 *fragoff = _frag_off;
2383 return hp->nexthdr; 2390 return hp->nexthdr;
2384 } 2391 }
2385 return -ENOENT; 2392 return -ENOENT;
2386 } 2393 }
2387 hdrlen = 8; 2394 hdrlen = 8;
2388 } else if (nexthdr == NEXTHDR_AUTH) 2395 } else if (nexthdr == NEXTHDR_AUTH)
2389 hdrlen = (hp->hdrlen + 2) << 2; 2396 hdrlen = (hp->hdrlen + 2) << 2;
2390 else 2397 else
2391 hdrlen = ipv6_optlen(hp); 2398 hdrlen = ipv6_optlen(hp);
2392 2399
2393 nexthdr = hp->nexthdr; 2400 nexthdr = hp->nexthdr;
2394 len -= hdrlen; 2401 len -= hdrlen;
2395 start += hdrlen; 2402 start += hdrlen;
2396 } 2403 }
2397 2404
2398 *offset = start; 2405 *offset = start;
2399 return nexthdr; 2406 return nexthdr;
2400 } 2407 }
2401 2408
2402 EXPORT_SYMBOL(ip6t_register_table); 2409 EXPORT_SYMBOL(ip6t_register_table);
2403 EXPORT_SYMBOL(ip6t_unregister_table); 2410 EXPORT_SYMBOL(ip6t_unregister_table);
2404 EXPORT_SYMBOL(ip6t_do_table); 2411 EXPORT_SYMBOL(ip6t_do_table);
2405 EXPORT_SYMBOL(ip6t_ext_hdr); 2412 EXPORT_SYMBOL(ip6t_ext_hdr);
2406 EXPORT_SYMBOL(ipv6_find_hdr); 2413 EXPORT_SYMBOL(ipv6_find_hdr);
2407 2414
2408 module_init(ip6_tables_init); 2415 module_init(ip6_tables_init);
2409 module_exit(ip6_tables_fini); 2416 module_exit(ip6_tables_fini);
2410 2417
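Editor's note (not part of the commit): ipv6_find_hdr() above walks the IPv6 extension-header chain and is exported for other netfilter code. A minimal, hedged sketch of a hypothetical caller that uses it to locate a packet's TCP header; only ipv6_find_hdr() and the standard skb helpers are taken from the tree, the wrapper itself is invented for illustration.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

/* Hypothetical helper: return a pointer to the TCP header of an IPv6 skb,
 * or NULL if the header chain cannot be walked or the packet is not TCP. */
static const struct tcphdr *example_find_tcp(const struct sk_buff *skb,
					     struct tcphdr *buf)
{
	unsigned int thoff = 0;
	int proto = ipv6_find_hdr(skb, &thoff, NEXTHDR_TCP, NULL);

	if (proto != NEXTHDR_TCP)	/* -ENOENT, -EBADMSG, or another proto */
		return NULL;
	return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
}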
net/ipv6/netfilter/ip6table_filter.c
1 /* 1 /*
2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. 2 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/moduleparam.h> 13 #include <linux/moduleparam.h>
14 #include <linux/netfilter_ipv6/ip6_tables.h> 14 #include <linux/netfilter_ipv6/ip6_tables.h>
15 15
16 MODULE_LICENSE("GPL"); 16 MODULE_LICENSE("GPL");
17 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 17 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
18 MODULE_DESCRIPTION("ip6tables filter table"); 18 MODULE_DESCRIPTION("ip6tables filter table");
19 19
20 #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ 20 #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \
21 (1 << NF_INET_FORWARD) | \ 21 (1 << NF_INET_FORWARD) | \
22 (1 << NF_INET_LOCAL_OUT)) 22 (1 << NF_INET_LOCAL_OUT))
23 23
24 static struct
25 {
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[3];
28 struct ip6t_error term;
29 } initial_table __net_initdata = {
30 .repl = {
31 .name = "filter",
32 .valid_hooks = FILTER_VALID_HOOKS,
33 .num_entries = 4,
34 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_LOCAL_IN] = 0,
37 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
38 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
39 },
40 .underflow = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
44 },
45 },
46 .entries = {
47 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
48 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
49 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
50 },
51 .term = IP6T_ERROR_INIT, /* ERROR */
52 };
53
54 static const struct xt_table packet_filter = { 24 static const struct xt_table packet_filter = {
55 .name = "filter", 25 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 26 .valid_hooks = FILTER_VALID_HOOKS,
57 .me = THIS_MODULE, 27 .me = THIS_MODULE,
58 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
59 .priority = NF_IP6_PRI_FILTER, 29 .priority = NF_IP6_PRI_FILTER,
60 }; 30 };
61 31
62 /* The work comes in here from netfilter.c. */ 32 /* The work comes in here from netfilter.c. */
63 static unsigned int 33 static unsigned int
64 ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, 34 ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
65 const struct net_device *in, const struct net_device *out, 35 const struct net_device *in, const struct net_device *out,
66 int (*okfn)(struct sk_buff *)) 36 int (*okfn)(struct sk_buff *))
67 { 37 {
68 const struct net *net = dev_net((in != NULL) ? in : out); 38 const struct net *net = dev_net((in != NULL) ? in : out);
69 39
70 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter); 40 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
71 } 41 }
72 42
73 static struct nf_hook_ops *filter_ops __read_mostly; 43 static struct nf_hook_ops *filter_ops __read_mostly;
74 44
75 /* Default to forward because I got too much mail already. */ 45 /* Default to forward because I got too much mail already. */
76 static int forward = NF_ACCEPT; 46 static int forward = NF_ACCEPT;
77 module_param(forward, bool, 0000); 47 module_param(forward, bool, 0000);
78 48
79 static int __net_init ip6table_filter_net_init(struct net *net) 49 static int __net_init ip6table_filter_net_init(struct net *net)
80 { 50 {
81 /* Register table */ 51 struct ip6t_replace *repl;
52
53 repl = ip6t_alloc_initial_table(&packet_filter);
54 if (repl == NULL)
55 return -ENOMEM;
56 /* Entry 1 is the FORWARD hook */
57 ((struct ip6t_standard *)repl->entries)[1].target.verdict =
58 -forward - 1;
59
82 net->ipv6.ip6table_filter = 60 net->ipv6.ip6table_filter =
83 ip6t_register_table(net, &packet_filter, &initial_table.repl); 61 ip6t_register_table(net, &packet_filter, repl);
62 kfree(repl);
84 if (IS_ERR(net->ipv6.ip6table_filter)) 63 if (IS_ERR(net->ipv6.ip6table_filter))
85 return PTR_ERR(net->ipv6.ip6table_filter); 64 return PTR_ERR(net->ipv6.ip6table_filter);
86 return 0; 65 return 0;
87 } 66 }
88 67
89 static void __net_exit ip6table_filter_net_exit(struct net *net) 68 static void __net_exit ip6table_filter_net_exit(struct net *net)
90 { 69 {
91 ip6t_unregister_table(net, net->ipv6.ip6table_filter); 70 ip6t_unregister_table(net, net->ipv6.ip6table_filter);
92 } 71 }
93 72
94 static struct pernet_operations ip6table_filter_net_ops = { 73 static struct pernet_operations ip6table_filter_net_ops = {
95 .init = ip6table_filter_net_init, 74 .init = ip6table_filter_net_init,
96 .exit = ip6table_filter_net_exit, 75 .exit = ip6table_filter_net_exit,
97 }; 76 };
98 77
99 static int __init ip6table_filter_init(void) 78 static int __init ip6table_filter_init(void)
100 { 79 {
101 int ret; 80 int ret;
102 81
103 if (forward < 0 || forward > NF_MAX_VERDICT) { 82 if (forward < 0 || forward > NF_MAX_VERDICT) {
104 printk("iptables forward must be 0 or 1\n"); 83 printk("iptables forward must be 0 or 1\n");
105 return -EINVAL; 84 return -EINVAL;
106 } 85 }
107
108 /* Entry 1 is the FORWARD hook */
109 initial_table.entries[1].target.verdict = -forward - 1;
110 86
111 ret = register_pernet_subsys(&ip6table_filter_net_ops); 87 ret = register_pernet_subsys(&ip6table_filter_net_ops);
112 if (ret < 0) 88 if (ret < 0)
113 return ret; 89 return ret;
114 90
115 /* Register hooks */ 91 /* Register hooks */
116 filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook); 92 filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
117 if (IS_ERR(filter_ops)) { 93 if (IS_ERR(filter_ops)) {
118 ret = PTR_ERR(filter_ops); 94 ret = PTR_ERR(filter_ops);
119 goto cleanup_table; 95 goto cleanup_table;
120 } 96 }
121 97
122 return ret; 98 return ret;
123 99
124 cleanup_table: 100 cleanup_table:
125 unregister_pernet_subsys(&ip6table_filter_net_ops); 101 unregister_pernet_subsys(&ip6table_filter_net_ops);
126 return ret; 102 return ret;
127 } 103 }
128 104
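Editor's note (not part of the commit): in the rewritten ip6table_filter_net_init() above, the initial table is no longer a static blob; a writable template comes from ip6t_alloc_initial_table(), the FORWARD policy is patched in, the table is registered, and the template is freed. The "-forward - 1" patch relies on the xtables convention that a standard target stores an absolute verdict v as -v - 1 (non-negative values are jump offsets instead), so NF_DROP (0) is stored as -1 and NF_ACCEPT (1) as -2. A hedged sketch of helpers expressing that mapping, with names invented for illustration:

/* Hypothetical helpers: convert between a netfilter verdict and the value
 * stored in a standard target's ->verdict field, as patched above. */
static inline int example_store_verdict(unsigned int verdict)
{
	return -(int)verdict - 1;	/* NF_DROP(0) -> -1, NF_ACCEPT(1) -> -2 */
}

static inline unsigned int example_load_verdict(int stored)
{
	return (unsigned int)(-stored - 1);
}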
net/ipv6/netfilter/ip6table_mangle.c
1 /* 1 /*
2 * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6 2 * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6
3 * 3 *
4 * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> 4 * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/netfilter_ipv6/ip6_tables.h> 12 #include <linux/netfilter_ipv6/ip6_tables.h>
13 13
14 MODULE_LICENSE("GPL"); 14 MODULE_LICENSE("GPL");
15 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 15 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
16 MODULE_DESCRIPTION("ip6tables mangle table"); 16 MODULE_DESCRIPTION("ip6tables mangle table");
17 17
18 #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ 18 #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
19 (1 << NF_INET_LOCAL_IN) | \ 19 (1 << NF_INET_LOCAL_IN) | \
20 (1 << NF_INET_FORWARD) | \ 20 (1 << NF_INET_FORWARD) | \
21 (1 << NF_INET_LOCAL_OUT) | \ 21 (1 << NF_INET_LOCAL_OUT) | \
22 (1 << NF_INET_POST_ROUTING)) 22 (1 << NF_INET_POST_ROUTING))
23 23
24 static const struct
25 {
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[5];
28 struct ip6t_error term;
29 } initial_table __net_initdata = {
30 .repl = {
31 .name = "mangle",
32 .valid_hooks = MANGLE_VALID_HOOKS,
33 .num_entries = 6,
34 .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_PRE_ROUTING] = 0,
37 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
38 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
39 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
40 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
41 },
42 .underflow = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
45 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
48 },
49 },
50 .entries = {
51 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 },
57 .term = IP6T_ERROR_INIT, /* ERROR */
58 };
59
60 static const struct xt_table packet_mangler = { 24 static const struct xt_table packet_mangler = {
61 .name = "mangle", 25 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 26 .valid_hooks = MANGLE_VALID_HOOKS,
63 .me = THIS_MODULE, 27 .me = THIS_MODULE,
64 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
65 .priority = NF_IP6_PRI_MANGLE, 29 .priority = NF_IP6_PRI_MANGLE,
66 }; 30 };
67 31
68 static unsigned int 32 static unsigned int
69 ip6t_local_out_hook(unsigned int hook, 33 ip6t_local_out_hook(unsigned int hook,
70 struct sk_buff *skb, 34 struct sk_buff *skb,
71 const struct net_device *out, 35 const struct net_device *out,
72 int (*okfn)(struct sk_buff *)) 36 int (*okfn)(struct sk_buff *))
73 { 37 {
74 38
75 unsigned int ret; 39 unsigned int ret;
76 struct in6_addr saddr, daddr; 40 struct in6_addr saddr, daddr;
77 u_int8_t hop_limit; 41 u_int8_t hop_limit;
78 u_int32_t flowlabel, mark; 42 u_int32_t flowlabel, mark;
79 43
80 #if 0 44 #if 0
81 /* root is playing with raw sockets. */ 45 /* root is playing with raw sockets. */
82 if (skb->len < sizeof(struct iphdr) || 46 if (skb->len < sizeof(struct iphdr) ||
83 ip_hdrlen(skb) < sizeof(struct iphdr)) { 47 ip_hdrlen(skb) < sizeof(struct iphdr)) {
84 if (net_ratelimit()) 48 if (net_ratelimit())
85 printk("ip6t_hook: happy cracking.\n"); 49 printk("ip6t_hook: happy cracking.\n");
86 return NF_ACCEPT; 50 return NF_ACCEPT;
87 } 51 }
88 #endif 52 #endif
89 53
90 /* save source/dest address, mark, hoplimit, flowlabel, priority, */ 54 /* save source/dest address, mark, hoplimit, flowlabel, priority, */
91 memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); 55 memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
92 memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); 56 memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
93 mark = skb->mark; 57 mark = skb->mark;
94 hop_limit = ipv6_hdr(skb)->hop_limit; 58 hop_limit = ipv6_hdr(skb)->hop_limit;
95 59
96 /* flowlabel and prio (includes version, which shouldn't change either) */ 60 /* flowlabel and prio (includes version, which shouldn't change either) */
97 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 61 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
98 62
99 ret = ip6t_do_table(skb, hook, NULL, out, 63 ret = ip6t_do_table(skb, hook, NULL, out,
100 dev_net(out)->ipv6.ip6table_mangle); 64 dev_net(out)->ipv6.ip6table_mangle);
101 65
102 if (ret != NF_DROP && ret != NF_STOLEN && 66 if (ret != NF_DROP && ret != NF_STOLEN &&
103 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 67 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
104 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 68 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
105 skb->mark != mark || 69 skb->mark != mark ||
106 ipv6_hdr(skb)->hop_limit != hop_limit)) 70 ipv6_hdr(skb)->hop_limit != hop_limit))
107 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; 71 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
108 72
109 return ret; 73 return ret;
110 } 74 }
111 75
112 /* The work comes in here from netfilter.c. */ 76 /* The work comes in here from netfilter.c. */
113 static unsigned int 77 static unsigned int
114 ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb, 78 ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
115 const struct net_device *in, const struct net_device *out, 79 const struct net_device *in, const struct net_device *out,
116 int (*okfn)(struct sk_buff *)) 80 int (*okfn)(struct sk_buff *))
117 { 81 {
118 if (hook == NF_INET_LOCAL_OUT) 82 if (hook == NF_INET_LOCAL_OUT)
119 return ip6t_local_out_hook(hook, skb, out, okfn); 83 return ip6t_local_out_hook(hook, skb, out, okfn);
120 84
121 /* INPUT/FORWARD */ 85 /* INPUT/FORWARD */
122 return ip6t_do_table(skb, hook, in, out, 86 return ip6t_do_table(skb, hook, in, out,
123 dev_net(in)->ipv6.ip6table_mangle); 87 dev_net(in)->ipv6.ip6table_mangle);
124 } 88 }
125 89
126 static struct nf_hook_ops *mangle_ops __read_mostly; 90 static struct nf_hook_ops *mangle_ops __read_mostly;
127 static int __net_init ip6table_mangle_net_init(struct net *net) 91 static int __net_init ip6table_mangle_net_init(struct net *net)
128 { 92 {
129 /* Register table */ 93 struct ip6t_replace *repl;
94
95 repl = ip6t_alloc_initial_table(&packet_mangler);
96 if (repl == NULL)
97 return -ENOMEM;
130 net->ipv6.ip6table_mangle = 98 net->ipv6.ip6table_mangle =
131 ip6t_register_table(net, &packet_mangler, &initial_table.repl); 99 ip6t_register_table(net, &packet_mangler, repl);
100 kfree(repl);
132 if (IS_ERR(net->ipv6.ip6table_mangle)) 101 if (IS_ERR(net->ipv6.ip6table_mangle))
133 return PTR_ERR(net->ipv6.ip6table_mangle); 102 return PTR_ERR(net->ipv6.ip6table_mangle);
134 return 0; 103 return 0;
135 } 104 }
136 105
137 static void __net_exit ip6table_mangle_net_exit(struct net *net) 106 static void __net_exit ip6table_mangle_net_exit(struct net *net)
138 { 107 {
139 ip6t_unregister_table(net, net->ipv6.ip6table_mangle); 108 ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
140 } 109 }
141 110
142 static struct pernet_operations ip6table_mangle_net_ops = { 111 static struct pernet_operations ip6table_mangle_net_ops = {
143 .init = ip6table_mangle_net_init, 112 .init = ip6table_mangle_net_init,
144 .exit = ip6table_mangle_net_exit, 113 .exit = ip6table_mangle_net_exit,
145 }; 114 };
146 115
147 static int __init ip6table_mangle_init(void) 116 static int __init ip6table_mangle_init(void)
148 { 117 {
149 int ret; 118 int ret;
150 119
151 ret = register_pernet_subsys(&ip6table_mangle_net_ops); 120 ret = register_pernet_subsys(&ip6table_mangle_net_ops);
152 if (ret < 0) 121 if (ret < 0)
153 return ret; 122 return ret;
154 123
155 /* Register hooks */ 124 /* Register hooks */
156 mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook); 125 mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
157 if (IS_ERR(mangle_ops)) { 126 if (IS_ERR(mangle_ops)) {
158 ret = PTR_ERR(mangle_ops); 127 ret = PTR_ERR(mangle_ops);
159 goto cleanup_table; 128 goto cleanup_table;
160 } 129 }
161 130
162 return ret; 131 return ret;
163 132
164 cleanup_table: 133 cleanup_table:
165 unregister_pernet_subsys(&ip6table_mangle_net_ops); 134 unregister_pernet_subsys(&ip6table_mangle_net_ops);
166 return ret; 135 return ret;
167 } 136 }
168 137
169 static void __exit ip6table_mangle_fini(void) 138 static void __exit ip6table_mangle_fini(void)
170 { 139 {
171 xt_hook_unlink(&packet_mangler, mangle_ops); 140 xt_hook_unlink(&packet_mangler, mangle_ops);
172 unregister_pernet_subsys(&ip6table_mangle_net_ops); 141 unregister_pernet_subsys(&ip6table_mangle_net_ops);
net/ipv6/netfilter/ip6table_raw.c
1 /* 1 /*
2 * IPv6 raw table, a port of the IPv4 raw table to IPv6 2 * IPv6 raw table, a port of the IPv4 raw table to IPv6
3 * 3 *
4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5 */ 5 */
6 #include <linux/module.h> 6 #include <linux/module.h>
7 #include <linux/netfilter_ipv6/ip6_tables.h> 7 #include <linux/netfilter_ipv6/ip6_tables.h>
8 8
9 #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 9 #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
10 10
11 static const struct
12 {
13 struct ip6t_replace repl;
14 struct ip6t_standard entries[2];
15 struct ip6t_error term;
16 } initial_table __net_initdata = {
17 .repl = {
18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS,
20 .num_entries = 3,
21 .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
22 .hook_entry = {
23 [NF_INET_PRE_ROUTING] = 0,
24 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
25 },
26 .underflow = {
27 [NF_INET_PRE_ROUTING] = 0,
28 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
29 },
30 },
31 .entries = {
32 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
33 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
34 },
35 .term = IP6T_ERROR_INIT, /* ERROR */
36 };
37
38 static const struct xt_table packet_raw = { 11 static const struct xt_table packet_raw = {
39 .name = "raw", 12 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 13 .valid_hooks = RAW_VALID_HOOKS,
41 .me = THIS_MODULE, 14 .me = THIS_MODULE,
42 .af = NFPROTO_IPV6, 15 .af = NFPROTO_IPV6,
43 .priority = NF_IP6_PRI_FIRST, 16 .priority = NF_IP6_PRI_FIRST,
44 }; 17 };
45 18
46 /* The work comes in here from netfilter.c. */ 19 /* The work comes in here from netfilter.c. */
47 static unsigned int 20 static unsigned int
48 ip6table_raw_hook(unsigned int hook, struct sk_buff *skb, 21 ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
49 const struct net_device *in, const struct net_device *out, 22 const struct net_device *in, const struct net_device *out,
50 int (*okfn)(struct sk_buff *)) 23 int (*okfn)(struct sk_buff *))
51 { 24 {
52 const struct net *net = dev_net((in != NULL) ? in : out); 25 const struct net *net = dev_net((in != NULL) ? in : out);
53 26
54 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw); 27 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
55 } 28 }
56 29
57 static struct nf_hook_ops *rawtable_ops __read_mostly; 30 static struct nf_hook_ops *rawtable_ops __read_mostly;
58 31
59 static int __net_init ip6table_raw_net_init(struct net *net) 32 static int __net_init ip6table_raw_net_init(struct net *net)
60 { 33 {
61 /* Register table */ 34 struct ip6t_replace *repl;
35
36 repl = ip6t_alloc_initial_table(&packet_raw);
37 if (repl == NULL)
38 return -ENOMEM;
62 net->ipv6.ip6table_raw = 39 net->ipv6.ip6table_raw =
63 ip6t_register_table(net, &packet_raw, &initial_table.repl); 40 ip6t_register_table(net, &packet_raw, repl);
41 kfree(repl);
64 if (IS_ERR(net->ipv6.ip6table_raw)) 42 if (IS_ERR(net->ipv6.ip6table_raw))
65 return PTR_ERR(net->ipv6.ip6table_raw); 43 return PTR_ERR(net->ipv6.ip6table_raw);
66 return 0; 44 return 0;
67 } 45 }
68 46
69 static void __net_exit ip6table_raw_net_exit(struct net *net) 47 static void __net_exit ip6table_raw_net_exit(struct net *net)
70 { 48 {
71 ip6t_unregister_table(net, net->ipv6.ip6table_raw); 49 ip6t_unregister_table(net, net->ipv6.ip6table_raw);
72 } 50 }
73 51
74 static struct pernet_operations ip6table_raw_net_ops = { 52 static struct pernet_operations ip6table_raw_net_ops = {
75 .init = ip6table_raw_net_init, 53 .init = ip6table_raw_net_init,
76 .exit = ip6table_raw_net_exit, 54 .exit = ip6table_raw_net_exit,
77 }; 55 };
78 56
79 static int __init ip6table_raw_init(void) 57 static int __init ip6table_raw_init(void)
80 { 58 {
81 int ret; 59 int ret;
82 60
83 ret = register_pernet_subsys(&ip6table_raw_net_ops); 61 ret = register_pernet_subsys(&ip6table_raw_net_ops);
84 if (ret < 0) 62 if (ret < 0)
85 return ret; 63 return ret;
86 64
87 /* Register hooks */ 65 /* Register hooks */
88 rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook); 66 rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
89 if (IS_ERR(rawtable_ops)) { 67 if (IS_ERR(rawtable_ops)) {
90 ret = PTR_ERR(rawtable_ops); 68 ret = PTR_ERR(rawtable_ops);
91 goto cleanup_table; 69 goto cleanup_table;
92 } 70 }
93 71
94 return ret; 72 return ret;
95 73
96 cleanup_table: 74 cleanup_table:
97 unregister_pernet_subsys(&ip6table_raw_net_ops); 75 unregister_pernet_subsys(&ip6table_raw_net_ops);
98 return ret; 76 return ret;
99 } 77 }
100 78
101 static void __exit ip6table_raw_fini(void) 79 static void __exit ip6table_raw_fini(void)
102 { 80 {
103 xt_hook_unlink(&packet_raw, rawtable_ops); 81 xt_hook_unlink(&packet_raw, rawtable_ops);
104 unregister_pernet_subsys(&ip6table_raw_net_ops); 82 unregister_pernet_subsys(&ip6table_raw_net_ops);
105 } 83 }
net/ipv6/netfilter/ip6table_security.c
1 /* 1 /*
2 * "security" table for IPv6 2 * "security" table for IPv6
3 * 3 *
4 * This is for use by Mandatory Access Control (MAC) security models, 4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context 5 * which need to be able to manage security policy in separate context
6 * to DAC. 6 * to DAC.
7 * 7 *
8 * Based on iptable_mangle.c 8 * Based on iptable_mangle.c
9 * 9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> 11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> 12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as 15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
17 */ 17 */
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/netfilter_ipv6/ip6_tables.h> 19 #include <linux/netfilter_ipv6/ip6_tables.h>
20 20
21 MODULE_LICENSE("GPL"); 21 MODULE_LICENSE("GPL");
22 MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); 22 MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
23 MODULE_DESCRIPTION("ip6tables security table, for MAC rules"); 23 MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
24 24
25 #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ 25 #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
26 (1 << NF_INET_FORWARD) | \ 26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT) 27 (1 << NF_INET_LOCAL_OUT)
28 28
29 static const struct
30 {
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34 } initial_table __net_initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57 };
58
59 static const struct xt_table security_table = { 29 static const struct xt_table security_table = {
60 .name = "security", 30 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS, 31 .valid_hooks = SECURITY_VALID_HOOKS,
62 .me = THIS_MODULE, 32 .me = THIS_MODULE,
63 .af = NFPROTO_IPV6, 33 .af = NFPROTO_IPV6,
64 .priority = NF_IP6_PRI_SECURITY, 34 .priority = NF_IP6_PRI_SECURITY,
65 }; 35 };
66 36
67 static unsigned int 37 static unsigned int
68 ip6table_security_hook(unsigned int hook, struct sk_buff *skb, 38 ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
69 const struct net_device *in, 39 const struct net_device *in,
70 const struct net_device *out, 40 const struct net_device *out,
71 int (*okfn)(struct sk_buff *)) 41 int (*okfn)(struct sk_buff *))
72 { 42 {
73 const struct net *net = dev_net((in != NULL) ? in : out); 43 const struct net *net = dev_net((in != NULL) ? in : out);
74 44
75 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security); 45 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
76 } 46 }
77 47
78 static struct nf_hook_ops *sectbl_ops __read_mostly; 48 static struct nf_hook_ops *sectbl_ops __read_mostly;
79 49
80 static int __net_init ip6table_security_net_init(struct net *net) 50 static int __net_init ip6table_security_net_init(struct net *net)
81 { 51 {
82 net->ipv6.ip6table_security = 52 struct ip6t_replace *repl;
83 ip6t_register_table(net, &security_table, &initial_table.repl);
84 53
54 repl = ip6t_alloc_initial_table(&security_table);
55 if (repl == NULL)
56 return -ENOMEM;
57 net->ipv6.ip6table_security =
58 ip6t_register_table(net, &security_table, repl);
59 kfree(repl);
85 if (IS_ERR(net->ipv6.ip6table_security)) 60 if (IS_ERR(net->ipv6.ip6table_security))
86 return PTR_ERR(net->ipv6.ip6table_security); 61 return PTR_ERR(net->ipv6.ip6table_security);
87 62
88 return 0; 63 return 0;
89 } 64 }
90 65
91 static void __net_exit ip6table_security_net_exit(struct net *net) 66 static void __net_exit ip6table_security_net_exit(struct net *net)
92 { 67 {
93 ip6t_unregister_table(net, net->ipv6.ip6table_security); 68 ip6t_unregister_table(net, net->ipv6.ip6table_security);
94 } 69 }
95 70
96 static struct pernet_operations ip6table_security_net_ops = { 71 static struct pernet_operations ip6table_security_net_ops = {
97 .init = ip6table_security_net_init, 72 .init = ip6table_security_net_init,
98 .exit = ip6table_security_net_exit, 73 .exit = ip6table_security_net_exit,
99 }; 74 };
100 75
101 static int __init ip6table_security_init(void) 76 static int __init ip6table_security_init(void)
102 { 77 {
103 int ret; 78 int ret;
104 79
105 ret = register_pernet_subsys(&ip6table_security_net_ops); 80 ret = register_pernet_subsys(&ip6table_security_net_ops);
106 if (ret < 0) 81 if (ret < 0)
107 return ret; 82 return ret;
108 83
109 sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook); 84 sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
110 if (IS_ERR(sectbl_ops)) { 85 if (IS_ERR(sectbl_ops)) {
111 ret = PTR_ERR(sectbl_ops); 86 ret = PTR_ERR(sectbl_ops);
112 goto cleanup_table; 87 goto cleanup_table;
113 } 88 }
114 89
115 return ret; 90 return ret;
116 91
117 cleanup_table: 92 cleanup_table:
118 unregister_pernet_subsys(&ip6table_security_net_ops); 93 unregister_pernet_subsys(&ip6table_security_net_ops);
119 return ret; 94 return ret;
120 } 95 }
121 96
122 static void __exit ip6table_security_fini(void) 97 static void __exit ip6table_security_fini(void)
123 { 98 {
124 xt_hook_unlink(&security_table, sectbl_ops); 99 xt_hook_unlink(&security_table, sectbl_ops);
net/netfilter/x_tables.c
1 /* 1 /*
2 * x_tables core - Backend for {ip,ip6,arp}_tables 2 * x_tables core - Backend for {ip,ip6,arp}_tables
3 * 3 *
4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> 4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
5 * 5 *
6 * Based on existing ip_tables code which is 6 * Based on existing ip_tables code which is
7 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 7 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
8 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> 8 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 * 13 *
14 */ 14 */
15 15
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/socket.h> 17 #include <linux/socket.h>
18 #include <linux/net.h> 18 #include <linux/net.h>
19 #include <linux/proc_fs.h> 19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h> 20 #include <linux/seq_file.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/vmalloc.h> 22 #include <linux/vmalloc.h>
23 #include <linux/mutex.h> 23 #include <linux/mutex.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <net/net_namespace.h> 25 #include <net/net_namespace.h>
26 26
27 #include <linux/netfilter/x_tables.h> 27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_arp.h> 28 #include <linux/netfilter_arp.h>
29 29 #include <linux/netfilter_ipv4/ip_tables.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter_arp/arp_tables.h>
30 32
31 MODULE_LICENSE("GPL"); 33 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 34 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
33 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); 35 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
34 36
35 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 37 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
36 38
37 struct compat_delta { 39 struct compat_delta {
38 struct compat_delta *next; 40 struct compat_delta *next;
39 unsigned int offset; 41 unsigned int offset;
40 short delta; 42 short delta;
41 }; 43 };
42 44
43 struct xt_af { 45 struct xt_af {
44 struct mutex mutex; 46 struct mutex mutex;
45 struct list_head match; 47 struct list_head match;
46 struct list_head target; 48 struct list_head target;
47 #ifdef CONFIG_COMPAT 49 #ifdef CONFIG_COMPAT
48 struct mutex compat_mutex; 50 struct mutex compat_mutex;
49 struct compat_delta *compat_offsets; 51 struct compat_delta *compat_offsets;
50 #endif 52 #endif
51 }; 53 };
52 54
53 static struct xt_af *xt; 55 static struct xt_af *xt;
54 56
55 #ifdef DEBUG_IP_FIREWALL_USER 57 #ifdef DEBUG_IP_FIREWALL_USER
56 #define duprintf(format, args...) printk(format , ## args) 58 #define duprintf(format, args...) printk(format , ## args)
57 #else 59 #else
58 #define duprintf(format, args...) 60 #define duprintf(format, args...)
59 #endif 61 #endif
60 62
61 static const char *const xt_prefix[NFPROTO_NUMPROTO] = { 63 static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
62 [NFPROTO_UNSPEC] = "x", 64 [NFPROTO_UNSPEC] = "x",
63 [NFPROTO_IPV4] = "ip", 65 [NFPROTO_IPV4] = "ip",
64 [NFPROTO_ARP] = "arp", 66 [NFPROTO_ARP] = "arp",
65 [NFPROTO_BRIDGE] = "eb", 67 [NFPROTO_BRIDGE] = "eb",
66 [NFPROTO_IPV6] = "ip6", 68 [NFPROTO_IPV6] = "ip6",
67 }; 69 };
68 70
69 /* Registration hooks for targets. */ 71 /* Registration hooks for targets. */
70 int 72 int
71 xt_register_target(struct xt_target *target) 73 xt_register_target(struct xt_target *target)
72 { 74 {
73 u_int8_t af = target->family; 75 u_int8_t af = target->family;
74 int ret; 76 int ret;
75 77
76 ret = mutex_lock_interruptible(&xt[af].mutex); 78 ret = mutex_lock_interruptible(&xt[af].mutex);
77 if (ret != 0) 79 if (ret != 0)
78 return ret; 80 return ret;
79 list_add(&target->list, &xt[af].target); 81 list_add(&target->list, &xt[af].target);
80 mutex_unlock(&xt[af].mutex); 82 mutex_unlock(&xt[af].mutex);
81 return ret; 83 return ret;
82 } 84 }
83 EXPORT_SYMBOL(xt_register_target); 85 EXPORT_SYMBOL(xt_register_target);
84 86
85 void 87 void
86 xt_unregister_target(struct xt_target *target) 88 xt_unregister_target(struct xt_target *target)
87 { 89 {
88 u_int8_t af = target->family; 90 u_int8_t af = target->family;
89 91
90 mutex_lock(&xt[af].mutex); 92 mutex_lock(&xt[af].mutex);
91 list_del(&target->list); 93 list_del(&target->list);
92 mutex_unlock(&xt[af].mutex); 94 mutex_unlock(&xt[af].mutex);
93 } 95 }
94 EXPORT_SYMBOL(xt_unregister_target); 96 EXPORT_SYMBOL(xt_unregister_target);
95 97
96 int 98 int
97 xt_register_targets(struct xt_target *target, unsigned int n) 99 xt_register_targets(struct xt_target *target, unsigned int n)
98 { 100 {
99 unsigned int i; 101 unsigned int i;
100 int err = 0; 102 int err = 0;
101 103
102 for (i = 0; i < n; i++) { 104 for (i = 0; i < n; i++) {
103 err = xt_register_target(&target[i]); 105 err = xt_register_target(&target[i]);
104 if (err) 106 if (err)
105 goto err; 107 goto err;
106 } 108 }
107 return err; 109 return err;
108 110
109 err: 111 err:
110 if (i > 0) 112 if (i > 0)
111 xt_unregister_targets(target, i); 113 xt_unregister_targets(target, i);
112 return err; 114 return err;
113 } 115 }
114 EXPORT_SYMBOL(xt_register_targets); 116 EXPORT_SYMBOL(xt_register_targets);
115 117
116 void 118 void
117 xt_unregister_targets(struct xt_target *target, unsigned int n) 119 xt_unregister_targets(struct xt_target *target, unsigned int n)
118 { 120 {
119 unsigned int i; 121 unsigned int i;
120 122
121 for (i = 0; i < n; i++) 123 for (i = 0; i < n; i++)
122 xt_unregister_target(&target[i]); 124 xt_unregister_target(&target[i]);
123 } 125 }
124 EXPORT_SYMBOL(xt_unregister_targets); 126 EXPORT_SYMBOL(xt_unregister_targets);
125 127
126 int 128 int
127 xt_register_match(struct xt_match *match) 129 xt_register_match(struct xt_match *match)
128 { 130 {
129 u_int8_t af = match->family; 131 u_int8_t af = match->family;
130 int ret; 132 int ret;
131 133
132 ret = mutex_lock_interruptible(&xt[af].mutex); 134 ret = mutex_lock_interruptible(&xt[af].mutex);
133 if (ret != 0) 135 if (ret != 0)
134 return ret; 136 return ret;
135 137
136 list_add(&match->list, &xt[af].match); 138 list_add(&match->list, &xt[af].match);
137 mutex_unlock(&xt[af].mutex); 139 mutex_unlock(&xt[af].mutex);
138 140
139 return ret; 141 return ret;
140 } 142 }
141 EXPORT_SYMBOL(xt_register_match); 143 EXPORT_SYMBOL(xt_register_match);
142 144
143 void 145 void
144 xt_unregister_match(struct xt_match *match) 146 xt_unregister_match(struct xt_match *match)
145 { 147 {
146 u_int8_t af = match->family; 148 u_int8_t af = match->family;
147 149
148 mutex_lock(&xt[af].mutex); 150 mutex_lock(&xt[af].mutex);
149 list_del(&match->list); 151 list_del(&match->list);
150 mutex_unlock(&xt[af].mutex); 152 mutex_unlock(&xt[af].mutex);
151 } 153 }
152 EXPORT_SYMBOL(xt_unregister_match); 154 EXPORT_SYMBOL(xt_unregister_match);
153 155
154 int 156 int
155 xt_register_matches(struct xt_match *match, unsigned int n) 157 xt_register_matches(struct xt_match *match, unsigned int n)
156 { 158 {
157 unsigned int i; 159 unsigned int i;
158 int err = 0; 160 int err = 0;
159 161
160 for (i = 0; i < n; i++) { 162 for (i = 0; i < n; i++) {
161 err = xt_register_match(&match[i]); 163 err = xt_register_match(&match[i]);
162 if (err) 164 if (err)
163 goto err; 165 goto err;
164 } 166 }
165 return err; 167 return err;
166 168
167 err: 169 err:
168 if (i > 0) 170 if (i > 0)
169 xt_unregister_matches(match, i); 171 xt_unregister_matches(match, i);
170 return err; 172 return err;
171 } 173 }
172 EXPORT_SYMBOL(xt_register_matches); 174 EXPORT_SYMBOL(xt_register_matches);
173 175
174 void 176 void
175 xt_unregister_matches(struct xt_match *match, unsigned int n) 177 xt_unregister_matches(struct xt_match *match, unsigned int n)
176 { 178 {
177 unsigned int i; 179 unsigned int i;
178 180
179 for (i = 0; i < n; i++) 181 for (i = 0; i < n; i++)
180 xt_unregister_match(&match[i]); 182 xt_unregister_match(&match[i]);
181 } 183 }
182 EXPORT_SYMBOL(xt_unregister_matches); 184 EXPORT_SYMBOL(xt_unregister_matches);
183 185
184 186
185 /* 187 /*
186 * These are weird, but module loading must not be done with mutex 188 * These are weird, but module loading must not be done with mutex
187 * held (since they will register), and we have to have a single 189 * held (since they will register), and we have to have a single
188 * function to use try_then_request_module(). 190 * function to use try_then_request_module().
189 */ 191 */
190 192
191 /* Find match, grabs ref. Returns ERR_PTR() on error. */ 193 /* Find match, grabs ref. Returns ERR_PTR() on error. */
192 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) 194 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
193 { 195 {
194 struct xt_match *m; 196 struct xt_match *m;
195 int err = 0; 197 int err = 0;
196 198
197 if (mutex_lock_interruptible(&xt[af].mutex) != 0) 199 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
198 return ERR_PTR(-EINTR); 200 return ERR_PTR(-EINTR);
199 201
200 list_for_each_entry(m, &xt[af].match, list) { 202 list_for_each_entry(m, &xt[af].match, list) {
201 if (strcmp(m->name, name) == 0) { 203 if (strcmp(m->name, name) == 0) {
202 if (m->revision == revision) { 204 if (m->revision == revision) {
203 if (try_module_get(m->me)) { 205 if (try_module_get(m->me)) {
204 mutex_unlock(&xt[af].mutex); 206 mutex_unlock(&xt[af].mutex);
205 return m; 207 return m;
206 } 208 }
207 } else 209 } else
208 err = -EPROTOTYPE; /* Found something. */ 210 err = -EPROTOTYPE; /* Found something. */
209 } 211 }
210 } 212 }
211 mutex_unlock(&xt[af].mutex); 213 mutex_unlock(&xt[af].mutex);
212 214
213 if (af != NFPROTO_UNSPEC) 215 if (af != NFPROTO_UNSPEC)
214 /* Try searching again in the family-independent list */ 216 /* Try searching again in the family-independent list */
215 return xt_find_match(NFPROTO_UNSPEC, name, revision); 217 return xt_find_match(NFPROTO_UNSPEC, name, revision);
216 218
217 return ERR_PTR(err); 219 return ERR_PTR(err);
218 } 220 }
219 EXPORT_SYMBOL(xt_find_match); 221 EXPORT_SYMBOL(xt_find_match);
220 222
221 /* Find target, grabs ref. Returns ERR_PTR() on error. */ 223 /* Find target, grabs ref. Returns ERR_PTR() on error. */
222 struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) 224 struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
223 { 225 {
224 struct xt_target *t; 226 struct xt_target *t;
225 int err = 0; 227 int err = 0;
226 228
227 if (mutex_lock_interruptible(&xt[af].mutex) != 0) 229 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
228 return ERR_PTR(-EINTR); 230 return ERR_PTR(-EINTR);
229 231
230 list_for_each_entry(t, &xt[af].target, list) { 232 list_for_each_entry(t, &xt[af].target, list) {
231 if (strcmp(t->name, name) == 0) { 233 if (strcmp(t->name, name) == 0) {
232 if (t->revision == revision) { 234 if (t->revision == revision) {
233 if (try_module_get(t->me)) { 235 if (try_module_get(t->me)) {
234 mutex_unlock(&xt[af].mutex); 236 mutex_unlock(&xt[af].mutex);
235 return t; 237 return t;
236 } 238 }
237 } else 239 } else
238 err = -EPROTOTYPE; /* Found something. */ 240 err = -EPROTOTYPE; /* Found something. */
239 } 241 }
240 } 242 }
241 mutex_unlock(&xt[af].mutex); 243 mutex_unlock(&xt[af].mutex);
242 244
243 if (af != NFPROTO_UNSPEC) 245 if (af != NFPROTO_UNSPEC)
244 /* Try searching again in the family-independent list */ 246 /* Try searching again in the family-independent list */
245 return xt_find_target(NFPROTO_UNSPEC, name, revision); 247 return xt_find_target(NFPROTO_UNSPEC, name, revision);
246 248
247 return ERR_PTR(err); 249 return ERR_PTR(err);
248 } 250 }
249 EXPORT_SYMBOL(xt_find_target); 251 EXPORT_SYMBOL(xt_find_target);
250 252
251 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) 253 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
252 { 254 {
253 struct xt_target *target; 255 struct xt_target *target;
254 256
255 target = try_then_request_module(xt_find_target(af, name, revision), 257 target = try_then_request_module(xt_find_target(af, name, revision),
256 "%st_%s", xt_prefix[af], name); 258 "%st_%s", xt_prefix[af], name);
257 if (IS_ERR(target) || !target) 259 if (IS_ERR(target) || !target)
258 return NULL; 260 return NULL;
259 return target; 261 return target;
260 } 262 }
261 EXPORT_SYMBOL_GPL(xt_request_find_target); 263 EXPORT_SYMBOL_GPL(xt_request_find_target);
262 264
263 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) 265 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
264 { 266 {
265 const struct xt_match *m; 267 const struct xt_match *m;
266 int have_rev = 0; 268 int have_rev = 0;
267 269
268 list_for_each_entry(m, &xt[af].match, list) { 270 list_for_each_entry(m, &xt[af].match, list) {
269 if (strcmp(m->name, name) == 0) { 271 if (strcmp(m->name, name) == 0) {
270 if (m->revision > *bestp) 272 if (m->revision > *bestp)
271 *bestp = m->revision; 273 *bestp = m->revision;
272 if (m->revision == revision) 274 if (m->revision == revision)
273 have_rev = 1; 275 have_rev = 1;
274 } 276 }
275 } 277 }
276 278
277 if (af != NFPROTO_UNSPEC && !have_rev) 279 if (af != NFPROTO_UNSPEC && !have_rev)
278 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); 280 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
279 281
280 return have_rev; 282 return have_rev;
281 } 283 }
282 284
283 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) 285 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
284 { 286 {
285 const struct xt_target *t; 287 const struct xt_target *t;
286 int have_rev = 0; 288 int have_rev = 0;
287 289
288 list_for_each_entry(t, &xt[af].target, list) { 290 list_for_each_entry(t, &xt[af].target, list) {
289 if (strcmp(t->name, name) == 0) { 291 if (strcmp(t->name, name) == 0) {
290 if (t->revision > *bestp) 292 if (t->revision > *bestp)
291 *bestp = t->revision; 293 *bestp = t->revision;
292 if (t->revision == revision) 294 if (t->revision == revision)
293 have_rev = 1; 295 have_rev = 1;
294 } 296 }
295 } 297 }
296 298
297 if (af != NFPROTO_UNSPEC && !have_rev) 299 if (af != NFPROTO_UNSPEC && !have_rev)
298 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); 300 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
299 301
300 return have_rev; 302 return have_rev;
301 } 303 }
302 304
303 /* Returns true or false (if no such extension at all) */ 305 /* Returns true or false (if no such extension at all) */
304 int xt_find_revision(u8 af, const char *name, u8 revision, int target, 306 int xt_find_revision(u8 af, const char *name, u8 revision, int target,
305 int *err) 307 int *err)
306 { 308 {
307 int have_rev, best = -1; 309 int have_rev, best = -1;
308 310
309 if (mutex_lock_interruptible(&xt[af].mutex) != 0) { 311 if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
310 *err = -EINTR; 312 *err = -EINTR;
311 return 1; 313 return 1;
312 } 314 }
313 if (target == 1) 315 if (target == 1)
314 have_rev = target_revfn(af, name, revision, &best); 316 have_rev = target_revfn(af, name, revision, &best);
315 else 317 else
316 have_rev = match_revfn(af, name, revision, &best); 318 have_rev = match_revfn(af, name, revision, &best);
317 mutex_unlock(&xt[af].mutex); 319 mutex_unlock(&xt[af].mutex);
318 320
319 /* Nothing at all? Return 0 to try loading module. */ 321 /* Nothing at all? Return 0 to try loading module. */
320 if (best == -1) { 322 if (best == -1) {
321 *err = -ENOENT; 323 *err = -ENOENT;
322 return 0; 324 return 0;
323 } 325 }
324 326
325 *err = best; 327 *err = best;
326 if (!have_rev) 328 if (!have_rev)
327 *err = -EPROTONOSUPPORT; 329 *err = -EPROTONOSUPPORT;
328 return 1; 330 return 1;
329 } 331 }
330 EXPORT_SYMBOL_GPL(xt_find_revision); 332 EXPORT_SYMBOL_GPL(xt_find_revision);
331 333
332 static char *textify_hooks(char *buf, size_t size, unsigned int mask) 334 static char *textify_hooks(char *buf, size_t size, unsigned int mask)
333 { 335 {
334 static const char *const names[] = { 336 static const char *const names[] = {
335 "PREROUTING", "INPUT", "FORWARD", 337 "PREROUTING", "INPUT", "FORWARD",
336 "OUTPUT", "POSTROUTING", "BROUTING", 338 "OUTPUT", "POSTROUTING", "BROUTING",
337 }; 339 };
338 unsigned int i; 340 unsigned int i;
339 char *p = buf; 341 char *p = buf;
340 bool np = false; 342 bool np = false;
341 int res; 343 int res;
342 344
343 *p = '\0'; 345 *p = '\0';
344 for (i = 0; i < ARRAY_SIZE(names); ++i) { 346 for (i = 0; i < ARRAY_SIZE(names); ++i) {
345 if (!(mask & (1 << i))) 347 if (!(mask & (1 << i)))
346 continue; 348 continue;
347 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); 349 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
348 if (res > 0) { 350 if (res > 0) {
349 size -= res; 351 size -= res;
350 p += res; 352 p += res;
351 } 353 }
352 np = true; 354 np = true;
353 } 355 }
354 356
355 return buf; 357 return buf;
356 } 358 }
357 359
358 int xt_check_match(struct xt_mtchk_param *par, 360 int xt_check_match(struct xt_mtchk_param *par,
359 unsigned int size, u_int8_t proto, bool inv_proto) 361 unsigned int size, u_int8_t proto, bool inv_proto)
360 { 362 {
361 if (XT_ALIGN(par->match->matchsize) != size && 363 if (XT_ALIGN(par->match->matchsize) != size &&
362 par->match->matchsize != -1) { 364 par->match->matchsize != -1) {
363 /* 365 /*
364 * ebt_among is exempt from centralized matchsize checking 366 * ebt_among is exempt from centralized matchsize checking
365 * because it uses a dynamic-size data set. 367 * because it uses a dynamic-size data set.
366 */ 368 */
367 pr_err("%s_tables: %s match: invalid size %Zu != %u\n", 369 pr_err("%s_tables: %s match: invalid size %Zu != %u\n",
368 xt_prefix[par->family], par->match->name, 370 xt_prefix[par->family], par->match->name,
369 XT_ALIGN(par->match->matchsize), size); 371 XT_ALIGN(par->match->matchsize), size);
370 return -EINVAL; 372 return -EINVAL;
371 } 373 }
372 if (par->match->table != NULL && 374 if (par->match->table != NULL &&
373 strcmp(par->match->table, par->table) != 0) { 375 strcmp(par->match->table, par->table) != 0) {
374 pr_err("%s_tables: %s match: only valid in %s table, not %s\n", 376 pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
375 xt_prefix[par->family], par->match->name, 377 xt_prefix[par->family], par->match->name,
376 par->match->table, par->table); 378 par->match->table, par->table);
377 return -EINVAL; 379 return -EINVAL;
378 } 380 }
379 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { 381 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
380 char used[64], allow[64]; 382 char used[64], allow[64];
381 383
382 pr_err("%s_tables: %s match: used from hooks %s, but only " 384 pr_err("%s_tables: %s match: used from hooks %s, but only "
383 "valid from %s\n", 385 "valid from %s\n",
384 xt_prefix[par->family], par->match->name, 386 xt_prefix[par->family], par->match->name,
385 textify_hooks(used, sizeof(used), par->hook_mask), 387 textify_hooks(used, sizeof(used), par->hook_mask),
386 textify_hooks(allow, sizeof(allow), par->match->hooks)); 388 textify_hooks(allow, sizeof(allow), par->match->hooks));
387 return -EINVAL; 389 return -EINVAL;
388 } 390 }
389 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 391 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
390 pr_err("%s_tables: %s match: only valid for protocol %u\n", 392 pr_err("%s_tables: %s match: only valid for protocol %u\n",
391 xt_prefix[par->family], par->match->name, 393 xt_prefix[par->family], par->match->name,
392 par->match->proto); 394 par->match->proto);
393 return -EINVAL; 395 return -EINVAL;
394 } 396 }
395 if (par->match->checkentry != NULL && !par->match->checkentry(par)) 397 if (par->match->checkentry != NULL && !par->match->checkentry(par))
396 return -EINVAL; 398 return -EINVAL;
397 return 0; 399 return 0;
398 } 400 }
399 EXPORT_SYMBOL_GPL(xt_check_match); 401 EXPORT_SYMBOL_GPL(xt_check_match);
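
For illustration only, a minimal sketch (hypothetical names, not part of this commit) of the kind of xt_match definition these checks act on: xt_check_match() compares the XT_ALIGN()ed .matchsize against the userspace blob, restricts use to the named .table, to the hook bits in .hooks, and to the declared .proto; xt_check_target() enforces the same constraints for targets. The match prototype below assumes the xt_match_param API of this kernel generation.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/netfilter/x_tables.h>

/* hypothetical per-rule option block that userspace would pass in */
struct xt_example_info {
	__u8 flags;
};

static bool example_mt(const struct sk_buff *skb,
		       const struct xt_match_param *par)
{
	/* a real match would inspect skb and par->matchinfo here */
	return true;
}

static struct xt_match example_mt_reg __read_mostly = {
	.name      = "example",		/* hypothetical */
	.revision  = 0,
	.family    = NFPROTO_IPV4,
	.table     = "filter",		/* compared against par->table */
	.hooks     = (1 << NF_INET_LOCAL_IN) |
		     (1 << NF_INET_FORWARD),	/* compared against par->hook_mask */
	.proto     = IPPROTO_TCP,	/* compared against the rule's protocol */
	.matchsize = sizeof(struct xt_example_info), /* compared, after XT_ALIGN(), with the blob size */
	.match     = example_mt,
	.me        = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");

With a registration like this, any rule that uses the match outside the filter table, outside INPUT/FORWARD, or without the matching protocol is rejected by the checks above.
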
400 402
401 #ifdef CONFIG_COMPAT 403 #ifdef CONFIG_COMPAT
402 int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta) 404 int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
403 { 405 {
404 struct compat_delta *tmp; 406 struct compat_delta *tmp;
405 407
406 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL); 408 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
407 if (!tmp) 409 if (!tmp)
408 return -ENOMEM; 410 return -ENOMEM;
409 411
410 tmp->offset = offset; 412 tmp->offset = offset;
411 tmp->delta = delta; 413 tmp->delta = delta;
412 414
413 if (xt[af].compat_offsets) { 415 if (xt[af].compat_offsets) {
414 tmp->next = xt[af].compat_offsets->next; 416 tmp->next = xt[af].compat_offsets->next;
415 xt[af].compat_offsets->next = tmp; 417 xt[af].compat_offsets->next = tmp;
416 } else { 418 } else {
417 xt[af].compat_offsets = tmp; 419 xt[af].compat_offsets = tmp;
418 tmp->next = NULL; 420 tmp->next = NULL;
419 } 421 }
420 return 0; 422 return 0;
421 } 423 }
422 EXPORT_SYMBOL_GPL(xt_compat_add_offset); 424 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
423 425
424 void xt_compat_flush_offsets(u_int8_t af) 426 void xt_compat_flush_offsets(u_int8_t af)
425 { 427 {
426 struct compat_delta *tmp, *next; 428 struct compat_delta *tmp, *next;
427 429
428 if (xt[af].compat_offsets) { 430 if (xt[af].compat_offsets) {
429 for (tmp = xt[af].compat_offsets; tmp; tmp = next) { 431 for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
430 next = tmp->next; 432 next = tmp->next;
431 kfree(tmp); 433 kfree(tmp);
432 } 434 }
433 xt[af].compat_offsets = NULL; 435 xt[af].compat_offsets = NULL;
434 } 436 }
435 } 437 }
436 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 438 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
437 439
438 short xt_compat_calc_jump(u_int8_t af, unsigned int offset) 440 short xt_compat_calc_jump(u_int8_t af, unsigned int offset)
439 { 441 {
440 struct compat_delta *tmp; 442 struct compat_delta *tmp;
441 short delta; 443 short delta;
442 444
443 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) 445 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
444 if (tmp->offset < offset) 446 if (tmp->offset < offset)
445 delta += tmp->delta; 447 delta += tmp->delta;
446 return delta; 448 return delta;
447 } 449 }
448 EXPORT_SYMBOL_GPL(xt_compat_calc_jump); 450 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
449 451
450 int xt_compat_match_offset(const struct xt_match *match) 452 int xt_compat_match_offset(const struct xt_match *match)
451 { 453 {
452 u_int16_t csize = match->compatsize ? : match->matchsize; 454 u_int16_t csize = match->compatsize ? : match->matchsize;
453 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); 455 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
454 } 456 }
455 EXPORT_SYMBOL_GPL(xt_compat_match_offset); 457 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
456 458
457 int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, 459 int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
458 unsigned int *size) 460 unsigned int *size)
459 { 461 {
460 const struct xt_match *match = m->u.kernel.match; 462 const struct xt_match *match = m->u.kernel.match;
461 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; 463 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
462 int pad, off = xt_compat_match_offset(match); 464 int pad, off = xt_compat_match_offset(match);
463 u_int16_t msize = cm->u.user.match_size; 465 u_int16_t msize = cm->u.user.match_size;
464 466
465 m = *dstptr; 467 m = *dstptr;
466 memcpy(m, cm, sizeof(*cm)); 468 memcpy(m, cm, sizeof(*cm));
467 if (match->compat_from_user) 469 if (match->compat_from_user)
468 match->compat_from_user(m->data, cm->data); 470 match->compat_from_user(m->data, cm->data);
469 else 471 else
470 memcpy(m->data, cm->data, msize - sizeof(*cm)); 472 memcpy(m->data, cm->data, msize - sizeof(*cm));
471 pad = XT_ALIGN(match->matchsize) - match->matchsize; 473 pad = XT_ALIGN(match->matchsize) - match->matchsize;
472 if (pad > 0) 474 if (pad > 0)
473 memset(m->data + match->matchsize, 0, pad); 475 memset(m->data + match->matchsize, 0, pad);
474 476
475 msize += off; 477 msize += off;
476 m->u.user.match_size = msize; 478 m->u.user.match_size = msize;
477 479
478 *size += off; 480 *size += off;
479 *dstptr += msize; 481 *dstptr += msize;
480 return 0; 482 return 0;
481 } 483 }
482 EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 484 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
483 485
484 int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 486 int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
485 unsigned int *size) 487 unsigned int *size)
486 { 488 {
487 const struct xt_match *match = m->u.kernel.match; 489 const struct xt_match *match = m->u.kernel.match;
488 struct compat_xt_entry_match __user *cm = *dstptr; 490 struct compat_xt_entry_match __user *cm = *dstptr;
489 int off = xt_compat_match_offset(match); 491 int off = xt_compat_match_offset(match);
490 u_int16_t msize = m->u.user.match_size - off; 492 u_int16_t msize = m->u.user.match_size - off;
491 493
492 if (copy_to_user(cm, m, sizeof(*cm)) || 494 if (copy_to_user(cm, m, sizeof(*cm)) ||
493 put_user(msize, &cm->u.user.match_size) || 495 put_user(msize, &cm->u.user.match_size) ||
494 copy_to_user(cm->u.user.name, m->u.kernel.match->name, 496 copy_to_user(cm->u.user.name, m->u.kernel.match->name,
495 strlen(m->u.kernel.match->name) + 1)) 497 strlen(m->u.kernel.match->name) + 1))
496 return -EFAULT; 498 return -EFAULT;
497 499
498 if (match->compat_to_user) { 500 if (match->compat_to_user) {
499 if (match->compat_to_user((void __user *)cm->data, m->data)) 501 if (match->compat_to_user((void __user *)cm->data, m->data))
500 return -EFAULT; 502 return -EFAULT;
501 } else { 503 } else {
502 if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) 504 if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
503 return -EFAULT; 505 return -EFAULT;
504 } 506 }
505 507
506 *size -= off; 508 *size -= off;
507 *dstptr += msize; 509 *dstptr += msize;
508 return 0; 510 return 0;
509 } 511 }
510 EXPORT_SYMBOL_GPL(xt_compat_match_to_user); 512 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
511 #endif /* CONFIG_COMPAT */ 513 #endif /* CONFIG_COMPAT */
512 514
513 int xt_check_target(struct xt_tgchk_param *par, 515 int xt_check_target(struct xt_tgchk_param *par,
514 unsigned int size, u_int8_t proto, bool inv_proto) 516 unsigned int size, u_int8_t proto, bool inv_proto)
515 { 517 {
516 if (XT_ALIGN(par->target->targetsize) != size) { 518 if (XT_ALIGN(par->target->targetsize) != size) {
517 pr_err("%s_tables: %s target: invalid size %Zu != %u\n", 519 pr_err("%s_tables: %s target: invalid size %Zu != %u\n",
518 xt_prefix[par->family], par->target->name, 520 xt_prefix[par->family], par->target->name,
519 XT_ALIGN(par->target->targetsize), size); 521 XT_ALIGN(par->target->targetsize), size);
520 return -EINVAL; 522 return -EINVAL;
521 } 523 }
522 if (par->target->table != NULL && 524 if (par->target->table != NULL &&
523 strcmp(par->target->table, par->table) != 0) { 525 strcmp(par->target->table, par->table) != 0) {
524 pr_err("%s_tables: %s target: only valid in %s table, not %s\n", 526 pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
525 xt_prefix[par->family], par->target->name, 527 xt_prefix[par->family], par->target->name,
526 par->target->table, par->table); 528 par->target->table, par->table);
527 return -EINVAL; 529 return -EINVAL;
528 } 530 }
529 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { 531 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
530 char used[64], allow[64]; 532 char used[64], allow[64];
531 533
532 pr_err("%s_tables: %s target: used from hooks %s, but only " 534 pr_err("%s_tables: %s target: used from hooks %s, but only "
533 "usable from %s\n", 535 "usable from %s\n",
534 xt_prefix[par->family], par->target->name, 536 xt_prefix[par->family], par->target->name,
535 textify_hooks(used, sizeof(used), par->hook_mask), 537 textify_hooks(used, sizeof(used), par->hook_mask),
536 textify_hooks(allow, sizeof(allow), par->target->hooks)); 538 textify_hooks(allow, sizeof(allow), par->target->hooks));
537 return -EINVAL; 539 return -EINVAL;
538 } 540 }
539 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 541 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
540 pr_err("%s_tables: %s target: only valid for protocol %u\n", 542 pr_err("%s_tables: %s target: only valid for protocol %u\n",
541 xt_prefix[par->family], par->target->name, 543 xt_prefix[par->family], par->target->name,
542 par->target->proto); 544 par->target->proto);
543 return -EINVAL; 545 return -EINVAL;
544 } 546 }
545 if (par->target->checkentry != NULL && !par->target->checkentry(par)) 547 if (par->target->checkentry != NULL && !par->target->checkentry(par))
546 return -EINVAL; 548 return -EINVAL;
547 return 0; 549 return 0;
548 } 550 }
549 EXPORT_SYMBOL_GPL(xt_check_target); 551 EXPORT_SYMBOL_GPL(xt_check_target);
550 552
551 #ifdef CONFIG_COMPAT 553 #ifdef CONFIG_COMPAT
552 int xt_compat_target_offset(const struct xt_target *target) 554 int xt_compat_target_offset(const struct xt_target *target)
553 { 555 {
554 u_int16_t csize = target->compatsize ? : target->targetsize; 556 u_int16_t csize = target->compatsize ? : target->targetsize;
555 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); 557 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
556 } 558 }
557 EXPORT_SYMBOL_GPL(xt_compat_target_offset); 559 EXPORT_SYMBOL_GPL(xt_compat_target_offset);
558 560
559 void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, 561 void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
560 unsigned int *size) 562 unsigned int *size)
561 { 563 {
562 const struct xt_target *target = t->u.kernel.target; 564 const struct xt_target *target = t->u.kernel.target;
563 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; 565 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
564 int pad, off = xt_compat_target_offset(target); 566 int pad, off = xt_compat_target_offset(target);
565 u_int16_t tsize = ct->u.user.target_size; 567 u_int16_t tsize = ct->u.user.target_size;
566 568
567 t = *dstptr; 569 t = *dstptr;
568 memcpy(t, ct, sizeof(*ct)); 570 memcpy(t, ct, sizeof(*ct));
569 if (target->compat_from_user) 571 if (target->compat_from_user)
570 target->compat_from_user(t->data, ct->data); 572 target->compat_from_user(t->data, ct->data);
571 else 573 else
572 memcpy(t->data, ct->data, tsize - sizeof(*ct)); 574 memcpy(t->data, ct->data, tsize - sizeof(*ct));
573 pad = XT_ALIGN(target->targetsize) - target->targetsize; 575 pad = XT_ALIGN(target->targetsize) - target->targetsize;
574 if (pad > 0) 576 if (pad > 0)
575 memset(t->data + target->targetsize, 0, pad); 577 memset(t->data + target->targetsize, 0, pad);
576 578
577 tsize += off; 579 tsize += off;
578 t->u.user.target_size = tsize; 580 t->u.user.target_size = tsize;
579 581
580 *size += off; 582 *size += off;
581 *dstptr += tsize; 583 *dstptr += tsize;
582 } 584 }
583 EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 585 EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
584 586
585 int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 587 int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
586 unsigned int *size) 588 unsigned int *size)
587 { 589 {
588 const struct xt_target *target = t->u.kernel.target; 590 const struct xt_target *target = t->u.kernel.target;
589 struct compat_xt_entry_target __user *ct = *dstptr; 591 struct compat_xt_entry_target __user *ct = *dstptr;
590 int off = xt_compat_target_offset(target); 592 int off = xt_compat_target_offset(target);
591 u_int16_t tsize = t->u.user.target_size - off; 593 u_int16_t tsize = t->u.user.target_size - off;
592 594
593 if (copy_to_user(ct, t, sizeof(*ct)) || 595 if (copy_to_user(ct, t, sizeof(*ct)) ||
594 put_user(tsize, &ct->u.user.target_size) || 596 put_user(tsize, &ct->u.user.target_size) ||
595 copy_to_user(ct->u.user.name, t->u.kernel.target->name, 597 copy_to_user(ct->u.user.name, t->u.kernel.target->name,
596 strlen(t->u.kernel.target->name) + 1)) 598 strlen(t->u.kernel.target->name) + 1))
597 return -EFAULT; 599 return -EFAULT;
598 600
599 if (target->compat_to_user) { 601 if (target->compat_to_user) {
600 if (target->compat_to_user((void __user *)ct->data, t->data)) 602 if (target->compat_to_user((void __user *)ct->data, t->data))
601 return -EFAULT; 603 return -EFAULT;
602 } else { 604 } else {
603 if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct))) 605 if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
604 return -EFAULT; 606 return -EFAULT;
605 } 607 }
606 608
607 *size -= off; 609 *size -= off;
608 *dstptr += tsize; 610 *dstptr += tsize;
609 return 0; 611 return 0;
610 } 612 }
611 EXPORT_SYMBOL_GPL(xt_compat_target_to_user); 613 EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
612 #endif 614 #endif
613 615
614 struct xt_table_info *xt_alloc_table_info(unsigned int size) 616 struct xt_table_info *xt_alloc_table_info(unsigned int size)
615 { 617 {
616 struct xt_table_info *newinfo; 618 struct xt_table_info *newinfo;
617 int cpu; 619 int cpu;
618 620
619 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ 621 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
620 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) 622 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
621 return NULL; 623 return NULL;
622 624
623 newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL); 625 newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
624 if (!newinfo) 626 if (!newinfo)
625 return NULL; 627 return NULL;
626 628
627 newinfo->size = size; 629 newinfo->size = size;
628 630
629 for_each_possible_cpu(cpu) { 631 for_each_possible_cpu(cpu) {
630 if (size <= PAGE_SIZE) 632 if (size <= PAGE_SIZE)
631 newinfo->entries[cpu] = kmalloc_node(size, 633 newinfo->entries[cpu] = kmalloc_node(size,
632 GFP_KERNEL, 634 GFP_KERNEL,
633 cpu_to_node(cpu)); 635 cpu_to_node(cpu));
634 else 636 else
635 newinfo->entries[cpu] = vmalloc_node(size, 637 newinfo->entries[cpu] = vmalloc_node(size,
636 cpu_to_node(cpu)); 638 cpu_to_node(cpu));
637 639
638 if (newinfo->entries[cpu] == NULL) { 640 if (newinfo->entries[cpu] == NULL) {
639 xt_free_table_info(newinfo); 641 xt_free_table_info(newinfo);
640 return NULL; 642 return NULL;
641 } 643 }
642 } 644 }
643 645
644 return newinfo; 646 return newinfo;
645 } 647 }
646 EXPORT_SYMBOL(xt_alloc_table_info); 648 EXPORT_SYMBOL(xt_alloc_table_info);
647 649
648 void xt_free_table_info(struct xt_table_info *info) 650 void xt_free_table_info(struct xt_table_info *info)
649 { 651 {
650 int cpu; 652 int cpu;
651 653
652 for_each_possible_cpu(cpu) { 654 for_each_possible_cpu(cpu) {
653 if (info->size <= PAGE_SIZE) 655 if (info->size <= PAGE_SIZE)
654 kfree(info->entries[cpu]); 656 kfree(info->entries[cpu]);
655 else 657 else
656 vfree(info->entries[cpu]); 658 vfree(info->entries[cpu]);
657 } 659 }
658 kfree(info); 660 kfree(info);
659 } 661 }
660 EXPORT_SYMBOL(xt_free_table_info); 662 EXPORT_SYMBOL(xt_free_table_info);
661 663
662 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 664 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
663 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 665 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
664 const char *name) 666 const char *name)
665 { 667 {
666 struct xt_table *t; 668 struct xt_table *t;
667 669
668 if (mutex_lock_interruptible(&xt[af].mutex) != 0) 670 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
669 return ERR_PTR(-EINTR); 671 return ERR_PTR(-EINTR);
670 672
671 list_for_each_entry(t, &net->xt.tables[af], list) 673 list_for_each_entry(t, &net->xt.tables[af], list)
672 if (strcmp(t->name, name) == 0 && try_module_get(t->me)) 674 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
673 return t; 675 return t;
674 mutex_unlock(&xt[af].mutex); 676 mutex_unlock(&xt[af].mutex);
675 return NULL; 677 return NULL;
676 } 678 }
677 EXPORT_SYMBOL_GPL(xt_find_table_lock); 679 EXPORT_SYMBOL_GPL(xt_find_table_lock);
678 680
679 void xt_table_unlock(struct xt_table *table) 681 void xt_table_unlock(struct xt_table *table)
680 { 682 {
681 mutex_unlock(&xt[table->af].mutex); 683 mutex_unlock(&xt[table->af].mutex);
682 } 684 }
683 EXPORT_SYMBOL_GPL(xt_table_unlock); 685 EXPORT_SYMBOL_GPL(xt_table_unlock);
684 686
685 #ifdef CONFIG_COMPAT 687 #ifdef CONFIG_COMPAT
686 void xt_compat_lock(u_int8_t af) 688 void xt_compat_lock(u_int8_t af)
687 { 689 {
688 mutex_lock(&xt[af].compat_mutex); 690 mutex_lock(&xt[af].compat_mutex);
689 } 691 }
690 EXPORT_SYMBOL_GPL(xt_compat_lock); 692 EXPORT_SYMBOL_GPL(xt_compat_lock);
691 693
692 void xt_compat_unlock(u_int8_t af) 694 void xt_compat_unlock(u_int8_t af)
693 { 695 {
694 mutex_unlock(&xt[af].compat_mutex); 696 mutex_unlock(&xt[af].compat_mutex);
695 } 697 }
696 EXPORT_SYMBOL_GPL(xt_compat_unlock); 698 EXPORT_SYMBOL_GPL(xt_compat_unlock);
697 #endif 699 #endif
698 700
699 DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); 701 DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
700 EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); 702 EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
701 703
702 704
703 struct xt_table_info * 705 struct xt_table_info *
704 xt_replace_table(struct xt_table *table, 706 xt_replace_table(struct xt_table *table,
705 unsigned int num_counters, 707 unsigned int num_counters,
706 struct xt_table_info *newinfo, 708 struct xt_table_info *newinfo,
707 int *error) 709 int *error)
708 { 710 {
709 struct xt_table_info *private; 711 struct xt_table_info *private;
710 712
711 /* Do the substitution. */ 713 /* Do the substitution. */
712 local_bh_disable(); 714 local_bh_disable();
713 private = table->private; 715 private = table->private;
714 716
715 /* Check inside lock: is the old number correct? */ 717 /* Check inside lock: is the old number correct? */
716 if (num_counters != private->number) { 718 if (num_counters != private->number) {
717 duprintf("num_counters != table->private->number (%u/%u)\n", 719 duprintf("num_counters != table->private->number (%u/%u)\n",
718 num_counters, private->number); 720 num_counters, private->number);
719 local_bh_enable(); 721 local_bh_enable();
720 *error = -EAGAIN; 722 *error = -EAGAIN;
721 return NULL; 723 return NULL;
722 } 724 }
723 725
724 table->private = newinfo; 726 table->private = newinfo;
725 newinfo->initial_entries = private->initial_entries; 727 newinfo->initial_entries = private->initial_entries;
726 728
727 /* 729 /*
728 * Even though table entries have now been swapped, other CPU's 730 * Even though table entries have now been swapped, other CPU's
729 * may still be using the old entries. This is okay, because 731 * may still be using the old entries. This is okay, because
730 * resynchronization happens because of the locking done 732 * resynchronization happens because of the locking done
731 * during the get_counters() routine. 733 * during the get_counters() routine.
732 */ 734 */
733 local_bh_enable(); 735 local_bh_enable();
734 736
735 return private; 737 return private;
736 } 738 }
737 EXPORT_SYMBOL_GPL(xt_replace_table); 739 EXPORT_SYMBOL_GPL(xt_replace_table);
738 740
739 struct xt_table *xt_register_table(struct net *net, 741 struct xt_table *xt_register_table(struct net *net,
740 const struct xt_table *input_table, 742 const struct xt_table *input_table,
741 struct xt_table_info *bootstrap, 743 struct xt_table_info *bootstrap,
742 struct xt_table_info *newinfo) 744 struct xt_table_info *newinfo)
743 { 745 {
744 int ret; 746 int ret;
745 struct xt_table_info *private; 747 struct xt_table_info *private;
746 struct xt_table *t, *table; 748 struct xt_table *t, *table;
747 749
748 /* Don't add one object to multiple lists. */ 750 /* Don't add one object to multiple lists. */
749 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); 751 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
750 if (!table) { 752 if (!table) {
751 ret = -ENOMEM; 753 ret = -ENOMEM;
752 goto out; 754 goto out;
753 } 755 }
754 756
755 ret = mutex_lock_interruptible(&xt[table->af].mutex); 757 ret = mutex_lock_interruptible(&xt[table->af].mutex);
756 if (ret != 0) 758 if (ret != 0)
757 goto out_free; 759 goto out_free;
758 760
759 /* Don't autoload: we'd eat our tail... */ 761 /* Don't autoload: we'd eat our tail... */
760 list_for_each_entry(t, &net->xt.tables[table->af], list) { 762 list_for_each_entry(t, &net->xt.tables[table->af], list) {
761 if (strcmp(t->name, table->name) == 0) { 763 if (strcmp(t->name, table->name) == 0) {
762 ret = -EEXIST; 764 ret = -EEXIST;
763 goto unlock; 765 goto unlock;
764 } 766 }
765 } 767 }
766 768
767 /* Simplifies replace_table code. */ 769 /* Simplifies replace_table code. */
768 table->private = bootstrap; 770 table->private = bootstrap;
769 771
770 if (!xt_replace_table(table, 0, newinfo, &ret)) 772 if (!xt_replace_table(table, 0, newinfo, &ret))
771 goto unlock; 773 goto unlock;
772 774
773 private = table->private; 775 private = table->private;
774 duprintf("table->private->number = %u\n", private->number); 776 duprintf("table->private->number = %u\n", private->number);
775 777
776 /* save number of initial entries */ 778 /* save number of initial entries */
777 private->initial_entries = private->number; 779 private->initial_entries = private->number;
778 780
779 list_add(&table->list, &net->xt.tables[table->af]); 781 list_add(&table->list, &net->xt.tables[table->af]);
780 mutex_unlock(&xt[table->af].mutex); 782 mutex_unlock(&xt[table->af].mutex);
781 return table; 783 return table;
782 784
783 unlock: 785 unlock:
784 mutex_unlock(&xt[table->af].mutex); 786 mutex_unlock(&xt[table->af].mutex);
785 out_free: 787 out_free:
786 kfree(table); 788 kfree(table);
787 out: 789 out:
788 return ERR_PTR(ret); 790 return ERR_PTR(ret);
789 } 791 }
790 EXPORT_SYMBOL_GPL(xt_register_table); 792 EXPORT_SYMBOL_GPL(xt_register_table);
791 793
792 void *xt_unregister_table(struct xt_table *table) 794 void *xt_unregister_table(struct xt_table *table)
793 { 795 {
794 struct xt_table_info *private; 796 struct xt_table_info *private;
795 797
796 mutex_lock(&xt[table->af].mutex); 798 mutex_lock(&xt[table->af].mutex);
797 private = table->private; 799 private = table->private;
798 list_del(&table->list); 800 list_del(&table->list);
799 mutex_unlock(&xt[table->af].mutex); 801 mutex_unlock(&xt[table->af].mutex);
800 kfree(table); 802 kfree(table);
801 803
802 return private; 804 return private;
803 } 805 }
804 EXPORT_SYMBOL_GPL(xt_unregister_table); 806 EXPORT_SYMBOL_GPL(xt_unregister_table);
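
A condensed sketch of how ip_tables-style code drives the helpers exported above, register, replace, tear down; error handling is abbreviated and the entry translation step is only indicated by a comment, so treat this as an outline rather than the actual caller:

static struct xt_table *example_register(struct net *net,
					 const struct xt_table *templ,
					 unsigned int size)
{
	struct xt_table_info bootstrap = { 0 };	/* throwaway; swapped out at once */
	struct xt_table_info *newinfo;
	struct xt_table *table;

	newinfo = xt_alloc_table_info(size);	/* per-CPU entry areas */
	if (newinfo == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... copy and translate the initial entries into newinfo->entries[] ... */
	table = xt_register_table(net, templ, &bootstrap, newinfo);
	if (IS_ERR(table))
		xt_free_table_info(newinfo);
	return table;
}

static int example_replace(struct net *net, const char *name,
			   unsigned int num_counters,
			   struct xt_table_info *newinfo)
{
	struct xt_table *t;
	struct xt_table_info *old;
	int ret = 0;

	t = xt_find_table_lock(net, NFPROTO_IPV4, name); /* takes xt[af].mutex */
	if (t == NULL || IS_ERR(t))
		return t ? PTR_ERR(t) : -ENOENT;
	old = xt_replace_table(t, num_counters, newinfo, &ret);
	if (old == NULL)
		goto out_unlock;	/* newinfo stays owned by the caller */
	/* real callers copy the old counters out of 'old' before freeing it */
	xt_free_table_info(old);
out_unlock:
	xt_table_unlock(t);		/* drops xt[af].mutex */
	module_put(t->me);
	return ret;
}
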
805 807
806 #ifdef CONFIG_PROC_FS 808 #ifdef CONFIG_PROC_FS
807 struct xt_names_priv { 809 struct xt_names_priv {
808 struct seq_net_private p; 810 struct seq_net_private p;
809 u_int8_t af; 811 u_int8_t af;
810 }; 812 };
811 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) 813 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
812 { 814 {
813 struct xt_names_priv *priv = seq->private; 815 struct xt_names_priv *priv = seq->private;
814 struct net *net = seq_file_net(seq); 816 struct net *net = seq_file_net(seq);
815 u_int8_t af = priv->af; 817 u_int8_t af = priv->af;
816 818
817 mutex_lock(&xt[af].mutex); 819 mutex_lock(&xt[af].mutex);
818 return seq_list_start(&net->xt.tables[af], *pos); 820 return seq_list_start(&net->xt.tables[af], *pos);
819 } 821 }
820 822
821 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) 823 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
822 { 824 {
823 struct xt_names_priv *priv = seq->private; 825 struct xt_names_priv *priv = seq->private;
824 struct net *net = seq_file_net(seq); 826 struct net *net = seq_file_net(seq);
825 u_int8_t af = priv->af; 827 u_int8_t af = priv->af;
826 828
827 return seq_list_next(v, &net->xt.tables[af], pos); 829 return seq_list_next(v, &net->xt.tables[af], pos);
828 } 830 }
829 831
830 static void xt_table_seq_stop(struct seq_file *seq, void *v) 832 static void xt_table_seq_stop(struct seq_file *seq, void *v)
831 { 833 {
832 struct xt_names_priv *priv = seq->private; 834 struct xt_names_priv *priv = seq->private;
833 u_int8_t af = priv->af; 835 u_int8_t af = priv->af;
834 836
835 mutex_unlock(&xt[af].mutex); 837 mutex_unlock(&xt[af].mutex);
836 } 838 }
837 839
838 static int xt_table_seq_show(struct seq_file *seq, void *v) 840 static int xt_table_seq_show(struct seq_file *seq, void *v)
839 { 841 {
840 struct xt_table *table = list_entry(v, struct xt_table, list); 842 struct xt_table *table = list_entry(v, struct xt_table, list);
841 843
842 if (strlen(table->name)) 844 if (strlen(table->name))
843 return seq_printf(seq, "%s\n", table->name); 845 return seq_printf(seq, "%s\n", table->name);
844 else 846 else
845 return 0; 847 return 0;
846 } 848 }
847 849
848 static const struct seq_operations xt_table_seq_ops = { 850 static const struct seq_operations xt_table_seq_ops = {
849 .start = xt_table_seq_start, 851 .start = xt_table_seq_start,
850 .next = xt_table_seq_next, 852 .next = xt_table_seq_next,
851 .stop = xt_table_seq_stop, 853 .stop = xt_table_seq_stop,
852 .show = xt_table_seq_show, 854 .show = xt_table_seq_show,
853 }; 855 };
854 856
855 static int xt_table_open(struct inode *inode, struct file *file) 857 static int xt_table_open(struct inode *inode, struct file *file)
856 { 858 {
857 int ret; 859 int ret;
858 struct xt_names_priv *priv; 860 struct xt_names_priv *priv;
859 861
860 ret = seq_open_net(inode, file, &xt_table_seq_ops, 862 ret = seq_open_net(inode, file, &xt_table_seq_ops,
861 sizeof(struct xt_names_priv)); 863 sizeof(struct xt_names_priv));
862 if (!ret) { 864 if (!ret) {
863 priv = ((struct seq_file *)file->private_data)->private; 865 priv = ((struct seq_file *)file->private_data)->private;
864 priv->af = (unsigned long)PDE(inode)->data; 866 priv->af = (unsigned long)PDE(inode)->data;
865 } 867 }
866 return ret; 868 return ret;
867 } 869 }
868 870
869 static const struct file_operations xt_table_ops = { 871 static const struct file_operations xt_table_ops = {
870 .owner = THIS_MODULE, 872 .owner = THIS_MODULE,
871 .open = xt_table_open, 873 .open = xt_table_open,
872 .read = seq_read, 874 .read = seq_read,
873 .llseek = seq_lseek, 875 .llseek = seq_lseek,
874 .release = seq_release_net, 876 .release = seq_release_net,
875 }; 877 };
876 878
877 /* 879 /*
878 * Traverse state for ip{,6}_{tables,matches} for helping crossing 880 * Traverse state for ip{,6}_{tables,matches} for helping crossing
879 * the multi-AF mutexes. 881 * the multi-AF mutexes.
880 */ 882 */
881 struct nf_mttg_trav { 883 struct nf_mttg_trav {
882 struct list_head *head, *curr; 884 struct list_head *head, *curr;
883 uint8_t class, nfproto; 885 uint8_t class, nfproto;
884 }; 886 };
885 887
886 enum { 888 enum {
887 MTTG_TRAV_INIT, 889 MTTG_TRAV_INIT,
888 MTTG_TRAV_NFP_UNSPEC, 890 MTTG_TRAV_NFP_UNSPEC,
889 MTTG_TRAV_NFP_SPEC, 891 MTTG_TRAV_NFP_SPEC,
890 MTTG_TRAV_DONE, 892 MTTG_TRAV_DONE,
891 }; 893 };
892 894
893 static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, 895 static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
894 bool is_target) 896 bool is_target)
895 { 897 {
896 static const uint8_t next_class[] = { 898 static const uint8_t next_class[] = {
897 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, 899 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
898 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, 900 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
899 }; 901 };
900 struct nf_mttg_trav *trav = seq->private; 902 struct nf_mttg_trav *trav = seq->private;
901 903
902 switch (trav->class) { 904 switch (trav->class) {
903 case MTTG_TRAV_INIT: 905 case MTTG_TRAV_INIT:
904 trav->class = MTTG_TRAV_NFP_UNSPEC; 906 trav->class = MTTG_TRAV_NFP_UNSPEC;
905 mutex_lock(&xt[NFPROTO_UNSPEC].mutex); 907 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
906 trav->head = trav->curr = is_target ? 908 trav->head = trav->curr = is_target ?
907 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; 909 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
908 break; 910 break;
909 case MTTG_TRAV_NFP_UNSPEC: 911 case MTTG_TRAV_NFP_UNSPEC:
910 trav->curr = trav->curr->next; 912 trav->curr = trav->curr->next;
911 if (trav->curr != trav->head) 913 if (trav->curr != trav->head)
912 break; 914 break;
913 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); 915 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
914 mutex_lock(&xt[trav->nfproto].mutex); 916 mutex_lock(&xt[trav->nfproto].mutex);
915 trav->head = trav->curr = is_target ? 917 trav->head = trav->curr = is_target ?
916 &xt[trav->nfproto].target : &xt[trav->nfproto].match; 918 &xt[trav->nfproto].target : &xt[trav->nfproto].match;
917 trav->class = next_class[trav->class]; 919 trav->class = next_class[trav->class];
918 break; 920 break;
919 case MTTG_TRAV_NFP_SPEC: 921 case MTTG_TRAV_NFP_SPEC:
920 trav->curr = trav->curr->next; 922 trav->curr = trav->curr->next;
921 if (trav->curr != trav->head) 923 if (trav->curr != trav->head)
922 break; 924 break;
923 /* fallthru, _stop will unlock */ 925 /* fallthru, _stop will unlock */
924 default: 926 default:
925 return NULL; 927 return NULL;
926 } 928 }
927 929
928 if (ppos != NULL) 930 if (ppos != NULL)
929 ++*ppos; 931 ++*ppos;
930 return trav; 932 return trav;
931 } 933 }
932 934
933 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, 935 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
934 bool is_target) 936 bool is_target)
935 { 937 {
936 struct nf_mttg_trav *trav = seq->private; 938 struct nf_mttg_trav *trav = seq->private;
937 unsigned int j; 939 unsigned int j;
938 940
939 trav->class = MTTG_TRAV_INIT; 941 trav->class = MTTG_TRAV_INIT;
940 for (j = 0; j < *pos; ++j) 942 for (j = 0; j < *pos; ++j)
941 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) 943 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
942 return NULL; 944 return NULL;
943 return trav; 945 return trav;
944 } 946 }
945 947
946 static void xt_mttg_seq_stop(struct seq_file *seq, void *v) 948 static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
947 { 949 {
948 struct nf_mttg_trav *trav = seq->private; 950 struct nf_mttg_trav *trav = seq->private;
949 951
950 switch (trav->class) { 952 switch (trav->class) {
951 case MTTG_TRAV_NFP_UNSPEC: 953 case MTTG_TRAV_NFP_UNSPEC:
952 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); 954 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
953 break; 955 break;
954 case MTTG_TRAV_NFP_SPEC: 956 case MTTG_TRAV_NFP_SPEC:
955 mutex_unlock(&xt[trav->nfproto].mutex); 957 mutex_unlock(&xt[trav->nfproto].mutex);
956 break; 958 break;
957 } 959 }
958 } 960 }
959 961
960 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) 962 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
961 { 963 {
962 return xt_mttg_seq_start(seq, pos, false); 964 return xt_mttg_seq_start(seq, pos, false);
963 } 965 }
964 966
965 static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) 967 static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
966 { 968 {
967 return xt_mttg_seq_next(seq, v, ppos, false); 969 return xt_mttg_seq_next(seq, v, ppos, false);
968 } 970 }
969 971
970 static int xt_match_seq_show(struct seq_file *seq, void *v) 972 static int xt_match_seq_show(struct seq_file *seq, void *v)
971 { 973 {
972 const struct nf_mttg_trav *trav = seq->private; 974 const struct nf_mttg_trav *trav = seq->private;
973 const struct xt_match *match; 975 const struct xt_match *match;
974 976
975 switch (trav->class) { 977 switch (trav->class) {
976 case MTTG_TRAV_NFP_UNSPEC: 978 case MTTG_TRAV_NFP_UNSPEC:
977 case MTTG_TRAV_NFP_SPEC: 979 case MTTG_TRAV_NFP_SPEC:
978 if (trav->curr == trav->head) 980 if (trav->curr == trav->head)
979 return 0; 981 return 0;
980 match = list_entry(trav->curr, struct xt_match, list); 982 match = list_entry(trav->curr, struct xt_match, list);
981 return (*match->name == '\0') ? 0 : 983 return (*match->name == '\0') ? 0 :
982 seq_printf(seq, "%s\n", match->name); 984 seq_printf(seq, "%s\n", match->name);
983 } 985 }
984 return 0; 986 return 0;
985 } 987 }
986 988
987 static const struct seq_operations xt_match_seq_ops = { 989 static const struct seq_operations xt_match_seq_ops = {
988 .start = xt_match_seq_start, 990 .start = xt_match_seq_start,
989 .next = xt_match_seq_next, 991 .next = xt_match_seq_next,
990 .stop = xt_mttg_seq_stop, 992 .stop = xt_mttg_seq_stop,
991 .show = xt_match_seq_show, 993 .show = xt_match_seq_show,
992 }; 994 };
993 995
994 static int xt_match_open(struct inode *inode, struct file *file) 996 static int xt_match_open(struct inode *inode, struct file *file)
995 { 997 {
996 struct seq_file *seq; 998 struct seq_file *seq;
997 struct nf_mttg_trav *trav; 999 struct nf_mttg_trav *trav;
998 int ret; 1000 int ret;
999 1001
1000 trav = kmalloc(sizeof(*trav), GFP_KERNEL); 1002 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1001 if (trav == NULL) 1003 if (trav == NULL)
1002 return -ENOMEM; 1004 return -ENOMEM;
1003 1005
1004 ret = seq_open(file, &xt_match_seq_ops); 1006 ret = seq_open(file, &xt_match_seq_ops);
1005 if (ret < 0) { 1007 if (ret < 0) {
1006 kfree(trav); 1008 kfree(trav);
1007 return ret; 1009 return ret;
1008 } 1010 }
1009 1011
1010 seq = file->private_data; 1012 seq = file->private_data;
1011 seq->private = trav; 1013 seq->private = trav;
1012 trav->nfproto = (unsigned long)PDE(inode)->data; 1014 trav->nfproto = (unsigned long)PDE(inode)->data;
1013 return 0; 1015 return 0;
1014 } 1016 }
1015 1017
1016 static const struct file_operations xt_match_ops = { 1018 static const struct file_operations xt_match_ops = {
1017 .owner = THIS_MODULE, 1019 .owner = THIS_MODULE,
1018 .open = xt_match_open, 1020 .open = xt_match_open,
1019 .read = seq_read, 1021 .read = seq_read,
1020 .llseek = seq_lseek, 1022 .llseek = seq_lseek,
1021 .release = seq_release_private, 1023 .release = seq_release_private,
1022 }; 1024 };
1023 1025
1024 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) 1026 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1025 { 1027 {
1026 return xt_mttg_seq_start(seq, pos, true); 1028 return xt_mttg_seq_start(seq, pos, true);
1027 } 1029 }
1028 1030
1029 static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) 1031 static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1030 { 1032 {
1031 return xt_mttg_seq_next(seq, v, ppos, true); 1033 return xt_mttg_seq_next(seq, v, ppos, true);
1032 } 1034 }
1033 1035
1034 static int xt_target_seq_show(struct seq_file *seq, void *v) 1036 static int xt_target_seq_show(struct seq_file *seq, void *v)
1035 { 1037 {
1036 const struct nf_mttg_trav *trav = seq->private; 1038 const struct nf_mttg_trav *trav = seq->private;
1037 const struct xt_target *target; 1039 const struct xt_target *target;
1038 1040
1039 switch (trav->class) { 1041 switch (trav->class) {
1040 case MTTG_TRAV_NFP_UNSPEC: 1042 case MTTG_TRAV_NFP_UNSPEC:
1041 case MTTG_TRAV_NFP_SPEC: 1043 case MTTG_TRAV_NFP_SPEC:
1042 if (trav->curr == trav->head) 1044 if (trav->curr == trav->head)
1043 return 0; 1045 return 0;
1044 target = list_entry(trav->curr, struct xt_target, list); 1046 target = list_entry(trav->curr, struct xt_target, list);
1045 return (*target->name == '\0') ? 0 : 1047 return (*target->name == '\0') ? 0 :
1046 seq_printf(seq, "%s\n", target->name); 1048 seq_printf(seq, "%s\n", target->name);
1047 } 1049 }
1048 return 0; 1050 return 0;
1049 } 1051 }
1050 1052
1051 static const struct seq_operations xt_target_seq_ops = { 1053 static const struct seq_operations xt_target_seq_ops = {
1052 .start = xt_target_seq_start, 1054 .start = xt_target_seq_start,
1053 .next = xt_target_seq_next, 1055 .next = xt_target_seq_next,
1054 .stop = xt_mttg_seq_stop, 1056 .stop = xt_mttg_seq_stop,
1055 .show = xt_target_seq_show, 1057 .show = xt_target_seq_show,
1056 }; 1058 };
1057 1059
1058 static int xt_target_open(struct inode *inode, struct file *file) 1060 static int xt_target_open(struct inode *inode, struct file *file)
1059 { 1061 {
1060 struct seq_file *seq; 1062 struct seq_file *seq;
1061 struct nf_mttg_trav *trav; 1063 struct nf_mttg_trav *trav;
1062 int ret; 1064 int ret;
1063 1065
1064 trav = kmalloc(sizeof(*trav), GFP_KERNEL); 1066 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1065 if (trav == NULL) 1067 if (trav == NULL)
1066 return -ENOMEM; 1068 return -ENOMEM;
1067 1069
1068 ret = seq_open(file, &xt_target_seq_ops); 1070 ret = seq_open(file, &xt_target_seq_ops);
1069 if (ret < 0) { 1071 if (ret < 0) {
1070 kfree(trav); 1072 kfree(trav);
1071 return ret; 1073 return ret;
1072 } 1074 }
1073 1075
1074 seq = file->private_data; 1076 seq = file->private_data;
1075 seq->private = trav; 1077 seq->private = trav;
1076 trav->nfproto = (unsigned long)PDE(inode)->data; 1078 trav->nfproto = (unsigned long)PDE(inode)->data;
1077 return 0; 1079 return 0;
1078 } 1080 }
1079 1081
1080 static const struct file_operations xt_target_ops = { 1082 static const struct file_operations xt_target_ops = {
1081 .owner = THIS_MODULE, 1083 .owner = THIS_MODULE,
1082 .open = xt_target_open, 1084 .open = xt_target_open,
1083 .read = seq_read, 1085 .read = seq_read,
1084 .llseek = seq_lseek, 1086 .llseek = seq_lseek,
1085 .release = seq_release_private, 1087 .release = seq_release_private,
1086 }; 1088 };
1087 1089
1088 #define FORMAT_TABLES "_tables_names" 1090 #define FORMAT_TABLES "_tables_names"
1089 #define FORMAT_MATCHES "_tables_matches" 1091 #define FORMAT_MATCHES "_tables_matches"
1090 #define FORMAT_TARGETS "_tables_targets" 1092 #define FORMAT_TARGETS "_tables_targets"
1091 1093
1092 #endif /* CONFIG_PROC_FS */ 1094 #endif /* CONFIG_PROC_FS */
1093 1095
1094 /** 1096 /**
1095 * xt_hook_link - set up hooks for a new table 1097 * xt_hook_link - set up hooks for a new table
1096 * @table: table with metadata needed to set up hooks 1098 * @table: table with metadata needed to set up hooks
1097 * @fn: Hook function 1099 * @fn: Hook function
1098 * 1100 *
1099 * This function will take care of creating and registering the necessary 1101 * This function will take care of creating and registering the necessary
1100 * Netfilter hooks for XT tables. 1102 * Netfilter hooks for XT tables.
1101 */ 1103 */
1102 struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn) 1104 struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1103 { 1105 {
1104 unsigned int hook_mask = table->valid_hooks; 1106 unsigned int hook_mask = table->valid_hooks;
1105 uint8_t i, num_hooks = hweight32(hook_mask); 1107 uint8_t i, num_hooks = hweight32(hook_mask);
1106 uint8_t hooknum; 1108 uint8_t hooknum;
1107 struct nf_hook_ops *ops; 1109 struct nf_hook_ops *ops;
1108 int ret; 1110 int ret;
1109 1111
1110 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); 1112 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1111 if (ops == NULL) 1113 if (ops == NULL)
1112 return ERR_PTR(-ENOMEM); 1114 return ERR_PTR(-ENOMEM);
1113 1115
1114 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; 1116 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1115 hook_mask >>= 1, ++hooknum) { 1117 hook_mask >>= 1, ++hooknum) {
1116 if (!(hook_mask & 1)) 1118 if (!(hook_mask & 1))
1117 continue; 1119 continue;
1118 ops[i].hook = fn; 1120 ops[i].hook = fn;
1119 ops[i].owner = table->me; 1121 ops[i].owner = table->me;
1120 ops[i].pf = table->af; 1122 ops[i].pf = table->af;
1121 ops[i].hooknum = hooknum; 1123 ops[i].hooknum = hooknum;
1122 ops[i].priority = table->priority; 1124 ops[i].priority = table->priority;
1123 ++i; 1125 ++i;
1124 } 1126 }
1125 1127
1126 ret = nf_register_hooks(ops, num_hooks); 1128 ret = nf_register_hooks(ops, num_hooks);
1127 if (ret < 0) { 1129 if (ret < 0) {
1128 kfree(ops); 1130 kfree(ops);
1129 return ERR_PTR(ret); 1131 return ERR_PTR(ret);
1130 } 1132 }
1131 1133
1132 return ops; 1134 return ops;
1133 } 1135 }
1134 EXPORT_SYMBOL_GPL(xt_hook_link); 1136 EXPORT_SYMBOL_GPL(xt_hook_link);
1135 1137
1136 /** 1138 /**
1137 * xt_hook_unlink - remove hooks for a table 1139 * xt_hook_unlink - remove hooks for a table
1138 * @table: table whose valid_hooks mask was given to xt_hook_link 1140 * @table: table whose valid_hooks mask was given to xt_hook_link
1139 * @ops: nf_hook_ops array as returned by xt_hook_link 1141 * @ops: nf_hook_ops array as returned by xt_hook_link
1140 */ 1142 */
1141 void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops) 1143 void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1142 { 1144 {
1143 nf_unregister_hooks(ops, hweight32(table->valid_hooks)); 1145 nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1144 kfree(ops); 1146 kfree(ops);
1145 } 1147 }
1146 EXPORT_SYMBOL_GPL(xt_hook_unlink); 1148 EXPORT_SYMBOL_GPL(xt_hook_unlink);
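
A short sketch (hypothetical names) of how a table module pairs the two helpers: the number and placement of hooks come entirely from the template's valid_hooks, af and priority fields, so the module only supplies the hook function. The nf_hookfn prototype used here is the one of this kernel generation.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static struct nf_hook_ops *example_ops __read_mostly;

/* hypothetical table template; valid_hooks decides how many ops get allocated */
static const struct xt_table example_table = {
	.name        = "example",
	.af          = NFPROTO_IPV4,
	.valid_hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT),
	.priority    = NF_IP_PRI_FILTER,
	.me          = THIS_MODULE,
};

static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* a real table module would run its ruleset on skb here */
	return NF_ACCEPT;
}

static int __init example_init(void)
{
	/* ... register the per-net table first ... */
	example_ops = xt_hook_link(&example_table, example_hook);
	if (IS_ERR(example_ops))
		return PTR_ERR(example_ops);
	return 0;
}

static void __exit example_exit(void)
{
	xt_hook_unlink(&example_table, example_ops);
	/* ... then unregister the per-net table ... */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
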
1147 1149
1148 int xt_proto_init(struct net *net, u_int8_t af) 1150 int xt_proto_init(struct net *net, u_int8_t af)
1149 { 1151 {
1150 #ifdef CONFIG_PROC_FS 1152 #ifdef CONFIG_PROC_FS
1151 char buf[XT_FUNCTION_MAXNAMELEN]; 1153 char buf[XT_FUNCTION_MAXNAMELEN];
1152 struct proc_dir_entry *proc; 1154 struct proc_dir_entry *proc;
1153 #endif 1155 #endif
1154 1156
1155 if (af >= ARRAY_SIZE(xt_prefix)) 1157 if (af >= ARRAY_SIZE(xt_prefix))
1156 return -EINVAL; 1158 return -EINVAL;
1157 1159
1158 1160
1159 #ifdef CONFIG_PROC_FS 1161 #ifdef CONFIG_PROC_FS
1160 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1162 strlcpy(buf, xt_prefix[af], sizeof(buf));
1161 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1163 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1162 proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, 1164 proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
1163 (void *)(unsigned long)af); 1165 (void *)(unsigned long)af);
1164 if (!proc) 1166 if (!proc)
1165 goto out; 1167 goto out;
1166 1168
1167 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1169 strlcpy(buf, xt_prefix[af], sizeof(buf));
1168 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1170 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1169 proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, 1171 proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
1170 (void *)(unsigned long)af); 1172 (void *)(unsigned long)af);
1171 if (!proc) 1173 if (!proc)
1172 goto out_remove_tables; 1174 goto out_remove_tables;
1173 1175
1174 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1176 strlcpy(buf, xt_prefix[af], sizeof(buf));
1175 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1177 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1176 proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, 1178 proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
1177 (void *)(unsigned long)af); 1179 (void *)(unsigned long)af);
1178 if (!proc) 1180 if (!proc)
1179 goto out_remove_matches; 1181 goto out_remove_matches;
1180 #endif 1182 #endif
1181 1183
1182 return 0; 1184 return 0;
1183 1185
1184 #ifdef CONFIG_PROC_FS 1186 #ifdef CONFIG_PROC_FS
1185 out_remove_matches: 1187 out_remove_matches:
1186 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1188 strlcpy(buf, xt_prefix[af], sizeof(buf));
1187 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1189 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1188 proc_net_remove(net, buf); 1190 proc_net_remove(net, buf);
1189 1191
1190 out_remove_tables: 1192 out_remove_tables:
1191 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1193 strlcpy(buf, xt_prefix[af], sizeof(buf));
1192 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1194 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1193 proc_net_remove(net, buf); 1195 proc_net_remove(net, buf);
1194 out: 1196 out:
1195 return -1; 1197 return -1;
1196 #endif 1198 #endif
1197 } 1199 }
1198 EXPORT_SYMBOL_GPL(xt_proto_init); 1200 EXPORT_SYMBOL_GPL(xt_proto_init);
1199 1201
1200 void xt_proto_fini(struct net *net, u_int8_t af) 1202 void xt_proto_fini(struct net *net, u_int8_t af)
1201 { 1203 {
1202 #ifdef CONFIG_PROC_FS 1204 #ifdef CONFIG_PROC_FS
1203 char buf[XT_FUNCTION_MAXNAMELEN]; 1205 char buf[XT_FUNCTION_MAXNAMELEN];
1204 1206
1205 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1207 strlcpy(buf, xt_prefix[af], sizeof(buf));
1206 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1208 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1207 proc_net_remove(net, buf); 1209 proc_net_remove(net, buf);
1208 1210
1209 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1211 strlcpy(buf, xt_prefix[af], sizeof(buf));
1210 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1212 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1211 proc_net_remove(net, buf); 1213 proc_net_remove(net, buf);
1212 1214
1213 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1215 strlcpy(buf, xt_prefix[af], sizeof(buf));
1214 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1216 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1215 proc_net_remove(net, buf); 1217 proc_net_remove(net, buf);
1216 #endif /*CONFIG_PROC_FS*/ 1218 #endif /*CONFIG_PROC_FS*/
1217 } 1219 }
1218 EXPORT_SYMBOL_GPL(xt_proto_fini); 1220 EXPORT_SYMBOL_GPL(xt_proto_fini);
1219 1221
1220 static int __net_init xt_net_init(struct net *net) 1222 static int __net_init xt_net_init(struct net *net)
1221 { 1223 {
1222 int i; 1224 int i;
1223 1225
1224 for (i = 0; i < NFPROTO_NUMPROTO; i++) 1226 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1225 INIT_LIST_HEAD(&net->xt.tables[i]); 1227 INIT_LIST_HEAD(&net->xt.tables[i]);
1226 return 0; 1228 return 0;
1227 } 1229 }
1228 1230
1229 static struct pernet_operations xt_net_ops = { 1231 static struct pernet_operations xt_net_ops = {
1230 .init = xt_net_init, 1232 .init = xt_net_init,
1231 }; 1233 };
1232 1234
1233 static int __init xt_init(void) 1235 static int __init xt_init(void)
1234 { 1236 {
1235 unsigned int i; 1237 unsigned int i;
1236 int rv; 1238 int rv;
1237 1239
1238 for_each_possible_cpu(i) { 1240 for_each_possible_cpu(i) {
1239 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1241 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
1240 spin_lock_init(&lock->lock); 1242 spin_lock_init(&lock->lock);
1241 lock->readers = 0; 1243 lock->readers = 0;
1242 } 1244 }
1243 1245
1244 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1246 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1245 if (!xt) 1247 if (!xt)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
1248 for (i = 0; i < NFPROTO_NUMPROTO; i++) { 1250 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1249 mutex_init(&xt[i].mutex); 1251 mutex_init(&xt[i].mutex);
1250 #ifdef CONFIG_COMPAT 1252 #ifdef CONFIG_COMPAT
1251 mutex_init(&xt[i].compat_mutex); 1253 mutex_init(&xt[i].compat_mutex);
1252 xt[i].compat_offsets = NULL; 1254 xt[i].compat_offsets = NULL;
1253 #endif 1255 #endif
1254 INIT_LIST_HEAD(&xt[i].target); 1256 INIT_LIST_HEAD(&xt[i].target);
1255 INIT_LIST_HEAD(&xt[i].match); 1257 INIT_LIST_HEAD(&xt[i].match);
1256 } 1258 }
1257 rv = register_pernet_subsys(&xt_net_ops); 1259 rv = register_pernet_subsys(&xt_net_ops);
1258 if (rv < 0) 1260 if (rv < 0)
1259 kfree(xt); 1261 kfree(xt);
1260 return rv; 1262 return rv;
1261 } 1263 }
1262 1264
1263 static void __exit xt_fini(void) 1265 static void __exit xt_fini(void)
1264 { 1266 {
1265 unregister_pernet_subsys(&xt_net_ops); 1267 unregister_pernet_subsys(&xt_net_ops);
1266 kfree(xt); 1268 kfree(xt);
1267 } 1269 }
1268 1270
1269 module_init(xt_init); 1271 module_init(xt_init);
1270 module_exit(xt_fini); 1272 module_exit(xt_fini);
1271 1273
1272 1274
net/netfilter/xt_repldata.h
File was created 1 /*
2 * Today's hack: quantum tunneling in structs
3 *
4 * 'entries' and 'term' are never anywhere referenced by word in code. In fact,
5 * they serve as the hanging-off data accessed through repl.data[].
6 */
7
8 #define xt_alloc_initial_table(type, typ2) ({ \
9 unsigned int hook_mask = info->valid_hooks; \
10 unsigned int nhooks = hweight32(hook_mask); \
11 unsigned int bytes = 0, hooknum = 0, i = 0; \
12 struct { \
13 struct type##_replace repl; \
14 struct type##_standard entries[nhooks]; \
15 struct type##_error term; \
16 } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
17 if (tbl == NULL) \
18 return NULL; \
19 strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
20 tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
21 tbl->repl.valid_hooks = hook_mask; \
22 tbl->repl.num_entries = nhooks + 1; \
23 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
24 sizeof(struct type##_error); \
25 for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \
26 if (!(hook_mask & 1)) \
27 continue; \
28 tbl->repl.hook_entry[hooknum] = bytes; \
29 tbl->repl.underflow[hooknum] = bytes; \
30 tbl->entries[i++] = (struct type##_standard) \
31 typ2##_STANDARD_INIT(NF_ACCEPT); \
32 bytes += sizeof(struct type##_standard); \
33 } \
34 tbl; \
35 })
36
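
To see how the macro is meant to be consumed: it expects an identifier named info in scope (it reads info->valid_hooks and info->name directly) and returns NULL from the enclosing function on allocation failure, so each protocol wraps it in a tiny allocator, and the per-net init path feeds the result to its table registration instead of carrying a static initial table. The sketch below mirrors the pattern the per-protocol modules adopt elsewhere in this commit, but names are abbreviated and it is not a verbatim copy.

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/netfilter_ipv4/ip_tables.h>

/* wrapper in ip_tables.c: the parameter must be called 'info', the macro uses it */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}

/* per-net init of a table module, replacing its old static initial table;
 * 'packet_filter' stands in for the module's xt_table template. */
static int __net_init example_table_net_init(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	/* default verdicts of individual hook entries can be tweaked here */
	net->ipv4.iptable_filter =
		ipt_register_table(net, &packet_filter, repl);
	kfree(repl);	/* registration copies the entries, the blob can go */
	if (IS_ERR(net->ipv4.iptable_filter))
		return PTR_ERR(net->ipv4.iptable_filter);
	return 0;
}

Because the ACCEPT-everything table is built here at namespace setup time, the large static initial-table arrays can be deleted, which is where the 7735-byte saving in the commit message comes from.
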