Commit 654d0fbdc8fe1041918741ed5b6abc8ad6b4c1d8
Committed by
Patrick McHardy
1 parent
af5676039a
Exists in
master
and in
4 other branches
netfilter: cleanup printk messages
Make sure all printk messages have a severity level.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Showing 24 changed files with 64 additions and 64 deletions Inline Diff
- net/ipv4/netfilter/arp_tables.c
- net/ipv4/netfilter/ip_tables.c
- net/ipv4/netfilter/iptable_filter.c
- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
- net/ipv4/netfilter/nf_nat_h323.c
- net/ipv4/netfilter/nf_nat_snmp_basic.c
- net/ipv4/netfilter/nf_nat_standalone.c
- net/ipv6/netfilter/ip6_tables.c
- net/ipv6/netfilter/ip6table_filter.c
- net/ipv6/netfilter/ip6table_mangle.c
- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
- net/netfilter/nf_conntrack_amanda.c
- net/netfilter/nf_conntrack_core.c
- net/netfilter/nf_conntrack_ftp.c
- net/netfilter/nf_conntrack_h323_main.c
- net/netfilter/nf_conntrack_irc.c
- net/netfilter/nf_conntrack_netlink.c
- net/netfilter/nf_conntrack_proto_sctp.c
- net/netfilter/nf_conntrack_sip.c
- net/netfilter/nf_conntrack_standalone.c
- net/netfilter/nf_conntrack_tftp.c
- net/netfilter/nf_internals.h
- net/netfilter/nfnetlink.c
- net/netfilter/nfnetlink_log.c
net/ipv4/netfilter/arp_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code for ARP packets. | 2 | * Packet matching code for ARP packets. |
3 | * | 3 | * |
4 | * Based heavily, if not almost entirely, upon ip_tables.c framework. | 4 | * Based heavily, if not almost entirely, upon ip_tables.c framework. |
5 | * | 5 | * |
6 | * Some ARP specific bits are: | 6 | * Some ARP specific bits are: |
7 | * | 7 | * |
8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) | 8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
16 | #include <linux/if_arp.h> | 16 | #include <linux/if_arp.h> |
17 | #include <linux/kmod.h> | 17 | #include <linux/kmod.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <net/compat.h> | 24 | #include <net/compat.h> |
25 | #include <net/sock.h> | 25 | #include <net/sock.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | #include <linux/netfilter_arp/arp_tables.h> | 29 | #include <linux/netfilter_arp/arp_tables.h> |
30 | #include "../../netfilter/xt_repldata.h" | 30 | #include "../../netfilter/xt_repldata.h" |
31 | 31 | ||
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); | 33 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); |
34 | MODULE_DESCRIPTION("arptables core"); | 34 | MODULE_DESCRIPTION("arptables core"); |
35 | 35 | ||
36 | /*#define DEBUG_ARP_TABLES*/ | 36 | /*#define DEBUG_ARP_TABLES*/ |
37 | /*#define DEBUG_ARP_TABLES_USER*/ | 37 | /*#define DEBUG_ARP_TABLES_USER*/ |
38 | 38 | ||
39 | #ifdef DEBUG_ARP_TABLES | 39 | #ifdef DEBUG_ARP_TABLES |
40 | #define dprintf(format, args...) printk(format , ## args) | 40 | #define dprintf(format, args...) printk(format , ## args) |
41 | #else | 41 | #else |
42 | #define dprintf(format, args...) | 42 | #define dprintf(format, args...) |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #ifdef DEBUG_ARP_TABLES_USER | 45 | #ifdef DEBUG_ARP_TABLES_USER |
46 | #define duprintf(format, args...) printk(format , ## args) | 46 | #define duprintf(format, args...) printk(format , ## args) |
47 | #else | 47 | #else |
48 | #define duprintf(format, args...) | 48 | #define duprintf(format, args...) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #ifdef CONFIG_NETFILTER_DEBUG | 51 | #ifdef CONFIG_NETFILTER_DEBUG |
52 | #define ARP_NF_ASSERT(x) WARN_ON(!(x)) | 52 | #define ARP_NF_ASSERT(x) WARN_ON(!(x)) |
53 | #else | 53 | #else |
54 | #define ARP_NF_ASSERT(x) | 54 | #define ARP_NF_ASSERT(x) |
55 | #endif | 55 | #endif |
56 | 56 | ||
/* Build the initial (boilerplate) rule blob for an arptables table.
 * Delegates to the xt_alloc_initial_table() helper with the
 * arptables-specific token prefixes (arpt/ARPT).  Caller owns the
 * returned memory. */
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
62 | 62 | ||
63 | static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, | 63 | static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, |
64 | const char *hdr_addr, int len) | 64 | const char *hdr_addr, int len) |
65 | { | 65 | { |
66 | int i, ret; | 66 | int i, ret; |
67 | 67 | ||
68 | if (len > ARPT_DEV_ADDR_LEN_MAX) | 68 | if (len > ARPT_DEV_ADDR_LEN_MAX) |
69 | len = ARPT_DEV_ADDR_LEN_MAX; | 69 | len = ARPT_DEV_ADDR_LEN_MAX; |
70 | 70 | ||
71 | ret = 0; | 71 | ret = 0; |
72 | for (i = 0; i < len; i++) | 72 | for (i = 0; i < len; i++) |
73 | ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; | 73 | ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; |
74 | 74 | ||
75 | return (ret != 0); | 75 | return (ret != 0); |
76 | } | 76 | } |
77 | 77 | ||
/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care; unrolling the loop is a win on them.
 * For other arches, we only have a 16-bit alignment.
 */
/* Masked interface-name comparison.  Returns 0 when @_a matches @_b
 * under @_mask, non-zero otherwise.  On arches with efficient
 * unaligned access this uses the word-wide helper; elsewhere it walks
 * the names in 16-bit chunks because only 16-bit alignment of the
 * buffers is guaranteed (see comment above). */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	/* OR-accumulate masked differences across all IFNAMSIZ bytes. */
	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}
99 | 99 | ||
/* Returns whether packet matches rule or not (1 = match, 0 = no match).
 * Every test below is a masked comparison whose result can be flipped
 * by the corresponding ARPT_INV_* flag in @arpinfo->invflags. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	/* The variable-length addresses start right after the fixed header. */
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

/* Evaluate @bool, inverting the result when the rule's @invflg bit is set. */
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	/* ARP operation code (request/reply/...), masked. */
	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	/* Hardware address format, masked. */
	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	/* Protocol address format, masked. */
	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	/* Hardware address length, masked. */
	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	/* Walk the payload: sender hw addr, sender IP, target hw addr,
	 * target IP.  Hardware address length comes from the receiving
	 * device; the IPs are memcpy'd out because the payload is not
	 * guaranteed to be 32-bit aligned. */
	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	/* Masked device (hardware) address checks; compare returns 0 on match. */
	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");

		return 0;
	}

	/* Masked source/target IP checks. */
	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; ifname_compare() returns 0 on match. */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* All tests passed. */
	return 1;
#undef FWINV
}
204 | 204 | ||
/* Sanity-check the match portion of a user-supplied rule: reject any
 * flag or inversion-flag bits outside the masks this code understands.
 * Returns 1 if the rule is acceptable, 0 otherwise. */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}
220 | 220 | ||
221 | static unsigned int | 221 | static unsigned int |
222 | arpt_error(struct sk_buff *skb, const struct xt_action_param *par) | 222 | arpt_error(struct sk_buff *skb, const struct xt_action_param *par) |
223 | { | 223 | { |
224 | if (net_ratelimit()) | 224 | if (net_ratelimit()) |
225 | printk("arp_tables: error: '%s'\n", | 225 | pr_err("arp_tables: error: '%s'\n", |
226 | (const char *)par->targinfo); | 226 | (const char *)par->targinfo); |
227 | 227 | ||
228 | return NF_DROP; | 228 | return NF_DROP; |
229 | } | 229 | } |
230 | 230 | ||
/* const-friendly wrapper around arpt_get_target(): the underlying
 * helper takes a non-const entry, so cast const away for the call and
 * re-apply it on the returned target pointer. */
static inline const struct arpt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}
236 | 236 | ||
/* Translate a byte @offset within a rule blob @base into an entry
 * pointer.  (Arithmetic on void * is a GCC extension the kernel
 * relies on throughout.) */
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
242 | 242 | ||
/* Return the entry that immediately follows @entry in the blob;
 * entries are variable-sized, so step by next_offset. */
static inline __pure
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
248 | 248 | ||
/* Run @skb through the rules of @table registered for @hook and return
 * the netfilter verdict (NF_DROP, NF_ACCEPT, ...).  Holds the per-cpu
 * xt_info read lock for the whole traversal; jumps/returns between
 * user-defined chains are tracked with a back-pointer stack stored in
 * the entries' comefrom fields. */
unsigned int arpt_do_table(struct sk_buff *skb,
			   unsigned int hook,
			   const struct net_device *in,
			   const struct net_device *out,
			   struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, *back;
	const char *indev, *outdev;
	void *table_base;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Make sure the full ARP header is in the linear skb area. */
	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;

	xt_info_rdlock_bh();
	private = table->private;
	/* Rule blobs are replicated per-cpu; use this cpu's copy. */
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);
	back = get_entry(table_base, private->underflow[hook]);

	acpar.in = in;
	acpar.out = out;
	acpar.hooknum = hook;
	acpar.family = NFPROTO_ARP;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct arpt_entry_target *t;
		int hdr_len;

		/* Non-matching rule: just move to the next one. */
		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		/* Account bytes and packets against the matching rule. */
		hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
			(2 * skb->dev->addr_len);
		ADD_COUNTER(e->counters, hdr_len, 1);

		t = arpt_get_target_c(e);

		/* Standard target? (no target function => verdict/jump) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct arpt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != ARPT_RETURN) {
					/* Negative verdicts encode NF_* values as -(v)-1. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				/* Save old back ptr in next entry */
				struct arpt_entry *next = arpt_next_entry(e);
				next->comefrom = (void *)back - table_base;

				/* set back pointer to next entry */
				back = next;
			}

			/* Jump to the chain at offset v. */
			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		 * abs. verdicts
		 */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		/* Target might have changed stuff. */
		arp = arp_hdr(skb);

		if (verdict == ARPT_CONTINUE)
			e = arpt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_info_rdunlock_bh();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}
351 | 351 | ||
/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_arp *arp)
{
	static const struct arpt_arp uncond;

	/* Compare against a zero-initialized template: any set address,
	 * mask, interface or flag makes the rule conditional. */
	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
}
359 | 359 | ||
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 *
 * Walks every chain reachable from each valid hook entry point,
 * recording the originating hook in each entry's comefrom bitmap.
 * Bit NF_ARP_NUMHOOKS marks "currently on the walk path", so seeing
 * it again means a loop.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct arpt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				/* NOTE(review): pr_fmt() at the top of this
				 * file already prepends KBUILD_MODNAME, so
				 * this likely prints a doubled module prefix
				 * ("arp_tables: arptables: ...") — confirm
				 * and consider dropping the literal prefix. */
				pr_notice("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			/* Mark: reached from this hook, and on the path. */
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct arpt_entry) &&
			    (strcmp(t->target.u.user.name,
				    ARPT_STANDARD_TARGET) == 0) &&
			    t->verdict < 0 && unconditional(&e->arp)) ||
			    visited) {
				unsigned int oldpos, size;

				/* Reject verdicts more negative than any NF_* value. */
				if ((strcmp(t->target.u.user.name,
					    ARPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
						t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					/* Leaving this entry: clear the on-path bit
					 * and restore its packet counter. */
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   ARPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must lie inside the blob. */
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				/* Save where we came from for backtracking. */
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
467 | 467 | ||
468 | static inline int check_entry(const struct arpt_entry *e, const char *name) | 468 | static inline int check_entry(const struct arpt_entry *e, const char *name) |
469 | { | 469 | { |
470 | const struct arpt_entry_target *t; | 470 | const struct arpt_entry_target *t; |
471 | 471 | ||
472 | if (!arp_checkentry(&e->arp)) { | 472 | if (!arp_checkentry(&e->arp)) { |
473 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); | 473 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); |
474 | return -EINVAL; | 474 | return -EINVAL; |
475 | } | 475 | } |
476 | 476 | ||
477 | if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) | 477 | if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) |
478 | return -EINVAL; | 478 | return -EINVAL; |
479 | 479 | ||
480 | t = arpt_get_target_c(e); | 480 | t = arpt_get_target_c(e); |
481 | if (e->target_offset + t->u.target_size > e->next_offset) | 481 | if (e->target_offset + t->u.target_size > e->next_offset) |
482 | return -EINVAL; | 482 | return -EINVAL; |
483 | 483 | ||
484 | return 0; | 484 | return 0; |
485 | } | 485 | } |
486 | 486 | ||
487 | static inline int check_target(struct arpt_entry *e, const char *name) | 487 | static inline int check_target(struct arpt_entry *e, const char *name) |
488 | { | 488 | { |
489 | struct arpt_entry_target *t = arpt_get_target(e); | 489 | struct arpt_entry_target *t = arpt_get_target(e); |
490 | int ret; | 490 | int ret; |
491 | struct xt_tgchk_param par = { | 491 | struct xt_tgchk_param par = { |
492 | .table = name, | 492 | .table = name, |
493 | .entryinfo = e, | 493 | .entryinfo = e, |
494 | .target = t->u.kernel.target, | 494 | .target = t->u.kernel.target, |
495 | .targinfo = t->data, | 495 | .targinfo = t->data, |
496 | .hook_mask = e->comefrom, | 496 | .hook_mask = e->comefrom, |
497 | .family = NFPROTO_ARP, | 497 | .family = NFPROTO_ARP, |
498 | }; | 498 | }; |
499 | 499 | ||
500 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); | 500 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); |
501 | if (ret < 0) { | 501 | if (ret < 0) { |
502 | duprintf("arp_tables: check failed for `%s'.\n", | 502 | duprintf("arp_tables: check failed for `%s'.\n", |
503 | t->u.kernel.target->name); | 503 | t->u.kernel.target->name); |
504 | return ret; | 504 | return ret; |
505 | } | 505 | } |
506 | return 0; | 506 | return 0; |
507 | } | 507 | } |
508 | 508 | ||
509 | static inline int | 509 | static inline int |
510 | find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | 510 | find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) |
511 | { | 511 | { |
512 | struct arpt_entry_target *t; | 512 | struct arpt_entry_target *t; |
513 | struct xt_target *target; | 513 | struct xt_target *target; |
514 | int ret; | 514 | int ret; |
515 | 515 | ||
516 | ret = check_entry(e, name); | 516 | ret = check_entry(e, name); |
517 | if (ret) | 517 | if (ret) |
518 | return ret; | 518 | return ret; |
519 | 519 | ||
520 | t = arpt_get_target(e); | 520 | t = arpt_get_target(e); |
521 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, | 521 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
522 | t->u.user.revision); | 522 | t->u.user.revision); |
523 | if (IS_ERR(target)) { | 523 | if (IS_ERR(target)) { |
524 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 524 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
525 | ret = PTR_ERR(target); | 525 | ret = PTR_ERR(target); |
526 | goto out; | 526 | goto out; |
527 | } | 527 | } |
528 | t->u.kernel.target = target; | 528 | t->u.kernel.target = target; |
529 | 529 | ||
530 | ret = check_target(e, name); | 530 | ret = check_target(e, name); |
531 | if (ret) | 531 | if (ret) |
532 | goto err; | 532 | goto err; |
533 | return 0; | 533 | return 0; |
534 | err: | 534 | err: |
535 | module_put(t->u.kernel.target->me); | 535 | module_put(t->u.kernel.target->me); |
536 | out: | 536 | out: |
537 | return ret; | 537 | return ret; |
538 | } | 538 | } |
539 | 539 | ||
540 | static bool check_underflow(const struct arpt_entry *e) | 540 | static bool check_underflow(const struct arpt_entry *e) |
541 | { | 541 | { |
542 | const struct arpt_entry_target *t; | 542 | const struct arpt_entry_target *t; |
543 | unsigned int verdict; | 543 | unsigned int verdict; |
544 | 544 | ||
545 | if (!unconditional(&e->arp)) | 545 | if (!unconditional(&e->arp)) |
546 | return false; | 546 | return false; |
547 | t = arpt_get_target_c(e); | 547 | t = arpt_get_target_c(e); |
548 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 548 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
549 | return false; | 549 | return false; |
550 | verdict = ((struct arpt_standard_target *)t)->verdict; | 550 | verdict = ((struct arpt_standard_target *)t)->verdict; |
551 | verdict = -verdict - 1; | 551 | verdict = -verdict - 1; |
552 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 552 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
553 | } | 553 | } |
554 | 554 | ||
/* First-pass validation of one entry in the user-supplied blob: check
 * alignment and minimum size, and record in @newinfo where each valid
 * hook's chain begins (hook_entry) and where its policy sits (underflow).
 * Returns 0 on success, -EINVAL on any malformed entry.
 */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;

	/* Entry must be properly aligned and must leave room for at least
	 * one full arpt_entry before the end of the blob. */
	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover the entry header plus a target header. */
	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct arpt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		/* Offsets are matched against the positions userspace
		 * claimed for each hook's first entry and policy entry. */
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
600 | 600 | ||
601 | static inline void cleanup_entry(struct arpt_entry *e) | 601 | static inline void cleanup_entry(struct arpt_entry *e) |
602 | { | 602 | { |
603 | struct xt_tgdtor_param par; | 603 | struct xt_tgdtor_param par; |
604 | struct arpt_entry_target *t; | 604 | struct arpt_entry_target *t; |
605 | 605 | ||
606 | t = arpt_get_target(e); | 606 | t = arpt_get_target(e); |
607 | par.target = t->u.kernel.target; | 607 | par.target = t->u.kernel.target; |
608 | par.targinfo = t->data; | 608 | par.targinfo = t->data; |
609 | par.family = NFPROTO_ARP; | 609 | par.family = NFPROTO_ARP; |
610 | if (par.target->destroy != NULL) | 610 | if (par.target->destroy != NULL) |
611 | par.target->destroy(&par); | 611 | par.target->destroy(&par); |
612 | module_put(par.target->me); | 612 | module_put(par.target->me); |
613 | } | 613 | } |
614 | 614 | ||
/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 *
 * Performs several passes over the entry blob in @entry0:
 *   1. per-entry size/alignment checks, recording hook entry points;
 *   2. verifying that every valid hook and underflow was found;
 *   3. loop detection via mark_source_chains();
 *   4. resolving and checking each entry's target (find_check_entry),
 *      with rollback of already-checked entries on failure;
 * and finally replicates the translated blob to every other CPU's copy.
 * Returns 0 on success or a negative errno.
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		/* Each ERROR target marks a user-defined chain boundary;
		 * count them to size the per-CPU jump stack. */
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	/* Entry count must match what userspace claimed. */
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		/* 0xFFFFFFFF means pass 1 never matched this offset. */
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back: clean up only the i entries that were
		 * successfully checked before the failure. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
709 | 709 | ||
/* Snapshot packet/byte counters from every CPU's copy of the table into
 * @counters.  The local CPU's values are written with SET_COUNTER (no
 * prior memset needed); every other CPU's values are accumulated with
 * ADD_COUNTER under that CPU's xt_info write lock.
 */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		/* Exclude concurrent updaters on that CPU while we read. */
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
749 | 749 | ||
750 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 750 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
751 | { | 751 | { |
752 | unsigned int countersize; | 752 | unsigned int countersize; |
753 | struct xt_counters *counters; | 753 | struct xt_counters *counters; |
754 | const struct xt_table_info *private = table->private; | 754 | const struct xt_table_info *private = table->private; |
755 | 755 | ||
756 | /* We need atomic snapshot of counters: rest doesn't change | 756 | /* We need atomic snapshot of counters: rest doesn't change |
757 | * (other than comefrom, which userspace doesn't care | 757 | * (other than comefrom, which userspace doesn't care |
758 | * about). | 758 | * about). |
759 | */ | 759 | */ |
760 | countersize = sizeof(struct xt_counters) * private->number; | 760 | countersize = sizeof(struct xt_counters) * private->number; |
761 | counters = vmalloc_node(countersize, numa_node_id()); | 761 | counters = vmalloc_node(countersize, numa_node_id()); |
762 | 762 | ||
763 | if (counters == NULL) | 763 | if (counters == NULL) |
764 | return ERR_PTR(-ENOMEM); | 764 | return ERR_PTR(-ENOMEM); |
765 | 765 | ||
766 | get_counters(private, counters); | 766 | get_counters(private, counters); |
767 | 767 | ||
768 | return counters; | 768 | return counters; |
769 | } | 769 | } |
770 | 770 | ||
/* Copy the table's rules out to userspace: the raw blob is copied first,
 * then each entry's counters and target name are patched in place inside
 * the user buffer (the kernel blob holds per-CPU counter state and the
 * kernel-resolved target, not what userspace expects to read back).
 * Returns 0 on success, -EFAULT on any failed copy.
 */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct arpt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		/* Overwrite the counters field at the same offset inside
		 * the user copy with the aggregated snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace the kernel target pointer area with the target's
		 * canonical name so userspace sees a portable identifier. */
		t = arpt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct arpt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
822 | 822 | ||
823 | #ifdef CONFIG_COMPAT | 823 | #ifdef CONFIG_COMPAT |
824 | static void compat_standard_from_user(void *dst, const void *src) | 824 | static void compat_standard_from_user(void *dst, const void *src) |
825 | { | 825 | { |
826 | int v = *(compat_int_t *)src; | 826 | int v = *(compat_int_t *)src; |
827 | 827 | ||
828 | if (v > 0) | 828 | if (v > 0) |
829 | v += xt_compat_calc_jump(NFPROTO_ARP, v); | 829 | v += xt_compat_calc_jump(NFPROTO_ARP, v); |
830 | memcpy(dst, &v, sizeof(v)); | 830 | memcpy(dst, &v, sizeof(v)); |
831 | } | 831 | } |
832 | 832 | ||
833 | static int compat_standard_to_user(void __user *dst, const void *src) | 833 | static int compat_standard_to_user(void __user *dst, const void *src) |
834 | { | 834 | { |
835 | compat_int_t cv = *(int *)src; | 835 | compat_int_t cv = *(int *)src; |
836 | 836 | ||
837 | if (cv > 0) | 837 | if (cv > 0) |
838 | cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); | 838 | cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); |
839 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 839 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
840 | } | 840 | } |
841 | 841 | ||
/* Compute how much smaller this entry is in compat layout (entry header
 * delta plus the target's compat delta), subtract it from the running
 * compat size, register the per-entry offset for later jump fixups, and
 * shift every hook_entry/underflow that lies beyond this entry.
 * Returns 0 on success or the error from xt_compat_add_offset().
 */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct arpt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Hooks located after this entry move down by off bytes
		 * in the compat image. */
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
870 | 870 | ||
871 | static int compat_table_info(const struct xt_table_info *info, | 871 | static int compat_table_info(const struct xt_table_info *info, |
872 | struct xt_table_info *newinfo) | 872 | struct xt_table_info *newinfo) |
873 | { | 873 | { |
874 | struct arpt_entry *iter; | 874 | struct arpt_entry *iter; |
875 | void *loc_cpu_entry; | 875 | void *loc_cpu_entry; |
876 | int ret; | 876 | int ret; |
877 | 877 | ||
878 | if (!newinfo || !info) | 878 | if (!newinfo || !info) |
879 | return -EINVAL; | 879 | return -EINVAL; |
880 | 880 | ||
881 | /* we dont care about newinfo->entries[] */ | 881 | /* we dont care about newinfo->entries[] */ |
882 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | 882 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); |
883 | newinfo->initial_entries = 0; | 883 | newinfo->initial_entries = 0; |
884 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; | 884 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; |
885 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { | 885 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { |
886 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); | 886 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); |
887 | if (ret != 0) | 887 | if (ret != 0) |
888 | return ret; | 888 | return ret; |
889 | } | 889 | } |
890 | return 0; | 890 | return 0; |
891 | } | 891 | } |
892 | #endif | 892 | #endif |
893 | 893 | ||
/* ARPT_SO_GET_INFO handler: look up the named table (loading its module
 * on demand) and copy its metadata (hooks, underflows, entry count,
 * size) to userspace.  In the compat path the metadata is first recomputed
 * for the compat layout under xt_compat_lock.  Returns 0 or a negative
 * errno (-EINVAL bad length, -EFAULT bad pointer, -ENOENT no such table).
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[ARPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace data: force NUL termination before using the name. */
	name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-adjusted sizes/offsets instead. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		/* Release the per-table lock and the module reference the
		 * lookup took. */
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}
952 | 952 | ||
/* ARPT_SO_GET_ENTRIES handler: validate the userspace-supplied buffer
 * length against the table's current size and copy the rules out via
 * copy_entries_to_user().  Returns -EAGAIN when the table changed size
 * since the preceding get_info() (userspace must retry), or another
 * negative errno on failure.
 */
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* The buffer must be exactly header + the size userspace claims. */
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced since get_info(); sizes no
			 * longer match, so ask userspace to retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
993 | 993 | ||
/*
 * Swap the table's ruleset for @newinfo and report the old per-rule
 * counters back to userspace.
 *
 * On success the old xt_table_info is torn down (entry destructors run,
 * memory freed).  On failure @newinfo is left intact for the caller to
 * free.  Returns 0 or a negative errno.
 */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	/* Buffer for the snapshot of the old counters handed to userspace. */
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Find the table, autoloading the arptable_<name> module if needed. */
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop the reference once (or twice when the rule count crosses back
	 * to the built-in set) to balance the gets taken per non-initial
	 * ruleset. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	/* A failed copy still leaves the new table installed; only the
	 * returned counters are lost. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1068 | 1068 | ||
1069 | static int do_replace(struct net *net, const void __user *user, | 1069 | static int do_replace(struct net *net, const void __user *user, |
1070 | unsigned int len) | 1070 | unsigned int len) |
1071 | { | 1071 | { |
1072 | int ret; | 1072 | int ret; |
1073 | struct arpt_replace tmp; | 1073 | struct arpt_replace tmp; |
1074 | struct xt_table_info *newinfo; | 1074 | struct xt_table_info *newinfo; |
1075 | void *loc_cpu_entry; | 1075 | void *loc_cpu_entry; |
1076 | struct arpt_entry *iter; | 1076 | struct arpt_entry *iter; |
1077 | 1077 | ||
1078 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1078 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1079 | return -EFAULT; | 1079 | return -EFAULT; |
1080 | 1080 | ||
1081 | /* overflow check */ | 1081 | /* overflow check */ |
1082 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1082 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1083 | return -ENOMEM; | 1083 | return -ENOMEM; |
1084 | 1084 | ||
1085 | newinfo = xt_alloc_table_info(tmp.size); | 1085 | newinfo = xt_alloc_table_info(tmp.size); |
1086 | if (!newinfo) | 1086 | if (!newinfo) |
1087 | return -ENOMEM; | 1087 | return -ENOMEM; |
1088 | 1088 | ||
1089 | /* choose the copy that is on our node/cpu */ | 1089 | /* choose the copy that is on our node/cpu */ |
1090 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1090 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1091 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1091 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1092 | tmp.size) != 0) { | 1092 | tmp.size) != 0) { |
1093 | ret = -EFAULT; | 1093 | ret = -EFAULT; |
1094 | goto free_newinfo; | 1094 | goto free_newinfo; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | ret = translate_table(newinfo, loc_cpu_entry, &tmp); | 1097 | ret = translate_table(newinfo, loc_cpu_entry, &tmp); |
1098 | if (ret != 0) | 1098 | if (ret != 0) |
1099 | goto free_newinfo; | 1099 | goto free_newinfo; |
1100 | 1100 | ||
1101 | duprintf("arp_tables: Translated table\n"); | 1101 | duprintf("arp_tables: Translated table\n"); |
1102 | 1102 | ||
1103 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1103 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1104 | tmp.num_counters, tmp.counters); | 1104 | tmp.num_counters, tmp.counters); |
1105 | if (ret) | 1105 | if (ret) |
1106 | goto free_newinfo_untrans; | 1106 | goto free_newinfo_untrans; |
1107 | return 0; | 1107 | return 0; |
1108 | 1108 | ||
1109 | free_newinfo_untrans: | 1109 | free_newinfo_untrans: |
1110 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1110 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1111 | cleanup_entry(iter); | 1111 | cleanup_entry(iter); |
1112 | free_newinfo: | 1112 | free_newinfo: |
1113 | xt_free_table_info(newinfo); | 1113 | xt_free_table_info(newinfo); |
1114 | return ret; | 1114 | return ret; |
1115 | } | 1115 | } |
1116 | 1116 | ||
/*
 * ARPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet deltas to
 * every rule's counters, in rule order.
 *
 * @compat selects the 32-bit layout of struct xt_counters_info (the
 * counter records that follow it share one layout in both ABIs).
 * Returns 0 or a negative errno.
 */
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct arpt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	/* NOTE(review): name is copied verbatim from userspace and is not
	 * explicitly NUL-terminated here — confirm the table lookup below
	 * is safe with a full-length, unterminated name. */

	/* The header must be followed by exactly num_counters records. */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* BHs off while we update the live per-cpu ruleset in place. */
	local_bh_disable();
	private = t->private;
	/* One delta per rule, or the request is malformed/stale. */
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1203 | 1203 | ||
1204 | #ifdef CONFIG_COMPAT | 1204 | #ifdef CONFIG_COMPAT |
1205 | static inline void compat_release_entry(struct compat_arpt_entry *e) | 1205 | static inline void compat_release_entry(struct compat_arpt_entry *e) |
1206 | { | 1206 | { |
1207 | struct arpt_entry_target *t; | 1207 | struct arpt_entry_target *t; |
1208 | 1208 | ||
1209 | t = compat_arpt_get_target(e); | 1209 | t = compat_arpt_get_target(e); |
1210 | module_put(t->u.kernel.target->me); | 1210 | module_put(t->u.kernel.target->me); |
1211 | } | 1211 | } |
1212 | 1212 | ||
/*
 * Pass-1 validation of a single compat entry: verify alignment and
 * bounds, look up the target (taking a module reference that
 * compat_release_entry() must later drop on error paths), accumulate
 * the compat->native size delta into *size, and record hook/underflow
 * positions in @newinfo.  Returns 0 or a negative errno.
 */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be aligned and leave room for at least its header. */
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* next_offset must cover at least the entry plus one target. */
	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e, name);
	if (ret)
		return ret;

	/* How much larger the native entry will be than the compat one. */
	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember this entry's delta for the pass-2 offset fixups. */
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 release_target:
	module_put(t->u.kernel.target->me);
 out:
	return ret;
}
1285 | 1285 | ||
1286 | static int | 1286 | static int |
1287 | compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, | 1287 | compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, |
1288 | unsigned int *size, const char *name, | 1288 | unsigned int *size, const char *name, |
1289 | struct xt_table_info *newinfo, unsigned char *base) | 1289 | struct xt_table_info *newinfo, unsigned char *base) |
1290 | { | 1290 | { |
1291 | struct arpt_entry_target *t; | 1291 | struct arpt_entry_target *t; |
1292 | struct xt_target *target; | 1292 | struct xt_target *target; |
1293 | struct arpt_entry *de; | 1293 | struct arpt_entry *de; |
1294 | unsigned int origsize; | 1294 | unsigned int origsize; |
1295 | int ret, h; | 1295 | int ret, h; |
1296 | 1296 | ||
1297 | ret = 0; | 1297 | ret = 0; |
1298 | origsize = *size; | 1298 | origsize = *size; |
1299 | de = (struct arpt_entry *)*dstptr; | 1299 | de = (struct arpt_entry *)*dstptr; |
1300 | memcpy(de, e, sizeof(struct arpt_entry)); | 1300 | memcpy(de, e, sizeof(struct arpt_entry)); |
1301 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); | 1301 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); |
1302 | 1302 | ||
1303 | *dstptr += sizeof(struct arpt_entry); | 1303 | *dstptr += sizeof(struct arpt_entry); |
1304 | *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); | 1304 | *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); |
1305 | 1305 | ||
1306 | de->target_offset = e->target_offset - (origsize - *size); | 1306 | de->target_offset = e->target_offset - (origsize - *size); |
1307 | t = compat_arpt_get_target(e); | 1307 | t = compat_arpt_get_target(e); |
1308 | target = t->u.kernel.target; | 1308 | target = t->u.kernel.target; |
1309 | xt_compat_target_from_user(t, dstptr, size); | 1309 | xt_compat_target_from_user(t, dstptr, size); |
1310 | 1310 | ||
1311 | de->next_offset = e->next_offset - (origsize - *size); | 1311 | de->next_offset = e->next_offset - (origsize - *size); |
1312 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 1312 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
1313 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) | 1313 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) |
1314 | newinfo->hook_entry[h] -= origsize - *size; | 1314 | newinfo->hook_entry[h] -= origsize - *size; |
1315 | if ((unsigned char *)de - base < newinfo->underflow[h]) | 1315 | if ((unsigned char *)de - base < newinfo->underflow[h]) |
1316 | newinfo->underflow[h] -= origsize - *size; | 1316 | newinfo->underflow[h] -= origsize - *size; |
1317 | } | 1317 | } |
1318 | return ret; | 1318 | return ret; |
1319 | } | 1319 | } |
1320 | 1320 | ||
/*
 * Translate a whole compat ruleset into the native layout.
 *
 * Two passes: first validate every compat entry and accumulate the
 * native size (taking one target-module reference per entry), then
 * allocate the native table, convert the entries, and re-run the full
 * native checks (mark_source_chains()/check_target()).
 *
 * On success *pinfo/*pentry0 are replaced by the native table and the
 * old (compat) info is freed.  On failure every module reference taken
 * in pass 1 is released.  Returns 0 or a negative errno.
 */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* counts entries validated (= module refs taken) so far */
	xt_compat_lock(NFPROTO_ARP);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the compat->native growth of every entry. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	/* Pass 2: copy/convert every entry into the native table. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = check_target(iter1, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		/* NOTE(review): this walk iterates the compat entries in
		 * entry0 but is bounded by newinfo->size (the larger,
		 * translated size); termination relies solely on the j
		 * counter — verify against later upstream rewrites. */
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	/* Release the module refs taken for the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
 out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	goto out;
}
1472 | 1472 | ||
/*
 * 32-bit-userspace image of struct arpt_replace as received via the
 * compat ARPT_SO_SET_REPLACE path.  Field meanings match the native
 * structure; only sizes/alignment of the entries and pointers differ.
 */
struct compat_arpt_replace {
	char name[ARPT_TABLE_MAXNAMELEN];	/* table to replace */
	u32 valid_hooks;			/* bitmask of hooked chains */
	u32 num_entries;			/* rule count in entries[] */
	u32 size;				/* byte size of entries[] */
	u32 hook_entry[NF_ARP_NUMHOOKS];	/* per-hook start offsets */
	u32 underflow[NF_ARP_NUMHOOKS];		/* per-hook underflow offsets */
	u32 num_counters;			/* counters userspace expects back */
	compat_uptr_t counters;			/* user buffer for old counters */
	struct compat_arpt_entry entries[0];	/* the compat-layout rules */
};
1484 | 1484 | ||
1485 | static int compat_do_replace(struct net *net, void __user *user, | 1485 | static int compat_do_replace(struct net *net, void __user *user, |
1486 | unsigned int len) | 1486 | unsigned int len) |
1487 | { | 1487 | { |
1488 | int ret; | 1488 | int ret; |
1489 | struct compat_arpt_replace tmp; | 1489 | struct compat_arpt_replace tmp; |
1490 | struct xt_table_info *newinfo; | 1490 | struct xt_table_info *newinfo; |
1491 | void *loc_cpu_entry; | 1491 | void *loc_cpu_entry; |
1492 | struct arpt_entry *iter; | 1492 | struct arpt_entry *iter; |
1493 | 1493 | ||
1494 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1494 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1495 | return -EFAULT; | 1495 | return -EFAULT; |
1496 | 1496 | ||
1497 | /* overflow check */ | 1497 | /* overflow check */ |
1498 | if (tmp.size >= INT_MAX / num_possible_cpus()) | 1498 | if (tmp.size >= INT_MAX / num_possible_cpus()) |
1499 | return -ENOMEM; | 1499 | return -ENOMEM; |
1500 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1500 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1501 | return -ENOMEM; | 1501 | return -ENOMEM; |
1502 | 1502 | ||
1503 | newinfo = xt_alloc_table_info(tmp.size); | 1503 | newinfo = xt_alloc_table_info(tmp.size); |
1504 | if (!newinfo) | 1504 | if (!newinfo) |
1505 | return -ENOMEM; | 1505 | return -ENOMEM; |
1506 | 1506 | ||
1507 | /* choose the copy that is on our node/cpu */ | 1507 | /* choose the copy that is on our node/cpu */ |
1508 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1508 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1509 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { | 1509 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { |
1510 | ret = -EFAULT; | 1510 | ret = -EFAULT; |
1511 | goto free_newinfo; | 1511 | goto free_newinfo; |
1512 | } | 1512 | } |
1513 | 1513 | ||
1514 | ret = translate_compat_table(tmp.name, tmp.valid_hooks, | 1514 | ret = translate_compat_table(tmp.name, tmp.valid_hooks, |
1515 | &newinfo, &loc_cpu_entry, tmp.size, | 1515 | &newinfo, &loc_cpu_entry, tmp.size, |
1516 | tmp.num_entries, tmp.hook_entry, | 1516 | tmp.num_entries, tmp.hook_entry, |
1517 | tmp.underflow); | 1517 | tmp.underflow); |
1518 | if (ret != 0) | 1518 | if (ret != 0) |
1519 | goto free_newinfo; | 1519 | goto free_newinfo; |
1520 | 1520 | ||
1521 | duprintf("compat_do_replace: Translated table\n"); | 1521 | duprintf("compat_do_replace: Translated table\n"); |
1522 | 1522 | ||
1523 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1523 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1524 | tmp.num_counters, compat_ptr(tmp.counters)); | 1524 | tmp.num_counters, compat_ptr(tmp.counters)); |
1525 | if (ret) | 1525 | if (ret) |
1526 | goto free_newinfo_untrans; | 1526 | goto free_newinfo_untrans; |
1527 | return 0; | 1527 | return 0; |
1528 | 1528 | ||
1529 | free_newinfo_untrans: | 1529 | free_newinfo_untrans: |
1530 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1530 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1531 | cleanup_entry(iter); | 1531 | cleanup_entry(iter); |
1532 | free_newinfo: | 1532 | free_newinfo: |
1533 | xt_free_table_info(newinfo); | 1533 | xt_free_table_info(newinfo); |
1534 | return ret; | 1534 | return ret; |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, | 1537 | static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, |
1538 | unsigned int len) | 1538 | unsigned int len) |
1539 | { | 1539 | { |
1540 | int ret; | 1540 | int ret; |
1541 | 1541 | ||
1542 | if (!capable(CAP_NET_ADMIN)) | 1542 | if (!capable(CAP_NET_ADMIN)) |
1543 | return -EPERM; | 1543 | return -EPERM; |
1544 | 1544 | ||
1545 | switch (cmd) { | 1545 | switch (cmd) { |
1546 | case ARPT_SO_SET_REPLACE: | 1546 | case ARPT_SO_SET_REPLACE: |
1547 | ret = compat_do_replace(sock_net(sk), user, len); | 1547 | ret = compat_do_replace(sock_net(sk), user, len); |
1548 | break; | 1548 | break; |
1549 | 1549 | ||
1550 | case ARPT_SO_SET_ADD_COUNTERS: | 1550 | case ARPT_SO_SET_ADD_COUNTERS: |
1551 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1551 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1552 | break; | 1552 | break; |
1553 | 1553 | ||
1554 | default: | 1554 | default: |
1555 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); | 1555 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); |
1556 | ret = -EINVAL; | 1556 | ret = -EINVAL; |
1557 | } | 1557 | } |
1558 | 1558 | ||
1559 | return ret; | 1559 | return ret; |
1560 | } | 1560 | } |
1561 | 1561 | ||
1562 | static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, | 1562 | static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, |
1563 | compat_uint_t *size, | 1563 | compat_uint_t *size, |
1564 | struct xt_counters *counters, | 1564 | struct xt_counters *counters, |
1565 | unsigned int i) | 1565 | unsigned int i) |
1566 | { | 1566 | { |
1567 | struct arpt_entry_target *t; | 1567 | struct arpt_entry_target *t; |
1568 | struct compat_arpt_entry __user *ce; | 1568 | struct compat_arpt_entry __user *ce; |
1569 | u_int16_t target_offset, next_offset; | 1569 | u_int16_t target_offset, next_offset; |
1570 | compat_uint_t origsize; | 1570 | compat_uint_t origsize; |
1571 | int ret; | 1571 | int ret; |
1572 | 1572 | ||
1573 | origsize = *size; | 1573 | origsize = *size; |
1574 | ce = (struct compat_arpt_entry __user *)*dstptr; | 1574 | ce = (struct compat_arpt_entry __user *)*dstptr; |
1575 | if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || | 1575 | if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || |
1576 | copy_to_user(&ce->counters, &counters[i], | 1576 | copy_to_user(&ce->counters, &counters[i], |
1577 | sizeof(counters[i])) != 0) | 1577 | sizeof(counters[i])) != 0) |
1578 | return -EFAULT; | 1578 | return -EFAULT; |
1579 | 1579 | ||
1580 | *dstptr += sizeof(struct compat_arpt_entry); | 1580 | *dstptr += sizeof(struct compat_arpt_entry); |
1581 | *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); | 1581 | *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); |
1582 | 1582 | ||
1583 | target_offset = e->target_offset - (origsize - *size); | 1583 | target_offset = e->target_offset - (origsize - *size); |
1584 | 1584 | ||
1585 | t = arpt_get_target(e); | 1585 | t = arpt_get_target(e); |
1586 | ret = xt_compat_target_to_user(t, dstptr, size); | 1586 | ret = xt_compat_target_to_user(t, dstptr, size); |
1587 | if (ret) | 1587 | if (ret) |
1588 | return ret; | 1588 | return ret; |
1589 | next_offset = e->next_offset - (origsize - *size); | 1589 | next_offset = e->next_offset - (origsize - *size); |
1590 | if (put_user(target_offset, &ce->target_offset) != 0 || | 1590 | if (put_user(target_offset, &ce->target_offset) != 0 || |
1591 | put_user(next_offset, &ce->next_offset) != 0) | 1591 | put_user(next_offset, &ce->next_offset) != 0) |
1592 | return -EFAULT; | 1592 | return -EFAULT; |
1593 | return 0; | 1593 | return 0; |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | static int compat_copy_entries_to_user(unsigned int total_size, | 1596 | static int compat_copy_entries_to_user(unsigned int total_size, |
1597 | struct xt_table *table, | 1597 | struct xt_table *table, |
1598 | void __user *userptr) | 1598 | void __user *userptr) |
1599 | { | 1599 | { |
1600 | struct xt_counters *counters; | 1600 | struct xt_counters *counters; |
1601 | const struct xt_table_info *private = table->private; | 1601 | const struct xt_table_info *private = table->private; |
1602 | void __user *pos; | 1602 | void __user *pos; |
1603 | unsigned int size; | 1603 | unsigned int size; |
1604 | int ret = 0; | 1604 | int ret = 0; |
1605 | void *loc_cpu_entry; | 1605 | void *loc_cpu_entry; |
1606 | unsigned int i = 0; | 1606 | unsigned int i = 0; |
1607 | struct arpt_entry *iter; | 1607 | struct arpt_entry *iter; |
1608 | 1608 | ||
1609 | counters = alloc_counters(table); | 1609 | counters = alloc_counters(table); |
1610 | if (IS_ERR(counters)) | 1610 | if (IS_ERR(counters)) |
1611 | return PTR_ERR(counters); | 1611 | return PTR_ERR(counters); |
1612 | 1612 | ||
1613 | /* choose the copy on our node/cpu */ | 1613 | /* choose the copy on our node/cpu */ |
1614 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1614 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
1615 | pos = userptr; | 1615 | pos = userptr; |
1616 | size = total_size; | 1616 | size = total_size; |
1617 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { | 1617 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { |
1618 | ret = compat_copy_entry_to_user(iter, &pos, | 1618 | ret = compat_copy_entry_to_user(iter, &pos, |
1619 | &size, counters, i++); | 1619 | &size, counters, i++); |
1620 | if (ret != 0) | 1620 | if (ret != 0) |
1621 | break; | 1621 | break; |
1622 | } | 1622 | } |
1623 | vfree(counters); | 1623 | vfree(counters); |
1624 | return ret; | 1624 | return ret; |
1625 | } | 1625 | } |
1626 | 1626 | ||
1627 | struct compat_arpt_get_entries { | 1627 | struct compat_arpt_get_entries { |
1628 | char name[ARPT_TABLE_MAXNAMELEN]; | 1628 | char name[ARPT_TABLE_MAXNAMELEN]; |
1629 | compat_uint_t size; | 1629 | compat_uint_t size; |
1630 | struct compat_arpt_entry entrytable[0]; | 1630 | struct compat_arpt_entry entrytable[0]; |
1631 | }; | 1631 | }; |
1632 | 1632 | ||
1633 | static int compat_get_entries(struct net *net, | 1633 | static int compat_get_entries(struct net *net, |
1634 | struct compat_arpt_get_entries __user *uptr, | 1634 | struct compat_arpt_get_entries __user *uptr, |
1635 | int *len) | 1635 | int *len) |
1636 | { | 1636 | { |
1637 | int ret; | 1637 | int ret; |
1638 | struct compat_arpt_get_entries get; | 1638 | struct compat_arpt_get_entries get; |
1639 | struct xt_table *t; | 1639 | struct xt_table *t; |
1640 | 1640 | ||
1641 | if (*len < sizeof(get)) { | 1641 | if (*len < sizeof(get)) { |
1642 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); | 1642 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); |
1643 | return -EINVAL; | 1643 | return -EINVAL; |
1644 | } | 1644 | } |
1645 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1645 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1646 | return -EFAULT; | 1646 | return -EFAULT; |
1647 | if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { | 1647 | if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { |
1648 | duprintf("compat_get_entries: %u != %zu\n", | 1648 | duprintf("compat_get_entries: %u != %zu\n", |
1649 | *len, sizeof(get) + get.size); | 1649 | *len, sizeof(get) + get.size); |
1650 | return -EINVAL; | 1650 | return -EINVAL; |
1651 | } | 1651 | } |
1652 | 1652 | ||
1653 | xt_compat_lock(NFPROTO_ARP); | 1653 | xt_compat_lock(NFPROTO_ARP); |
1654 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 1654 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
1655 | if (t && !IS_ERR(t)) { | 1655 | if (t && !IS_ERR(t)) { |
1656 | const struct xt_table_info *private = t->private; | 1656 | const struct xt_table_info *private = t->private; |
1657 | struct xt_table_info info; | 1657 | struct xt_table_info info; |
1658 | 1658 | ||
1659 | duprintf("t->private->number = %u\n", private->number); | 1659 | duprintf("t->private->number = %u\n", private->number); |
1660 | ret = compat_table_info(private, &info); | 1660 | ret = compat_table_info(private, &info); |
1661 | if (!ret && get.size == info.size) { | 1661 | if (!ret && get.size == info.size) { |
1662 | ret = compat_copy_entries_to_user(private->size, | 1662 | ret = compat_copy_entries_to_user(private->size, |
1663 | t, uptr->entrytable); | 1663 | t, uptr->entrytable); |
1664 | } else if (!ret) { | 1664 | } else if (!ret) { |
1665 | duprintf("compat_get_entries: I've got %u not %u!\n", | 1665 | duprintf("compat_get_entries: I've got %u not %u!\n", |
1666 | private->size, get.size); | 1666 | private->size, get.size); |
1667 | ret = -EAGAIN; | 1667 | ret = -EAGAIN; |
1668 | } | 1668 | } |
1669 | xt_compat_flush_offsets(NFPROTO_ARP); | 1669 | xt_compat_flush_offsets(NFPROTO_ARP); |
1670 | module_put(t->me); | 1670 | module_put(t->me); |
1671 | xt_table_unlock(t); | 1671 | xt_table_unlock(t); |
1672 | } else | 1672 | } else |
1673 | ret = t ? PTR_ERR(t) : -ENOENT; | 1673 | ret = t ? PTR_ERR(t) : -ENOENT; |
1674 | 1674 | ||
1675 | xt_compat_unlock(NFPROTO_ARP); | 1675 | xt_compat_unlock(NFPROTO_ARP); |
1676 | return ret; | 1676 | return ret; |
1677 | } | 1677 | } |
1678 | 1678 | ||
1679 | static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); | 1679 | static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); |
1680 | 1680 | ||
1681 | static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, | 1681 | static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, |
1682 | int *len) | 1682 | int *len) |
1683 | { | 1683 | { |
1684 | int ret; | 1684 | int ret; |
1685 | 1685 | ||
1686 | if (!capable(CAP_NET_ADMIN)) | 1686 | if (!capable(CAP_NET_ADMIN)) |
1687 | return -EPERM; | 1687 | return -EPERM; |
1688 | 1688 | ||
1689 | switch (cmd) { | 1689 | switch (cmd) { |
1690 | case ARPT_SO_GET_INFO: | 1690 | case ARPT_SO_GET_INFO: |
1691 | ret = get_info(sock_net(sk), user, len, 1); | 1691 | ret = get_info(sock_net(sk), user, len, 1); |
1692 | break; | 1692 | break; |
1693 | case ARPT_SO_GET_ENTRIES: | 1693 | case ARPT_SO_GET_ENTRIES: |
1694 | ret = compat_get_entries(sock_net(sk), user, len); | 1694 | ret = compat_get_entries(sock_net(sk), user, len); |
1695 | break; | 1695 | break; |
1696 | default: | 1696 | default: |
1697 | ret = do_arpt_get_ctl(sk, cmd, user, len); | 1697 | ret = do_arpt_get_ctl(sk, cmd, user, len); |
1698 | } | 1698 | } |
1699 | return ret; | 1699 | return ret; |
1700 | } | 1700 | } |
1701 | #endif | 1701 | #endif |
1702 | 1702 | ||
1703 | static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 1703 | static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
1704 | { | 1704 | { |
1705 | int ret; | 1705 | int ret; |
1706 | 1706 | ||
1707 | if (!capable(CAP_NET_ADMIN)) | 1707 | if (!capable(CAP_NET_ADMIN)) |
1708 | return -EPERM; | 1708 | return -EPERM; |
1709 | 1709 | ||
1710 | switch (cmd) { | 1710 | switch (cmd) { |
1711 | case ARPT_SO_SET_REPLACE: | 1711 | case ARPT_SO_SET_REPLACE: |
1712 | ret = do_replace(sock_net(sk), user, len); | 1712 | ret = do_replace(sock_net(sk), user, len); |
1713 | break; | 1713 | break; |
1714 | 1714 | ||
1715 | case ARPT_SO_SET_ADD_COUNTERS: | 1715 | case ARPT_SO_SET_ADD_COUNTERS: |
1716 | ret = do_add_counters(sock_net(sk), user, len, 0); | 1716 | ret = do_add_counters(sock_net(sk), user, len, 0); |
1717 | break; | 1717 | break; |
1718 | 1718 | ||
1719 | default: | 1719 | default: |
1720 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); | 1720 | duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); |
1721 | ret = -EINVAL; | 1721 | ret = -EINVAL; |
1722 | } | 1722 | } |
1723 | 1723 | ||
1724 | return ret; | 1724 | return ret; |
1725 | } | 1725 | } |
1726 | 1726 | ||
1727 | static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1727 | static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1728 | { | 1728 | { |
1729 | int ret; | 1729 | int ret; |
1730 | 1730 | ||
1731 | if (!capable(CAP_NET_ADMIN)) | 1731 | if (!capable(CAP_NET_ADMIN)) |
1732 | return -EPERM; | 1732 | return -EPERM; |
1733 | 1733 | ||
1734 | switch (cmd) { | 1734 | switch (cmd) { |
1735 | case ARPT_SO_GET_INFO: | 1735 | case ARPT_SO_GET_INFO: |
1736 | ret = get_info(sock_net(sk), user, len, 0); | 1736 | ret = get_info(sock_net(sk), user, len, 0); |
1737 | break; | 1737 | break; |
1738 | 1738 | ||
1739 | case ARPT_SO_GET_ENTRIES: | 1739 | case ARPT_SO_GET_ENTRIES: |
1740 | ret = get_entries(sock_net(sk), user, len); | 1740 | ret = get_entries(sock_net(sk), user, len); |
1741 | break; | 1741 | break; |
1742 | 1742 | ||
1743 | case ARPT_SO_GET_REVISION_TARGET: { | 1743 | case ARPT_SO_GET_REVISION_TARGET: { |
1744 | struct xt_get_revision rev; | 1744 | struct xt_get_revision rev; |
1745 | 1745 | ||
1746 | if (*len != sizeof(rev)) { | 1746 | if (*len != sizeof(rev)) { |
1747 | ret = -EINVAL; | 1747 | ret = -EINVAL; |
1748 | break; | 1748 | break; |
1749 | } | 1749 | } |
1750 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 1750 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
1751 | ret = -EFAULT; | 1751 | ret = -EFAULT; |
1752 | break; | 1752 | break; |
1753 | } | 1753 | } |
1754 | 1754 | ||
1755 | try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, | 1755 | try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, |
1756 | rev.revision, 1, &ret), | 1756 | rev.revision, 1, &ret), |
1757 | "arpt_%s", rev.name); | 1757 | "arpt_%s", rev.name); |
1758 | break; | 1758 | break; |
1759 | } | 1759 | } |
1760 | 1760 | ||
1761 | default: | 1761 | default: |
1762 | duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); | 1762 | duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); |
1763 | ret = -EINVAL; | 1763 | ret = -EINVAL; |
1764 | } | 1764 | } |
1765 | 1765 | ||
1766 | return ret; | 1766 | return ret; |
1767 | } | 1767 | } |
1768 | 1768 | ||
1769 | struct xt_table *arpt_register_table(struct net *net, | 1769 | struct xt_table *arpt_register_table(struct net *net, |
1770 | const struct xt_table *table, | 1770 | const struct xt_table *table, |
1771 | const struct arpt_replace *repl) | 1771 | const struct arpt_replace *repl) |
1772 | { | 1772 | { |
1773 | int ret; | 1773 | int ret; |
1774 | struct xt_table_info *newinfo; | 1774 | struct xt_table_info *newinfo; |
1775 | struct xt_table_info bootstrap = {0}; | 1775 | struct xt_table_info bootstrap = {0}; |
1776 | void *loc_cpu_entry; | 1776 | void *loc_cpu_entry; |
1777 | struct xt_table *new_table; | 1777 | struct xt_table *new_table; |
1778 | 1778 | ||
1779 | newinfo = xt_alloc_table_info(repl->size); | 1779 | newinfo = xt_alloc_table_info(repl->size); |
1780 | if (!newinfo) { | 1780 | if (!newinfo) { |
1781 | ret = -ENOMEM; | 1781 | ret = -ENOMEM; |
1782 | goto out; | 1782 | goto out; |
1783 | } | 1783 | } |
1784 | 1784 | ||
1785 | /* choose the copy on our node/cpu */ | 1785 | /* choose the copy on our node/cpu */ |
1786 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1786 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1787 | memcpy(loc_cpu_entry, repl->entries, repl->size); | 1787 | memcpy(loc_cpu_entry, repl->entries, repl->size); |
1788 | 1788 | ||
1789 | ret = translate_table(newinfo, loc_cpu_entry, repl); | 1789 | ret = translate_table(newinfo, loc_cpu_entry, repl); |
1790 | duprintf("arpt_register_table: translate table gives %d\n", ret); | 1790 | duprintf("arpt_register_table: translate table gives %d\n", ret); |
1791 | if (ret != 0) | 1791 | if (ret != 0) |
1792 | goto out_free; | 1792 | goto out_free; |
1793 | 1793 | ||
1794 | new_table = xt_register_table(net, table, &bootstrap, newinfo); | 1794 | new_table = xt_register_table(net, table, &bootstrap, newinfo); |
1795 | if (IS_ERR(new_table)) { | 1795 | if (IS_ERR(new_table)) { |
1796 | ret = PTR_ERR(new_table); | 1796 | ret = PTR_ERR(new_table); |
1797 | goto out_free; | 1797 | goto out_free; |
1798 | } | 1798 | } |
1799 | return new_table; | 1799 | return new_table; |
1800 | 1800 | ||
1801 | out_free: | 1801 | out_free: |
1802 | xt_free_table_info(newinfo); | 1802 | xt_free_table_info(newinfo); |
1803 | out: | 1803 | out: |
1804 | return ERR_PTR(ret); | 1804 | return ERR_PTR(ret); |
1805 | } | 1805 | } |
1806 | 1806 | ||
1807 | void arpt_unregister_table(struct xt_table *table) | 1807 | void arpt_unregister_table(struct xt_table *table) |
1808 | { | 1808 | { |
1809 | struct xt_table_info *private; | 1809 | struct xt_table_info *private; |
1810 | void *loc_cpu_entry; | 1810 | void *loc_cpu_entry; |
1811 | struct module *table_owner = table->me; | 1811 | struct module *table_owner = table->me; |
1812 | struct arpt_entry *iter; | 1812 | struct arpt_entry *iter; |
1813 | 1813 | ||
1814 | private = xt_unregister_table(table); | 1814 | private = xt_unregister_table(table); |
1815 | 1815 | ||
1816 | /* Decrease module usage counts and free resources */ | 1816 | /* Decrease module usage counts and free resources */ |
1817 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1817 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
1818 | xt_entry_foreach(iter, loc_cpu_entry, private->size) | 1818 | xt_entry_foreach(iter, loc_cpu_entry, private->size) |
1819 | cleanup_entry(iter); | 1819 | cleanup_entry(iter); |
1820 | if (private->number > private->initial_entries) | 1820 | if (private->number > private->initial_entries) |
1821 | module_put(table_owner); | 1821 | module_put(table_owner); |
1822 | xt_free_table_info(private); | 1822 | xt_free_table_info(private); |
1823 | } | 1823 | } |
1824 | 1824 | ||
1825 | /* The built-in targets: standard (NULL) and error. */ | 1825 | /* The built-in targets: standard (NULL) and error. */ |
1826 | static struct xt_target arpt_builtin_tg[] __read_mostly = { | 1826 | static struct xt_target arpt_builtin_tg[] __read_mostly = { |
1827 | { | 1827 | { |
1828 | .name = ARPT_STANDARD_TARGET, | 1828 | .name = ARPT_STANDARD_TARGET, |
1829 | .targetsize = sizeof(int), | 1829 | .targetsize = sizeof(int), |
1830 | .family = NFPROTO_ARP, | 1830 | .family = NFPROTO_ARP, |
1831 | #ifdef CONFIG_COMPAT | 1831 | #ifdef CONFIG_COMPAT |
1832 | .compatsize = sizeof(compat_int_t), | 1832 | .compatsize = sizeof(compat_int_t), |
1833 | .compat_from_user = compat_standard_from_user, | 1833 | .compat_from_user = compat_standard_from_user, |
1834 | .compat_to_user = compat_standard_to_user, | 1834 | .compat_to_user = compat_standard_to_user, |
1835 | #endif | 1835 | #endif |
1836 | }, | 1836 | }, |
1837 | { | 1837 | { |
1838 | .name = ARPT_ERROR_TARGET, | 1838 | .name = ARPT_ERROR_TARGET, |
1839 | .target = arpt_error, | 1839 | .target = arpt_error, |
1840 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, | 1840 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, |
1841 | .family = NFPROTO_ARP, | 1841 | .family = NFPROTO_ARP, |
1842 | }, | 1842 | }, |
1843 | }; | 1843 | }; |
1844 | 1844 | ||
1845 | static struct nf_sockopt_ops arpt_sockopts = { | 1845 | static struct nf_sockopt_ops arpt_sockopts = { |
1846 | .pf = PF_INET, | 1846 | .pf = PF_INET, |
1847 | .set_optmin = ARPT_BASE_CTL, | 1847 | .set_optmin = ARPT_BASE_CTL, |
1848 | .set_optmax = ARPT_SO_SET_MAX+1, | 1848 | .set_optmax = ARPT_SO_SET_MAX+1, |
1849 | .set = do_arpt_set_ctl, | 1849 | .set = do_arpt_set_ctl, |
1850 | #ifdef CONFIG_COMPAT | 1850 | #ifdef CONFIG_COMPAT |
1851 | .compat_set = compat_do_arpt_set_ctl, | 1851 | .compat_set = compat_do_arpt_set_ctl, |
1852 | #endif | 1852 | #endif |
1853 | .get_optmin = ARPT_BASE_CTL, | 1853 | .get_optmin = ARPT_BASE_CTL, |
1854 | .get_optmax = ARPT_SO_GET_MAX+1, | 1854 | .get_optmax = ARPT_SO_GET_MAX+1, |
1855 | .get = do_arpt_get_ctl, | 1855 | .get = do_arpt_get_ctl, |
1856 | #ifdef CONFIG_COMPAT | 1856 | #ifdef CONFIG_COMPAT |
1857 | .compat_get = compat_do_arpt_get_ctl, | 1857 | .compat_get = compat_do_arpt_get_ctl, |
1858 | #endif | 1858 | #endif |
1859 | .owner = THIS_MODULE, | 1859 | .owner = THIS_MODULE, |
1860 | }; | 1860 | }; |
1861 | 1861 | ||
1862 | static int __net_init arp_tables_net_init(struct net *net) | 1862 | static int __net_init arp_tables_net_init(struct net *net) |
1863 | { | 1863 | { |
1864 | return xt_proto_init(net, NFPROTO_ARP); | 1864 | return xt_proto_init(net, NFPROTO_ARP); |
1865 | } | 1865 | } |
1866 | 1866 | ||
1867 | static void __net_exit arp_tables_net_exit(struct net *net) | 1867 | static void __net_exit arp_tables_net_exit(struct net *net) |
1868 | { | 1868 | { |
1869 | xt_proto_fini(net, NFPROTO_ARP); | 1869 | xt_proto_fini(net, NFPROTO_ARP); |
1870 | } | 1870 | } |
1871 | 1871 | ||
1872 | static struct pernet_operations arp_tables_net_ops = { | 1872 | static struct pernet_operations arp_tables_net_ops = { |
1873 | .init = arp_tables_net_init, | 1873 | .init = arp_tables_net_init, |
1874 | .exit = arp_tables_net_exit, | 1874 | .exit = arp_tables_net_exit, |
1875 | }; | 1875 | }; |
1876 | 1876 | ||
1877 | static int __init arp_tables_init(void) | 1877 | static int __init arp_tables_init(void) |
1878 | { | 1878 | { |
1879 | int ret; | 1879 | int ret; |
1880 | 1880 | ||
1881 | ret = register_pernet_subsys(&arp_tables_net_ops); | 1881 | ret = register_pernet_subsys(&arp_tables_net_ops); |
1882 | if (ret < 0) | 1882 | if (ret < 0) |
1883 | goto err1; | 1883 | goto err1; |
1884 | 1884 | ||
1885 | /* Noone else will be downing sem now, so we won't sleep */ | 1885 | /* Noone else will be downing sem now, so we won't sleep */ |
1886 | ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1886 | ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1887 | if (ret < 0) | 1887 | if (ret < 0) |
1888 | goto err2; | 1888 | goto err2; |
1889 | 1889 | ||
1890 | /* Register setsockopt */ | 1890 | /* Register setsockopt */ |
1891 | ret = nf_register_sockopt(&arpt_sockopts); | 1891 | ret = nf_register_sockopt(&arpt_sockopts); |
1892 | if (ret < 0) | 1892 | if (ret < 0) |
1893 | goto err4; | 1893 | goto err4; |
1894 | 1894 | ||
1895 | printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); | 1895 | printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); |
1896 | return 0; | 1896 | return 0; |
1897 | 1897 | ||
1898 | err4: | 1898 | err4: |
1899 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1899 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1900 | err2: | 1900 | err2: |
1901 | unregister_pernet_subsys(&arp_tables_net_ops); | 1901 | unregister_pernet_subsys(&arp_tables_net_ops); |
1902 | err1: | 1902 | err1: |
1903 | return ret; | 1903 | return ret; |
1904 | } | 1904 | } |
1905 | 1905 | ||
1906 | static void __exit arp_tables_fini(void) | 1906 | static void __exit arp_tables_fini(void) |
1907 | { | 1907 | { |
1908 | nf_unregister_sockopt(&arpt_sockopts); | 1908 | nf_unregister_sockopt(&arpt_sockopts); |
1909 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); | 1909 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1910 | unregister_pernet_subsys(&arp_tables_net_ops); | 1910 | unregister_pernet_subsys(&arp_tables_net_ops); |
1911 | } | 1911 | } |
1912 | 1912 | ||
/* Exported for table modules (arptable_filter) and the packet path. */
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);
1919 | 1919 |
net/ipv4/netfilter/ip_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code. | 2 | * Packet matching code. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/cache.h> | 12 | #include <linux/cache.h> |
13 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/icmp.h> | 19 | #include <linux/icmp.h> |
20 | #include <net/ip.h> | 20 | #include <net/ip.h> |
21 | #include <net/compat.h> | 21 | #include <net/compat.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/cpumask.h> | 26 | #include <linux/cpumask.h> |
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | #include <linux/netfilter_ipv4/ip_tables.h> | 29 | #include <linux/netfilter_ipv4/ip_tables.h> |
30 | #include <net/netfilter/nf_log.h> | 30 | #include <net/netfilter/nf_log.h> |
31 | #include "../../netfilter/xt_repldata.h" | 31 | #include "../../netfilter/xt_repldata.h" |
32 | 32 | ||
33 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
34 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 34 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
35 | MODULE_DESCRIPTION("IPv4 packet filter"); | 35 | MODULE_DESCRIPTION("IPv4 packet filter"); |
36 | 36 | ||
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* Packet-path debug output; compiled out unless DEBUG_IP_FIREWALL is set. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* Userspace-interface debug output, gated separately from the fast path. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
64 | 64 | ||
65 | void *ipt_alloc_initial_table(const struct xt_table *info) | 65 | void *ipt_alloc_initial_table(const struct xt_table *info) |
66 | { | 66 | { |
67 | return xt_alloc_initial_table(ipt, IPT); | 67 | return xt_alloc_initial_table(ipt, IPT); |
68 | } | 68 | } |
69 | EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); | 69 | EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | We keep a set of rules for each CPU, so we can avoid write-locking | 72 | We keep a set of rules for each CPU, so we can avoid write-locking |
73 | them in the softirq when updating the counters and therefore | 73 | them in the softirq when updating the counters and therefore |
74 | only need to read-lock in the softirq; doing a write_lock_bh() in user | 74 | only need to read-lock in the softirq; doing a write_lock_bh() in user |
75 | context stops packets coming through and allows user context to read | 75 | context stops packets coming through and allows user context to read |
76 | the counters or update the rules. | 76 | the counters or update the rules. |
77 | 77 | ||
78 | Hence the start of any table is given by get_table() below. */ | 78 | Hence the start of any table is given by get_table() below. */ |
79 | 79 | ||
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

	/* Evaluate 'bool' and flip the result when the corresponding '!'
	 * inversion flag is set in the rule.  The macro captures 'ipinfo'
	 * from this scope; it is #undef'd later (see unconditional()). */
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	/* Masked source/destination address comparison. */
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	/* Input interface name, honoring the rule's wildcard mask. */
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface name, same wildcard handling. */
	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
145 | 145 | ||
146 | static bool | 146 | static bool |
147 | ip_checkentry(const struct ipt_ip *ip) | 147 | ip_checkentry(const struct ipt_ip *ip) |
148 | { | 148 | { |
149 | if (ip->flags & ~IPT_F_MASK) { | 149 | if (ip->flags & ~IPT_F_MASK) { |
150 | duprintf("Unknown flag bits set: %08X\n", | 150 | duprintf("Unknown flag bits set: %08X\n", |
151 | ip->flags & ~IPT_F_MASK); | 151 | ip->flags & ~IPT_F_MASK); |
152 | return false; | 152 | return false; |
153 | } | 153 | } |
154 | if (ip->invflags & ~IPT_INV_MASK) { | 154 | if (ip->invflags & ~IPT_INV_MASK) { |
155 | duprintf("Unknown invflag bits set: %08X\n", | 155 | duprintf("Unknown invflag bits set: %08X\n", |
156 | ip->invflags & ~IPT_INV_MASK); | 156 | ip->invflags & ~IPT_INV_MASK); |
157 | return false; | 157 | return false; |
158 | } | 158 | } |
159 | return true; | 159 | return true; |
160 | } | 160 | } |
161 | 161 | ||
162 | static unsigned int | 162 | static unsigned int |
163 | ipt_error(struct sk_buff *skb, const struct xt_action_param *par) | 163 | ipt_error(struct sk_buff *skb, const struct xt_action_param *par) |
164 | { | 164 | { |
165 | if (net_ratelimit()) | 165 | if (net_ratelimit()) |
166 | pr_info("error: `%s'\n", (const char *)par->targinfo); | 166 | pr_info("error: `%s'\n", (const char *)par->targinfo); |
167 | 167 | ||
168 | return NF_DROP; | 168 | return NF_DROP; |
169 | } | 169 | } |
170 | 170 | ||
/* Performance critical */
/* Return the rule located 'offset' bytes into the flat rule blob 'base'. */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *p = (const char *)base + offset;

	return (struct ipt_entry *)p;
}
177 | 177 | ||
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	/* Zero-initialized template; a rule whose match part is all
	 * zeroes (no addresses, interfaces, proto, flags) matches
	 * every packet. */
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
	/* Ends the scope of FWINV, defined in ip_packet_match() above.
	 * Placement inside this function body is odd but has no effect
	 * on the preprocessor. */
#undef FWINV
}
187 | 187 | ||
/* for const-correctness */
static inline const struct ipt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	/* ipt_get_target() takes a non-const entry; cast const away for
	 * the call and reapply it on the returned pointer so read-only
	 * callers keep their const guarantee. */
	return ipt_get_target((struct ipt_entry *)e);
}
194 | 194 | ||
195 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 195 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
196 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 196 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
/* Netfilter hook number -> builtin chain name, used for TRACE output. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};
204 | 204 | ||
/* Indices into the comments[] string table below. */
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};
210 | 210 | ||
/* Rule classification strings emitted in TRACE log lines. */
static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};
216 | 216 | ||
/* Log settings used by trace_packet(): LOG-style output at syslog
 * level 4 with every log format flag enabled. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
226 | 226 | ||
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): invoked for each rule 's' until the rule
 * 'e' that matched the packet is found.  Maintains the current chain name
 * (user chains start with an ERROR target carrying the name) and a running
 * rule number, and classifies the matched rule as rule/return/policy.
 * Returns 1 when 's' == 'e' (caller stops walking), 0 to keep going.
 */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* A chain named like the hook is the builtin chain,
			 * so its tail is the "policy"; otherwise "return". */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
258 | 258 | ||
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a packet that
 * hit rule 'e'.  Re-walks this hook's chain in the current CPU's copy of
 * the ruleset to recover the chain name and rule number of 'e'.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	/* Per-CPU copy of the rules; start at this hook's entry point. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
288 | #endif | 288 | #endif |
289 | 289 | ||
290 | static inline __pure | 290 | static inline __pure |
291 | struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) | 291 | struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) |
292 | { | 292 | { |
293 | return (void *)entry + entry->next_offset; | 293 | return (void *)entry + entry->next_offset; |
294 | } | 294 | } |
295 | 295 | ||
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main iptables packet walker: run 'skb' through 'table' for the given
 * 'hook'.  For each rule: match the IP header part, then all extension
 * matches, then execute the target.  Standard targets implement absolute
 * verdicts, RETURN and jump/goto via a per-CPU jump stack.  The walk runs
 * under xt_info_rdlock_bh() on this CPU's copy of the ruleset.
 */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	/* Per-CPU jump stack; origptr is restored on exit so nested
	 * table invocations (e.g. via REJECT) do not corrupt it. */
	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr = &private->stackptr[cpu];
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct ipt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		/* All extension matches must also succeed. */
		xt_ematch_foreach(ematch, e) {
			acpar.match = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					/* Absolute verdict, encoded as
					 * -(verdict) - 1. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0) {
					/* RETURN from a base chain: fall
					 * through to the underflow rule
					 * (the chain policy). */
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			/* A jump (not a goto, and not a fallthrough to the
			 * next rule) pushes the return point on the stack. */
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_info_rdunlock_bh();
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
442 | 442 | ||
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * Depth-first walk of the ruleset from every valid hook entry point,
 * without recursion: back-pointers are stashed in e->counters.pcnt
 * (cleared again on the way back) and visit state in e->comefrom.
 * Bit NF_INET_NUMHOOKS of comefrom marks "currently on the walk path",
 * so seeing it again means a chain loop.  Also rejects out-of-range
 * verdicts and jump targets.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ipt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Already on the current walk path: loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
						t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* Follow and clear the saved
					 * back pointer. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
554 | 554 | ||
555 | static void cleanup_match(struct ipt_entry_match *m, struct net *net) | 555 | static void cleanup_match(struct ipt_entry_match *m, struct net *net) |
556 | { | 556 | { |
557 | struct xt_mtdtor_param par; | 557 | struct xt_mtdtor_param par; |
558 | 558 | ||
559 | par.net = net; | 559 | par.net = net; |
560 | par.match = m->u.kernel.match; | 560 | par.match = m->u.kernel.match; |
561 | par.matchinfo = m->data; | 561 | par.matchinfo = m->data; |
562 | par.family = NFPROTO_IPV4; | 562 | par.family = NFPROTO_IPV4; |
563 | if (par.match->destroy != NULL) | 563 | if (par.match->destroy != NULL) |
564 | par.match->destroy(&par); | 564 | par.match->destroy(&par); |
565 | module_put(par.match->me); | 565 | module_put(par.match->me); |
566 | } | 566 | } |
567 | 567 | ||
568 | static int | 568 | static int |
569 | check_entry(const struct ipt_entry *e, const char *name) | 569 | check_entry(const struct ipt_entry *e, const char *name) |
570 | { | 570 | { |
571 | const struct ipt_entry_target *t; | 571 | const struct ipt_entry_target *t; |
572 | 572 | ||
573 | if (!ip_checkentry(&e->ip)) { | 573 | if (!ip_checkentry(&e->ip)) { |
574 | duprintf("ip check failed %p %s.\n", e, par->match->name); | 574 | duprintf("ip check failed %p %s.\n", e, par->match->name); |
575 | return -EINVAL; | 575 | return -EINVAL; |
576 | } | 576 | } |
577 | 577 | ||
578 | if (e->target_offset + sizeof(struct ipt_entry_target) > | 578 | if (e->target_offset + sizeof(struct ipt_entry_target) > |
579 | e->next_offset) | 579 | e->next_offset) |
580 | return -EINVAL; | 580 | return -EINVAL; |
581 | 581 | ||
582 | t = ipt_get_target_c(e); | 582 | t = ipt_get_target_c(e); |
583 | if (e->target_offset + t->u.target_size > e->next_offset) | 583 | if (e->target_offset + t->u.target_size > e->next_offset) |
584 | return -EINVAL; | 584 | return -EINVAL; |
585 | 585 | ||
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | static int | 589 | static int |
590 | check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) | 590 | check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) |
591 | { | 591 | { |
592 | const struct ipt_ip *ip = par->entryinfo; | 592 | const struct ipt_ip *ip = par->entryinfo; |
593 | int ret; | 593 | int ret; |
594 | 594 | ||
595 | par->match = m->u.kernel.match; | 595 | par->match = m->u.kernel.match; |
596 | par->matchinfo = m->data; | 596 | par->matchinfo = m->data; |
597 | 597 | ||
598 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), | 598 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), |
599 | ip->proto, ip->invflags & IPT_INV_PROTO); | 599 | ip->proto, ip->invflags & IPT_INV_PROTO); |
600 | if (ret < 0) { | 600 | if (ret < 0) { |
601 | duprintf("check failed for `%s'.\n", par->match->name); | 601 | duprintf("check failed for `%s'.\n", par->match->name); |
602 | return ret; | 602 | return ret; |
603 | } | 603 | } |
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
606 | 606 | ||
607 | static int | 607 | static int |
608 | find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) | 608 | find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) |
609 | { | 609 | { |
610 | struct xt_match *match; | 610 | struct xt_match *match; |
611 | int ret; | 611 | int ret; |
612 | 612 | ||
613 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, | 613 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
614 | m->u.user.revision); | 614 | m->u.user.revision); |
615 | if (IS_ERR(match)) { | 615 | if (IS_ERR(match)) { |
616 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 616 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
617 | return PTR_ERR(match); | 617 | return PTR_ERR(match); |
618 | } | 618 | } |
619 | m->u.kernel.match = match; | 619 | m->u.kernel.match = match; |
620 | 620 | ||
621 | ret = check_match(m, par); | 621 | ret = check_match(m, par); |
622 | if (ret) | 622 | if (ret) |
623 | goto err; | 623 | goto err; |
624 | 624 | ||
625 | return 0; | 625 | return 0; |
626 | err: | 626 | err: |
627 | module_put(m->u.kernel.match->me); | 627 | module_put(m->u.kernel.match->me); |
628 | return ret; | 628 | return ret; |
629 | } | 629 | } |
630 | 630 | ||
631 | static int check_target(struct ipt_entry *e, struct net *net, const char *name) | 631 | static int check_target(struct ipt_entry *e, struct net *net, const char *name) |
632 | { | 632 | { |
633 | struct ipt_entry_target *t = ipt_get_target(e); | 633 | struct ipt_entry_target *t = ipt_get_target(e); |
634 | struct xt_tgchk_param par = { | 634 | struct xt_tgchk_param par = { |
635 | .net = net, | 635 | .net = net, |
636 | .table = name, | 636 | .table = name, |
637 | .entryinfo = e, | 637 | .entryinfo = e, |
638 | .target = t->u.kernel.target, | 638 | .target = t->u.kernel.target, |
639 | .targinfo = t->data, | 639 | .targinfo = t->data, |
640 | .hook_mask = e->comefrom, | 640 | .hook_mask = e->comefrom, |
641 | .family = NFPROTO_IPV4, | 641 | .family = NFPROTO_IPV4, |
642 | }; | 642 | }; |
643 | int ret; | 643 | int ret; |
644 | 644 | ||
645 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), | 645 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), |
646 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); | 646 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); |
647 | if (ret < 0) { | 647 | if (ret < 0) { |
648 | duprintf("check failed for `%s'.\n", | 648 | duprintf("check failed for `%s'.\n", |
649 | t->u.kernel.target->name); | 649 | t->u.kernel.target->name); |
650 | return ret; | 650 | return ret; |
651 | } | 651 | } |
652 | return 0; | 652 | return 0; |
653 | } | 653 | } |
654 | 654 | ||
/*
 * Resolve and validate all matches and the target of one entry.
 *
 * On success every match and the target hold a module reference and
 * have passed their checkentry hooks.  On failure, all references
 * taken so far are dropped again: 'j' counts how many matches were
 * fully set up, so the cleanup loop below releases exactly those.
 *
 * Returns 0 on success or a negative errno.
 */
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* Structural sanity first (offsets, ip part). */
	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;	/* one more match now holds a module reference */
	}

	/* Now look up and check the target the same way. */
	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Unwind only the first 'j' matches that were set up. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
707 | 707 | ||
708 | static bool check_underflow(const struct ipt_entry *e) | 708 | static bool check_underflow(const struct ipt_entry *e) |
709 | { | 709 | { |
710 | const struct ipt_entry_target *t; | 710 | const struct ipt_entry_target *t; |
711 | unsigned int verdict; | 711 | unsigned int verdict; |
712 | 712 | ||
713 | if (!unconditional(&e->ip)) | 713 | if (!unconditional(&e->ip)) |
714 | return false; | 714 | return false; |
715 | t = ipt_get_target_c(e); | 715 | t = ipt_get_target_c(e); |
716 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 716 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
717 | return false; | 717 | return false; |
718 | verdict = ((struct ipt_standard_target *)t)->verdict; | 718 | verdict = ((struct ipt_standard_target *)t)->verdict; |
719 | verdict = -verdict - 1; | 719 | verdict = -verdict - 1; |
720 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 720 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
721 | } | 721 | } |
722 | 722 | ||
/*
 * Validate one entry's alignment and size, and record any hook entry
 * points / underflows whose configured offset coincides with this
 * entry's position in the blob.
 *
 * Returns 0 on success, -EINVAL on a malformed entry or an invalid
 * underflow rule.
 */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	/* Entries must be properly aligned and leave room for at least
	 * the fixed header before the end of the blob. */
	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover at least the entry plus one target. */
	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/* Policies must be unconditional ACCEPT/DROP. */
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
769 | 769 | ||
770 | static void | 770 | static void |
771 | cleanup_entry(struct ipt_entry *e, struct net *net) | 771 | cleanup_entry(struct ipt_entry *e, struct net *net) |
772 | { | 772 | { |
773 | struct xt_tgdtor_param par; | 773 | struct xt_tgdtor_param par; |
774 | struct ipt_entry_target *t; | 774 | struct ipt_entry_target *t; |
775 | struct xt_entry_match *ematch; | 775 | struct xt_entry_match *ematch; |
776 | 776 | ||
777 | /* Cleanup all matches */ | 777 | /* Cleanup all matches */ |
778 | xt_ematch_foreach(ematch, e) | 778 | xt_ematch_foreach(ematch, e) |
779 | cleanup_match(ematch, net); | 779 | cleanup_match(ematch, net); |
780 | t = ipt_get_target(e); | 780 | t = ipt_get_target(e); |
781 | 781 | ||
782 | par.net = net; | 782 | par.net = net; |
783 | par.target = t->u.kernel.target; | 783 | par.target = t->u.kernel.target; |
784 | par.targinfo = t->data; | 784 | par.targinfo = t->data; |
785 | par.family = NFPROTO_IPV4; | 785 | par.family = NFPROTO_IPV4; |
786 | if (par.target->destroy != NULL) | 786 | if (par.target->destroy != NULL) |
787 | par.target->destroy(&par); | 787 | par.target->destroy(&par); |
788 | module_put(par.target->me); | 788 | module_put(par.target->me); |
789 | } | 789 | } |
790 | 790 | ||
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Performs, in order: per-entry structural checks and hook/underflow
 * recording, chain-loop detection, full match/target validation, and
 * finally replication of the blob to every other CPU's copy.
 *
 * Returns 0 on success or a negative errno; on a mid-validation
 * failure all already-validated entries are cleaned up again.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each ERROR target marks a user-defined chain; count
		 * them to size the per-cpu jump stack. */
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	/* The walk must have seen exactly the claimed entry count. */
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Reject rulesets containing loops or unreachable rules. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the 'i' entries already validated. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
879 | 879 | ||
/*
 * Snapshot the per-entry packet/byte counters into the caller-supplied
 * array, summing the table's per-cpu copies.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	/* Seed with the current CPU's values (SET, not ADD). */
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	/* Fold in every other CPU's copy under that CPU's write lock
	 * so we do not race with concurrent counter updates. */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i; /* macro does multi eval of i */
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
920 | 920 | ||
921 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 921 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
922 | { | 922 | { |
923 | unsigned int countersize; | 923 | unsigned int countersize; |
924 | struct xt_counters *counters; | 924 | struct xt_counters *counters; |
925 | const struct xt_table_info *private = table->private; | 925 | const struct xt_table_info *private = table->private; |
926 | 926 | ||
927 | /* We need atomic snapshot of counters: rest doesn't change | 927 | /* We need atomic snapshot of counters: rest doesn't change |
928 | (other than comefrom, which userspace doesn't care | 928 | (other than comefrom, which userspace doesn't care |
929 | about). */ | 929 | about). */ |
930 | countersize = sizeof(struct xt_counters) * private->number; | 930 | countersize = sizeof(struct xt_counters) * private->number; |
931 | counters = vmalloc_node(countersize, numa_node_id()); | 931 | counters = vmalloc_node(countersize, numa_node_id()); |
932 | 932 | ||
933 | if (counters == NULL) | 933 | if (counters == NULL) |
934 | return ERR_PTR(-ENOMEM); | 934 | return ERR_PTR(-ENOMEM); |
935 | 935 | ||
936 | get_counters(private, counters); | 936 | get_counters(private, counters); |
937 | 937 | ||
938 | return counters; | 938 | return counters; |
939 | } | 939 | } |
940 | 940 | ||
/*
 * Copy the whole ruleset blob back to userspace, then patch in the
 * freshly-snapshotted counters and the user-visible match/target
 * names (the in-kernel blob stores kernel pointers in those unions).
 *
 * Returns 0 on success, -EFAULT on any failed copy, or the error from
 * alloc_counters().
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Overwrite the stale in-blob counters with the
		 * aggregated per-cpu snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fixup for the target's name. */
		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1014 | 1014 | ||
1015 | #ifdef CONFIG_COMPAT | 1015 | #ifdef CONFIG_COMPAT |
1016 | static void compat_standard_from_user(void *dst, const void *src) | 1016 | static void compat_standard_from_user(void *dst, const void *src) |
1017 | { | 1017 | { |
1018 | int v = *(compat_int_t *)src; | 1018 | int v = *(compat_int_t *)src; |
1019 | 1019 | ||
1020 | if (v > 0) | 1020 | if (v > 0) |
1021 | v += xt_compat_calc_jump(AF_INET, v); | 1021 | v += xt_compat_calc_jump(AF_INET, v); |
1022 | memcpy(dst, &v, sizeof(v)); | 1022 | memcpy(dst, &v, sizeof(v)); |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static int compat_standard_to_user(void __user *dst, const void *src) | 1025 | static int compat_standard_to_user(void __user *dst, const void *src) |
1026 | { | 1026 | { |
1027 | compat_int_t cv = *(int *)src; | 1027 | compat_int_t cv = *(int *)src; |
1028 | 1028 | ||
1029 | if (cv > 0) | 1029 | if (cv > 0) |
1030 | cv -= xt_compat_calc_jump(AF_INET, cv); | 1030 | cv -= xt_compat_calc_jump(AF_INET, cv); |
1031 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 1031 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
1032 | } | 1032 | } |
1033 | 1033 | ||
/*
 * Compute how much smaller this entry is in the 32-bit compat layout
 * and adjust newinfo's total size and hook offsets accordingly.
 *
 * Returns 0 on success or the error from xt_compat_add_offset().
 */
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Fixed-header shrinkage plus each match/target's delta. */
	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	/* Record the delta so jump targets can be translated later. */
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	/* Any hook entry / underflow located after this entry moves
	 * down by the accumulated shrinkage. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1064 | 1064 | ||
1065 | static int compat_table_info(const struct xt_table_info *info, | 1065 | static int compat_table_info(const struct xt_table_info *info, |
1066 | struct xt_table_info *newinfo) | 1066 | struct xt_table_info *newinfo) |
1067 | { | 1067 | { |
1068 | struct ipt_entry *iter; | 1068 | struct ipt_entry *iter; |
1069 | void *loc_cpu_entry; | 1069 | void *loc_cpu_entry; |
1070 | int ret; | 1070 | int ret; |
1071 | 1071 | ||
1072 | if (!newinfo || !info) | 1072 | if (!newinfo || !info) |
1073 | return -EINVAL; | 1073 | return -EINVAL; |
1074 | 1074 | ||
1075 | /* we dont care about newinfo->entries[] */ | 1075 | /* we dont care about newinfo->entries[] */ |
1076 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | 1076 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); |
1077 | newinfo->initial_entries = 0; | 1077 | newinfo->initial_entries = 0; |
1078 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; | 1078 | loc_cpu_entry = info->entries[raw_smp_processor_id()]; |
1079 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { | 1079 | xt_entry_foreach(iter, loc_cpu_entry, info->size) { |
1080 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); | 1080 | ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); |
1081 | if (ret != 0) | 1081 | if (ret != 0) |
1082 | return ret; | 1082 | return ret; |
1083 | } | 1083 | } |
1084 | return 0; | 1084 | return 0; |
1085 | } | 1085 | } |
1086 | #endif | 1086 | #endif |
1087 | 1087 | ||
/*
 * IPT_SO_GET_INFO handler: copy table metadata (hook entries,
 * underflows, entry count and total size) to userspace.  When
 * 'compat' is set, sizes and offsets are recomputed for the 32-bit
 * compat layout under the xt compat lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace may not have NUL-terminated the table name. */
	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	/* Auto-load iptable_<name> if the table isn't registered yet. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-adjusted sizes and offsets. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		/* Drop the lock and reference taken by the lookup. */
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
1147 | 1147 | ||
1148 | static int | 1148 | static int |
1149 | get_entries(struct net *net, struct ipt_get_entries __user *uptr, | 1149 | get_entries(struct net *net, struct ipt_get_entries __user *uptr, |
1150 | const int *len) | 1150 | const int *len) |
1151 | { | 1151 | { |
1152 | int ret; | 1152 | int ret; |
1153 | struct ipt_get_entries get; | 1153 | struct ipt_get_entries get; |
1154 | struct xt_table *t; | 1154 | struct xt_table *t; |
1155 | 1155 | ||
1156 | if (*len < sizeof(get)) { | 1156 | if (*len < sizeof(get)) { |
1157 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); | 1157 | duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); |
1158 | return -EINVAL; | 1158 | return -EINVAL; |
1159 | } | 1159 | } |
1160 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1160 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1161 | return -EFAULT; | 1161 | return -EFAULT; |
1162 | if (*len != sizeof(struct ipt_get_entries) + get.size) { | 1162 | if (*len != sizeof(struct ipt_get_entries) + get.size) { |
1163 | duprintf("get_entries: %u != %zu\n", | 1163 | duprintf("get_entries: %u != %zu\n", |
1164 | *len, sizeof(get) + get.size); | 1164 | *len, sizeof(get) + get.size); |
1165 | return -EINVAL; | 1165 | return -EINVAL; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | t = xt_find_table_lock(net, AF_INET, get.name); | 1168 | t = xt_find_table_lock(net, AF_INET, get.name); |
1169 | if (t && !IS_ERR(t)) { | 1169 | if (t && !IS_ERR(t)) { |
1170 | const struct xt_table_info *private = t->private; | 1170 | const struct xt_table_info *private = t->private; |
1171 | duprintf("t->private->number = %u\n", private->number); | 1171 | duprintf("t->private->number = %u\n", private->number); |
1172 | if (get.size == private->size) | 1172 | if (get.size == private->size) |
1173 | ret = copy_entries_to_user(private->size, | 1173 | ret = copy_entries_to_user(private->size, |
1174 | t, uptr->entrytable); | 1174 | t, uptr->entrytable); |
1175 | else { | 1175 | else { |
1176 | duprintf("get_entries: I've got %u not %u!\n", | 1176 | duprintf("get_entries: I've got %u not %u!\n", |
1177 | private->size, get.size); | 1177 | private->size, get.size); |
1178 | ret = -EAGAIN; | 1178 | ret = -EAGAIN; |
1179 | } | 1179 | } |
1180 | module_put(t->me); | 1180 | module_put(t->me); |
1181 | xt_table_unlock(t); | 1181 | xt_table_unlock(t); |
1182 | } else | 1182 | } else |
1183 | ret = t ? PTR_ERR(t) : -ENOENT; | 1183 | ret = t ? PTR_ERR(t) : -ENOENT; |
1184 | 1184 | ||
1185 | return ret; | 1185 | return ret; |
1186 | } | 1186 | } |
1187 | 1187 | ||
/*
 * Swap @newinfo in for table @name and return the displaced table's
 * counters to userspace at @counters_ptr.  Takes the table lock itself;
 * on success the old table info is freed and its rules cleaned up.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	/* Scratch buffer for the counters of the table being replaced.
	 * num_counters was overflow-checked by the caller. */
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Find the table, autoloading iptable_<name> if it is absent. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* NOTE(review): the two conditionals drop up to two references,
	 * balancing gets taken when user rules were first added -- confirm
	 * against the refcounting in xt_replace_table()/table registration. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* The replace itself already succeeded; a failed counter copy
	 * only turns the return value into -EFAULT. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1260 | 1260 | ||
1261 | static int | 1261 | static int |
1262 | do_replace(struct net *net, const void __user *user, unsigned int len) | 1262 | do_replace(struct net *net, const void __user *user, unsigned int len) |
1263 | { | 1263 | { |
1264 | int ret; | 1264 | int ret; |
1265 | struct ipt_replace tmp; | 1265 | struct ipt_replace tmp; |
1266 | struct xt_table_info *newinfo; | 1266 | struct xt_table_info *newinfo; |
1267 | void *loc_cpu_entry; | 1267 | void *loc_cpu_entry; |
1268 | struct ipt_entry *iter; | 1268 | struct ipt_entry *iter; |
1269 | 1269 | ||
1270 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1270 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1271 | return -EFAULT; | 1271 | return -EFAULT; |
1272 | 1272 | ||
1273 | /* overflow check */ | 1273 | /* overflow check */ |
1274 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1274 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1275 | return -ENOMEM; | 1275 | return -ENOMEM; |
1276 | 1276 | ||
1277 | newinfo = xt_alloc_table_info(tmp.size); | 1277 | newinfo = xt_alloc_table_info(tmp.size); |
1278 | if (!newinfo) | 1278 | if (!newinfo) |
1279 | return -ENOMEM; | 1279 | return -ENOMEM; |
1280 | 1280 | ||
1281 | /* choose the copy that is on our node/cpu */ | 1281 | /* choose the copy that is on our node/cpu */ |
1282 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1282 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1283 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1283 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1284 | tmp.size) != 0) { | 1284 | tmp.size) != 0) { |
1285 | ret = -EFAULT; | 1285 | ret = -EFAULT; |
1286 | goto free_newinfo; | 1286 | goto free_newinfo; |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); | 1289 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); |
1290 | if (ret != 0) | 1290 | if (ret != 0) |
1291 | goto free_newinfo; | 1291 | goto free_newinfo; |
1292 | 1292 | ||
1293 | duprintf("Translated table\n"); | 1293 | duprintf("Translated table\n"); |
1294 | 1294 | ||
1295 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1295 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1296 | tmp.num_counters, tmp.counters); | 1296 | tmp.num_counters, tmp.counters); |
1297 | if (ret) | 1297 | if (ret) |
1298 | goto free_newinfo_untrans; | 1298 | goto free_newinfo_untrans; |
1299 | return 0; | 1299 | return 0; |
1300 | 1300 | ||
1301 | free_newinfo_untrans: | 1301 | free_newinfo_untrans: |
1302 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1302 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1303 | cleanup_entry(iter, net); | 1303 | cleanup_entry(iter, net); |
1304 | free_newinfo: | 1304 | free_newinfo: |
1305 | xt_free_table_info(newinfo); | 1305 | xt_free_table_info(newinfo); |
1306 | return ret; | 1306 | return ret; |
1307 | } | 1307 | } |
1308 | 1308 | ||
1309 | static int | 1309 | static int |
1310 | do_add_counters(struct net *net, const void __user *user, | 1310 | do_add_counters(struct net *net, const void __user *user, |
1311 | unsigned int len, int compat) | 1311 | unsigned int len, int compat) |
1312 | { | 1312 | { |
1313 | unsigned int i, curcpu; | 1313 | unsigned int i, curcpu; |
1314 | struct xt_counters_info tmp; | 1314 | struct xt_counters_info tmp; |
1315 | struct xt_counters *paddc; | 1315 | struct xt_counters *paddc; |
1316 | unsigned int num_counters; | 1316 | unsigned int num_counters; |
1317 | const char *name; | 1317 | const char *name; |
1318 | int size; | 1318 | int size; |
1319 | void *ptmp; | 1319 | void *ptmp; |
1320 | struct xt_table *t; | 1320 | struct xt_table *t; |
1321 | const struct xt_table_info *private; | 1321 | const struct xt_table_info *private; |
1322 | int ret = 0; | 1322 | int ret = 0; |
1323 | void *loc_cpu_entry; | 1323 | void *loc_cpu_entry; |
1324 | struct ipt_entry *iter; | 1324 | struct ipt_entry *iter; |
1325 | #ifdef CONFIG_COMPAT | 1325 | #ifdef CONFIG_COMPAT |
1326 | struct compat_xt_counters_info compat_tmp; | 1326 | struct compat_xt_counters_info compat_tmp; |
1327 | 1327 | ||
1328 | if (compat) { | 1328 | if (compat) { |
1329 | ptmp = &compat_tmp; | 1329 | ptmp = &compat_tmp; |
1330 | size = sizeof(struct compat_xt_counters_info); | 1330 | size = sizeof(struct compat_xt_counters_info); |
1331 | } else | 1331 | } else |
1332 | #endif | 1332 | #endif |
1333 | { | 1333 | { |
1334 | ptmp = &tmp; | 1334 | ptmp = &tmp; |
1335 | size = sizeof(struct xt_counters_info); | 1335 | size = sizeof(struct xt_counters_info); |
1336 | } | 1336 | } |
1337 | 1337 | ||
1338 | if (copy_from_user(ptmp, user, size) != 0) | 1338 | if (copy_from_user(ptmp, user, size) != 0) |
1339 | return -EFAULT; | 1339 | return -EFAULT; |
1340 | 1340 | ||
1341 | #ifdef CONFIG_COMPAT | 1341 | #ifdef CONFIG_COMPAT |
1342 | if (compat) { | 1342 | if (compat) { |
1343 | num_counters = compat_tmp.num_counters; | 1343 | num_counters = compat_tmp.num_counters; |
1344 | name = compat_tmp.name; | 1344 | name = compat_tmp.name; |
1345 | } else | 1345 | } else |
1346 | #endif | 1346 | #endif |
1347 | { | 1347 | { |
1348 | num_counters = tmp.num_counters; | 1348 | num_counters = tmp.num_counters; |
1349 | name = tmp.name; | 1349 | name = tmp.name; |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | if (len != size + num_counters * sizeof(struct xt_counters)) | 1352 | if (len != size + num_counters * sizeof(struct xt_counters)) |
1353 | return -EINVAL; | 1353 | return -EINVAL; |
1354 | 1354 | ||
1355 | paddc = vmalloc_node(len - size, numa_node_id()); | 1355 | paddc = vmalloc_node(len - size, numa_node_id()); |
1356 | if (!paddc) | 1356 | if (!paddc) |
1357 | return -ENOMEM; | 1357 | return -ENOMEM; |
1358 | 1358 | ||
1359 | if (copy_from_user(paddc, user + size, len - size) != 0) { | 1359 | if (copy_from_user(paddc, user + size, len - size) != 0) { |
1360 | ret = -EFAULT; | 1360 | ret = -EFAULT; |
1361 | goto free; | 1361 | goto free; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | t = xt_find_table_lock(net, AF_INET, name); | 1364 | t = xt_find_table_lock(net, AF_INET, name); |
1365 | if (!t || IS_ERR(t)) { | 1365 | if (!t || IS_ERR(t)) { |
1366 | ret = t ? PTR_ERR(t) : -ENOENT; | 1366 | ret = t ? PTR_ERR(t) : -ENOENT; |
1367 | goto free; | 1367 | goto free; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | local_bh_disable(); | 1370 | local_bh_disable(); |
1371 | private = t->private; | 1371 | private = t->private; |
1372 | if (private->number != num_counters) { | 1372 | if (private->number != num_counters) { |
1373 | ret = -EINVAL; | 1373 | ret = -EINVAL; |
1374 | goto unlock_up_free; | 1374 | goto unlock_up_free; |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | i = 0; | 1377 | i = 0; |
1378 | /* Choose the copy that is on our node */ | 1378 | /* Choose the copy that is on our node */ |
1379 | curcpu = smp_processor_id(); | 1379 | curcpu = smp_processor_id(); |
1380 | loc_cpu_entry = private->entries[curcpu]; | 1380 | loc_cpu_entry = private->entries[curcpu]; |
1381 | xt_info_wrlock(curcpu); | 1381 | xt_info_wrlock(curcpu); |
1382 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { | 1382 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { |
1383 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); | 1383 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); |
1384 | ++i; | 1384 | ++i; |
1385 | } | 1385 | } |
1386 | xt_info_wrunlock(curcpu); | 1386 | xt_info_wrunlock(curcpu); |
1387 | unlock_up_free: | 1387 | unlock_up_free: |
1388 | local_bh_enable(); | 1388 | local_bh_enable(); |
1389 | xt_table_unlock(t); | 1389 | xt_table_unlock(t); |
1390 | module_put(t->me); | 1390 | module_put(t->me); |
1391 | free: | 1391 | free: |
1392 | vfree(paddc); | 1392 | vfree(paddc); |
1393 | 1393 | ||
1394 | return ret; | 1394 | return ret; |
1395 | } | 1395 | } |
1396 | 1396 | ||
#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ipt_replace.  The fields mirror the
 * native structure; only the pointer width (compat_uptr_t) and the
 * trailing entry layout (compat_ipt_entry) differ, so this must stay
 * byte-compatible with 32-bit iptables binaries.
 */
struct compat_ipt_replace {
	char name[IPT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct ipt_counters * */
	struct compat_ipt_entry entries[0];
};
1409 | 1409 | ||
/*
 * Convert one native ipt_entry (header, matches, target) into the 32-bit
 * compat layout at *dstptr, attaching counters[i].  Advances *dstptr and
 * shrinks *size by the accumulated native-vs-compat size difference.
 */
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	/* Fixed header plus this rule's counters go out first. */
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	/* Each match converts itself, advancing *dstptr / shrinking *size. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is how much the entry has shrunk so far;
	 * the stored offsets must shrink by the same amount. */
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the already-copied header with the adjusted offsets. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1448 | 1448 | ||
1449 | static int | 1449 | static int |
1450 | compat_find_calc_match(struct ipt_entry_match *m, | 1450 | compat_find_calc_match(struct ipt_entry_match *m, |
1451 | const char *name, | 1451 | const char *name, |
1452 | const struct ipt_ip *ip, | 1452 | const struct ipt_ip *ip, |
1453 | unsigned int hookmask, | 1453 | unsigned int hookmask, |
1454 | int *size) | 1454 | int *size) |
1455 | { | 1455 | { |
1456 | struct xt_match *match; | 1456 | struct xt_match *match; |
1457 | 1457 | ||
1458 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, | 1458 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
1459 | m->u.user.revision); | 1459 | m->u.user.revision); |
1460 | if (IS_ERR(match)) { | 1460 | if (IS_ERR(match)) { |
1461 | duprintf("compat_check_calc_match: `%s' not found\n", | 1461 | duprintf("compat_check_calc_match: `%s' not found\n", |
1462 | m->u.user.name); | 1462 | m->u.user.name); |
1463 | return PTR_ERR(match); | 1463 | return PTR_ERR(match); |
1464 | } | 1464 | } |
1465 | m->u.kernel.match = match; | 1465 | m->u.kernel.match = match; |
1466 | *size += xt_compat_match_offset(match); | 1466 | *size += xt_compat_match_offset(match); |
1467 | return 0; | 1467 | return 0; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | static void compat_release_entry(struct compat_ipt_entry *e) | 1470 | static void compat_release_entry(struct compat_ipt_entry *e) |
1471 | { | 1471 | { |
1472 | struct ipt_entry_target *t; | 1472 | struct ipt_entry_target *t; |
1473 | struct xt_entry_match *ematch; | 1473 | struct xt_entry_match *ematch; |
1474 | 1474 | ||
1475 | /* Cleanup all matches */ | 1475 | /* Cleanup all matches */ |
1476 | xt_ematch_foreach(ematch, e) | 1476 | xt_ematch_foreach(ematch, e) |
1477 | module_put(ematch->u.kernel.match->me); | 1477 | module_put(ematch->u.kernel.match->me); |
1478 | t = compat_ipt_get_target(e); | 1478 | t = compat_ipt_get_target(e); |
1479 | module_put(t->u.kernel.target->me); | 1479 | module_put(t->u.kernel.target->me); |
1480 | } | 1480 | } |
1481 | 1481 | ||
/*
 * First pass over one compat entry: bounds- and alignment-check it,
 * resolve its matches and target (taking module references), record the
 * compat->native size delta for later offset fixups, and note any hook
 * entry/underflow position it occupies.  On failure all references
 * taken so far are dropped.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be properly aligned and leave room for its header
	 * inside the blob. */
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* next_offset must cover at least header + minimal target. */
	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much bigger the native entry will be. */
	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* matches successfully resolved so far (for unwind) */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember this entry's size delta for the second-pass fixups. */
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Drop references only on the j matches resolved before failure. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1569 | 1569 | ||
/*
 * Second pass: expand one compat entry into the native layout at
 * *dstptr.  Matches and the target were already resolved in the first
 * pass; here they are converted in place-order and the recorded hook
 * entry/underflow offsets are shifted by the size growth.
 */
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	/* Each match converts itself, advancing *dstptr and growing *size. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth, so subtracting it
	 * enlarges the stored offsets to match the native layout. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Any recorded hook/underflow past this entry shifts as well. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1610 | 1610 | ||
1611 | static int | 1611 | static int |
1612 | compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) | 1612 | compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) |
1613 | { | 1613 | { |
1614 | struct xt_entry_match *ematch; | 1614 | struct xt_entry_match *ematch; |
1615 | struct xt_mtchk_param mtpar; | 1615 | struct xt_mtchk_param mtpar; |
1616 | unsigned int j; | 1616 | unsigned int j; |
1617 | int ret = 0; | 1617 | int ret = 0; |
1618 | 1618 | ||
1619 | j = 0; | 1619 | j = 0; |
1620 | mtpar.net = net; | 1620 | mtpar.net = net; |
1621 | mtpar.table = name; | 1621 | mtpar.table = name; |
1622 | mtpar.entryinfo = &e->ip; | 1622 | mtpar.entryinfo = &e->ip; |
1623 | mtpar.hook_mask = e->comefrom; | 1623 | mtpar.hook_mask = e->comefrom; |
1624 | mtpar.family = NFPROTO_IPV4; | 1624 | mtpar.family = NFPROTO_IPV4; |
1625 | xt_ematch_foreach(ematch, e) { | 1625 | xt_ematch_foreach(ematch, e) { |
1626 | ret = check_match(ematch, &mtpar); | 1626 | ret = check_match(ematch, &mtpar); |
1627 | if (ret != 0) | 1627 | if (ret != 0) |
1628 | goto cleanup_matches; | 1628 | goto cleanup_matches; |
1629 | ++j; | 1629 | ++j; |
1630 | } | 1630 | } |
1631 | 1631 | ||
1632 | ret = check_target(e, net, name); | 1632 | ret = check_target(e, net, name); |
1633 | if (ret) | 1633 | if (ret) |
1634 | goto cleanup_matches; | 1634 | goto cleanup_matches; |
1635 | return 0; | 1635 | return 0; |
1636 | 1636 | ||
1637 | cleanup_matches: | 1637 | cleanup_matches: |
1638 | xt_ematch_foreach(ematch, e) { | 1638 | xt_ematch_foreach(ematch, e) { |
1639 | if (j-- == 0) | 1639 | if (j-- == 0) |
1640 | break; | 1640 | break; |
1641 | cleanup_match(ematch, net); | 1641 | cleanup_match(ematch, net); |
1642 | } | 1642 | } |
1643 | return ret; | 1643 | return ret; |
1644 | } | 1644 | } |
1645 | 1645 | ||
1646 | static int | 1646 | static int |
1647 | translate_compat_table(struct net *net, | 1647 | translate_compat_table(struct net *net, |
1648 | const char *name, | 1648 | const char *name, |
1649 | unsigned int valid_hooks, | 1649 | unsigned int valid_hooks, |
1650 | struct xt_table_info **pinfo, | 1650 | struct xt_table_info **pinfo, |
1651 | void **pentry0, | 1651 | void **pentry0, |
1652 | unsigned int total_size, | 1652 | unsigned int total_size, |
1653 | unsigned int number, | 1653 | unsigned int number, |
1654 | unsigned int *hook_entries, | 1654 | unsigned int *hook_entries, |
1655 | unsigned int *underflows) | 1655 | unsigned int *underflows) |
1656 | { | 1656 | { |
1657 | unsigned int i, j; | 1657 | unsigned int i, j; |
1658 | struct xt_table_info *newinfo, *info; | 1658 | struct xt_table_info *newinfo, *info; |
1659 | void *pos, *entry0, *entry1; | 1659 | void *pos, *entry0, *entry1; |
1660 | struct compat_ipt_entry *iter0; | 1660 | struct compat_ipt_entry *iter0; |
1661 | struct ipt_entry *iter1; | 1661 | struct ipt_entry *iter1; |
1662 | unsigned int size; | 1662 | unsigned int size; |
1663 | int ret; | 1663 | int ret; |
1664 | 1664 | ||
1665 | info = *pinfo; | 1665 | info = *pinfo; |
1666 | entry0 = *pentry0; | 1666 | entry0 = *pentry0; |
1667 | size = total_size; | 1667 | size = total_size; |
1668 | info->number = number; | 1668 | info->number = number; |
1669 | 1669 | ||
1670 | /* Init all hooks to impossible value. */ | 1670 | /* Init all hooks to impossible value. */ |
1671 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1671 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1672 | info->hook_entry[i] = 0xFFFFFFFF; | 1672 | info->hook_entry[i] = 0xFFFFFFFF; |
1673 | info->underflow[i] = 0xFFFFFFFF; | 1673 | info->underflow[i] = 0xFFFFFFFF; |
1674 | } | 1674 | } |
1675 | 1675 | ||
1676 | duprintf("translate_compat_table: size %u\n", info->size); | 1676 | duprintf("translate_compat_table: size %u\n", info->size); |
1677 | j = 0; | 1677 | j = 0; |
1678 | xt_compat_lock(AF_INET); | 1678 | xt_compat_lock(AF_INET); |
1679 | /* Walk through entries, checking offsets. */ | 1679 | /* Walk through entries, checking offsets. */ |
1680 | xt_entry_foreach(iter0, entry0, total_size) { | 1680 | xt_entry_foreach(iter0, entry0, total_size) { |
1681 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, | 1681 | ret = check_compat_entry_size_and_hooks(iter0, info, &size, |
1682 | entry0, | 1682 | entry0, |
1683 | entry0 + total_size, | 1683 | entry0 + total_size, |
1684 | hook_entries, | 1684 | hook_entries, |
1685 | underflows, | 1685 | underflows, |
1686 | name); | 1686 | name); |
1687 | if (ret != 0) | 1687 | if (ret != 0) |
1688 | goto out_unlock; | 1688 | goto out_unlock; |
1689 | ++j; | 1689 | ++j; |
1690 | } | 1690 | } |
1691 | 1691 | ||
1692 | ret = -EINVAL; | 1692 | ret = -EINVAL; |
1693 | if (j != number) { | 1693 | if (j != number) { |
1694 | duprintf("translate_compat_table: %u not %u entries\n", | 1694 | duprintf("translate_compat_table: %u not %u entries\n", |
1695 | j, number); | 1695 | j, number); |
1696 | goto out_unlock; | 1696 | goto out_unlock; |
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | /* Check hooks all assigned */ | 1699 | /* Check hooks all assigned */ |
1700 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1700 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1701 | /* Only hooks which are valid */ | 1701 | /* Only hooks which are valid */ |
1702 | if (!(valid_hooks & (1 << i))) | 1702 | if (!(valid_hooks & (1 << i))) |
1703 | continue; | 1703 | continue; |
1704 | if (info->hook_entry[i] == 0xFFFFFFFF) { | 1704 | if (info->hook_entry[i] == 0xFFFFFFFF) { |
1705 | duprintf("Invalid hook entry %u %u\n", | 1705 | duprintf("Invalid hook entry %u %u\n", |
1706 | i, hook_entries[i]); | 1706 | i, hook_entries[i]); |
1707 | goto out_unlock; | 1707 | goto out_unlock; |
1708 | } | 1708 | } |
1709 | if (info->underflow[i] == 0xFFFFFFFF) { | 1709 | if (info->underflow[i] == 0xFFFFFFFF) { |
1710 | duprintf("Invalid underflow %u %u\n", | 1710 | duprintf("Invalid underflow %u %u\n", |
1711 | i, underflows[i]); | 1711 | i, underflows[i]); |
1712 | goto out_unlock; | 1712 | goto out_unlock; |
1713 | } | 1713 | } |
1714 | } | 1714 | } |
1715 | 1715 | ||
1716 | ret = -ENOMEM; | 1716 | ret = -ENOMEM; |
1717 | newinfo = xt_alloc_table_info(size); | 1717 | newinfo = xt_alloc_table_info(size); |
1718 | if (!newinfo) | 1718 | if (!newinfo) |
1719 | goto out_unlock; | 1719 | goto out_unlock; |
1720 | 1720 | ||
1721 | newinfo->number = number; | 1721 | newinfo->number = number; |
1722 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { | 1722 | for (i = 0; i < NF_INET_NUMHOOKS; i++) { |
1723 | newinfo->hook_entry[i] = info->hook_entry[i]; | 1723 | newinfo->hook_entry[i] = info->hook_entry[i]; |
1724 | newinfo->underflow[i] = info->underflow[i]; | 1724 | newinfo->underflow[i] = info->underflow[i]; |
1725 | } | 1725 | } |
1726 | entry1 = newinfo->entries[raw_smp_processor_id()]; | 1726 | entry1 = newinfo->entries[raw_smp_processor_id()]; |
1727 | pos = entry1; | 1727 | pos = entry1; |
1728 | size = total_size; | 1728 | size = total_size; |
1729 | xt_entry_foreach(iter0, entry0, total_size) { | 1729 | xt_entry_foreach(iter0, entry0, total_size) { |
1730 | ret = compat_copy_entry_from_user(iter0, &pos, &size, | 1730 | ret = compat_copy_entry_from_user(iter0, &pos, &size, |
1731 | name, newinfo, entry1); | 1731 | name, newinfo, entry1); |
1732 | if (ret != 0) | 1732 | if (ret != 0) |
1733 | break; | 1733 | break; |
1734 | } | 1734 | } |
1735 | xt_compat_flush_offsets(AF_INET); | 1735 | xt_compat_flush_offsets(AF_INET); |
1736 | xt_compat_unlock(AF_INET); | 1736 | xt_compat_unlock(AF_INET); |
1737 | if (ret) | 1737 | if (ret) |
1738 | goto free_newinfo; | 1738 | goto free_newinfo; |
1739 | 1739 | ||
1740 | ret = -ELOOP; | 1740 | ret = -ELOOP; |
1741 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) | 1741 | if (!mark_source_chains(newinfo, valid_hooks, entry1)) |
1742 | goto free_newinfo; | 1742 | goto free_newinfo; |
1743 | 1743 | ||
1744 | i = 0; | 1744 | i = 0; |
1745 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1745 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1746 | ret = compat_check_entry(iter1, net, name); | 1746 | ret = compat_check_entry(iter1, net, name); |
1747 | if (ret != 0) | 1747 | if (ret != 0) |
1748 | break; | 1748 | break; |
1749 | ++i; | 1749 | ++i; |
1750 | } | 1750 | } |
1751 | if (ret) { | 1751 | if (ret) { |
1752 | /* | 1752 | /* |
1753 | * The first i matches need cleanup_entry (calls ->destroy) | 1753 | * The first i matches need cleanup_entry (calls ->destroy) |
1754 | * because they had called ->check already. The other j-i | 1754 | * because they had called ->check already. The other j-i |
1755 | * entries need only release. | 1755 | * entries need only release. |
1756 | */ | 1756 | */ |
1757 | int skip = i; | 1757 | int skip = i; |
1758 | j -= i; | 1758 | j -= i; |
1759 | xt_entry_foreach(iter0, entry0, newinfo->size) { | 1759 | xt_entry_foreach(iter0, entry0, newinfo->size) { |
1760 | if (skip-- > 0) | 1760 | if (skip-- > 0) |
1761 | continue; | 1761 | continue; |
1762 | if (j-- == 0) | 1762 | if (j-- == 0) |
1763 | break; | 1763 | break; |
1764 | compat_release_entry(iter0); | 1764 | compat_release_entry(iter0); |
1765 | } | 1765 | } |
1766 | xt_entry_foreach(iter1, entry1, newinfo->size) { | 1766 | xt_entry_foreach(iter1, entry1, newinfo->size) { |
1767 | if (i-- == 0) | 1767 | if (i-- == 0) |
1768 | break; | 1768 | break; |
1769 | cleanup_entry(iter1, net); | 1769 | cleanup_entry(iter1, net); |
1770 | } | 1770 | } |
1771 | xt_free_table_info(newinfo); | 1771 | xt_free_table_info(newinfo); |
1772 | return ret; | 1772 | return ret; |
1773 | } | 1773 | } |
1774 | 1774 | ||
1775 | /* And one copy for every other CPU */ | 1775 | /* And one copy for every other CPU */ |
1776 | for_each_possible_cpu(i) | 1776 | for_each_possible_cpu(i) |
1777 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) | 1777 | if (newinfo->entries[i] && newinfo->entries[i] != entry1) |
1778 | memcpy(newinfo->entries[i], entry1, newinfo->size); | 1778 | memcpy(newinfo->entries[i], entry1, newinfo->size); |
1779 | 1779 | ||
1780 | *pinfo = newinfo; | 1780 | *pinfo = newinfo; |
1781 | *pentry0 = entry1; | 1781 | *pentry0 = entry1; |
1782 | xt_free_table_info(info); | 1782 | xt_free_table_info(info); |
1783 | return 0; | 1783 | return 0; |
1784 | 1784 | ||
1785 | free_newinfo: | 1785 | free_newinfo: |
1786 | xt_free_table_info(newinfo); | 1786 | xt_free_table_info(newinfo); |
1787 | out: | 1787 | out: |
1788 | xt_entry_foreach(iter0, entry0, total_size) { | 1788 | xt_entry_foreach(iter0, entry0, total_size) { |
1789 | if (j-- == 0) | 1789 | if (j-- == 0) |
1790 | break; | 1790 | break; |
1791 | compat_release_entry(iter0); | 1791 | compat_release_entry(iter0); |
1792 | } | 1792 | } |
1793 | return ret; | 1793 | return ret; |
1794 | out_unlock: | 1794 | out_unlock: |
1795 | xt_compat_flush_offsets(AF_INET); | 1795 | xt_compat_flush_offsets(AF_INET); |
1796 | xt_compat_unlock(AF_INET); | 1796 | xt_compat_unlock(AF_INET); |
1797 | goto out; | 1797 | goto out; |
1798 | } | 1798 | } |
1799 | 1799 | ||
1800 | static int | 1800 | static int |
1801 | compat_do_replace(struct net *net, void __user *user, unsigned int len) | 1801 | compat_do_replace(struct net *net, void __user *user, unsigned int len) |
1802 | { | 1802 | { |
1803 | int ret; | 1803 | int ret; |
1804 | struct compat_ipt_replace tmp; | 1804 | struct compat_ipt_replace tmp; |
1805 | struct xt_table_info *newinfo; | 1805 | struct xt_table_info *newinfo; |
1806 | void *loc_cpu_entry; | 1806 | void *loc_cpu_entry; |
1807 | struct ipt_entry *iter; | 1807 | struct ipt_entry *iter; |
1808 | 1808 | ||
1809 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1809 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1810 | return -EFAULT; | 1810 | return -EFAULT; |
1811 | 1811 | ||
1812 | /* overflow check */ | 1812 | /* overflow check */ |
1813 | if (tmp.size >= INT_MAX / num_possible_cpus()) | 1813 | if (tmp.size >= INT_MAX / num_possible_cpus()) |
1814 | return -ENOMEM; | 1814 | return -ENOMEM; |
1815 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1815 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1816 | return -ENOMEM; | 1816 | return -ENOMEM; |
1817 | 1817 | ||
1818 | newinfo = xt_alloc_table_info(tmp.size); | 1818 | newinfo = xt_alloc_table_info(tmp.size); |
1819 | if (!newinfo) | 1819 | if (!newinfo) |
1820 | return -ENOMEM; | 1820 | return -ENOMEM; |
1821 | 1821 | ||
1822 | /* choose the copy that is on our node/cpu */ | 1822 | /* choose the copy that is on our node/cpu */ |
1823 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1823 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1824 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1824 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1825 | tmp.size) != 0) { | 1825 | tmp.size) != 0) { |
1826 | ret = -EFAULT; | 1826 | ret = -EFAULT; |
1827 | goto free_newinfo; | 1827 | goto free_newinfo; |
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, | 1830 | ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, |
1831 | &newinfo, &loc_cpu_entry, tmp.size, | 1831 | &newinfo, &loc_cpu_entry, tmp.size, |
1832 | tmp.num_entries, tmp.hook_entry, | 1832 | tmp.num_entries, tmp.hook_entry, |
1833 | tmp.underflow); | 1833 | tmp.underflow); |
1834 | if (ret != 0) | 1834 | if (ret != 0) |
1835 | goto free_newinfo; | 1835 | goto free_newinfo; |
1836 | 1836 | ||
1837 | duprintf("compat_do_replace: Translated table\n"); | 1837 | duprintf("compat_do_replace: Translated table\n"); |
1838 | 1838 | ||
1839 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1839 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1840 | tmp.num_counters, compat_ptr(tmp.counters)); | 1840 | tmp.num_counters, compat_ptr(tmp.counters)); |
1841 | if (ret) | 1841 | if (ret) |
1842 | goto free_newinfo_untrans; | 1842 | goto free_newinfo_untrans; |
1843 | return 0; | 1843 | return 0; |
1844 | 1844 | ||
1845 | free_newinfo_untrans: | 1845 | free_newinfo_untrans: |
1846 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1846 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1847 | cleanup_entry(iter, net); | 1847 | cleanup_entry(iter, net); |
1848 | free_newinfo: | 1848 | free_newinfo: |
1849 | xt_free_table_info(newinfo); | 1849 | xt_free_table_info(newinfo); |
1850 | return ret; | 1850 | return ret; |
1851 | } | 1851 | } |
1852 | 1852 | ||
1853 | static int | 1853 | static int |
1854 | compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, | 1854 | compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, |
1855 | unsigned int len) | 1855 | unsigned int len) |
1856 | { | 1856 | { |
1857 | int ret; | 1857 | int ret; |
1858 | 1858 | ||
1859 | if (!capable(CAP_NET_ADMIN)) | 1859 | if (!capable(CAP_NET_ADMIN)) |
1860 | return -EPERM; | 1860 | return -EPERM; |
1861 | 1861 | ||
1862 | switch (cmd) { | 1862 | switch (cmd) { |
1863 | case IPT_SO_SET_REPLACE: | 1863 | case IPT_SO_SET_REPLACE: |
1864 | ret = compat_do_replace(sock_net(sk), user, len); | 1864 | ret = compat_do_replace(sock_net(sk), user, len); |
1865 | break; | 1865 | break; |
1866 | 1866 | ||
1867 | case IPT_SO_SET_ADD_COUNTERS: | 1867 | case IPT_SO_SET_ADD_COUNTERS: |
1868 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1868 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1869 | break; | 1869 | break; |
1870 | 1870 | ||
1871 | default: | 1871 | default: |
1872 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); | 1872 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); |
1873 | ret = -EINVAL; | 1873 | ret = -EINVAL; |
1874 | } | 1874 | } |
1875 | 1875 | ||
1876 | return ret; | 1876 | return ret; |
1877 | } | 1877 | } |
1878 | 1878 | ||
1879 | struct compat_ipt_get_entries { | 1879 | struct compat_ipt_get_entries { |
1880 | char name[IPT_TABLE_MAXNAMELEN]; | 1880 | char name[IPT_TABLE_MAXNAMELEN]; |
1881 | compat_uint_t size; | 1881 | compat_uint_t size; |
1882 | struct compat_ipt_entry entrytable[0]; | 1882 | struct compat_ipt_entry entrytable[0]; |
1883 | }; | 1883 | }; |
1884 | 1884 | ||
1885 | static int | 1885 | static int |
1886 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, | 1886 | compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, |
1887 | void __user *userptr) | 1887 | void __user *userptr) |
1888 | { | 1888 | { |
1889 | struct xt_counters *counters; | 1889 | struct xt_counters *counters; |
1890 | const struct xt_table_info *private = table->private; | 1890 | const struct xt_table_info *private = table->private; |
1891 | void __user *pos; | 1891 | void __user *pos; |
1892 | unsigned int size; | 1892 | unsigned int size; |
1893 | int ret = 0; | 1893 | int ret = 0; |
1894 | const void *loc_cpu_entry; | 1894 | const void *loc_cpu_entry; |
1895 | unsigned int i = 0; | 1895 | unsigned int i = 0; |
1896 | struct ipt_entry *iter; | 1896 | struct ipt_entry *iter; |
1897 | 1897 | ||
1898 | counters = alloc_counters(table); | 1898 | counters = alloc_counters(table); |
1899 | if (IS_ERR(counters)) | 1899 | if (IS_ERR(counters)) |
1900 | return PTR_ERR(counters); | 1900 | return PTR_ERR(counters); |
1901 | 1901 | ||
1902 | /* choose the copy that is on our node/cpu, ... | 1902 | /* choose the copy that is on our node/cpu, ... |
1903 | * This choice is lazy (because current thread is | 1903 | * This choice is lazy (because current thread is |
1904 | * allowed to migrate to another cpu) | 1904 | * allowed to migrate to another cpu) |
1905 | */ | 1905 | */ |
1906 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1906 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
1907 | pos = userptr; | 1907 | pos = userptr; |
1908 | size = total_size; | 1908 | size = total_size; |
1909 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { | 1909 | xt_entry_foreach(iter, loc_cpu_entry, total_size) { |
1910 | ret = compat_copy_entry_to_user(iter, &pos, | 1910 | ret = compat_copy_entry_to_user(iter, &pos, |
1911 | &size, counters, i++); | 1911 | &size, counters, i++); |
1912 | if (ret != 0) | 1912 | if (ret != 0) |
1913 | break; | 1913 | break; |
1914 | } | 1914 | } |
1915 | 1915 | ||
1916 | vfree(counters); | 1916 | vfree(counters); |
1917 | return ret; | 1917 | return ret; |
1918 | } | 1918 | } |
1919 | 1919 | ||
1920 | static int | 1920 | static int |
1921 | compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, | 1921 | compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, |
1922 | int *len) | 1922 | int *len) |
1923 | { | 1923 | { |
1924 | int ret; | 1924 | int ret; |
1925 | struct compat_ipt_get_entries get; | 1925 | struct compat_ipt_get_entries get; |
1926 | struct xt_table *t; | 1926 | struct xt_table *t; |
1927 | 1927 | ||
1928 | if (*len < sizeof(get)) { | 1928 | if (*len < sizeof(get)) { |
1929 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); | 1929 | duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); |
1930 | return -EINVAL; | 1930 | return -EINVAL; |
1931 | } | 1931 | } |
1932 | 1932 | ||
1933 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) | 1933 | if (copy_from_user(&get, uptr, sizeof(get)) != 0) |
1934 | return -EFAULT; | 1934 | return -EFAULT; |
1935 | 1935 | ||
1936 | if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { | 1936 | if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { |
1937 | duprintf("compat_get_entries: %u != %zu\n", | 1937 | duprintf("compat_get_entries: %u != %zu\n", |
1938 | *len, sizeof(get) + get.size); | 1938 | *len, sizeof(get) + get.size); |
1939 | return -EINVAL; | 1939 | return -EINVAL; |
1940 | } | 1940 | } |
1941 | 1941 | ||
1942 | xt_compat_lock(AF_INET); | 1942 | xt_compat_lock(AF_INET); |
1943 | t = xt_find_table_lock(net, AF_INET, get.name); | 1943 | t = xt_find_table_lock(net, AF_INET, get.name); |
1944 | if (t && !IS_ERR(t)) { | 1944 | if (t && !IS_ERR(t)) { |
1945 | const struct xt_table_info *private = t->private; | 1945 | const struct xt_table_info *private = t->private; |
1946 | struct xt_table_info info; | 1946 | struct xt_table_info info; |
1947 | duprintf("t->private->number = %u\n", private->number); | 1947 | duprintf("t->private->number = %u\n", private->number); |
1948 | ret = compat_table_info(private, &info); | 1948 | ret = compat_table_info(private, &info); |
1949 | if (!ret && get.size == info.size) { | 1949 | if (!ret && get.size == info.size) { |
1950 | ret = compat_copy_entries_to_user(private->size, | 1950 | ret = compat_copy_entries_to_user(private->size, |
1951 | t, uptr->entrytable); | 1951 | t, uptr->entrytable); |
1952 | } else if (!ret) { | 1952 | } else if (!ret) { |
1953 | duprintf("compat_get_entries: I've got %u not %u!\n", | 1953 | duprintf("compat_get_entries: I've got %u not %u!\n", |
1954 | private->size, get.size); | 1954 | private->size, get.size); |
1955 | ret = -EAGAIN; | 1955 | ret = -EAGAIN; |
1956 | } | 1956 | } |
1957 | xt_compat_flush_offsets(AF_INET); | 1957 | xt_compat_flush_offsets(AF_INET); |
1958 | module_put(t->me); | 1958 | module_put(t->me); |
1959 | xt_table_unlock(t); | 1959 | xt_table_unlock(t); |
1960 | } else | 1960 | } else |
1961 | ret = t ? PTR_ERR(t) : -ENOENT; | 1961 | ret = t ? PTR_ERR(t) : -ENOENT; |
1962 | 1962 | ||
1963 | xt_compat_unlock(AF_INET); | 1963 | xt_compat_unlock(AF_INET); |
1964 | return ret; | 1964 | return ret; |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); | 1967 | static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); |
1968 | 1968 | ||
1969 | static int | 1969 | static int |
1970 | compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1970 | compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1971 | { | 1971 | { |
1972 | int ret; | 1972 | int ret; |
1973 | 1973 | ||
1974 | if (!capable(CAP_NET_ADMIN)) | 1974 | if (!capable(CAP_NET_ADMIN)) |
1975 | return -EPERM; | 1975 | return -EPERM; |
1976 | 1976 | ||
1977 | switch (cmd) { | 1977 | switch (cmd) { |
1978 | case IPT_SO_GET_INFO: | 1978 | case IPT_SO_GET_INFO: |
1979 | ret = get_info(sock_net(sk), user, len, 1); | 1979 | ret = get_info(sock_net(sk), user, len, 1); |
1980 | break; | 1980 | break; |
1981 | case IPT_SO_GET_ENTRIES: | 1981 | case IPT_SO_GET_ENTRIES: |
1982 | ret = compat_get_entries(sock_net(sk), user, len); | 1982 | ret = compat_get_entries(sock_net(sk), user, len); |
1983 | break; | 1983 | break; |
1984 | default: | 1984 | default: |
1985 | ret = do_ipt_get_ctl(sk, cmd, user, len); | 1985 | ret = do_ipt_get_ctl(sk, cmd, user, len); |
1986 | } | 1986 | } |
1987 | return ret; | 1987 | return ret; |
1988 | } | 1988 | } |
1989 | #endif | 1989 | #endif |
1990 | 1990 | ||
1991 | static int | 1991 | static int |
1992 | do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 1992 | do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
1993 | { | 1993 | { |
1994 | int ret; | 1994 | int ret; |
1995 | 1995 | ||
1996 | if (!capable(CAP_NET_ADMIN)) | 1996 | if (!capable(CAP_NET_ADMIN)) |
1997 | return -EPERM; | 1997 | return -EPERM; |
1998 | 1998 | ||
1999 | switch (cmd) { | 1999 | switch (cmd) { |
2000 | case IPT_SO_SET_REPLACE: | 2000 | case IPT_SO_SET_REPLACE: |
2001 | ret = do_replace(sock_net(sk), user, len); | 2001 | ret = do_replace(sock_net(sk), user, len); |
2002 | break; | 2002 | break; |
2003 | 2003 | ||
2004 | case IPT_SO_SET_ADD_COUNTERS: | 2004 | case IPT_SO_SET_ADD_COUNTERS: |
2005 | ret = do_add_counters(sock_net(sk), user, len, 0); | 2005 | ret = do_add_counters(sock_net(sk), user, len, 0); |
2006 | break; | 2006 | break; |
2007 | 2007 | ||
2008 | default: | 2008 | default: |
2009 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); | 2009 | duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); |
2010 | ret = -EINVAL; | 2010 | ret = -EINVAL; |
2011 | } | 2011 | } |
2012 | 2012 | ||
2013 | return ret; | 2013 | return ret; |
2014 | } | 2014 | } |
2015 | 2015 | ||
2016 | static int | 2016 | static int |
2017 | do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 2017 | do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
2018 | { | 2018 | { |
2019 | int ret; | 2019 | int ret; |
2020 | 2020 | ||
2021 | if (!capable(CAP_NET_ADMIN)) | 2021 | if (!capable(CAP_NET_ADMIN)) |
2022 | return -EPERM; | 2022 | return -EPERM; |
2023 | 2023 | ||
2024 | switch (cmd) { | 2024 | switch (cmd) { |
2025 | case IPT_SO_GET_INFO: | 2025 | case IPT_SO_GET_INFO: |
2026 | ret = get_info(sock_net(sk), user, len, 0); | 2026 | ret = get_info(sock_net(sk), user, len, 0); |
2027 | break; | 2027 | break; |
2028 | 2028 | ||
2029 | case IPT_SO_GET_ENTRIES: | 2029 | case IPT_SO_GET_ENTRIES: |
2030 | ret = get_entries(sock_net(sk), user, len); | 2030 | ret = get_entries(sock_net(sk), user, len); |
2031 | break; | 2031 | break; |
2032 | 2032 | ||
2033 | case IPT_SO_GET_REVISION_MATCH: | 2033 | case IPT_SO_GET_REVISION_MATCH: |
2034 | case IPT_SO_GET_REVISION_TARGET: { | 2034 | case IPT_SO_GET_REVISION_TARGET: { |
2035 | struct ipt_get_revision rev; | 2035 | struct ipt_get_revision rev; |
2036 | int target; | 2036 | int target; |
2037 | 2037 | ||
2038 | if (*len != sizeof(rev)) { | 2038 | if (*len != sizeof(rev)) { |
2039 | ret = -EINVAL; | 2039 | ret = -EINVAL; |
2040 | break; | 2040 | break; |
2041 | } | 2041 | } |
2042 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 2042 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
2043 | ret = -EFAULT; | 2043 | ret = -EFAULT; |
2044 | break; | 2044 | break; |
2045 | } | 2045 | } |
2046 | 2046 | ||
2047 | if (cmd == IPT_SO_GET_REVISION_TARGET) | 2047 | if (cmd == IPT_SO_GET_REVISION_TARGET) |
2048 | target = 1; | 2048 | target = 1; |
2049 | else | 2049 | else |
2050 | target = 0; | 2050 | target = 0; |
2051 | 2051 | ||
2052 | try_then_request_module(xt_find_revision(AF_INET, rev.name, | 2052 | try_then_request_module(xt_find_revision(AF_INET, rev.name, |
2053 | rev.revision, | 2053 | rev.revision, |
2054 | target, &ret), | 2054 | target, &ret), |
2055 | "ipt_%s", rev.name); | 2055 | "ipt_%s", rev.name); |
2056 | break; | 2056 | break; |
2057 | } | 2057 | } |
2058 | 2058 | ||
2059 | default: | 2059 | default: |
2060 | duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); | 2060 | duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); |
2061 | ret = -EINVAL; | 2061 | ret = -EINVAL; |
2062 | } | 2062 | } |
2063 | 2063 | ||
2064 | return ret; | 2064 | return ret; |
2065 | } | 2065 | } |
2066 | 2066 | ||
2067 | struct xt_table *ipt_register_table(struct net *net, | 2067 | struct xt_table *ipt_register_table(struct net *net, |
2068 | const struct xt_table *table, | 2068 | const struct xt_table *table, |
2069 | const struct ipt_replace *repl) | 2069 | const struct ipt_replace *repl) |
2070 | { | 2070 | { |
2071 | int ret; | 2071 | int ret; |
2072 | struct xt_table_info *newinfo; | 2072 | struct xt_table_info *newinfo; |
2073 | struct xt_table_info bootstrap = {0}; | 2073 | struct xt_table_info bootstrap = {0}; |
2074 | void *loc_cpu_entry; | 2074 | void *loc_cpu_entry; |
2075 | struct xt_table *new_table; | 2075 | struct xt_table *new_table; |
2076 | 2076 | ||
2077 | newinfo = xt_alloc_table_info(repl->size); | 2077 | newinfo = xt_alloc_table_info(repl->size); |
2078 | if (!newinfo) { | 2078 | if (!newinfo) { |
2079 | ret = -ENOMEM; | 2079 | ret = -ENOMEM; |
2080 | goto out; | 2080 | goto out; |
2081 | } | 2081 | } |
2082 | 2082 | ||
2083 | /* choose the copy on our node/cpu, but dont care about preemption */ | 2083 | /* choose the copy on our node/cpu, but dont care about preemption */ |
2084 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 2084 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
2085 | memcpy(loc_cpu_entry, repl->entries, repl->size); | 2085 | memcpy(loc_cpu_entry, repl->entries, repl->size); |
2086 | 2086 | ||
2087 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); | 2087 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); |
2088 | if (ret != 0) | 2088 | if (ret != 0) |
2089 | goto out_free; | 2089 | goto out_free; |
2090 | 2090 | ||
2091 | new_table = xt_register_table(net, table, &bootstrap, newinfo); | 2091 | new_table = xt_register_table(net, table, &bootstrap, newinfo); |
2092 | if (IS_ERR(new_table)) { | 2092 | if (IS_ERR(new_table)) { |
2093 | ret = PTR_ERR(new_table); | 2093 | ret = PTR_ERR(new_table); |
2094 | goto out_free; | 2094 | goto out_free; |
2095 | } | 2095 | } |
2096 | 2096 | ||
2097 | return new_table; | 2097 | return new_table; |
2098 | 2098 | ||
2099 | out_free: | 2099 | out_free: |
2100 | xt_free_table_info(newinfo); | 2100 | xt_free_table_info(newinfo); |
2101 | out: | 2101 | out: |
2102 | return ERR_PTR(ret); | 2102 | return ERR_PTR(ret); |
2103 | } | 2103 | } |
2104 | 2104 | ||
2105 | void ipt_unregister_table(struct net *net, struct xt_table *table) | 2105 | void ipt_unregister_table(struct net *net, struct xt_table *table) |
2106 | { | 2106 | { |
2107 | struct xt_table_info *private; | 2107 | struct xt_table_info *private; |
2108 | void *loc_cpu_entry; | 2108 | void *loc_cpu_entry; |
2109 | struct module *table_owner = table->me; | 2109 | struct module *table_owner = table->me; |
2110 | struct ipt_entry *iter; | 2110 | struct ipt_entry *iter; |
2111 | 2111 | ||
2112 | private = xt_unregister_table(table); | 2112 | private = xt_unregister_table(table); |
2113 | 2113 | ||
2114 | /* Decrease module usage counts and free resources */ | 2114 | /* Decrease module usage counts and free resources */ |
2115 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 2115 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
2116 | xt_entry_foreach(iter, loc_cpu_entry, private->size) | 2116 | xt_entry_foreach(iter, loc_cpu_entry, private->size) |
2117 | cleanup_entry(iter, net); | 2117 | cleanup_entry(iter, net); |
2118 | if (private->number > private->initial_entries) | 2118 | if (private->number > private->initial_entries) |
2119 | module_put(table_owner); | 2119 | module_put(table_owner); |
2120 | xt_free_table_info(private); | 2120 | xt_free_table_info(private); |
2121 | } | 2121 | } |
2122 | 2122 | ||
2123 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ | 2123 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ |
2124 | static inline bool | 2124 | static inline bool |
2125 | icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, | 2125 | icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, |
2126 | u_int8_t type, u_int8_t code, | 2126 | u_int8_t type, u_int8_t code, |
2127 | bool invert) | 2127 | bool invert) |
2128 | { | 2128 | { |
2129 | return ((test_type == 0xFF) || | 2129 | return ((test_type == 0xFF) || |
2130 | (type == test_type && code >= min_code && code <= max_code)) | 2130 | (type == test_type && code >= min_code && code <= max_code)) |
2131 | ^ invert; | 2131 | ^ invert; |
2132 | } | 2132 | } |
2133 | 2133 | ||
2134 | static bool | 2134 | static bool |
2135 | icmp_match(const struct sk_buff *skb, struct xt_action_param *par) | 2135 | icmp_match(const struct sk_buff *skb, struct xt_action_param *par) |
2136 | { | 2136 | { |
2137 | const struct icmphdr *ic; | 2137 | const struct icmphdr *ic; |
2138 | struct icmphdr _icmph; | 2138 | struct icmphdr _icmph; |
2139 | const struct ipt_icmp *icmpinfo = par->matchinfo; | 2139 | const struct ipt_icmp *icmpinfo = par->matchinfo; |
2140 | 2140 | ||
2141 | /* Must not be a fragment. */ | 2141 | /* Must not be a fragment. */ |
2142 | if (par->fragoff != 0) | 2142 | if (par->fragoff != 0) |
2143 | return false; | 2143 | return false; |
2144 | 2144 | ||
2145 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); | 2145 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); |
2146 | if (ic == NULL) { | 2146 | if (ic == NULL) { |
2147 | /* We've been asked to examine this packet, and we | 2147 | /* We've been asked to examine this packet, and we |
2148 | * can't. Hence, no choice but to drop. | 2148 | * can't. Hence, no choice but to drop. |
2149 | */ | 2149 | */ |
2150 | duprintf("Dropping evil ICMP tinygram.\n"); | 2150 | duprintf("Dropping evil ICMP tinygram.\n"); |
2151 | par->hotdrop = true; | 2151 | par->hotdrop = true; |
2152 | return false; | 2152 | return false; |
2153 | } | 2153 | } |
2154 | 2154 | ||
2155 | return icmp_type_code_match(icmpinfo->type, | 2155 | return icmp_type_code_match(icmpinfo->type, |
2156 | icmpinfo->code[0], | 2156 | icmpinfo->code[0], |
2157 | icmpinfo->code[1], | 2157 | icmpinfo->code[1], |
2158 | ic->type, ic->code, | 2158 | ic->type, ic->code, |
2159 | !!(icmpinfo->invflags&IPT_ICMP_INV)); | 2159 | !!(icmpinfo->invflags&IPT_ICMP_INV)); |
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | static int icmp_checkentry(const struct xt_mtchk_param *par) | 2162 | static int icmp_checkentry(const struct xt_mtchk_param *par) |
2163 | { | 2163 | { |
2164 | const struct ipt_icmp *icmpinfo = par->matchinfo; | 2164 | const struct ipt_icmp *icmpinfo = par->matchinfo; |
2165 | 2165 | ||
2166 | /* Must specify no unknown invflags */ | 2166 | /* Must specify no unknown invflags */ |
2167 | return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; | 2167 | return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; |
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | static struct xt_target ipt_builtin_tg[] __read_mostly = { | 2170 | static struct xt_target ipt_builtin_tg[] __read_mostly = { |
2171 | { | 2171 | { |
2172 | .name = IPT_STANDARD_TARGET, | 2172 | .name = IPT_STANDARD_TARGET, |
2173 | .targetsize = sizeof(int), | 2173 | .targetsize = sizeof(int), |
2174 | .family = NFPROTO_IPV4, | 2174 | .family = NFPROTO_IPV4, |
2175 | #ifdef CONFIG_COMPAT | 2175 | #ifdef CONFIG_COMPAT |
2176 | .compatsize = sizeof(compat_int_t), | 2176 | .compatsize = sizeof(compat_int_t), |
2177 | .compat_from_user = compat_standard_from_user, | 2177 | .compat_from_user = compat_standard_from_user, |
2178 | .compat_to_user = compat_standard_to_user, | 2178 | .compat_to_user = compat_standard_to_user, |
2179 | #endif | 2179 | #endif |
2180 | }, | 2180 | }, |
2181 | { | 2181 | { |
2182 | .name = IPT_ERROR_TARGET, | 2182 | .name = IPT_ERROR_TARGET, |
2183 | .target = ipt_error, | 2183 | .target = ipt_error, |
2184 | .targetsize = IPT_FUNCTION_MAXNAMELEN, | 2184 | .targetsize = IPT_FUNCTION_MAXNAMELEN, |
2185 | .family = NFPROTO_IPV4, | 2185 | .family = NFPROTO_IPV4, |
2186 | }, | 2186 | }, |
2187 | }; | 2187 | }; |
2188 | 2188 | ||
2189 | static struct nf_sockopt_ops ipt_sockopts = { | 2189 | static struct nf_sockopt_ops ipt_sockopts = { |
2190 | .pf = PF_INET, | 2190 | .pf = PF_INET, |
2191 | .set_optmin = IPT_BASE_CTL, | 2191 | .set_optmin = IPT_BASE_CTL, |
2192 | .set_optmax = IPT_SO_SET_MAX+1, | 2192 | .set_optmax = IPT_SO_SET_MAX+1, |
2193 | .set = do_ipt_set_ctl, | 2193 | .set = do_ipt_set_ctl, |
2194 | #ifdef CONFIG_COMPAT | 2194 | #ifdef CONFIG_COMPAT |
2195 | .compat_set = compat_do_ipt_set_ctl, | 2195 | .compat_set = compat_do_ipt_set_ctl, |
2196 | #endif | 2196 | #endif |
2197 | .get_optmin = IPT_BASE_CTL, | 2197 | .get_optmin = IPT_BASE_CTL, |
2198 | .get_optmax = IPT_SO_GET_MAX+1, | 2198 | .get_optmax = IPT_SO_GET_MAX+1, |
2199 | .get = do_ipt_get_ctl, | 2199 | .get = do_ipt_get_ctl, |
2200 | #ifdef CONFIG_COMPAT | 2200 | #ifdef CONFIG_COMPAT |
2201 | .compat_get = compat_do_ipt_get_ctl, | 2201 | .compat_get = compat_do_ipt_get_ctl, |
2202 | #endif | 2202 | #endif |
2203 | .owner = THIS_MODULE, | 2203 | .owner = THIS_MODULE, |
2204 | }; | 2204 | }; |
2205 | 2205 | ||
2206 | static struct xt_match ipt_builtin_mt[] __read_mostly = { | 2206 | static struct xt_match ipt_builtin_mt[] __read_mostly = { |
2207 | { | 2207 | { |
2208 | .name = "icmp", | 2208 | .name = "icmp", |
2209 | .match = icmp_match, | 2209 | .match = icmp_match, |
2210 | .matchsize = sizeof(struct ipt_icmp), | 2210 | .matchsize = sizeof(struct ipt_icmp), |
2211 | .checkentry = icmp_checkentry, | 2211 | .checkentry = icmp_checkentry, |
2212 | .proto = IPPROTO_ICMP, | 2212 | .proto = IPPROTO_ICMP, |
2213 | .family = NFPROTO_IPV4, | 2213 | .family = NFPROTO_IPV4, |
2214 | }, | 2214 | }, |
2215 | }; | 2215 | }; |
2216 | 2216 | ||
2217 | static int __net_init ip_tables_net_init(struct net *net) | 2217 | static int __net_init ip_tables_net_init(struct net *net) |
2218 | { | 2218 | { |
2219 | return xt_proto_init(net, NFPROTO_IPV4); | 2219 | return xt_proto_init(net, NFPROTO_IPV4); |
2220 | } | 2220 | } |
2221 | 2221 | ||
2222 | static void __net_exit ip_tables_net_exit(struct net *net) | 2222 | static void __net_exit ip_tables_net_exit(struct net *net) |
2223 | { | 2223 | { |
2224 | xt_proto_fini(net, NFPROTO_IPV4); | 2224 | xt_proto_fini(net, NFPROTO_IPV4); |
2225 | } | 2225 | } |
2226 | 2226 | ||
2227 | static struct pernet_operations ip_tables_net_ops = { | 2227 | static struct pernet_operations ip_tables_net_ops = { |
2228 | .init = ip_tables_net_init, | 2228 | .init = ip_tables_net_init, |
2229 | .exit = ip_tables_net_exit, | 2229 | .exit = ip_tables_net_exit, |
2230 | }; | 2230 | }; |
2231 | 2231 | ||
2232 | static int __init ip_tables_init(void) | 2232 | static int __init ip_tables_init(void) |
2233 | { | 2233 | { |
2234 | int ret; | 2234 | int ret; |
2235 | 2235 | ||
2236 | ret = register_pernet_subsys(&ip_tables_net_ops); | 2236 | ret = register_pernet_subsys(&ip_tables_net_ops); |
2237 | if (ret < 0) | 2237 | if (ret < 0) |
2238 | goto err1; | 2238 | goto err1; |
2239 | 2239 | ||
2240 | /* No one else will be downing sem now, so we won't sleep */ | 2240 | /* No one else will be downing sem now, so we won't sleep */ |
2241 | ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2241 | ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2242 | if (ret < 0) | 2242 | if (ret < 0) |
2243 | goto err2; | 2243 | goto err2; |
2244 | ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2244 | ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2245 | if (ret < 0) | 2245 | if (ret < 0) |
2246 | goto err4; | 2246 | goto err4; |
2247 | 2247 | ||
2248 | /* Register setsockopt */ | 2248 | /* Register setsockopt */ |
2249 | ret = nf_register_sockopt(&ipt_sockopts); | 2249 | ret = nf_register_sockopt(&ipt_sockopts); |
2250 | if (ret < 0) | 2250 | if (ret < 0) |
2251 | goto err5; | 2251 | goto err5; |
2252 | 2252 | ||
2253 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); | 2253 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2254 | return 0; | 2254 | return 0; |
2255 | 2255 | ||
2256 | err5: | 2256 | err5: |
2257 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2257 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2258 | err4: | 2258 | err4: |
2259 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2259 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2260 | err2: | 2260 | err2: |
2261 | unregister_pernet_subsys(&ip_tables_net_ops); | 2261 | unregister_pernet_subsys(&ip_tables_net_ops); |
2262 | err1: | 2262 | err1: |
2263 | return ret; | 2263 | return ret; |
2264 | } | 2264 | } |
2265 | 2265 | ||
2266 | static void __exit ip_tables_fini(void) | 2266 | static void __exit ip_tables_fini(void) |
2267 | { | 2267 | { |
2268 | nf_unregister_sockopt(&ipt_sockopts); | 2268 | nf_unregister_sockopt(&ipt_sockopts); |
2269 | 2269 | ||
2270 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); | 2270 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2271 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); | 2271 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2272 | unregister_pernet_subsys(&ip_tables_net_ops); | 2272 | unregister_pernet_subsys(&ip_tables_net_ops); |
2273 | } | 2273 | } |
2274 | 2274 | ||
2275 | EXPORT_SYMBOL(ipt_register_table); | 2275 | EXPORT_SYMBOL(ipt_register_table); |
2276 | EXPORT_SYMBOL(ipt_unregister_table); | 2276 | EXPORT_SYMBOL(ipt_unregister_table); |
2277 | EXPORT_SYMBOL(ipt_do_table); | 2277 | EXPORT_SYMBOL(ipt_do_table); |
2278 | module_init(ip_tables_init); | 2278 | module_init(ip_tables_init); |
2279 | module_exit(ip_tables_fini); | 2279 | module_exit(ip_tables_fini); |
2280 | 2280 |
net/ipv4/netfilter/iptable_filter.c
1 | /* | 1 | /* |
2 | * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. | 2 | * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/netfilter_ipv4/ip_tables.h> | 15 | #include <linux/netfilter_ipv4/ip_tables.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <net/ip.h> | 17 | #include <net/ip.h> |
18 | 18 | ||
19 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
20 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 20 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
21 | MODULE_DESCRIPTION("iptables filter table"); | 21 | MODULE_DESCRIPTION("iptables filter table"); |
22 | 22 | ||
23 | #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ | 23 | #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ |
24 | (1 << NF_INET_FORWARD) | \ | 24 | (1 << NF_INET_FORWARD) | \ |
25 | (1 << NF_INET_LOCAL_OUT)) | 25 | (1 << NF_INET_LOCAL_OUT)) |
26 | 26 | ||
27 | static const struct xt_table packet_filter = { | 27 | static const struct xt_table packet_filter = { |
28 | .name = "filter", | 28 | .name = "filter", |
29 | .valid_hooks = FILTER_VALID_HOOKS, | 29 | .valid_hooks = FILTER_VALID_HOOKS, |
30 | .me = THIS_MODULE, | 30 | .me = THIS_MODULE, |
31 | .af = NFPROTO_IPV4, | 31 | .af = NFPROTO_IPV4, |
32 | .priority = NF_IP_PRI_FILTER, | 32 | .priority = NF_IP_PRI_FILTER, |
33 | }; | 33 | }; |
34 | 34 | ||
35 | static unsigned int | 35 | static unsigned int |
36 | iptable_filter_hook(unsigned int hook, struct sk_buff *skb, | 36 | iptable_filter_hook(unsigned int hook, struct sk_buff *skb, |
37 | const struct net_device *in, const struct net_device *out, | 37 | const struct net_device *in, const struct net_device *out, |
38 | int (*okfn)(struct sk_buff *)) | 38 | int (*okfn)(struct sk_buff *)) |
39 | { | 39 | { |
40 | const struct net *net; | 40 | const struct net *net; |
41 | 41 | ||
42 | if (hook == NF_INET_LOCAL_OUT && | 42 | if (hook == NF_INET_LOCAL_OUT && |
43 | (skb->len < sizeof(struct iphdr) || | 43 | (skb->len < sizeof(struct iphdr) || |
44 | ip_hdrlen(skb) < sizeof(struct iphdr))) | 44 | ip_hdrlen(skb) < sizeof(struct iphdr))) |
45 | /* root is playing with raw sockets. */ | 45 | /* root is playing with raw sockets. */ |
46 | return NF_ACCEPT; | 46 | return NF_ACCEPT; |
47 | 47 | ||
48 | net = dev_net((in != NULL) ? in : out); | 48 | net = dev_net((in != NULL) ? in : out); |
49 | return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter); | 49 | return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter); |
50 | } | 50 | } |
51 | 51 | ||
52 | static struct nf_hook_ops *filter_ops __read_mostly; | 52 | static struct nf_hook_ops *filter_ops __read_mostly; |
53 | 53 | ||
54 | /* Default to forward because I got too much mail already. */ | 54 | /* Default to forward because I got too much mail already. */ |
55 | static int forward = NF_ACCEPT; | 55 | static int forward = NF_ACCEPT; |
56 | module_param(forward, bool, 0000); | 56 | module_param(forward, bool, 0000); |
57 | 57 | ||
58 | static int __net_init iptable_filter_net_init(struct net *net) | 58 | static int __net_init iptable_filter_net_init(struct net *net) |
59 | { | 59 | { |
60 | struct ipt_replace *repl; | 60 | struct ipt_replace *repl; |
61 | 61 | ||
62 | repl = ipt_alloc_initial_table(&packet_filter); | 62 | repl = ipt_alloc_initial_table(&packet_filter); |
63 | if (repl == NULL) | 63 | if (repl == NULL) |
64 | return -ENOMEM; | 64 | return -ENOMEM; |
65 | /* Entry 1 is the FORWARD hook */ | 65 | /* Entry 1 is the FORWARD hook */ |
66 | ((struct ipt_standard *)repl->entries)[1].target.verdict = | 66 | ((struct ipt_standard *)repl->entries)[1].target.verdict = |
67 | -forward - 1; | 67 | -forward - 1; |
68 | 68 | ||
69 | net->ipv4.iptable_filter = | 69 | net->ipv4.iptable_filter = |
70 | ipt_register_table(net, &packet_filter, repl); | 70 | ipt_register_table(net, &packet_filter, repl); |
71 | kfree(repl); | 71 | kfree(repl); |
72 | if (IS_ERR(net->ipv4.iptable_filter)) | 72 | if (IS_ERR(net->ipv4.iptable_filter)) |
73 | return PTR_ERR(net->ipv4.iptable_filter); | 73 | return PTR_ERR(net->ipv4.iptable_filter); |
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | static void __net_exit iptable_filter_net_exit(struct net *net) | 77 | static void __net_exit iptable_filter_net_exit(struct net *net) |
78 | { | 78 | { |
79 | ipt_unregister_table(net, net->ipv4.iptable_filter); | 79 | ipt_unregister_table(net, net->ipv4.iptable_filter); |
80 | } | 80 | } |
81 | 81 | ||
82 | static struct pernet_operations iptable_filter_net_ops = { | 82 | static struct pernet_operations iptable_filter_net_ops = { |
83 | .init = iptable_filter_net_init, | 83 | .init = iptable_filter_net_init, |
84 | .exit = iptable_filter_net_exit, | 84 | .exit = iptable_filter_net_exit, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static int __init iptable_filter_init(void) | 87 | static int __init iptable_filter_init(void) |
88 | { | 88 | { |
89 | int ret; | 89 | int ret; |
90 | 90 | ||
91 | if (forward < 0 || forward > NF_MAX_VERDICT) { | 91 | if (forward < 0 || forward > NF_MAX_VERDICT) { |
92 | printk("iptables forward must be 0 or 1\n"); | 92 | pr_err("iptables forward must be 0 or 1\n"); |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | } | 94 | } |
95 | 95 | ||
96 | ret = register_pernet_subsys(&iptable_filter_net_ops); | 96 | ret = register_pernet_subsys(&iptable_filter_net_ops); |
97 | if (ret < 0) | 97 | if (ret < 0) |
98 | return ret; | 98 | return ret; |
99 | 99 | ||
100 | /* Register hooks */ | 100 | /* Register hooks */ |
101 | filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); | 101 | filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); |
102 | if (IS_ERR(filter_ops)) { | 102 | if (IS_ERR(filter_ops)) { |
103 | ret = PTR_ERR(filter_ops); | 103 | ret = PTR_ERR(filter_ops); |
104 | goto cleanup_table; | 104 | goto cleanup_table; |
105 | } | 105 | } |
106 | 106 | ||
107 | return ret; | 107 | return ret; |
108 | 108 | ||
109 | cleanup_table: | 109 | cleanup_table: |
110 | unregister_pernet_subsys(&iptable_filter_net_ops); | 110 | unregister_pernet_subsys(&iptable_filter_net_ops); |
111 | return ret; | 111 | return ret; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void __exit iptable_filter_fini(void) | 114 | static void __exit iptable_filter_fini(void) |
115 | { | 115 | { |
116 | xt_hook_unlink(&packet_filter, filter_ops); | 116 | xt_hook_unlink(&packet_filter, filter_ops); |
117 | unregister_pernet_subsys(&iptable_filter_net_ops); | 117 | unregister_pernet_subsys(&iptable_filter_net_ops); |
118 | } | 118 | } |
119 | 119 | ||
120 | module_init(iptable_filter_init); | 120 | module_init(iptable_filter_init); |
121 | module_exit(iptable_filter_fini); | 121 | module_exit(iptable_filter_fini); |
122 | 122 |
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1 | 1 | ||
2 | /* (C) 1999-2001 Paul `Rusty' Russell | 2 | /* (C) 1999-2001 Paul `Rusty' Russell |
3 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 3 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
12 | #include <linux/netfilter.h> | 12 | #include <linux/netfilter.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/icmp.h> | 15 | #include <linux/icmp.h> |
16 | #include <linux/sysctl.h> | 16 | #include <linux/sysctl.h> |
17 | #include <net/route.h> | 17 | #include <net/route.h> |
18 | #include <net/ip.h> | 18 | #include <net/ip.h> |
19 | 19 | ||
20 | #include <linux/netfilter_ipv4.h> | 20 | #include <linux/netfilter_ipv4.h> |
21 | #include <net/netfilter/nf_conntrack.h> | 21 | #include <net/netfilter/nf_conntrack.h> |
22 | #include <net/netfilter/nf_conntrack_helper.h> | 22 | #include <net/netfilter/nf_conntrack_helper.h> |
23 | #include <net/netfilter/nf_conntrack_l4proto.h> | 23 | #include <net/netfilter/nf_conntrack_l4proto.h> |
24 | #include <net/netfilter/nf_conntrack_l3proto.h> | 24 | #include <net/netfilter/nf_conntrack_l3proto.h> |
25 | #include <net/netfilter/nf_conntrack_zones.h> | 25 | #include <net/netfilter/nf_conntrack_zones.h> |
26 | #include <net/netfilter/nf_conntrack_core.h> | 26 | #include <net/netfilter/nf_conntrack_core.h> |
27 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> | 27 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
28 | #include <net/netfilter/nf_nat_helper.h> | 28 | #include <net/netfilter/nf_nat_helper.h> |
29 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> | 29 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> |
30 | #include <net/netfilter/nf_log.h> | 30 | #include <net/netfilter/nf_log.h> |
31 | 31 | ||
32 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, | 32 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, |
33 | struct nf_conn *ct, | 33 | struct nf_conn *ct, |
34 | enum ip_conntrack_info ctinfo); | 34 | enum ip_conntrack_info ctinfo); |
35 | EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook); | 35 | EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook); |
36 | 36 | ||
37 | static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 37 | static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
38 | struct nf_conntrack_tuple *tuple) | 38 | struct nf_conntrack_tuple *tuple) |
39 | { | 39 | { |
40 | const __be32 *ap; | 40 | const __be32 *ap; |
41 | __be32 _addrs[2]; | 41 | __be32 _addrs[2]; |
42 | ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), | 42 | ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), |
43 | sizeof(u_int32_t) * 2, _addrs); | 43 | sizeof(u_int32_t) * 2, _addrs); |
44 | if (ap == NULL) | 44 | if (ap == NULL) |
45 | return false; | 45 | return false; |
46 | 46 | ||
47 | tuple->src.u3.ip = ap[0]; | 47 | tuple->src.u3.ip = ap[0]; |
48 | tuple->dst.u3.ip = ap[1]; | 48 | tuple->dst.u3.ip = ap[1]; |
49 | 49 | ||
50 | return true; | 50 | return true; |
51 | } | 51 | } |
52 | 52 | ||
53 | static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, | 53 | static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, |
54 | const struct nf_conntrack_tuple *orig) | 54 | const struct nf_conntrack_tuple *orig) |
55 | { | 55 | { |
56 | tuple->src.u3.ip = orig->dst.u3.ip; | 56 | tuple->src.u3.ip = orig->dst.u3.ip; |
57 | tuple->dst.u3.ip = orig->src.u3.ip; | 57 | tuple->dst.u3.ip = orig->src.u3.ip; |
58 | 58 | ||
59 | return true; | 59 | return true; |
60 | } | 60 | } |
61 | 61 | ||
62 | static int ipv4_print_tuple(struct seq_file *s, | 62 | static int ipv4_print_tuple(struct seq_file *s, |
63 | const struct nf_conntrack_tuple *tuple) | 63 | const struct nf_conntrack_tuple *tuple) |
64 | { | 64 | { |
65 | return seq_printf(s, "src=%pI4 dst=%pI4 ", | 65 | return seq_printf(s, "src=%pI4 dst=%pI4 ", |
66 | &tuple->src.u3.ip, &tuple->dst.u3.ip); | 66 | &tuple->src.u3.ip, &tuple->dst.u3.ip); |
67 | } | 67 | } |
68 | 68 | ||
69 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | 69 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, |
70 | unsigned int *dataoff, u_int8_t *protonum) | 70 | unsigned int *dataoff, u_int8_t *protonum) |
71 | { | 71 | { |
72 | const struct iphdr *iph; | 72 | const struct iphdr *iph; |
73 | struct iphdr _iph; | 73 | struct iphdr _iph; |
74 | 74 | ||
75 | iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); | 75 | iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); |
76 | if (iph == NULL) | 76 | if (iph == NULL) |
77 | return -NF_DROP; | 77 | return -NF_DROP; |
78 | 78 | ||
79 | /* Conntrack defragments packets, we might still see fragments | 79 | /* Conntrack defragments packets, we might still see fragments |
80 | * inside ICMP packets though. */ | 80 | * inside ICMP packets though. */ |
81 | if (iph->frag_off & htons(IP_OFFSET)) | 81 | if (iph->frag_off & htons(IP_OFFSET)) |
82 | return -NF_DROP; | 82 | return -NF_DROP; |
83 | 83 | ||
84 | *dataoff = nhoff + (iph->ihl << 2); | 84 | *dataoff = nhoff + (iph->ihl << 2); |
85 | *protonum = iph->protocol; | 85 | *protonum = iph->protocol; |
86 | 86 | ||
87 | return NF_ACCEPT; | 87 | return NF_ACCEPT; |
88 | } | 88 | } |
89 | 89 | ||
90 | static unsigned int ipv4_confirm(unsigned int hooknum, | 90 | static unsigned int ipv4_confirm(unsigned int hooknum, |
91 | struct sk_buff *skb, | 91 | struct sk_buff *skb, |
92 | const struct net_device *in, | 92 | const struct net_device *in, |
93 | const struct net_device *out, | 93 | const struct net_device *out, |
94 | int (*okfn)(struct sk_buff *)) | 94 | int (*okfn)(struct sk_buff *)) |
95 | { | 95 | { |
96 | struct nf_conn *ct; | 96 | struct nf_conn *ct; |
97 | enum ip_conntrack_info ctinfo; | 97 | enum ip_conntrack_info ctinfo; |
98 | const struct nf_conn_help *help; | 98 | const struct nf_conn_help *help; |
99 | const struct nf_conntrack_helper *helper; | 99 | const struct nf_conntrack_helper *helper; |
100 | unsigned int ret; | 100 | unsigned int ret; |
101 | 101 | ||
102 | /* This is where we call the helper: as the packet goes out. */ | 102 | /* This is where we call the helper: as the packet goes out. */ |
103 | ct = nf_ct_get(skb, &ctinfo); | 103 | ct = nf_ct_get(skb, &ctinfo); |
104 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) | 104 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) |
105 | goto out; | 105 | goto out; |
106 | 106 | ||
107 | help = nfct_help(ct); | 107 | help = nfct_help(ct); |
108 | if (!help) | 108 | if (!help) |
109 | goto out; | 109 | goto out; |
110 | 110 | ||
111 | /* rcu_read_lock()ed by nf_hook_slow */ | 111 | /* rcu_read_lock()ed by nf_hook_slow */ |
112 | helper = rcu_dereference(help->helper); | 112 | helper = rcu_dereference(help->helper); |
113 | if (!helper) | 113 | if (!helper) |
114 | goto out; | 114 | goto out; |
115 | 115 | ||
116 | ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), | 116 | ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), |
117 | ct, ctinfo); | 117 | ct, ctinfo); |
118 | if (ret != NF_ACCEPT) { | 118 | if (ret != NF_ACCEPT) { |
119 | nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL, | 119 | nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL, |
120 | "nf_ct_%s: dropping packet", helper->name); | 120 | "nf_ct_%s: dropping packet", helper->name); |
121 | return ret; | 121 | return ret; |
122 | } | 122 | } |
123 | 123 | ||
124 | if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 124 | if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
125 | typeof(nf_nat_seq_adjust_hook) seq_adjust; | 125 | typeof(nf_nat_seq_adjust_hook) seq_adjust; |
126 | 126 | ||
127 | seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); | 127 | seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); |
128 | if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) { | 128 | if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) { |
129 | NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); | 129 | NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); |
130 | return NF_DROP; | 130 | return NF_DROP; |
131 | } | 131 | } |
132 | } | 132 | } |
133 | out: | 133 | out: |
134 | /* We've seen it coming out the other side: confirm it */ | 134 | /* We've seen it coming out the other side: confirm it */ |
135 | return nf_conntrack_confirm(skb); | 135 | return nf_conntrack_confirm(skb); |
136 | } | 136 | } |
137 | 137 | ||
138 | static unsigned int ipv4_conntrack_in(unsigned int hooknum, | 138 | static unsigned int ipv4_conntrack_in(unsigned int hooknum, |
139 | struct sk_buff *skb, | 139 | struct sk_buff *skb, |
140 | const struct net_device *in, | 140 | const struct net_device *in, |
141 | const struct net_device *out, | 141 | const struct net_device *out, |
142 | int (*okfn)(struct sk_buff *)) | 142 | int (*okfn)(struct sk_buff *)) |
143 | { | 143 | { |
144 | return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb); | 144 | return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb); |
145 | } | 145 | } |
146 | 146 | ||
147 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, | 147 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, |
148 | struct sk_buff *skb, | 148 | struct sk_buff *skb, |
149 | const struct net_device *in, | 149 | const struct net_device *in, |
150 | const struct net_device *out, | 150 | const struct net_device *out, |
151 | int (*okfn)(struct sk_buff *)) | 151 | int (*okfn)(struct sk_buff *)) |
152 | { | 152 | { |
153 | /* root is playing with raw sockets. */ | 153 | /* root is playing with raw sockets. */ |
154 | if (skb->len < sizeof(struct iphdr) || | 154 | if (skb->len < sizeof(struct iphdr) || |
155 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 155 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
156 | return NF_ACCEPT; | 156 | return NF_ACCEPT; |
157 | return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb); | 157 | return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb); |
158 | } | 158 | } |
159 | 159 | ||
160 | /* Connection tracking may drop packets, but never alters them, so | 160 | /* Connection tracking may drop packets, but never alters them, so |
161 | make it the first hook. */ | 161 | make it the first hook. */ |
162 | static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = { | 162 | static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = { |
163 | { | 163 | { |
164 | .hook = ipv4_conntrack_in, | 164 | .hook = ipv4_conntrack_in, |
165 | .owner = THIS_MODULE, | 165 | .owner = THIS_MODULE, |
166 | .pf = NFPROTO_IPV4, | 166 | .pf = NFPROTO_IPV4, |
167 | .hooknum = NF_INET_PRE_ROUTING, | 167 | .hooknum = NF_INET_PRE_ROUTING, |
168 | .priority = NF_IP_PRI_CONNTRACK, | 168 | .priority = NF_IP_PRI_CONNTRACK, |
169 | }, | 169 | }, |
170 | { | 170 | { |
171 | .hook = ipv4_conntrack_local, | 171 | .hook = ipv4_conntrack_local, |
172 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
173 | .pf = NFPROTO_IPV4, | 173 | .pf = NFPROTO_IPV4, |
174 | .hooknum = NF_INET_LOCAL_OUT, | 174 | .hooknum = NF_INET_LOCAL_OUT, |
175 | .priority = NF_IP_PRI_CONNTRACK, | 175 | .priority = NF_IP_PRI_CONNTRACK, |
176 | }, | 176 | }, |
177 | { | 177 | { |
178 | .hook = ipv4_confirm, | 178 | .hook = ipv4_confirm, |
179 | .owner = THIS_MODULE, | 179 | .owner = THIS_MODULE, |
180 | .pf = NFPROTO_IPV4, | 180 | .pf = NFPROTO_IPV4, |
181 | .hooknum = NF_INET_POST_ROUTING, | 181 | .hooknum = NF_INET_POST_ROUTING, |
182 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, | 182 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, |
183 | }, | 183 | }, |
184 | { | 184 | { |
185 | .hook = ipv4_confirm, | 185 | .hook = ipv4_confirm, |
186 | .owner = THIS_MODULE, | 186 | .owner = THIS_MODULE, |
187 | .pf = NFPROTO_IPV4, | 187 | .pf = NFPROTO_IPV4, |
188 | .hooknum = NF_INET_LOCAL_IN, | 188 | .hooknum = NF_INET_LOCAL_IN, |
189 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, | 189 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, |
190 | }, | 190 | }, |
191 | }; | 191 | }; |
192 | 192 | ||
193 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 193 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
194 | static int log_invalid_proto_min = 0; | 194 | static int log_invalid_proto_min = 0; |
195 | static int log_invalid_proto_max = 255; | 195 | static int log_invalid_proto_max = 255; |
196 | 196 | ||
197 | static ctl_table ip_ct_sysctl_table[] = { | 197 | static ctl_table ip_ct_sysctl_table[] = { |
198 | { | 198 | { |
199 | .procname = "ip_conntrack_max", | 199 | .procname = "ip_conntrack_max", |
200 | .data = &nf_conntrack_max, | 200 | .data = &nf_conntrack_max, |
201 | .maxlen = sizeof(int), | 201 | .maxlen = sizeof(int), |
202 | .mode = 0644, | 202 | .mode = 0644, |
203 | .proc_handler = proc_dointvec, | 203 | .proc_handler = proc_dointvec, |
204 | }, | 204 | }, |
205 | { | 205 | { |
206 | .procname = "ip_conntrack_count", | 206 | .procname = "ip_conntrack_count", |
207 | .data = &init_net.ct.count, | 207 | .data = &init_net.ct.count, |
208 | .maxlen = sizeof(int), | 208 | .maxlen = sizeof(int), |
209 | .mode = 0444, | 209 | .mode = 0444, |
210 | .proc_handler = proc_dointvec, | 210 | .proc_handler = proc_dointvec, |
211 | }, | 211 | }, |
212 | { | 212 | { |
213 | .procname = "ip_conntrack_buckets", | 213 | .procname = "ip_conntrack_buckets", |
214 | .data = &init_net.ct.htable_size, | 214 | .data = &init_net.ct.htable_size, |
215 | .maxlen = sizeof(unsigned int), | 215 | .maxlen = sizeof(unsigned int), |
216 | .mode = 0444, | 216 | .mode = 0444, |
217 | .proc_handler = proc_dointvec, | 217 | .proc_handler = proc_dointvec, |
218 | }, | 218 | }, |
219 | { | 219 | { |
220 | .procname = "ip_conntrack_checksum", | 220 | .procname = "ip_conntrack_checksum", |
221 | .data = &init_net.ct.sysctl_checksum, | 221 | .data = &init_net.ct.sysctl_checksum, |
222 | .maxlen = sizeof(int), | 222 | .maxlen = sizeof(int), |
223 | .mode = 0644, | 223 | .mode = 0644, |
224 | .proc_handler = proc_dointvec, | 224 | .proc_handler = proc_dointvec, |
225 | }, | 225 | }, |
226 | { | 226 | { |
227 | .procname = "ip_conntrack_log_invalid", | 227 | .procname = "ip_conntrack_log_invalid", |
228 | .data = &init_net.ct.sysctl_log_invalid, | 228 | .data = &init_net.ct.sysctl_log_invalid, |
229 | .maxlen = sizeof(unsigned int), | 229 | .maxlen = sizeof(unsigned int), |
230 | .mode = 0644, | 230 | .mode = 0644, |
231 | .proc_handler = proc_dointvec_minmax, | 231 | .proc_handler = proc_dointvec_minmax, |
232 | .extra1 = &log_invalid_proto_min, | 232 | .extra1 = &log_invalid_proto_min, |
233 | .extra2 = &log_invalid_proto_max, | 233 | .extra2 = &log_invalid_proto_max, |
234 | }, | 234 | }, |
235 | { } | 235 | { } |
236 | }; | 236 | }; |
237 | #endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */ | 237 | #endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */ |
238 | 238 | ||
239 | /* Fast function for those who don't want to parse /proc (and I don't | 239 | /* Fast function for those who don't want to parse /proc (and I don't |
240 | blame them). */ | 240 | blame them). */ |
241 | /* Reversing the socket's dst/src point of view gives us the reply | 241 | /* Reversing the socket's dst/src point of view gives us the reply |
242 | mapping. */ | 242 | mapping. */ |
243 | static int | 243 | static int |
244 | getorigdst(struct sock *sk, int optval, void __user *user, int *len) | 244 | getorigdst(struct sock *sk, int optval, void __user *user, int *len) |
245 | { | 245 | { |
246 | const struct inet_sock *inet = inet_sk(sk); | 246 | const struct inet_sock *inet = inet_sk(sk); |
247 | const struct nf_conntrack_tuple_hash *h; | 247 | const struct nf_conntrack_tuple_hash *h; |
248 | struct nf_conntrack_tuple tuple; | 248 | struct nf_conntrack_tuple tuple; |
249 | 249 | ||
250 | memset(&tuple, 0, sizeof(tuple)); | 250 | memset(&tuple, 0, sizeof(tuple)); |
251 | tuple.src.u3.ip = inet->inet_rcv_saddr; | 251 | tuple.src.u3.ip = inet->inet_rcv_saddr; |
252 | tuple.src.u.tcp.port = inet->inet_sport; | 252 | tuple.src.u.tcp.port = inet->inet_sport; |
253 | tuple.dst.u3.ip = inet->inet_daddr; | 253 | tuple.dst.u3.ip = inet->inet_daddr; |
254 | tuple.dst.u.tcp.port = inet->inet_dport; | 254 | tuple.dst.u.tcp.port = inet->inet_dport; |
255 | tuple.src.l3num = PF_INET; | 255 | tuple.src.l3num = PF_INET; |
256 | tuple.dst.protonum = sk->sk_protocol; | 256 | tuple.dst.protonum = sk->sk_protocol; |
257 | 257 | ||
258 | /* We only do TCP and SCTP at the moment: is there a better way? */ | 258 | /* We only do TCP and SCTP at the moment: is there a better way? */ |
259 | if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) { | 259 | if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) { |
260 | pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); | 260 | pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); |
261 | return -ENOPROTOOPT; | 261 | return -ENOPROTOOPT; |
262 | } | 262 | } |
263 | 263 | ||
264 | if ((unsigned int) *len < sizeof(struct sockaddr_in)) { | 264 | if ((unsigned int) *len < sizeof(struct sockaddr_in)) { |
265 | pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n", | 265 | pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n", |
266 | *len, sizeof(struct sockaddr_in)); | 266 | *len, sizeof(struct sockaddr_in)); |
267 | return -EINVAL; | 267 | return -EINVAL; |
268 | } | 268 | } |
269 | 269 | ||
270 | h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple); | 270 | h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple); |
271 | if (h) { | 271 | if (h) { |
272 | struct sockaddr_in sin; | 272 | struct sockaddr_in sin; |
273 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); | 273 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); |
274 | 274 | ||
275 | sin.sin_family = AF_INET; | 275 | sin.sin_family = AF_INET; |
276 | sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] | 276 | sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] |
277 | .tuple.dst.u.tcp.port; | 277 | .tuple.dst.u.tcp.port; |
278 | sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] | 278 | sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] |
279 | .tuple.dst.u3.ip; | 279 | .tuple.dst.u3.ip; |
280 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); | 280 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); |
281 | 281 | ||
282 | pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", | 282 | pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", |
283 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); | 283 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); |
284 | nf_ct_put(ct); | 284 | nf_ct_put(ct); |
285 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) | 285 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) |
286 | return -EFAULT; | 286 | return -EFAULT; |
287 | else | 287 | else |
288 | return 0; | 288 | return 0; |
289 | } | 289 | } |
290 | pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", | 290 | pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", |
291 | &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), | 291 | &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), |
292 | &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); | 292 | &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); |
293 | return -ENOENT; | 293 | return -ENOENT; |
294 | } | 294 | } |
295 | 295 | ||
296 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 296 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
297 | 297 | ||
298 | #include <linux/netfilter/nfnetlink.h> | 298 | #include <linux/netfilter/nfnetlink.h> |
299 | #include <linux/netfilter/nfnetlink_conntrack.h> | 299 | #include <linux/netfilter/nfnetlink_conntrack.h> |
300 | 300 | ||
301 | static int ipv4_tuple_to_nlattr(struct sk_buff *skb, | 301 | static int ipv4_tuple_to_nlattr(struct sk_buff *skb, |
302 | const struct nf_conntrack_tuple *tuple) | 302 | const struct nf_conntrack_tuple *tuple) |
303 | { | 303 | { |
304 | NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); | 304 | NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); |
305 | NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); | 305 | NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); |
306 | return 0; | 306 | return 0; |
307 | 307 | ||
308 | nla_put_failure: | 308 | nla_put_failure: |
309 | return -1; | 309 | return -1; |
310 | } | 310 | } |
311 | 311 | ||
312 | static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = { | 312 | static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = { |
313 | [CTA_IP_V4_SRC] = { .type = NLA_U32 }, | 313 | [CTA_IP_V4_SRC] = { .type = NLA_U32 }, |
314 | [CTA_IP_V4_DST] = { .type = NLA_U32 }, | 314 | [CTA_IP_V4_DST] = { .type = NLA_U32 }, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static int ipv4_nlattr_to_tuple(struct nlattr *tb[], | 317 | static int ipv4_nlattr_to_tuple(struct nlattr *tb[], |
318 | struct nf_conntrack_tuple *t) | 318 | struct nf_conntrack_tuple *t) |
319 | { | 319 | { |
320 | if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) | 320 | if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) |
321 | return -EINVAL; | 321 | return -EINVAL; |
322 | 322 | ||
323 | t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]); | 323 | t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]); |
324 | t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]); | 324 | t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]); |
325 | 325 | ||
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int ipv4_nlattr_tuple_size(void) | 329 | static int ipv4_nlattr_tuple_size(void) |
330 | { | 330 | { |
331 | return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1); | 331 | return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1); |
332 | } | 332 | } |
333 | #endif | 333 | #endif |
334 | 334 | ||
335 | static struct nf_sockopt_ops so_getorigdst = { | 335 | static struct nf_sockopt_ops so_getorigdst = { |
336 | .pf = PF_INET, | 336 | .pf = PF_INET, |
337 | .get_optmin = SO_ORIGINAL_DST, | 337 | .get_optmin = SO_ORIGINAL_DST, |
338 | .get_optmax = SO_ORIGINAL_DST+1, | 338 | .get_optmax = SO_ORIGINAL_DST+1, |
339 | .get = &getorigdst, | 339 | .get = &getorigdst, |
340 | .owner = THIS_MODULE, | 340 | .owner = THIS_MODULE, |
341 | }; | 341 | }; |
342 | 342 | ||
343 | struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { | 343 | struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { |
344 | .l3proto = PF_INET, | 344 | .l3proto = PF_INET, |
345 | .name = "ipv4", | 345 | .name = "ipv4", |
346 | .pkt_to_tuple = ipv4_pkt_to_tuple, | 346 | .pkt_to_tuple = ipv4_pkt_to_tuple, |
347 | .invert_tuple = ipv4_invert_tuple, | 347 | .invert_tuple = ipv4_invert_tuple, |
348 | .print_tuple = ipv4_print_tuple, | 348 | .print_tuple = ipv4_print_tuple, |
349 | .get_l4proto = ipv4_get_l4proto, | 349 | .get_l4proto = ipv4_get_l4proto, |
350 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 350 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
351 | .tuple_to_nlattr = ipv4_tuple_to_nlattr, | 351 | .tuple_to_nlattr = ipv4_tuple_to_nlattr, |
352 | .nlattr_tuple_size = ipv4_nlattr_tuple_size, | 352 | .nlattr_tuple_size = ipv4_nlattr_tuple_size, |
353 | .nlattr_to_tuple = ipv4_nlattr_to_tuple, | 353 | .nlattr_to_tuple = ipv4_nlattr_to_tuple, |
354 | .nla_policy = ipv4_nla_policy, | 354 | .nla_policy = ipv4_nla_policy, |
355 | #endif | 355 | #endif |
356 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 356 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
357 | .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, | 357 | .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, |
358 | .ctl_table = ip_ct_sysctl_table, | 358 | .ctl_table = ip_ct_sysctl_table, |
359 | #endif | 359 | #endif |
360 | .me = THIS_MODULE, | 360 | .me = THIS_MODULE, |
361 | }; | 361 | }; |
362 | 362 | ||
363 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, | 363 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, |
364 | &nf_conntrack_htable_size, 0600); | 364 | &nf_conntrack_htable_size, 0600); |
365 | 365 | ||
366 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); | 366 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); |
367 | MODULE_ALIAS("ip_conntrack"); | 367 | MODULE_ALIAS("ip_conntrack"); |
368 | MODULE_LICENSE("GPL"); | 368 | MODULE_LICENSE("GPL"); |
369 | 369 | ||
370 | static int __init nf_conntrack_l3proto_ipv4_init(void) | 370 | static int __init nf_conntrack_l3proto_ipv4_init(void) |
371 | { | 371 | { |
372 | int ret = 0; | 372 | int ret = 0; |
373 | 373 | ||
374 | need_conntrack(); | 374 | need_conntrack(); |
375 | nf_defrag_ipv4_enable(); | 375 | nf_defrag_ipv4_enable(); |
376 | 376 | ||
377 | ret = nf_register_sockopt(&so_getorigdst); | 377 | ret = nf_register_sockopt(&so_getorigdst); |
378 | if (ret < 0) { | 378 | if (ret < 0) { |
379 | printk(KERN_ERR "Unable to register netfilter socket option\n"); | 379 | printk(KERN_ERR "Unable to register netfilter socket option\n"); |
380 | return ret; | 380 | return ret; |
381 | } | 381 | } |
382 | 382 | ||
383 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); | 383 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); |
384 | if (ret < 0) { | 384 | if (ret < 0) { |
385 | printk("nf_conntrack_ipv4: can't register tcp.\n"); | 385 | pr_err("nf_conntrack_ipv4: can't register tcp.\n"); |
386 | goto cleanup_sockopt; | 386 | goto cleanup_sockopt; |
387 | } | 387 | } |
388 | 388 | ||
389 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4); | 389 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4); |
390 | if (ret < 0) { | 390 | if (ret < 0) { |
391 | printk("nf_conntrack_ipv4: can't register udp.\n"); | 391 | pr_err("nf_conntrack_ipv4: can't register udp.\n"); |
392 | goto cleanup_tcp; | 392 | goto cleanup_tcp; |
393 | } | 393 | } |
394 | 394 | ||
395 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp); | 395 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp); |
396 | if (ret < 0) { | 396 | if (ret < 0) { |
397 | printk("nf_conntrack_ipv4: can't register icmp.\n"); | 397 | pr_err("nf_conntrack_ipv4: can't register icmp.\n"); |
398 | goto cleanup_udp; | 398 | goto cleanup_udp; |
399 | } | 399 | } |
400 | 400 | ||
401 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4); | 401 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4); |
402 | if (ret < 0) { | 402 | if (ret < 0) { |
403 | printk("nf_conntrack_ipv4: can't register ipv4\n"); | 403 | pr_err("nf_conntrack_ipv4: can't register ipv4\n"); |
404 | goto cleanup_icmp; | 404 | goto cleanup_icmp; |
405 | } | 405 | } |
406 | 406 | ||
407 | ret = nf_register_hooks(ipv4_conntrack_ops, | 407 | ret = nf_register_hooks(ipv4_conntrack_ops, |
408 | ARRAY_SIZE(ipv4_conntrack_ops)); | 408 | ARRAY_SIZE(ipv4_conntrack_ops)); |
409 | if (ret < 0) { | 409 | if (ret < 0) { |
410 | printk("nf_conntrack_ipv4: can't register hooks.\n"); | 410 | pr_err("nf_conntrack_ipv4: can't register hooks.\n"); |
411 | goto cleanup_ipv4; | 411 | goto cleanup_ipv4; |
412 | } | 412 | } |
413 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 413 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
414 | ret = nf_conntrack_ipv4_compat_init(); | 414 | ret = nf_conntrack_ipv4_compat_init(); |
415 | if (ret < 0) | 415 | if (ret < 0) |
416 | goto cleanup_hooks; | 416 | goto cleanup_hooks; |
417 | #endif | 417 | #endif |
418 | return ret; | 418 | return ret; |
419 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 419 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
420 | cleanup_hooks: | 420 | cleanup_hooks: |
421 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); | 421 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); |
422 | #endif | 422 | #endif |
423 | cleanup_ipv4: | 423 | cleanup_ipv4: |
424 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); | 424 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); |
425 | cleanup_icmp: | 425 | cleanup_icmp: |
426 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); | 426 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); |
427 | cleanup_udp: | 427 | cleanup_udp: |
428 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); | 428 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); |
429 | cleanup_tcp: | 429 | cleanup_tcp: |
430 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); | 430 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); |
431 | cleanup_sockopt: | 431 | cleanup_sockopt: |
432 | nf_unregister_sockopt(&so_getorigdst); | 432 | nf_unregister_sockopt(&so_getorigdst); |
433 | return ret; | 433 | return ret; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void __exit nf_conntrack_l3proto_ipv4_fini(void) | 436 | static void __exit nf_conntrack_l3proto_ipv4_fini(void) |
437 | { | 437 | { |
438 | synchronize_net(); | 438 | synchronize_net(); |
439 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 439 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
440 | nf_conntrack_ipv4_compat_fini(); | 440 | nf_conntrack_ipv4_compat_fini(); |
441 | #endif | 441 | #endif |
442 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); | 442 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); |
443 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); | 443 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); |
444 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); | 444 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); |
445 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); | 445 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); |
446 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); | 446 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); |
447 | nf_unregister_sockopt(&so_getorigdst); | 447 | nf_unregister_sockopt(&so_getorigdst); |
448 | } | 448 | } |
449 | 449 | ||
450 | module_init(nf_conntrack_l3proto_ipv4_init); | 450 | module_init(nf_conntrack_l3proto_ipv4_init); |
451 | module_exit(nf_conntrack_l3proto_ipv4_fini); | 451 | module_exit(nf_conntrack_l3proto_ipv4_fini); |
452 | 452 | ||
453 | void need_ipv4_conntrack(void) | 453 | void need_ipv4_conntrack(void) |
454 | { | 454 | { |
455 | return; | 455 | return; |
456 | } | 456 | } |
457 | EXPORT_SYMBOL_GPL(need_ipv4_conntrack); | 457 | EXPORT_SYMBOL_GPL(need_ipv4_conntrack); |
458 | 458 |
net/ipv4/netfilter/nf_nat_h323.c
1 | /* | 1 | /* |
2 | * H.323 extension for NAT alteration. | 2 | * H.323 extension for NAT alteration. |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> | 4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> |
5 | * | 5 | * |
6 | * This source code is licensed under General Public License version 2. | 6 | * This source code is licensed under General Public License version 2. |
7 | * | 7 | * |
8 | * Based on the 'brute force' H.323 NAT module by | 8 | * Based on the 'brute force' H.323 NAT module by |
9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/tcp.h> | 13 | #include <linux/tcp.h> |
14 | #include <net/tcp.h> | 14 | #include <net/tcp.h> |
15 | 15 | ||
16 | #include <net/netfilter/nf_nat.h> | 16 | #include <net/netfilter/nf_nat.h> |
17 | #include <net/netfilter/nf_nat_helper.h> | 17 | #include <net/netfilter/nf_nat_helper.h> |
18 | #include <net/netfilter/nf_nat_rule.h> | 18 | #include <net/netfilter/nf_nat_rule.h> |
19 | #include <net/netfilter/nf_conntrack_helper.h> | 19 | #include <net/netfilter/nf_conntrack_helper.h> |
20 | #include <net/netfilter/nf_conntrack_expect.h> | 20 | #include <net/netfilter/nf_conntrack_expect.h> |
21 | #include <linux/netfilter/nf_conntrack_h323.h> | 21 | #include <linux/netfilter/nf_conntrack_h323.h> |
22 | 22 | ||
23 | /****************************************************************************/ | 23 | /****************************************************************************/ |
24 | static int set_addr(struct sk_buff *skb, | 24 | static int set_addr(struct sk_buff *skb, |
25 | unsigned char **data, int dataoff, | 25 | unsigned char **data, int dataoff, |
26 | unsigned int addroff, __be32 ip, __be16 port) | 26 | unsigned int addroff, __be32 ip, __be16 port) |
27 | { | 27 | { |
28 | enum ip_conntrack_info ctinfo; | 28 | enum ip_conntrack_info ctinfo; |
29 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 29 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
30 | struct { | 30 | struct { |
31 | __be32 ip; | 31 | __be32 ip; |
32 | __be16 port; | 32 | __be16 port; |
33 | } __attribute__ ((__packed__)) buf; | 33 | } __attribute__ ((__packed__)) buf; |
34 | const struct tcphdr *th; | 34 | const struct tcphdr *th; |
35 | struct tcphdr _tcph; | 35 | struct tcphdr _tcph; |
36 | 36 | ||
37 | buf.ip = ip; | 37 | buf.ip = ip; |
38 | buf.port = port; | 38 | buf.port = port; |
39 | addroff += dataoff; | 39 | addroff += dataoff; |
40 | 40 | ||
41 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { | 41 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { |
42 | if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, | 42 | if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, |
43 | addroff, sizeof(buf), | 43 | addroff, sizeof(buf), |
44 | (char *) &buf, sizeof(buf))) { | 44 | (char *) &buf, sizeof(buf))) { |
45 | if (net_ratelimit()) | 45 | if (net_ratelimit()) |
46 | printk("nf_nat_h323: nf_nat_mangle_tcp_packet" | 46 | pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet" |
47 | " error\n"); | 47 | " error\n"); |
48 | return -1; | 48 | return -1; |
49 | } | 49 | } |
50 | 50 | ||
51 | /* Relocate data pointer */ | 51 | /* Relocate data pointer */ |
52 | th = skb_header_pointer(skb, ip_hdrlen(skb), | 52 | th = skb_header_pointer(skb, ip_hdrlen(skb), |
53 | sizeof(_tcph), &_tcph); | 53 | sizeof(_tcph), &_tcph); |
54 | if (th == NULL) | 54 | if (th == NULL) |
55 | return -1; | 55 | return -1; |
56 | *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff; | 56 | *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff; |
57 | } else { | 57 | } else { |
58 | if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, | 58 | if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, |
59 | addroff, sizeof(buf), | 59 | addroff, sizeof(buf), |
60 | (char *) &buf, sizeof(buf))) { | 60 | (char *) &buf, sizeof(buf))) { |
61 | if (net_ratelimit()) | 61 | if (net_ratelimit()) |
62 | printk("nf_nat_h323: nf_nat_mangle_udp_packet" | 62 | pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet" |
63 | " error\n"); | 63 | " error\n"); |
64 | return -1; | 64 | return -1; |
65 | } | 65 | } |
66 | /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy | 66 | /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy |
67 | * or pull everything in a linear buffer, so we can safely | 67 | * or pull everything in a linear buffer, so we can safely |
68 | * use the skb pointers now */ | 68 | * use the skb pointers now */ |
69 | *data = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); | 69 | *data = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); |
70 | } | 70 | } |
71 | 71 | ||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | /****************************************************************************/ | 75 | /****************************************************************************/ |
76 | static int set_h225_addr(struct sk_buff *skb, | 76 | static int set_h225_addr(struct sk_buff *skb, |
77 | unsigned char **data, int dataoff, | 77 | unsigned char **data, int dataoff, |
78 | TransportAddress *taddr, | 78 | TransportAddress *taddr, |
79 | union nf_inet_addr *addr, __be16 port) | 79 | union nf_inet_addr *addr, __be16 port) |
80 | { | 80 | { |
81 | return set_addr(skb, data, dataoff, taddr->ipAddress.ip, | 81 | return set_addr(skb, data, dataoff, taddr->ipAddress.ip, |
82 | addr->ip, port); | 82 | addr->ip, port); |
83 | } | 83 | } |
84 | 84 | ||
85 | /****************************************************************************/ | 85 | /****************************************************************************/ |
86 | static int set_h245_addr(struct sk_buff *skb, | 86 | static int set_h245_addr(struct sk_buff *skb, |
87 | unsigned char **data, int dataoff, | 87 | unsigned char **data, int dataoff, |
88 | H245_TransportAddress *taddr, | 88 | H245_TransportAddress *taddr, |
89 | union nf_inet_addr *addr, __be16 port) | 89 | union nf_inet_addr *addr, __be16 port) |
90 | { | 90 | { |
91 | return set_addr(skb, data, dataoff, | 91 | return set_addr(skb, data, dataoff, |
92 | taddr->unicastAddress.iPAddress.network, | 92 | taddr->unicastAddress.iPAddress.network, |
93 | addr->ip, port); | 93 | addr->ip, port); |
94 | } | 94 | } |
95 | 95 | ||
96 | /****************************************************************************/ | 96 | /****************************************************************************/ |
97 | static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, | 97 | static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, |
98 | enum ip_conntrack_info ctinfo, | 98 | enum ip_conntrack_info ctinfo, |
99 | unsigned char **data, | 99 | unsigned char **data, |
100 | TransportAddress *taddr, int count) | 100 | TransportAddress *taddr, int count) |
101 | { | 101 | { |
102 | const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | 102 | const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; |
103 | int dir = CTINFO2DIR(ctinfo); | 103 | int dir = CTINFO2DIR(ctinfo); |
104 | int i; | 104 | int i; |
105 | __be16 port; | 105 | __be16 port; |
106 | union nf_inet_addr addr; | 106 | union nf_inet_addr addr; |
107 | 107 | ||
108 | for (i = 0; i < count; i++) { | 108 | for (i = 0; i < count; i++) { |
109 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) { | 109 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) { |
110 | if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 110 | if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
111 | port == info->sig_port[dir]) { | 111 | port == info->sig_port[dir]) { |
112 | /* GW->GK */ | 112 | /* GW->GK */ |
113 | 113 | ||
114 | /* Fix for Gnomemeeting */ | 114 | /* Fix for Gnomemeeting */ |
115 | if (i > 0 && | 115 | if (i > 0 && |
116 | get_h225_addr(ct, *data, &taddr[0], | 116 | get_h225_addr(ct, *data, &taddr[0], |
117 | &addr, &port) && | 117 | &addr, &port) && |
118 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) | 118 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) |
119 | i = 0; | 119 | i = 0; |
120 | 120 | ||
121 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", | 121 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", |
122 | &addr.ip, port, | 122 | &addr.ip, port, |
123 | &ct->tuplehash[!dir].tuple.dst.u3.ip, | 123 | &ct->tuplehash[!dir].tuple.dst.u3.ip, |
124 | info->sig_port[!dir]); | 124 | info->sig_port[!dir]); |
125 | return set_h225_addr(skb, data, 0, &taddr[i], | 125 | return set_h225_addr(skb, data, 0, &taddr[i], |
126 | &ct->tuplehash[!dir]. | 126 | &ct->tuplehash[!dir]. |
127 | tuple.dst.u3, | 127 | tuple.dst.u3, |
128 | info->sig_port[!dir]); | 128 | info->sig_port[!dir]); |
129 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | 129 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && |
130 | port == info->sig_port[dir]) { | 130 | port == info->sig_port[dir]) { |
131 | /* GK->GW */ | 131 | /* GK->GW */ |
132 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", | 132 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", |
133 | &addr.ip, port, | 133 | &addr.ip, port, |
134 | &ct->tuplehash[!dir].tuple.src.u3.ip, | 134 | &ct->tuplehash[!dir].tuple.src.u3.ip, |
135 | info->sig_port[!dir]); | 135 | info->sig_port[!dir]); |
136 | return set_h225_addr(skb, data, 0, &taddr[i], | 136 | return set_h225_addr(skb, data, 0, &taddr[i], |
137 | &ct->tuplehash[!dir]. | 137 | &ct->tuplehash[!dir]. |
138 | tuple.src.u3, | 138 | tuple.src.u3, |
139 | info->sig_port[!dir]); | 139 | info->sig_port[!dir]); |
140 | } | 140 | } |
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
147 | /****************************************************************************/ | 147 | /****************************************************************************/ |
148 | static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, | 148 | static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, |
149 | enum ip_conntrack_info ctinfo, | 149 | enum ip_conntrack_info ctinfo, |
150 | unsigned char **data, | 150 | unsigned char **data, |
151 | TransportAddress *taddr, int count) | 151 | TransportAddress *taddr, int count) |
152 | { | 152 | { |
153 | int dir = CTINFO2DIR(ctinfo); | 153 | int dir = CTINFO2DIR(ctinfo); |
154 | int i; | 154 | int i; |
155 | __be16 port; | 155 | __be16 port; |
156 | union nf_inet_addr addr; | 156 | union nf_inet_addr addr; |
157 | 157 | ||
158 | for (i = 0; i < count; i++) { | 158 | for (i = 0; i < count; i++) { |
159 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | 159 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && |
160 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 160 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
161 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { | 161 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { |
162 | pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n", | 162 | pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n", |
163 | &addr.ip, ntohs(port), | 163 | &addr.ip, ntohs(port), |
164 | &ct->tuplehash[!dir].tuple.dst.u3.ip, | 164 | &ct->tuplehash[!dir].tuple.dst.u3.ip, |
165 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); | 165 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); |
166 | return set_h225_addr(skb, data, 0, &taddr[i], | 166 | return set_h225_addr(skb, data, 0, &taddr[i], |
167 | &ct->tuplehash[!dir].tuple.dst.u3, | 167 | &ct->tuplehash[!dir].tuple.dst.u3, |
168 | ct->tuplehash[!dir].tuple. | 168 | ct->tuplehash[!dir].tuple. |
169 | dst.u.udp.port); | 169 | dst.u.udp.port); |
170 | } | 170 | } |
171 | } | 171 | } |
172 | 172 | ||
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
176 | /****************************************************************************/ | 176 | /****************************************************************************/ |
177 | static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | 177 | static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, |
178 | enum ip_conntrack_info ctinfo, | 178 | enum ip_conntrack_info ctinfo, |
179 | unsigned char **data, int dataoff, | 179 | unsigned char **data, int dataoff, |
180 | H245_TransportAddress *taddr, | 180 | H245_TransportAddress *taddr, |
181 | __be16 port, __be16 rtp_port, | 181 | __be16 port, __be16 rtp_port, |
182 | struct nf_conntrack_expect *rtp_exp, | 182 | struct nf_conntrack_expect *rtp_exp, |
183 | struct nf_conntrack_expect *rtcp_exp) | 183 | struct nf_conntrack_expect *rtcp_exp) |
184 | { | 184 | { |
185 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | 185 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; |
186 | int dir = CTINFO2DIR(ctinfo); | 186 | int dir = CTINFO2DIR(ctinfo); |
187 | int i; | 187 | int i; |
188 | u_int16_t nated_port; | 188 | u_int16_t nated_port; |
189 | 189 | ||
190 | /* Set expectations for NAT */ | 190 | /* Set expectations for NAT */ |
191 | rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; | 191 | rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; |
192 | rtp_exp->expectfn = nf_nat_follow_master; | 192 | rtp_exp->expectfn = nf_nat_follow_master; |
193 | rtp_exp->dir = !dir; | 193 | rtp_exp->dir = !dir; |
194 | rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; | 194 | rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; |
195 | rtcp_exp->expectfn = nf_nat_follow_master; | 195 | rtcp_exp->expectfn = nf_nat_follow_master; |
196 | rtcp_exp->dir = !dir; | 196 | rtcp_exp->dir = !dir; |
197 | 197 | ||
198 | /* Lookup existing expects */ | 198 | /* Lookup existing expects */ |
199 | for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) { | 199 | for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) { |
200 | if (info->rtp_port[i][dir] == rtp_port) { | 200 | if (info->rtp_port[i][dir] == rtp_port) { |
201 | /* Expected */ | 201 | /* Expected */ |
202 | 202 | ||
203 | /* Use allocated ports first. This will refresh | 203 | /* Use allocated ports first. This will refresh |
204 | * the expects */ | 204 | * the expects */ |
205 | rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir]; | 205 | rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir]; |
206 | rtcp_exp->tuple.dst.u.udp.port = | 206 | rtcp_exp->tuple.dst.u.udp.port = |
207 | htons(ntohs(info->rtp_port[i][dir]) + 1); | 207 | htons(ntohs(info->rtp_port[i][dir]) + 1); |
208 | break; | 208 | break; |
209 | } else if (info->rtp_port[i][dir] == 0) { | 209 | } else if (info->rtp_port[i][dir] == 0) { |
210 | /* Not expected */ | 210 | /* Not expected */ |
211 | break; | 211 | break; |
212 | } | 212 | } |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Run out of expectations */ | 215 | /* Run out of expectations */ |
216 | if (i >= H323_RTP_CHANNEL_MAX) { | 216 | if (i >= H323_RTP_CHANNEL_MAX) { |
217 | if (net_ratelimit()) | 217 | if (net_ratelimit()) |
218 | printk("nf_nat_h323: out of expectations\n"); | 218 | pr_notice("nf_nat_h323: out of expectations\n"); |
219 | return 0; | 219 | return 0; |
220 | } | 220 | } |
221 | 221 | ||
222 | /* Try to get a pair of ports. */ | 222 | /* Try to get a pair of ports. */ |
223 | for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); | 223 | for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); |
224 | nated_port != 0; nated_port += 2) { | 224 | nated_port != 0; nated_port += 2) { |
225 | rtp_exp->tuple.dst.u.udp.port = htons(nated_port); | 225 | rtp_exp->tuple.dst.u.udp.port = htons(nated_port); |
226 | if (nf_ct_expect_related(rtp_exp) == 0) { | 226 | if (nf_ct_expect_related(rtp_exp) == 0) { |
227 | rtcp_exp->tuple.dst.u.udp.port = | 227 | rtcp_exp->tuple.dst.u.udp.port = |
228 | htons(nated_port + 1); | 228 | htons(nated_port + 1); |
229 | if (nf_ct_expect_related(rtcp_exp) == 0) | 229 | if (nf_ct_expect_related(rtcp_exp) == 0) |
230 | break; | 230 | break; |
231 | nf_ct_unexpect_related(rtp_exp); | 231 | nf_ct_unexpect_related(rtp_exp); |
232 | } | 232 | } |
233 | } | 233 | } |
234 | 234 | ||
235 | if (nated_port == 0) { /* No port available */ | 235 | if (nated_port == 0) { /* No port available */ |
236 | if (net_ratelimit()) | 236 | if (net_ratelimit()) |
237 | printk("nf_nat_h323: out of RTP ports\n"); | 237 | pr_notice("nf_nat_h323: out of RTP ports\n"); |
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | /* Modify signal */ | 241 | /* Modify signal */ |
242 | if (set_h245_addr(skb, data, dataoff, taddr, | 242 | if (set_h245_addr(skb, data, dataoff, taddr, |
243 | &ct->tuplehash[!dir].tuple.dst.u3, | 243 | &ct->tuplehash[!dir].tuple.dst.u3, |
244 | htons((port & htons(1)) ? nated_port + 1 : | 244 | htons((port & htons(1)) ? nated_port + 1 : |
245 | nated_port)) == 0) { | 245 | nated_port)) == 0) { |
246 | /* Save ports */ | 246 | /* Save ports */ |
247 | info->rtp_port[i][dir] = rtp_port; | 247 | info->rtp_port[i][dir] = rtp_port; |
248 | info->rtp_port[i][!dir] = htons(nated_port); | 248 | info->rtp_port[i][!dir] = htons(nated_port); |
249 | } else { | 249 | } else { |
250 | nf_ct_unexpect_related(rtp_exp); | 250 | nf_ct_unexpect_related(rtp_exp); |
251 | nf_ct_unexpect_related(rtcp_exp); | 251 | nf_ct_unexpect_related(rtcp_exp); |
252 | return -1; | 252 | return -1; |
253 | } | 253 | } |
254 | 254 | ||
255 | /* Success */ | 255 | /* Success */ |
256 | pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", | 256 | pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", |
257 | &rtp_exp->tuple.src.u3.ip, | 257 | &rtp_exp->tuple.src.u3.ip, |
258 | ntohs(rtp_exp->tuple.src.u.udp.port), | 258 | ntohs(rtp_exp->tuple.src.u.udp.port), |
259 | &rtp_exp->tuple.dst.u3.ip, | 259 | &rtp_exp->tuple.dst.u3.ip, |
260 | ntohs(rtp_exp->tuple.dst.u.udp.port)); | 260 | ntohs(rtp_exp->tuple.dst.u.udp.port)); |
261 | pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n", | 261 | pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n", |
262 | &rtcp_exp->tuple.src.u3.ip, | 262 | &rtcp_exp->tuple.src.u3.ip, |
263 | ntohs(rtcp_exp->tuple.src.u.udp.port), | 263 | ntohs(rtcp_exp->tuple.src.u.udp.port), |
264 | &rtcp_exp->tuple.dst.u3.ip, | 264 | &rtcp_exp->tuple.dst.u3.ip, |
265 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); | 265 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); |
266 | 266 | ||
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | /****************************************************************************/ | 270 | /****************************************************************************/ |
271 | static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, | 271 | static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, |
272 | enum ip_conntrack_info ctinfo, | 272 | enum ip_conntrack_info ctinfo, |
273 | unsigned char **data, int dataoff, | 273 | unsigned char **data, int dataoff, |
274 | H245_TransportAddress *taddr, __be16 port, | 274 | H245_TransportAddress *taddr, __be16 port, |
275 | struct nf_conntrack_expect *exp) | 275 | struct nf_conntrack_expect *exp) |
276 | { | 276 | { |
277 | int dir = CTINFO2DIR(ctinfo); | 277 | int dir = CTINFO2DIR(ctinfo); |
278 | u_int16_t nated_port = ntohs(port); | 278 | u_int16_t nated_port = ntohs(port); |
279 | 279 | ||
280 | /* Set expectations for NAT */ | 280 | /* Set expectations for NAT */ |
281 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 281 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
282 | exp->expectfn = nf_nat_follow_master; | 282 | exp->expectfn = nf_nat_follow_master; |
283 | exp->dir = !dir; | 283 | exp->dir = !dir; |
284 | 284 | ||
285 | /* Try to get same port: if not, try to change it. */ | 285 | /* Try to get same port: if not, try to change it. */ |
286 | for (; nated_port != 0; nated_port++) { | 286 | for (; nated_port != 0; nated_port++) { |
287 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 287 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
288 | if (nf_ct_expect_related(exp) == 0) | 288 | if (nf_ct_expect_related(exp) == 0) |
289 | break; | 289 | break; |
290 | } | 290 | } |
291 | 291 | ||
292 | if (nated_port == 0) { /* No port available */ | 292 | if (nated_port == 0) { /* No port available */ |
293 | if (net_ratelimit()) | 293 | if (net_ratelimit()) |
294 | printk("nf_nat_h323: out of TCP ports\n"); | 294 | pr_notice("nf_nat_h323: out of TCP ports\n"); |
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | /* Modify signal */ | 298 | /* Modify signal */ |
299 | if (set_h245_addr(skb, data, dataoff, taddr, | 299 | if (set_h245_addr(skb, data, dataoff, taddr, |
300 | &ct->tuplehash[!dir].tuple.dst.u3, | 300 | &ct->tuplehash[!dir].tuple.dst.u3, |
301 | htons(nated_port)) < 0) { | 301 | htons(nated_port)) < 0) { |
302 | nf_ct_unexpect_related(exp); | 302 | nf_ct_unexpect_related(exp); |
303 | return -1; | 303 | return -1; |
304 | } | 304 | } |
305 | 305 | ||
306 | pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n", | 306 | pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n", |
307 | &exp->tuple.src.u3.ip, | 307 | &exp->tuple.src.u3.ip, |
308 | ntohs(exp->tuple.src.u.tcp.port), | 308 | ntohs(exp->tuple.src.u.tcp.port), |
309 | &exp->tuple.dst.u3.ip, | 309 | &exp->tuple.dst.u3.ip, |
310 | ntohs(exp->tuple.dst.u.tcp.port)); | 310 | ntohs(exp->tuple.dst.u.tcp.port)); |
311 | 311 | ||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
/****************************************************************************/
/* NAT the H.245 signalling address carried in a Q.931 message and expect
 * the corresponding TCP connection.  Reuses the previously translated
 * signalling port when this address was handled before; otherwise searches
 * upward from the advertised port.  Returns 0 on success (also when no
 * port could be reserved), -1 if the payload rewrite failed. */
static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo,
		    unsigned char **data, int dataoff,
		    TransportAddress *taddr, __be16 port,
		    struct nf_conntrack_expect *exp)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	u_int16_t nated_port = ntohs(port);

	/* Set expectations for NAT: keep the original port so the DST manip
	 * can map back to it, and follow the master connection. */
	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
	exp->expectfn = nf_nat_follow_master;
	exp->dir = !dir;

	/* Check existing expects: reuse the already-chosen translated port
	 * for this signalling address, if any. */
	if (info->sig_port[dir] == port)
		nated_port = ntohs(info->sig_port[!dir]);

	/* Try to get same port: if not, try to change it. */
	for (; nated_port != 0; nated_port++) {
		exp->tuple.dst.u.tcp.port = htons(nated_port);
		if (nf_ct_expect_related(exp) == 0)
			break;
	}

	if (nated_port == 0) {	/* No port available (counter wrapped to 0) */
		if (net_ratelimit())
			pr_notice("nf_nat_q931: out of TCP ports\n");
		return 0;
	}

	/* Modify signal */
	if (set_h225_addr(skb, data, dataoff, taddr,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  htons(nated_port)) == 0) {
		/* Save ports for reuse by later messages in either direction */
		info->sig_port[dir] = port;
		info->sig_port[!dir] = htons(nated_port);
	} else {
		nf_ct_unexpect_related(exp);
		return -1;
	}

	pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
		 &exp->tuple.src.u3.ip,
		 ntohs(exp->tuple.src.u.tcp.port),
		 &exp->tuple.dst.u3.ip,
		 ntohs(exp->tuple.dst.u.tcp.port));

	return 0;
}
368 | 368 | ||
/****************************************************************************
 * This conntrack expect function replaces nf_conntrack_q931_expect()
 * which was set by nf_conntrack_h323.c.
 ****************************************************************************/
static void ip_nat_q931_expect(struct nf_conn *new,
			       struct nf_conntrack_expect *this)
{
	struct nf_nat_range range;

	if (this->tuple.src.u3.ip != 0) {	/* Only accept calls from GK */
		nf_nat_follow_master(new, this);
		return;
	}

	/* This must be a fresh one. */
	BUG_ON(new->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to.
	 * Note: SRC manip is set up first, then DST — the two
	 * nf_nat_setup_info() calls below are order-dependent. */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
	nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected:
	 * the port saved when the expectation was created, at the address
	 * the master's peer uses. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = this->saved_proto;
	range.min_ip = range.max_ip =
	    new->master->tuplehash[!this->dir].tuple.src.u3.ip;
	nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
}
398 | 398 | ||
/****************************************************************************/
/* NAT the Q.931 signalling address at taddr[idx] announced in a RAS message
 * and expect the incoming call-signalling TCP connection.  Returns 0 on
 * success (also when no port could be reserved), -1 if the payload rewrite
 * failed. */
static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo,
		    unsigned char **data, TransportAddress *taddr, int idx,
		    __be16 port, struct nf_conntrack_expect *exp)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	u_int16_t nated_port = ntohs(port);
	union nf_inet_addr addr;

	/* Set expectations for NAT; ip_nat_q931_expect() handles the case of
	 * the connection arriving from the gatekeeper. */
	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
	exp->expectfn = ip_nat_q931_expect;
	exp->dir = !dir;

	/* Check existing expects: reuse the previously translated port for
	 * this signalling address, if any. */
	if (info->sig_port[dir] == port)
		nated_port = ntohs(info->sig_port[!dir]);

	/* Try to get same port: if not, try to change it. */
	for (; nated_port != 0; nated_port++) {
		exp->tuple.dst.u.tcp.port = htons(nated_port);
		if (nf_ct_expect_related(exp) == 0)
			break;
	}

	if (nated_port == 0) {	/* No port available (counter wrapped to 0) */
		if (net_ratelimit())
			pr_notice("nf_nat_ras: out of TCP ports\n");
		return 0;
	}

	/* Modify signal */
	if (set_h225_addr(skb, data, 0, &taddr[idx],
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  htons(nated_port)) == 0) {
		/* Save ports */
		info->sig_port[dir] = port;
		info->sig_port[!dir] = htons(nated_port);

		/* Fix for Gnomemeeting: if the first address in the list is
		 * a loopback address (127.0.0.0/8, checked via the
		 * 0x7f000000 mask), rewrite it as well. */
		if (idx > 0 &&
		    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
		    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
			set_h225_addr(skb, data, 0, &taddr[0],
				      &ct->tuplehash[!dir].tuple.dst.u3,
				      info->sig_port[!dir]);
		}
	} else {
		nf_ct_unexpect_related(exp);
		return -1;
	}

	/* Success */
	pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
		 &exp->tuple.src.u3.ip,
		 ntohs(exp->tuple.src.u.tcp.port),
		 &exp->tuple.dst.u3.ip,
		 ntohs(exp->tuple.dst.u.tcp.port));

	return 0;
}
462 | 462 | ||
/****************************************************************************/
/* Expect callback for forwarded calls: NAT the new connection so its
 * source appears as the master's peer and its destination is restored to
 * the address/port saved when the expectation was created. */
static void ip_nat_callforwarding_expect(struct nf_conn *new,
					 struct nf_conntrack_expect *this)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(new->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to.
	 * SRC manip first, then DST — call order matters. */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
	nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected:
	 * the saved_ip/saved_proto recorded by nat_callforwarding(). */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = this->saved_proto;
	range.min_ip = range.max_ip = this->saved_ip;
	nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
}
483 | 483 | ||
484 | /****************************************************************************/ | 484 | /****************************************************************************/ |
485 | static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, | 485 | static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, |
486 | enum ip_conntrack_info ctinfo, | 486 | enum ip_conntrack_info ctinfo, |
487 | unsigned char **data, int dataoff, | 487 | unsigned char **data, int dataoff, |
488 | TransportAddress *taddr, __be16 port, | 488 | TransportAddress *taddr, __be16 port, |
489 | struct nf_conntrack_expect *exp) | 489 | struct nf_conntrack_expect *exp) |
490 | { | 490 | { |
491 | int dir = CTINFO2DIR(ctinfo); | 491 | int dir = CTINFO2DIR(ctinfo); |
492 | u_int16_t nated_port; | 492 | u_int16_t nated_port; |
493 | 493 | ||
494 | /* Set expectations for NAT */ | 494 | /* Set expectations for NAT */ |
495 | exp->saved_ip = exp->tuple.dst.u3.ip; | 495 | exp->saved_ip = exp->tuple.dst.u3.ip; |
496 | exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; | 496 | exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; |
497 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 497 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
498 | exp->expectfn = ip_nat_callforwarding_expect; | 498 | exp->expectfn = ip_nat_callforwarding_expect; |
499 | exp->dir = !dir; | 499 | exp->dir = !dir; |
500 | 500 | ||
501 | /* Try to get same port: if not, try to change it. */ | 501 | /* Try to get same port: if not, try to change it. */ |
502 | for (nated_port = ntohs(port); nated_port != 0; nated_port++) { | 502 | for (nated_port = ntohs(port); nated_port != 0; nated_port++) { |
503 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 503 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
504 | if (nf_ct_expect_related(exp) == 0) | 504 | if (nf_ct_expect_related(exp) == 0) |
505 | break; | 505 | break; |
506 | } | 506 | } |
507 | 507 | ||
508 | if (nated_port == 0) { /* No port available */ | 508 | if (nated_port == 0) { /* No port available */ |
509 | if (net_ratelimit()) | 509 | if (net_ratelimit()) |
510 | printk("nf_nat_q931: out of TCP ports\n"); | 510 | pr_notice("nf_nat_q931: out of TCP ports\n"); |
511 | return 0; | 511 | return 0; |
512 | } | 512 | } |
513 | 513 | ||
514 | /* Modify signal */ | 514 | /* Modify signal */ |
515 | if (!set_h225_addr(skb, data, dataoff, taddr, | 515 | if (!set_h225_addr(skb, data, dataoff, taddr, |
516 | &ct->tuplehash[!dir].tuple.dst.u3, | 516 | &ct->tuplehash[!dir].tuple.dst.u3, |
517 | htons(nated_port)) == 0) { | 517 | htons(nated_port)) == 0) { |
518 | nf_ct_unexpect_related(exp); | 518 | nf_ct_unexpect_related(exp); |
519 | return -1; | 519 | return -1; |
520 | } | 520 | } |
521 | 521 | ||
522 | /* Success */ | 522 | /* Success */ |
523 | pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n", | 523 | pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n", |
524 | &exp->tuple.src.u3.ip, | 524 | &exp->tuple.src.u3.ip, |
525 | ntohs(exp->tuple.src.u.tcp.port), | 525 | ntohs(exp->tuple.src.u.tcp.port), |
526 | &exp->tuple.dst.u3.ip, | 526 | &exp->tuple.dst.u3.ip, |
527 | ntohs(exp->tuple.dst.u.tcp.port)); | 527 | ntohs(exp->tuple.dst.u.tcp.port)); |
528 | 528 | ||
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | 531 | ||
/****************************************************************************/
/* Module init: publish this module's NAT address-rewriting and expectation
 * helpers through the hook pointers exported by the generic H.323 conntrack
 * helper.  The BUG_ON()s assert that no other NAT helper already claimed
 * the hooks; rcu_assign_pointer() publishes them for RCU readers. */
static int __init init(void)
{
	BUG_ON(set_h245_addr_hook != NULL);
	BUG_ON(set_h225_addr_hook != NULL);
	BUG_ON(set_sig_addr_hook != NULL);
	BUG_ON(set_ras_addr_hook != NULL);
	BUG_ON(nat_rtp_rtcp_hook != NULL);
	BUG_ON(nat_t120_hook != NULL);
	BUG_ON(nat_h245_hook != NULL);
	BUG_ON(nat_callforwarding_hook != NULL);
	BUG_ON(nat_q931_hook != NULL);

	rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
	rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
	rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
	rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
	rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
	rcu_assign_pointer(nat_t120_hook, nat_t120);
	rcu_assign_pointer(nat_h245_hook, nat_h245);
	rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
	rcu_assign_pointer(nat_q931_hook, nat_q931);
	return 0;
}
556 | 556 | ||
/****************************************************************************/
/* Module exit: clear every hook pointer, then wait for in-flight RCU
 * readers to finish (synchronize_rcu) before the module text goes away. */
static void __exit fini(void)
{
	rcu_assign_pointer(set_h245_addr_hook, NULL);
	rcu_assign_pointer(set_h225_addr_hook, NULL);
	rcu_assign_pointer(set_sig_addr_hook, NULL);
	rcu_assign_pointer(set_ras_addr_hook, NULL);
	rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
	rcu_assign_pointer(nat_t120_hook, NULL);
	rcu_assign_pointer(nat_h245_hook, NULL);
	rcu_assign_pointer(nat_callforwarding_hook, NULL);
	rcu_assign_pointer(nat_q931_hook, NULL);
	synchronize_rcu();
}
571 | 571 | ||
572 | /****************************************************************************/ | 572 | /****************************************************************************/ |
573 | module_init(init); | 573 | module_init(init); |
574 | module_exit(fini); | 574 | module_exit(fini); |
575 | 575 | ||
576 | MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); | 576 | MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); |
577 | MODULE_DESCRIPTION("H.323 NAT helper"); | 577 | MODULE_DESCRIPTION("H.323 NAT helper"); |
578 | MODULE_LICENSE("GPL"); | 578 | MODULE_LICENSE("GPL"); |
579 | MODULE_ALIAS("ip_nat_h323"); | 579 | MODULE_ALIAS("ip_nat_h323"); |
580 | 580 |
net/ipv4/netfilter/nf_nat_snmp_basic.c
1 | /* | 1 | /* |
2 | * nf_nat_snmp_basic.c | 2 | * nf_nat_snmp_basic.c |
3 | * | 3 | * |
4 | * Basic SNMP Application Layer Gateway | 4 | * Basic SNMP Application Layer Gateway |
5 | * | 5 | * |
6 | * This IP NAT module is intended for use with SNMP network | 6 | * This IP NAT module is intended for use with SNMP network |
7 | * discovery and monitoring applications where target networks use | 7 | * discovery and monitoring applications where target networks use |
8 | * conflicting private address realms. | 8 | * conflicting private address realms. |
9 | * | 9 | * |
10 | * Static NAT is used to remap the networks from the view of the network | 10 | * Static NAT is used to remap the networks from the view of the network |
11 | * management system at the IP layer, and this module remaps some application | 11 | * management system at the IP layer, and this module remaps some application |
12 | * layer addresses to match. | 12 | * layer addresses to match. |
13 | * | 13 | * |
14 | * The simplest form of ALG is performed, where only tagged IP addresses | 14 | * The simplest form of ALG is performed, where only tagged IP addresses |
15 | * are modified. The module does not need to be MIB aware and only scans | 15 | * are modified. The module does not need to be MIB aware and only scans |
16 | * messages at the ASN.1/BER level. | 16 | * messages at the ASN.1/BER level. |
17 | * | 17 | * |
18 | * Currently, only SNMPv1 and SNMPv2 are supported. | 18 | * Currently, only SNMPv1 and SNMPv2 are supported. |
19 | * | 19 | * |
20 | * More information on ALG and associated issues can be found in | 20 | * More information on ALG and associated issues can be found in |
21 | * RFC 2962 | 21 | * RFC 2962 |
22 | * | 22 | * |
23 | * The ASB.1/BER parsing code is derived from the gxsnmp package by Gregory | 23 | * The ASB.1/BER parsing code is derived from the gxsnmp package by Gregory |
24 | * McLean & Jochen Friedrich, stripped down for use in the kernel. | 24 | * McLean & Jochen Friedrich, stripped down for use in the kernel. |
25 | * | 25 | * |
26 | * Copyright (c) 2000 RP Internet (www.rpi.net.au). | 26 | * Copyright (c) 2000 RP Internet (www.rpi.net.au). |
27 | * | 27 | * |
28 | * This program is free software; you can redistribute it and/or modify | 28 | * This program is free software; you can redistribute it and/or modify |
29 | * it under the terms of the GNU General Public License as published by | 29 | * it under the terms of the GNU General Public License as published by |
30 | * the Free Software Foundation; either version 2 of the License, or | 30 | * the Free Software Foundation; either version 2 of the License, or |
31 | * (at your option) any later version. | 31 | * (at your option) any later version. |
32 | * This program is distributed in the hope that it will be useful, | 32 | * This program is distributed in the hope that it will be useful, |
33 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 33 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
34 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 34 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
35 | * GNU General Public License for more details. | 35 | * GNU General Public License for more details. |
36 | * You should have received a copy of the GNU General Public License | 36 | * You should have received a copy of the GNU General Public License |
37 | * along with this program; if not, write to the Free Software | 37 | * along with this program; if not, write to the Free Software |
38 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 38 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
39 | * | 39 | * |
40 | * Author: James Morris <jmorris@intercode.com.au> | 40 | * Author: James Morris <jmorris@intercode.com.au> |
41 | */ | 41 | */ |
42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
43 | #include <linux/moduleparam.h> | 43 | #include <linux/moduleparam.h> |
44 | #include <linux/types.h> | 44 | #include <linux/types.h> |
45 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/in.h> | 47 | #include <linux/in.h> |
48 | #include <linux/ip.h> | 48 | #include <linux/ip.h> |
49 | #include <linux/udp.h> | 49 | #include <linux/udp.h> |
50 | #include <net/checksum.h> | 50 | #include <net/checksum.h> |
51 | #include <net/udp.h> | 51 | #include <net/udp.h> |
52 | 52 | ||
53 | #include <net/netfilter/nf_nat.h> | 53 | #include <net/netfilter/nf_nat.h> |
54 | #include <net/netfilter/nf_conntrack_expect.h> | 54 | #include <net/netfilter/nf_conntrack_expect.h> |
55 | #include <net/netfilter/nf_conntrack_helper.h> | 55 | #include <net/netfilter/nf_conntrack_helper.h> |
56 | #include <net/netfilter/nf_nat_helper.h> | 56 | #include <net/netfilter/nf_nat_helper.h> |
57 | 57 | ||
58 | MODULE_LICENSE("GPL"); | 58 | MODULE_LICENSE("GPL"); |
59 | MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); | 59 | MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); |
60 | MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); | 60 | MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); |
61 | MODULE_ALIAS("ip_nat_snmp_basic"); | 61 | MODULE_ALIAS("ip_nat_snmp_basic"); |
62 | 62 | ||
63 | #define SNMP_PORT 161 | 63 | #define SNMP_PORT 161 |
64 | #define SNMP_TRAP_PORT 162 | 64 | #define SNMP_TRAP_PORT 162 |
65 | #define NOCT1(n) (*(u8 *)(n)) | 65 | #define NOCT1(n) (*(u8 *)(n)) |
66 | 66 | ||
/* Debug flag — presumably exposed as a module parameter further down the
 * file; TODO confirm against the module_param() declarations. */
static int debug;
/* Lock guarding SNMP ALG processing; its exact scope is defined by the
 * lock/unlock sites later in this file (not visible in this chunk). */
static DEFINE_SPINLOCK(snmp_lock);
69 | 69 | ||
/*
 * Application layer address mapping mimics the NAT mapping, but
 * only for the first octet in this case (a more flexible system
 * can be implemented if needed).
 */
struct oct1_map
{
	u_int8_t from;	/* first IP octet as it appears in the SNMP payload */
	u_int8_t to;	/* replacement first octet, mirroring the NAT mapping */
};
80 | 80 | ||
81 | 81 | ||
82 | /***************************************************************************** | 82 | /***************************************************************************** |
83 | * | 83 | * |
84 | * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) | 84 | * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) |
85 | * | 85 | * |
86 | *****************************************************************************/ | 86 | *****************************************************************************/ |
87 | 87 | ||
88 | /* Class */ | 88 | /* Class */ |
89 | #define ASN1_UNI 0 /* Universal */ | 89 | #define ASN1_UNI 0 /* Universal */ |
90 | #define ASN1_APL 1 /* Application */ | 90 | #define ASN1_APL 1 /* Application */ |
91 | #define ASN1_CTX 2 /* Context */ | 91 | #define ASN1_CTX 2 /* Context */ |
92 | #define ASN1_PRV 3 /* Private */ | 92 | #define ASN1_PRV 3 /* Private */ |
93 | 93 | ||
94 | /* Tag */ | 94 | /* Tag */ |
95 | #define ASN1_EOC 0 /* End Of Contents */ | 95 | #define ASN1_EOC 0 /* End Of Contents */ |
96 | #define ASN1_BOL 1 /* Boolean */ | 96 | #define ASN1_BOL 1 /* Boolean */ |
97 | #define ASN1_INT 2 /* Integer */ | 97 | #define ASN1_INT 2 /* Integer */ |
98 | #define ASN1_BTS 3 /* Bit String */ | 98 | #define ASN1_BTS 3 /* Bit String */ |
99 | #define ASN1_OTS 4 /* Octet String */ | 99 | #define ASN1_OTS 4 /* Octet String */ |
100 | #define ASN1_NUL 5 /* Null */ | 100 | #define ASN1_NUL 5 /* Null */ |
101 | #define ASN1_OJI 6 /* Object Identifier */ | 101 | #define ASN1_OJI 6 /* Object Identifier */ |
102 | #define ASN1_OJD 7 /* Object Description */ | 102 | #define ASN1_OJD 7 /* Object Description */ |
103 | #define ASN1_EXT 8 /* External */ | 103 | #define ASN1_EXT 8 /* External */ |
104 | #define ASN1_SEQ 16 /* Sequence */ | 104 | #define ASN1_SEQ 16 /* Sequence */ |
105 | #define ASN1_SET 17 /* Set */ | 105 | #define ASN1_SET 17 /* Set */ |
106 | #define ASN1_NUMSTR 18 /* Numerical String */ | 106 | #define ASN1_NUMSTR 18 /* Numerical String */ |
107 | #define ASN1_PRNSTR 19 /* Printable String */ | 107 | #define ASN1_PRNSTR 19 /* Printable String */ |
108 | #define ASN1_TEXSTR 20 /* Teletext String */ | 108 | #define ASN1_TEXSTR 20 /* Teletext String */ |
109 | #define ASN1_VIDSTR 21 /* Video String */ | 109 | #define ASN1_VIDSTR 21 /* Video String */ |
110 | #define ASN1_IA5STR 22 /* IA5 String */ | 110 | #define ASN1_IA5STR 22 /* IA5 String */ |
111 | #define ASN1_UNITIM 23 /* Universal Time */ | 111 | #define ASN1_UNITIM 23 /* Universal Time */ |
112 | #define ASN1_GENTIM 24 /* General Time */ | 112 | #define ASN1_GENTIM 24 /* General Time */ |
113 | #define ASN1_GRASTR 25 /* Graphical String */ | 113 | #define ASN1_GRASTR 25 /* Graphical String */ |
114 | #define ASN1_VISSTR 26 /* Visible String */ | 114 | #define ASN1_VISSTR 26 /* Visible String */ |
115 | #define ASN1_GENSTR 27 /* General String */ | 115 | #define ASN1_GENSTR 27 /* General String */ |
116 | 116 | ||
117 | /* Primitive / Constructed methods*/ | 117 | /* Primitive / Constructed methods*/ |
118 | #define ASN1_PRI 0 /* Primitive */ | 118 | #define ASN1_PRI 0 /* Primitive */ |
119 | #define ASN1_CON 1 /* Constructed */ | 119 | #define ASN1_CON 1 /* Constructed */ |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * Error codes. | 122 | * Error codes. |
123 | */ | 123 | */ |
124 | #define ASN1_ERR_NOERROR 0 | 124 | #define ASN1_ERR_NOERROR 0 |
125 | #define ASN1_ERR_DEC_EMPTY 2 | 125 | #define ASN1_ERR_DEC_EMPTY 2 |
126 | #define ASN1_ERR_DEC_EOC_MISMATCH 3 | 126 | #define ASN1_ERR_DEC_EOC_MISMATCH 3 |
127 | #define ASN1_ERR_DEC_LENGTH_MISMATCH 4 | 127 | #define ASN1_ERR_DEC_LENGTH_MISMATCH 4 |
128 | #define ASN1_ERR_DEC_BADVALUE 5 | 128 | #define ASN1_ERR_DEC_BADVALUE 5 |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * ASN.1 context. | 131 | * ASN.1 context. |
132 | */ | 132 | */ |
133 | struct asn1_ctx | 133 | struct asn1_ctx |
134 | { | 134 | { |
135 | int error; /* Error condition */ | 135 | int error; /* Error condition */ |
136 | unsigned char *pointer; /* Octet just to be decoded */ | 136 | unsigned char *pointer; /* Octet just to be decoded */ |
137 | unsigned char *begin; /* First octet */ | 137 | unsigned char *begin; /* First octet */ |
138 | unsigned char *end; /* Octet after last octet */ | 138 | unsigned char *end; /* Octet after last octet */ |
139 | }; | 139 | }; |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Octet string (not null terminated) | 142 | * Octet string (not null terminated) |
143 | */ | 143 | */ |
144 | struct asn1_octstr | 144 | struct asn1_octstr |
145 | { | 145 | { |
146 | unsigned char *data; | 146 | unsigned char *data; |
147 | unsigned int len; | 147 | unsigned int len; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | static void asn1_open(struct asn1_ctx *ctx, | 150 | static void asn1_open(struct asn1_ctx *ctx, |
151 | unsigned char *buf, | 151 | unsigned char *buf, |
152 | unsigned int len) | 152 | unsigned int len) |
153 | { | 153 | { |
154 | ctx->begin = buf; | 154 | ctx->begin = buf; |
155 | ctx->end = buf + len; | 155 | ctx->end = buf + len; |
156 | ctx->pointer = buf; | 156 | ctx->pointer = buf; |
157 | ctx->error = ASN1_ERR_NOERROR; | 157 | ctx->error = ASN1_ERR_NOERROR; |
158 | } | 158 | } |
159 | 159 | ||
160 | static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) | 160 | static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) |
161 | { | 161 | { |
162 | if (ctx->pointer >= ctx->end) { | 162 | if (ctx->pointer >= ctx->end) { |
163 | ctx->error = ASN1_ERR_DEC_EMPTY; | 163 | ctx->error = ASN1_ERR_DEC_EMPTY; |
164 | return 0; | 164 | return 0; |
165 | } | 165 | } |
166 | *ch = *(ctx->pointer)++; | 166 | *ch = *(ctx->pointer)++; |
167 | return 1; | 167 | return 1; |
168 | } | 168 | } |
169 | 169 | ||
/*
 * Decode a high-tag-number (multi-octet) tag: base-128 digits, with the
 * top bit of each octet flagging continuation.
 */
static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
{
	unsigned char ch;

	*tag = 0;

	for (;;) {
		if (!asn1_octet_decode(ctx, &ch))
			return 0;
		*tag = (*tag << 7) | (ch & 0x7F);
		if (!(ch & 0x80))
			return 1;
	}
}
185 | 185 | ||
/*
 * Decode an ASN.1 identifier octet into class, primitive/constructed flag
 * and tag number.  Tag value 0x1F escapes into the multi-octet tag form.
 */
static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
				    unsigned int *cls,
				    unsigned int *con,
				    unsigned int *tag)
{
	unsigned char id;

	if (!asn1_octet_decode(ctx, &id))
		return 0;

	*cls = (id >> 6) & 0x03;	/* bits 8-7: class */
	*con = (id >> 5) & 0x01;	/* bit 6: primitive/constructed */
	*tag = id & 0x1F;		/* bits 5-1: tag number */

	if (*tag == 0x1F && !asn1_tag_decode(ctx, tag))
		return 0;

	return 1;
}
206 | 206 | ||
207 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | 207 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, |
208 | unsigned int *def, | 208 | unsigned int *def, |
209 | unsigned int *len) | 209 | unsigned int *len) |
210 | { | 210 | { |
211 | unsigned char ch, cnt; | 211 | unsigned char ch, cnt; |
212 | 212 | ||
213 | if (!asn1_octet_decode(ctx, &ch)) | 213 | if (!asn1_octet_decode(ctx, &ch)) |
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | if (ch == 0x80) | 216 | if (ch == 0x80) |
217 | *def = 0; | 217 | *def = 0; |
218 | else { | 218 | else { |
219 | *def = 1; | 219 | *def = 1; |
220 | 220 | ||
221 | if (ch < 0x80) | 221 | if (ch < 0x80) |
222 | *len = ch; | 222 | *len = ch; |
223 | else { | 223 | else { |
224 | cnt = ch & 0x7F; | 224 | cnt = ch & 0x7F; |
225 | *len = 0; | 225 | *len = 0; |
226 | 226 | ||
227 | while (cnt > 0) { | 227 | while (cnt > 0) { |
228 | if (!asn1_octet_decode(ctx, &ch)) | 228 | if (!asn1_octet_decode(ctx, &ch)) |
229 | return 0; | 229 | return 0; |
230 | *len <<= 8; | 230 | *len <<= 8; |
231 | *len |= ch; | 231 | *len |= ch; |
232 | cnt--; | 232 | cnt--; |
233 | } | 233 | } |
234 | } | 234 | } |
235 | } | 235 | } |
236 | 236 | ||
237 | /* don't trust len bigger than ctx buffer */ | 237 | /* don't trust len bigger than ctx buffer */ |
238 | if (*len > ctx->end - ctx->pointer) | 238 | if (*len > ctx->end - ctx->pointer) |
239 | return 0; | 239 | return 0; |
240 | 240 | ||
241 | return 1; | 241 | return 1; |
242 | } | 242 | } |
243 | 243 | ||
244 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, | 244 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, |
245 | unsigned char **eoc, | 245 | unsigned char **eoc, |
246 | unsigned int *cls, | 246 | unsigned int *cls, |
247 | unsigned int *con, | 247 | unsigned int *con, |
248 | unsigned int *tag) | 248 | unsigned int *tag) |
249 | { | 249 | { |
250 | unsigned int def, len; | 250 | unsigned int def, len; |
251 | 251 | ||
252 | if (!asn1_id_decode(ctx, cls, con, tag)) | 252 | if (!asn1_id_decode(ctx, cls, con, tag)) |
253 | return 0; | 253 | return 0; |
254 | 254 | ||
255 | def = len = 0; | 255 | def = len = 0; |
256 | if (!asn1_length_decode(ctx, &def, &len)) | 256 | if (!asn1_length_decode(ctx, &def, &len)) |
257 | return 0; | 257 | return 0; |
258 | 258 | ||
259 | /* primitive shall be definite, indefinite shall be constructed */ | 259 | /* primitive shall be definite, indefinite shall be constructed */ |
260 | if (*con == ASN1_PRI && !def) | 260 | if (*con == ASN1_PRI && !def) |
261 | return 0; | 261 | return 0; |
262 | 262 | ||
263 | if (def) | 263 | if (def) |
264 | *eoc = ctx->pointer + len; | 264 | *eoc = ctx->pointer + len; |
265 | else | 265 | else |
266 | *eoc = NULL; | 266 | *eoc = NULL; |
267 | return 1; | 267 | return 1; |
268 | } | 268 | } |
269 | 269 | ||
270 | static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) | 270 | static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) |
271 | { | 271 | { |
272 | unsigned char ch; | 272 | unsigned char ch; |
273 | 273 | ||
274 | if (eoc == NULL) { | 274 | if (eoc == NULL) { |
275 | if (!asn1_octet_decode(ctx, &ch)) | 275 | if (!asn1_octet_decode(ctx, &ch)) |
276 | return 0; | 276 | return 0; |
277 | 277 | ||
278 | if (ch != 0x00) { | 278 | if (ch != 0x00) { |
279 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | 279 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; |
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
282 | 282 | ||
283 | if (!asn1_octet_decode(ctx, &ch)) | 283 | if (!asn1_octet_decode(ctx, &ch)) |
284 | return 0; | 284 | return 0; |
285 | 285 | ||
286 | if (ch != 0x00) { | 286 | if (ch != 0x00) { |
287 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | 287 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; |
288 | return 0; | 288 | return 0; |
289 | } | 289 | } |
290 | return 1; | 290 | return 1; |
291 | } else { | 291 | } else { |
292 | if (ctx->pointer != eoc) { | 292 | if (ctx->pointer != eoc) { |
293 | ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; | 293 | ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; |
294 | return 0; | 294 | return 0; |
295 | } | 295 | } |
296 | return 1; | 296 | return 1; |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
/* A NULL value carries no contents: just skip to the end-of-contents mark. */
static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
{
	ctx->pointer = eoc;
	return 1;
}
305 | 305 | ||
306 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | 306 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, |
307 | unsigned char *eoc, | 307 | unsigned char *eoc, |
308 | long *integer) | 308 | long *integer) |
309 | { | 309 | { |
310 | unsigned char ch; | 310 | unsigned char ch; |
311 | unsigned int len; | 311 | unsigned int len; |
312 | 312 | ||
313 | if (!asn1_octet_decode(ctx, &ch)) | 313 | if (!asn1_octet_decode(ctx, &ch)) |
314 | return 0; | 314 | return 0; |
315 | 315 | ||
316 | *integer = (signed char) ch; | 316 | *integer = (signed char) ch; |
317 | len = 1; | 317 | len = 1; |
318 | 318 | ||
319 | while (ctx->pointer < eoc) { | 319 | while (ctx->pointer < eoc) { |
320 | if (++len > sizeof (long)) { | 320 | if (++len > sizeof (long)) { |
321 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 321 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
322 | return 0; | 322 | return 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | if (!asn1_octet_decode(ctx, &ch)) | 325 | if (!asn1_octet_decode(ctx, &ch)) |
326 | return 0; | 326 | return 0; |
327 | 327 | ||
328 | *integer <<= 8; | 328 | *integer <<= 8; |
329 | *integer |= ch; | 329 | *integer |= ch; |
330 | } | 330 | } |
331 | return 1; | 331 | return 1; |
332 | } | 332 | } |
333 | 333 | ||
334 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | 334 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, |
335 | unsigned char *eoc, | 335 | unsigned char *eoc, |
336 | unsigned int *integer) | 336 | unsigned int *integer) |
337 | { | 337 | { |
338 | unsigned char ch; | 338 | unsigned char ch; |
339 | unsigned int len; | 339 | unsigned int len; |
340 | 340 | ||
341 | if (!asn1_octet_decode(ctx, &ch)) | 341 | if (!asn1_octet_decode(ctx, &ch)) |
342 | return 0; | 342 | return 0; |
343 | 343 | ||
344 | *integer = ch; | 344 | *integer = ch; |
345 | if (ch == 0) len = 0; | 345 | if (ch == 0) len = 0; |
346 | else len = 1; | 346 | else len = 1; |
347 | 347 | ||
348 | while (ctx->pointer < eoc) { | 348 | while (ctx->pointer < eoc) { |
349 | if (++len > sizeof (unsigned int)) { | 349 | if (++len > sizeof (unsigned int)) { |
350 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 350 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | 353 | ||
354 | if (!asn1_octet_decode(ctx, &ch)) | 354 | if (!asn1_octet_decode(ctx, &ch)) |
355 | return 0; | 355 | return 0; |
356 | 356 | ||
357 | *integer <<= 8; | 357 | *integer <<= 8; |
358 | *integer |= ch; | 358 | *integer |= ch; |
359 | } | 359 | } |
360 | return 1; | 360 | return 1; |
361 | } | 361 | } |
362 | 362 | ||
363 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | 363 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, |
364 | unsigned char *eoc, | 364 | unsigned char *eoc, |
365 | unsigned long *integer) | 365 | unsigned long *integer) |
366 | { | 366 | { |
367 | unsigned char ch; | 367 | unsigned char ch; |
368 | unsigned int len; | 368 | unsigned int len; |
369 | 369 | ||
370 | if (!asn1_octet_decode(ctx, &ch)) | 370 | if (!asn1_octet_decode(ctx, &ch)) |
371 | return 0; | 371 | return 0; |
372 | 372 | ||
373 | *integer = ch; | 373 | *integer = ch; |
374 | if (ch == 0) len = 0; | 374 | if (ch == 0) len = 0; |
375 | else len = 1; | 375 | else len = 1; |
376 | 376 | ||
377 | while (ctx->pointer < eoc) { | 377 | while (ctx->pointer < eoc) { |
378 | if (++len > sizeof (unsigned long)) { | 378 | if (++len > sizeof (unsigned long)) { |
379 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 379 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
383 | if (!asn1_octet_decode(ctx, &ch)) | 383 | if (!asn1_octet_decode(ctx, &ch)) |
384 | return 0; | 384 | return 0; |
385 | 385 | ||
386 | *integer <<= 8; | 386 | *integer <<= 8; |
387 | *integer |= ch; | 387 | *integer |= ch; |
388 | } | 388 | } |
389 | return 1; | 389 | return 1; |
390 | } | 390 | } |
391 | 391 | ||
392 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | 392 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, |
393 | unsigned char *eoc, | 393 | unsigned char *eoc, |
394 | unsigned char **octets, | 394 | unsigned char **octets, |
395 | unsigned int *len) | 395 | unsigned int *len) |
396 | { | 396 | { |
397 | unsigned char *ptr; | 397 | unsigned char *ptr; |
398 | 398 | ||
399 | *len = 0; | 399 | *len = 0; |
400 | 400 | ||
401 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); | 401 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); |
402 | if (*octets == NULL) { | 402 | if (*octets == NULL) { |
403 | if (net_ratelimit()) | 403 | if (net_ratelimit()) |
404 | printk("OOM in bsalg (%d)\n", __LINE__); | 404 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
407 | 407 | ||
408 | ptr = *octets; | 408 | ptr = *octets; |
409 | while (ctx->pointer < eoc) { | 409 | while (ctx->pointer < eoc) { |
410 | if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { | 410 | if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { |
411 | kfree(*octets); | 411 | kfree(*octets); |
412 | *octets = NULL; | 412 | *octets = NULL; |
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | (*len)++; | 415 | (*len)++; |
416 | } | 416 | } |
417 | return 1; | 417 | return 1; |
418 | } | 418 | } |
419 | 419 | ||
/*
 * Decode one OID subidentifier: base-128 digits, top bit of each octet
 * flags continuation.
 */
static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
				       unsigned long *subid)
{
	unsigned char ch;

	*subid = 0;

	for (;;) {
		if (!asn1_octet_decode(ctx, &ch))
			return 0;
		*subid = (*subid << 7) | (ch & 0x7F);
		if (!(ch & 0x80))
			return 1;
	}
}
436 | 436 | ||
/*
 * Decode an ASN.1 OBJECT IDENTIFIER into a freshly kmalloc'ed array of
 * subidentifiers.  On success *oid/*len describe the array (caller
 * frees); on any failure *oid is left NULL and 0 is returned.
 */
static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
				     unsigned char *eoc,
				     unsigned long **oid,
				     unsigned int *len)
{
	unsigned long subid;
	unsigned long *optr;
	size_t size;

	/* Each remaining octet yields at most one subid; +1 because the
	 * first encoded subid expands into two array entries below. */
	size = eoc - ctx->pointer + 1;

	/* first subid actually encodes first two subids */
	if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
		return 0;

	*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
	if (*oid == NULL) {
		if (net_ratelimit())
			pr_notice("OOM in bsalg (%d)\n", __LINE__);
		return 0;
	}

	optr = *oid;

	if (!asn1_subid_decode(ctx, &subid)) {
		kfree(*oid);
		*oid = NULL;
		return 0;
	}

	/* Per the BER OID rules the first encoded subid packs the first
	 * two components as 40 * oid[0] + oid[1]. */
	if (subid < 40) {
		optr [0] = 0;
		optr [1] = subid;
	} else if (subid < 80) {
		optr [0] = 1;
		optr [1] = subid - 40;
	} else {
		optr [0] = 2;
		optr [1] = subid - 80;
	}

	*len = 2;
	optr += 2;

	while (ctx->pointer < eoc) {
		/* bail out rather than overrun the allocation */
		if (++(*len) > size) {
			ctx->error = ASN1_ERR_DEC_BADVALUE;
			kfree(*oid);
			*oid = NULL;
			return 0;
		}

		if (!asn1_subid_decode(ctx, optr++)) {
			kfree(*oid);
			*oid = NULL;
			return 0;
		}
	}
	return 1;
}
497 | 497 | ||
498 | /***************************************************************************** | 498 | /***************************************************************************** |
499 | * | 499 | * |
500 | * SNMP decoding routines (gxsnmp author Dirk Wisse) | 500 | * SNMP decoding routines (gxsnmp author Dirk Wisse) |
501 | * | 501 | * |
502 | *****************************************************************************/ | 502 | *****************************************************************************/ |
503 | 503 | ||
504 | /* SNMP Versions */ | 504 | /* SNMP Versions */ |
505 | #define SNMP_V1 0 | 505 | #define SNMP_V1 0 |
506 | #define SNMP_V2C 1 | 506 | #define SNMP_V2C 1 |
507 | #define SNMP_V2 2 | 507 | #define SNMP_V2 2 |
508 | #define SNMP_V3 3 | 508 | #define SNMP_V3 3 |
509 | 509 | ||
510 | /* Default Sizes */ | 510 | /* Default Sizes */ |
511 | #define SNMP_SIZE_COMM 256 | 511 | #define SNMP_SIZE_COMM 256 |
512 | #define SNMP_SIZE_OBJECTID 128 | 512 | #define SNMP_SIZE_OBJECTID 128 |
513 | #define SNMP_SIZE_BUFCHR 256 | 513 | #define SNMP_SIZE_BUFCHR 256 |
514 | #define SNMP_SIZE_BUFINT 128 | 514 | #define SNMP_SIZE_BUFINT 128 |
515 | #define SNMP_SIZE_SMALLOBJECTID 16 | 515 | #define SNMP_SIZE_SMALLOBJECTID 16 |
516 | 516 | ||
517 | /* Requests */ | 517 | /* Requests */ |
518 | #define SNMP_PDU_GET 0 | 518 | #define SNMP_PDU_GET 0 |
519 | #define SNMP_PDU_NEXT 1 | 519 | #define SNMP_PDU_NEXT 1 |
520 | #define SNMP_PDU_RESPONSE 2 | 520 | #define SNMP_PDU_RESPONSE 2 |
521 | #define SNMP_PDU_SET 3 | 521 | #define SNMP_PDU_SET 3 |
522 | #define SNMP_PDU_TRAP1 4 | 522 | #define SNMP_PDU_TRAP1 4 |
523 | #define SNMP_PDU_BULK 5 | 523 | #define SNMP_PDU_BULK 5 |
524 | #define SNMP_PDU_INFORM 6 | 524 | #define SNMP_PDU_INFORM 6 |
525 | #define SNMP_PDU_TRAP2 7 | 525 | #define SNMP_PDU_TRAP2 7 |
526 | 526 | ||
527 | /* Errors */ | 527 | /* Errors */ |
528 | #define SNMP_NOERROR 0 | 528 | #define SNMP_NOERROR 0 |
529 | #define SNMP_TOOBIG 1 | 529 | #define SNMP_TOOBIG 1 |
530 | #define SNMP_NOSUCHNAME 2 | 530 | #define SNMP_NOSUCHNAME 2 |
531 | #define SNMP_BADVALUE 3 | 531 | #define SNMP_BADVALUE 3 |
532 | #define SNMP_READONLY 4 | 532 | #define SNMP_READONLY 4 |
533 | #define SNMP_GENERROR 5 | 533 | #define SNMP_GENERROR 5 |
534 | #define SNMP_NOACCESS 6 | 534 | #define SNMP_NOACCESS 6 |
535 | #define SNMP_WRONGTYPE 7 | 535 | #define SNMP_WRONGTYPE 7 |
536 | #define SNMP_WRONGLENGTH 8 | 536 | #define SNMP_WRONGLENGTH 8 |
537 | #define SNMP_WRONGENCODING 9 | 537 | #define SNMP_WRONGENCODING 9 |
538 | #define SNMP_WRONGVALUE 10 | 538 | #define SNMP_WRONGVALUE 10 |
539 | #define SNMP_NOCREATION 11 | 539 | #define SNMP_NOCREATION 11 |
540 | #define SNMP_INCONSISTENTVALUE 12 | 540 | #define SNMP_INCONSISTENTVALUE 12 |
541 | #define SNMP_RESOURCEUNAVAILABLE 13 | 541 | #define SNMP_RESOURCEUNAVAILABLE 13 |
542 | #define SNMP_COMMITFAILED 14 | 542 | #define SNMP_COMMITFAILED 14 |
543 | #define SNMP_UNDOFAILED 15 | 543 | #define SNMP_UNDOFAILED 15 |
544 | #define SNMP_AUTHORIZATIONERROR 16 | 544 | #define SNMP_AUTHORIZATIONERROR 16 |
545 | #define SNMP_NOTWRITABLE 17 | 545 | #define SNMP_NOTWRITABLE 17 |
546 | #define SNMP_INCONSISTENTNAME 18 | 546 | #define SNMP_INCONSISTENTNAME 18 |
547 | 547 | ||
548 | /* General SNMP V1 Traps */ | 548 | /* General SNMP V1 Traps */ |
549 | #define SNMP_TRAP_COLDSTART 0 | 549 | #define SNMP_TRAP_COLDSTART 0 |
550 | #define SNMP_TRAP_WARMSTART 1 | 550 | #define SNMP_TRAP_WARMSTART 1 |
551 | #define SNMP_TRAP_LINKDOWN 2 | 551 | #define SNMP_TRAP_LINKDOWN 2 |
552 | #define SNMP_TRAP_LINKUP 3 | 552 | #define SNMP_TRAP_LINKUP 3 |
553 | #define SNMP_TRAP_AUTFAILURE 4 | 553 | #define SNMP_TRAP_AUTFAILURE 4 |
554 | #define SNMP_TRAP_EQPNEIGHBORLOSS 5 | 554 | #define SNMP_TRAP_EQPNEIGHBORLOSS 5 |
555 | #define SNMP_TRAP_ENTSPECIFIC 6 | 555 | #define SNMP_TRAP_ENTSPECIFIC 6 |
556 | 556 | ||
557 | /* SNMPv1 Types */ | 557 | /* SNMPv1 Types */ |
558 | #define SNMP_NULL 0 | 558 | #define SNMP_NULL 0 |
559 | #define SNMP_INTEGER 1 /* l */ | 559 | #define SNMP_INTEGER 1 /* l */ |
560 | #define SNMP_OCTETSTR 2 /* c */ | 560 | #define SNMP_OCTETSTR 2 /* c */ |
561 | #define SNMP_DISPLAYSTR 2 /* c */ | 561 | #define SNMP_DISPLAYSTR 2 /* c */ |
562 | #define SNMP_OBJECTID 3 /* ul */ | 562 | #define SNMP_OBJECTID 3 /* ul */ |
563 | #define SNMP_IPADDR 4 /* uc */ | 563 | #define SNMP_IPADDR 4 /* uc */ |
564 | #define SNMP_COUNTER 5 /* ul */ | 564 | #define SNMP_COUNTER 5 /* ul */ |
565 | #define SNMP_GAUGE 6 /* ul */ | 565 | #define SNMP_GAUGE 6 /* ul */ |
566 | #define SNMP_TIMETICKS 7 /* ul */ | 566 | #define SNMP_TIMETICKS 7 /* ul */ |
567 | #define SNMP_OPAQUE 8 /* c */ | 567 | #define SNMP_OPAQUE 8 /* c */ |
568 | 568 | ||
569 | /* Additional SNMPv2 Types */ | 569 | /* Additional SNMPv2 Types */ |
570 | #define SNMP_UINTEGER 5 /* ul */ | 570 | #define SNMP_UINTEGER 5 /* ul */ |
571 | #define SNMP_BITSTR 9 /* uc */ | 571 | #define SNMP_BITSTR 9 /* uc */ |
572 | #define SNMP_NSAP 10 /* uc */ | 572 | #define SNMP_NSAP 10 /* uc */ |
573 | #define SNMP_COUNTER64 11 /* ul */ | 573 | #define SNMP_COUNTER64 11 /* ul */ |
574 | #define SNMP_NOSUCHOBJECT 12 | 574 | #define SNMP_NOSUCHOBJECT 12 |
575 | #define SNMP_NOSUCHINSTANCE 13 | 575 | #define SNMP_NOSUCHINSTANCE 13 |
576 | #define SNMP_ENDOFMIBVIEW 14 | 576 | #define SNMP_ENDOFMIBVIEW 14 |
577 | 577 | ||
578 | union snmp_syntax | 578 | union snmp_syntax |
579 | { | 579 | { |
580 | unsigned char uc[0]; /* 8 bit unsigned */ | 580 | unsigned char uc[0]; /* 8 bit unsigned */ |
581 | char c[0]; /* 8 bit signed */ | 581 | char c[0]; /* 8 bit signed */ |
582 | unsigned long ul[0]; /* 32 bit unsigned */ | 582 | unsigned long ul[0]; /* 32 bit unsigned */ |
583 | long l[0]; /* 32 bit signed */ | 583 | long l[0]; /* 32 bit signed */ |
584 | }; | 584 | }; |
585 | 585 | ||
/* One decoded SNMP varbind: object identifier plus a typed value. */
struct snmp_object
{
	unsigned long *id;		/* OID subidentifiers (kmalloc'ed) */
	unsigned int id_len;		/* number of subidentifiers */
	unsigned short type;		/* SNMP_* syntax type */
	unsigned int syntax_len;	/* bytes used in 'syntax' below */
	union snmp_syntax syntax;	/* value storage, sized at alloc time */
};
594 | 594 | ||
/* Header fields of a request/response PDU. */
struct snmp_request
{
	unsigned long id;		/* request-id */
	unsigned int error_status;
	unsigned int error_index;
};
601 | 601 | ||
/* Header fields of an SNMPv1 Trap-PDU. */
struct snmp_v1_trap
{
	unsigned long *id;		/* enterprise OID (kmalloc'ed) */
	unsigned int id_len;
	unsigned long ip_address;	/* pointer */
	unsigned int general;		/* generic-trap code */
	unsigned int specific;		/* specific-trap code */
	unsigned long time;		/* time-stamp (TimeTicks) */
};
611 | 611 | ||
612 | /* SNMP types */ | 612 | /* SNMP types */ |
613 | #define SNMP_IPA 0 | 613 | #define SNMP_IPA 0 |
614 | #define SNMP_CNT 1 | 614 | #define SNMP_CNT 1 |
615 | #define SNMP_GGE 2 | 615 | #define SNMP_GGE 2 |
616 | #define SNMP_TIT 3 | 616 | #define SNMP_TIT 3 |
617 | #define SNMP_OPQ 4 | 617 | #define SNMP_OPQ 4 |
618 | #define SNMP_C64 6 | 618 | #define SNMP_C64 6 |
619 | 619 | ||
620 | /* SNMP errors */ | 620 | /* SNMP errors */ |
621 | #define SERR_NSO 0 | 621 | #define SERR_NSO 0 |
622 | #define SERR_NSI 1 | 622 | #define SERR_NSI 1 |
623 | #define SERR_EOM 2 | 623 | #define SERR_EOM 2 |
624 | 624 | ||
625 | static inline void mangle_address(unsigned char *begin, | 625 | static inline void mangle_address(unsigned char *begin, |
626 | unsigned char *addr, | 626 | unsigned char *addr, |
627 | const struct oct1_map *map, | 627 | const struct oct1_map *map, |
628 | __sum16 *check); | 628 | __sum16 *check); |
/* Maps an (ASN.1 class, tag) pair onto an SNMP syntax type. */
struct snmp_cnv
{
	unsigned int class;
	unsigned int tag;
	int syntax;	/* SNMP_* value, or -1 as table terminator */
};
635 | 635 | ||
/*
 * Conversion table searched linearly by snmp_tag_cls2syntax(); the
 * {0, 0, -1} entry terminates the scan.  Note OCTETSTR and DISPLAYSTR
 * share a tag, so entry order determines which syntax is reported.
 */
static const struct snmp_cnv snmp_conv[] = {
	{ASN1_UNI, ASN1_NUL, SNMP_NULL},
	{ASN1_UNI, ASN1_INT, SNMP_INTEGER},
	{ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
	{ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR},
	{ASN1_UNI, ASN1_OJI, SNMP_OBJECTID},
	{ASN1_APL, SNMP_IPA, SNMP_IPADDR},
	{ASN1_APL, SNMP_CNT, SNMP_COUNTER},	/* Counter32 */
	{ASN1_APL, SNMP_GGE, SNMP_GAUGE},	/* Gauge32 == Unsigned32 */
	{ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
	{ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},

	/* SNMPv2 data types and errors */
	{ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
	{ASN1_APL, SNMP_C64, SNMP_COUNTER64},
	{ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT},
	{ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE},
	{ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW},
	{0,       0,       -1}
};
656 | 656 | ||
657 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, | 657 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, |
658 | unsigned int cls, | 658 | unsigned int cls, |
659 | unsigned short *syntax) | 659 | unsigned short *syntax) |
660 | { | 660 | { |
661 | const struct snmp_cnv *cnv; | 661 | const struct snmp_cnv *cnv; |
662 | 662 | ||
663 | cnv = snmp_conv; | 663 | cnv = snmp_conv; |
664 | 664 | ||
665 | while (cnv->syntax != -1) { | 665 | while (cnv->syntax != -1) { |
666 | if (cnv->tag == tag && cnv->class == cls) { | 666 | if (cnv->tag == tag && cnv->class == cls) { |
667 | *syntax = cnv->syntax; | 667 | *syntax = cnv->syntax; |
668 | return 1; | 668 | return 1; |
669 | } | 669 | } |
670 | cnv++; | 670 | cnv++; |
671 | } | 671 | } |
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
674 | 674 | ||
675 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | 675 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, |
676 | struct snmp_object **obj) | 676 | struct snmp_object **obj) |
677 | { | 677 | { |
678 | unsigned int cls, con, tag, len, idlen; | 678 | unsigned int cls, con, tag, len, idlen; |
679 | unsigned short type; | 679 | unsigned short type; |
680 | unsigned char *eoc, *end, *p; | 680 | unsigned char *eoc, *end, *p; |
681 | unsigned long *lp, *id; | 681 | unsigned long *lp, *id; |
682 | unsigned long ul; | 682 | unsigned long ul; |
683 | long l; | 683 | long l; |
684 | 684 | ||
685 | *obj = NULL; | 685 | *obj = NULL; |
686 | id = NULL; | 686 | id = NULL; |
687 | 687 | ||
688 | if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) | 688 | if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) |
689 | return 0; | 689 | return 0; |
690 | 690 | ||
691 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 691 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
692 | return 0; | 692 | return 0; |
693 | 693 | ||
694 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 694 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
695 | return 0; | 695 | return 0; |
696 | 696 | ||
697 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | 697 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) |
698 | return 0; | 698 | return 0; |
699 | 699 | ||
700 | if (!asn1_oid_decode(ctx, end, &id, &idlen)) | 700 | if (!asn1_oid_decode(ctx, end, &id, &idlen)) |
701 | return 0; | 701 | return 0; |
702 | 702 | ||
703 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { | 703 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { |
704 | kfree(id); | 704 | kfree(id); |
705 | return 0; | 705 | return 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | if (con != ASN1_PRI) { | 708 | if (con != ASN1_PRI) { |
709 | kfree(id); | 709 | kfree(id); |
710 | return 0; | 710 | return 0; |
711 | } | 711 | } |
712 | 712 | ||
713 | type = 0; | 713 | type = 0; |
714 | if (!snmp_tag_cls2syntax(tag, cls, &type)) { | 714 | if (!snmp_tag_cls2syntax(tag, cls, &type)) { |
715 | kfree(id); | 715 | kfree(id); |
716 | return 0; | 716 | return 0; |
717 | } | 717 | } |
718 | 718 | ||
719 | l = 0; | 719 | l = 0; |
720 | switch (type) { | 720 | switch (type) { |
721 | case SNMP_INTEGER: | 721 | case SNMP_INTEGER: |
722 | len = sizeof(long); | 722 | len = sizeof(long); |
723 | if (!asn1_long_decode(ctx, end, &l)) { | 723 | if (!asn1_long_decode(ctx, end, &l)) { |
724 | kfree(id); | 724 | kfree(id); |
725 | return 0; | 725 | return 0; |
726 | } | 726 | } |
727 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 727 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
728 | GFP_ATOMIC); | 728 | GFP_ATOMIC); |
729 | if (*obj == NULL) { | 729 | if (*obj == NULL) { |
730 | kfree(id); | 730 | kfree(id); |
731 | if (net_ratelimit()) | 731 | if (net_ratelimit()) |
732 | printk("OOM in bsalg (%d)\n", __LINE__); | 732 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
733 | return 0; | 733 | return 0; |
734 | } | 734 | } |
735 | (*obj)->syntax.l[0] = l; | 735 | (*obj)->syntax.l[0] = l; |
736 | break; | 736 | break; |
737 | case SNMP_OCTETSTR: | 737 | case SNMP_OCTETSTR: |
738 | case SNMP_OPAQUE: | 738 | case SNMP_OPAQUE: |
739 | if (!asn1_octets_decode(ctx, end, &p, &len)) { | 739 | if (!asn1_octets_decode(ctx, end, &p, &len)) { |
740 | kfree(id); | 740 | kfree(id); |
741 | return 0; | 741 | return 0; |
742 | } | 742 | } |
743 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 743 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
744 | GFP_ATOMIC); | 744 | GFP_ATOMIC); |
745 | if (*obj == NULL) { | 745 | if (*obj == NULL) { |
746 | kfree(p); | 746 | kfree(p); |
747 | kfree(id); | 747 | kfree(id); |
748 | if (net_ratelimit()) | 748 | if (net_ratelimit()) |
749 | printk("OOM in bsalg (%d)\n", __LINE__); | 749 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
750 | return 0; | 750 | return 0; |
751 | } | 751 | } |
752 | memcpy((*obj)->syntax.c, p, len); | 752 | memcpy((*obj)->syntax.c, p, len); |
753 | kfree(p); | 753 | kfree(p); |
754 | break; | 754 | break; |
755 | case SNMP_NULL: | 755 | case SNMP_NULL: |
756 | case SNMP_NOSUCHOBJECT: | 756 | case SNMP_NOSUCHOBJECT: |
757 | case SNMP_NOSUCHINSTANCE: | 757 | case SNMP_NOSUCHINSTANCE: |
758 | case SNMP_ENDOFMIBVIEW: | 758 | case SNMP_ENDOFMIBVIEW: |
759 | len = 0; | 759 | len = 0; |
760 | *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); | 760 | *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); |
761 | if (*obj == NULL) { | 761 | if (*obj == NULL) { |
762 | kfree(id); | 762 | kfree(id); |
763 | if (net_ratelimit()) | 763 | if (net_ratelimit()) |
764 | printk("OOM in bsalg (%d)\n", __LINE__); | 764 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
765 | return 0; | 765 | return 0; |
766 | } | 766 | } |
767 | if (!asn1_null_decode(ctx, end)) { | 767 | if (!asn1_null_decode(ctx, end)) { |
768 | kfree(id); | 768 | kfree(id); |
769 | kfree(*obj); | 769 | kfree(*obj); |
770 | *obj = NULL; | 770 | *obj = NULL; |
771 | return 0; | 771 | return 0; |
772 | } | 772 | } |
773 | break; | 773 | break; |
774 | case SNMP_OBJECTID: | 774 | case SNMP_OBJECTID: |
775 | if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { | 775 | if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { |
776 | kfree(id); | 776 | kfree(id); |
777 | return 0; | 777 | return 0; |
778 | } | 778 | } |
779 | len *= sizeof(unsigned long); | 779 | len *= sizeof(unsigned long); |
780 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | 780 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); |
781 | if (*obj == NULL) { | 781 | if (*obj == NULL) { |
782 | kfree(lp); | 782 | kfree(lp); |
783 | kfree(id); | 783 | kfree(id); |
784 | if (net_ratelimit()) | 784 | if (net_ratelimit()) |
785 | printk("OOM in bsalg (%d)\n", __LINE__); | 785 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
786 | return 0; | 786 | return 0; |
787 | } | 787 | } |
788 | memcpy((*obj)->syntax.ul, lp, len); | 788 | memcpy((*obj)->syntax.ul, lp, len); |
789 | kfree(lp); | 789 | kfree(lp); |
790 | break; | 790 | break; |
791 | case SNMP_IPADDR: | 791 | case SNMP_IPADDR: |
792 | if (!asn1_octets_decode(ctx, end, &p, &len)) { | 792 | if (!asn1_octets_decode(ctx, end, &p, &len)) { |
793 | kfree(id); | 793 | kfree(id); |
794 | return 0; | 794 | return 0; |
795 | } | 795 | } |
796 | if (len != 4) { | 796 | if (len != 4) { |
797 | kfree(p); | 797 | kfree(p); |
798 | kfree(id); | 798 | kfree(id); |
799 | return 0; | 799 | return 0; |
800 | } | 800 | } |
801 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | 801 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); |
802 | if (*obj == NULL) { | 802 | if (*obj == NULL) { |
803 | kfree(p); | 803 | kfree(p); |
804 | kfree(id); | 804 | kfree(id); |
805 | if (net_ratelimit()) | 805 | if (net_ratelimit()) |
806 | printk("OOM in bsalg (%d)\n", __LINE__); | 806 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | memcpy((*obj)->syntax.uc, p, len); | 809 | memcpy((*obj)->syntax.uc, p, len); |
810 | kfree(p); | 810 | kfree(p); |
811 | break; | 811 | break; |
812 | case SNMP_COUNTER: | 812 | case SNMP_COUNTER: |
813 | case SNMP_GAUGE: | 813 | case SNMP_GAUGE: |
814 | case SNMP_TIMETICKS: | 814 | case SNMP_TIMETICKS: |
815 | len = sizeof(unsigned long); | 815 | len = sizeof(unsigned long); |
816 | if (!asn1_ulong_decode(ctx, end, &ul)) { | 816 | if (!asn1_ulong_decode(ctx, end, &ul)) { |
817 | kfree(id); | 817 | kfree(id); |
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | 820 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); |
821 | if (*obj == NULL) { | 821 | if (*obj == NULL) { |
822 | kfree(id); | 822 | kfree(id); |
823 | if (net_ratelimit()) | 823 | if (net_ratelimit()) |
824 | printk("OOM in bsalg (%d)\n", __LINE__); | 824 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | (*obj)->syntax.ul[0] = ul; | 827 | (*obj)->syntax.ul[0] = ul; |
828 | break; | 828 | break; |
829 | default: | 829 | default: |
830 | kfree(id); | 830 | kfree(id); |
831 | return 0; | 831 | return 0; |
832 | } | 832 | } |
833 | 833 | ||
834 | (*obj)->syntax_len = len; | 834 | (*obj)->syntax_len = len; |
835 | (*obj)->type = type; | 835 | (*obj)->type = type; |
836 | (*obj)->id = id; | 836 | (*obj)->id = id; |
837 | (*obj)->id_len = idlen; | 837 | (*obj)->id_len = idlen; |
838 | 838 | ||
839 | if (!asn1_eoc_decode(ctx, eoc)) { | 839 | if (!asn1_eoc_decode(ctx, eoc)) { |
840 | kfree(id); | 840 | kfree(id); |
841 | kfree(*obj); | 841 | kfree(*obj); |
842 | *obj = NULL; | 842 | *obj = NULL; |
843 | return 0; | 843 | return 0; |
844 | } | 844 | } |
845 | return 1; | 845 | return 1; |
846 | } | 846 | } |
847 | 847 | ||
848 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | 848 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, |
849 | struct snmp_request *request) | 849 | struct snmp_request *request) |
850 | { | 850 | { |
851 | unsigned int cls, con, tag; | 851 | unsigned int cls, con, tag; |
852 | unsigned char *end; | 852 | unsigned char *end; |
853 | 853 | ||
854 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 854 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
855 | return 0; | 855 | return 0; |
856 | 856 | ||
857 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 857 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
858 | return 0; | 858 | return 0; |
859 | 859 | ||
860 | if (!asn1_ulong_decode(ctx, end, &request->id)) | 860 | if (!asn1_ulong_decode(ctx, end, &request->id)) |
861 | return 0; | 861 | return 0; |
862 | 862 | ||
863 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 863 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
864 | return 0; | 864 | return 0; |
865 | 865 | ||
866 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 866 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
867 | return 0; | 867 | return 0; |
868 | 868 | ||
869 | if (!asn1_uint_decode(ctx, end, &request->error_status)) | 869 | if (!asn1_uint_decode(ctx, end, &request->error_status)) |
870 | return 0; | 870 | return 0; |
871 | 871 | ||
872 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 872 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
873 | return 0; | 873 | return 0; |
874 | 874 | ||
875 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 875 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
876 | return 0; | 876 | return 0; |
877 | 877 | ||
878 | if (!asn1_uint_decode(ctx, end, &request->error_index)) | 878 | if (!asn1_uint_decode(ctx, end, &request->error_index)) |
879 | return 0; | 879 | return 0; |
880 | 880 | ||
881 | return 1; | 881 | return 1; |
882 | } | 882 | } |
883 | 883 | ||
884 | /* | 884 | /* |
885 | * Fast checksum update for possibly oddly-aligned UDP byte, from the | 885 | * Fast checksum update for possibly oddly-aligned UDP byte, from the |
886 | * code example in the draft. | 886 | * code example in the draft. |
887 | */ | 887 | */ |
888 | static void fast_csum(__sum16 *csum, | 888 | static void fast_csum(__sum16 *csum, |
889 | const unsigned char *optr, | 889 | const unsigned char *optr, |
890 | const unsigned char *nptr, | 890 | const unsigned char *nptr, |
891 | int offset) | 891 | int offset) |
892 | { | 892 | { |
893 | unsigned char s[4]; | 893 | unsigned char s[4]; |
894 | 894 | ||
895 | if (offset & 1) { | 895 | if (offset & 1) { |
896 | s[0] = s[2] = 0; | 896 | s[0] = s[2] = 0; |
897 | s[1] = ~*optr; | 897 | s[1] = ~*optr; |
898 | s[3] = *nptr; | 898 | s[3] = *nptr; |
899 | } else { | 899 | } else { |
900 | s[1] = s[3] = 0; | 900 | s[1] = s[3] = 0; |
901 | s[0] = ~*optr; | 901 | s[0] = ~*optr; |
902 | s[2] = *nptr; | 902 | s[2] = *nptr; |
903 | } | 903 | } |
904 | 904 | ||
905 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); | 905 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); |
906 | } | 906 | } |
907 | 907 | ||
908 | /* | 908 | /* |
909 | * Mangle IP address. | 909 | * Mangle IP address. |
910 | * - begin points to the start of the snmp messgae | 910 | * - begin points to the start of the snmp messgae |
911 | * - addr points to the start of the address | 911 | * - addr points to the start of the address |
912 | */ | 912 | */ |
913 | static inline void mangle_address(unsigned char *begin, | 913 | static inline void mangle_address(unsigned char *begin, |
914 | unsigned char *addr, | 914 | unsigned char *addr, |
915 | const struct oct1_map *map, | 915 | const struct oct1_map *map, |
916 | __sum16 *check) | 916 | __sum16 *check) |
917 | { | 917 | { |
918 | if (map->from == NOCT1(addr)) { | 918 | if (map->from == NOCT1(addr)) { |
919 | u_int32_t old; | 919 | u_int32_t old; |
920 | 920 | ||
921 | if (debug) | 921 | if (debug) |
922 | memcpy(&old, addr, sizeof(old)); | 922 | memcpy(&old, addr, sizeof(old)); |
923 | 923 | ||
924 | *addr = map->to; | 924 | *addr = map->to; |
925 | 925 | ||
926 | /* Update UDP checksum if being used */ | 926 | /* Update UDP checksum if being used */ |
927 | if (*check) { | 927 | if (*check) { |
928 | fast_csum(check, | 928 | fast_csum(check, |
929 | &map->from, &map->to, addr - begin); | 929 | &map->from, &map->to, addr - begin); |
930 | 930 | ||
931 | } | 931 | } |
932 | 932 | ||
933 | if (debug) | 933 | if (debug) |
934 | printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n", | 934 | printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n", |
935 | &old, addr); | 935 | &old, addr); |
936 | } | 936 | } |
937 | } | 937 | } |
938 | 938 | ||
939 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, | 939 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, |
940 | struct snmp_v1_trap *trap, | 940 | struct snmp_v1_trap *trap, |
941 | const struct oct1_map *map, | 941 | const struct oct1_map *map, |
942 | __sum16 *check) | 942 | __sum16 *check) |
943 | { | 943 | { |
944 | unsigned int cls, con, tag, len; | 944 | unsigned int cls, con, tag, len; |
945 | unsigned char *end; | 945 | unsigned char *end; |
946 | 946 | ||
947 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 947 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
948 | return 0; | 948 | return 0; |
949 | 949 | ||
950 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | 950 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) |
951 | return 0; | 951 | return 0; |
952 | 952 | ||
953 | if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) | 953 | if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) |
954 | return 0; | 954 | return 0; |
955 | 955 | ||
956 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 956 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
957 | goto err_id_free; | 957 | goto err_id_free; |
958 | 958 | ||
959 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || | 959 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || |
960 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) | 960 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) |
961 | goto err_id_free; | 961 | goto err_id_free; |
962 | 962 | ||
963 | if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) | 963 | if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) |
964 | goto err_id_free; | 964 | goto err_id_free; |
965 | 965 | ||
966 | /* IPv4 only */ | 966 | /* IPv4 only */ |
967 | if (len != 4) | 967 | if (len != 4) |
968 | goto err_addr_free; | 968 | goto err_addr_free; |
969 | 969 | ||
970 | mangle_address(ctx->begin, ctx->pointer - 4, map, check); | 970 | mangle_address(ctx->begin, ctx->pointer - 4, map, check); |
971 | 971 | ||
972 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 972 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
973 | goto err_addr_free; | 973 | goto err_addr_free; |
974 | 974 | ||
975 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 975 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
976 | goto err_addr_free; | 976 | goto err_addr_free; |
977 | 977 | ||
978 | if (!asn1_uint_decode(ctx, end, &trap->general)) | 978 | if (!asn1_uint_decode(ctx, end, &trap->general)) |
979 | goto err_addr_free; | 979 | goto err_addr_free; |
980 | 980 | ||
981 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 981 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
982 | goto err_addr_free; | 982 | goto err_addr_free; |
983 | 983 | ||
984 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 984 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
985 | goto err_addr_free; | 985 | goto err_addr_free; |
986 | 986 | ||
987 | if (!asn1_uint_decode(ctx, end, &trap->specific)) | 987 | if (!asn1_uint_decode(ctx, end, &trap->specific)) |
988 | goto err_addr_free; | 988 | goto err_addr_free; |
989 | 989 | ||
990 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 990 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
991 | goto err_addr_free; | 991 | goto err_addr_free; |
992 | 992 | ||
993 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || | 993 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || |
994 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) | 994 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) |
995 | goto err_addr_free; | 995 | goto err_addr_free; |
996 | 996 | ||
997 | if (!asn1_ulong_decode(ctx, end, &trap->time)) | 997 | if (!asn1_ulong_decode(ctx, end, &trap->time)) |
998 | goto err_addr_free; | 998 | goto err_addr_free; |
999 | 999 | ||
1000 | return 1; | 1000 | return 1; |
1001 | 1001 | ||
1002 | err_addr_free: | 1002 | err_addr_free: |
1003 | kfree((unsigned long *)trap->ip_address); | 1003 | kfree((unsigned long *)trap->ip_address); |
1004 | 1004 | ||
1005 | err_id_free: | 1005 | err_id_free: |
1006 | kfree(trap->id); | 1006 | kfree(trap->id); |
1007 | 1007 | ||
1008 | return 0; | 1008 | return 0; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | /***************************************************************************** | 1011 | /***************************************************************************** |
1012 | * | 1012 | * |
1013 | * Misc. routines | 1013 | * Misc. routines |
1014 | * | 1014 | * |
1015 | *****************************************************************************/ | 1015 | *****************************************************************************/ |
1016 | 1016 | ||
1017 | static void hex_dump(const unsigned char *buf, size_t len) | 1017 | static void hex_dump(const unsigned char *buf, size_t len) |
1018 | { | 1018 | { |
1019 | size_t i; | 1019 | size_t i; |
1020 | 1020 | ||
1021 | for (i = 0; i < len; i++) { | 1021 | for (i = 0; i < len; i++) { |
1022 | if (i && !(i % 16)) | 1022 | if (i && !(i % 16)) |
1023 | printk("\n"); | 1023 | printk("\n"); |
1024 | printk("%02x ", *(buf + i)); | 1024 | printk("%02x ", *(buf + i)); |
1025 | } | 1025 | } |
1026 | printk("\n"); | 1026 | printk("\n"); |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | /* | 1029 | /* |
1030 | * Parse and mangle SNMP message according to mapping. | 1030 | * Parse and mangle SNMP message according to mapping. |
1031 | * (And this is the fucking 'basic' method). | 1031 | * (And this is the fucking 'basic' method). |
1032 | */ | 1032 | */ |
1033 | static int snmp_parse_mangle(unsigned char *msg, | 1033 | static int snmp_parse_mangle(unsigned char *msg, |
1034 | u_int16_t len, | 1034 | u_int16_t len, |
1035 | const struct oct1_map *map, | 1035 | const struct oct1_map *map, |
1036 | __sum16 *check) | 1036 | __sum16 *check) |
1037 | { | 1037 | { |
1038 | unsigned char *eoc, *end; | 1038 | unsigned char *eoc, *end; |
1039 | unsigned int cls, con, tag, vers, pdutype; | 1039 | unsigned int cls, con, tag, vers, pdutype; |
1040 | struct asn1_ctx ctx; | 1040 | struct asn1_ctx ctx; |
1041 | struct asn1_octstr comm; | 1041 | struct asn1_octstr comm; |
1042 | struct snmp_object *obj; | 1042 | struct snmp_object *obj; |
1043 | 1043 | ||
1044 | if (debug > 1) | 1044 | if (debug > 1) |
1045 | hex_dump(msg, len); | 1045 | hex_dump(msg, len); |
1046 | 1046 | ||
1047 | asn1_open(&ctx, msg, len); | 1047 | asn1_open(&ctx, msg, len); |
1048 | 1048 | ||
1049 | /* | 1049 | /* |
1050 | * Start of SNMP message. | 1050 | * Start of SNMP message. |
1051 | */ | 1051 | */ |
1052 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | 1052 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) |
1053 | return 0; | 1053 | return 0; |
1054 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 1054 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
1055 | return 0; | 1055 | return 0; |
1056 | 1056 | ||
1057 | /* | 1057 | /* |
1058 | * Version 1 or 2 handled. | 1058 | * Version 1 or 2 handled. |
1059 | */ | 1059 | */ |
1060 | if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) | 1060 | if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) |
1061 | return 0; | 1061 | return 0; |
1062 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 1062 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
1063 | return 0; | 1063 | return 0; |
1064 | if (!asn1_uint_decode (&ctx, end, &vers)) | 1064 | if (!asn1_uint_decode (&ctx, end, &vers)) |
1065 | return 0; | 1065 | return 0; |
1066 | if (debug > 1) | 1066 | if (debug > 1) |
1067 | printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); | 1067 | printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); |
1068 | if (vers > 1) | 1068 | if (vers > 1) |
1069 | return 1; | 1069 | return 1; |
1070 | 1070 | ||
1071 | /* | 1071 | /* |
1072 | * Community. | 1072 | * Community. |
1073 | */ | 1073 | */ |
1074 | if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag)) | 1074 | if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag)) |
1075 | return 0; | 1075 | return 0; |
1076 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS) | 1076 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS) |
1077 | return 0; | 1077 | return 0; |
1078 | if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len)) | 1078 | if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len)) |
1079 | return 0; | 1079 | return 0; |
1080 | if (debug > 1) { | 1080 | if (debug > 1) { |
1081 | unsigned int i; | 1081 | unsigned int i; |
1082 | 1082 | ||
1083 | printk(KERN_DEBUG "bsalg: community: "); | 1083 | printk(KERN_DEBUG "bsalg: community: "); |
1084 | for (i = 0; i < comm.len; i++) | 1084 | for (i = 0; i < comm.len; i++) |
1085 | printk("%c", comm.data[i]); | 1085 | printk("%c", comm.data[i]); |
1086 | printk("\n"); | 1086 | printk("\n"); |
1087 | } | 1087 | } |
1088 | kfree(comm.data); | 1088 | kfree(comm.data); |
1089 | 1089 | ||
1090 | /* | 1090 | /* |
1091 | * PDU type | 1091 | * PDU type |
1092 | */ | 1092 | */ |
1093 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype)) | 1093 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype)) |
1094 | return 0; | 1094 | return 0; |
1095 | if (cls != ASN1_CTX || con != ASN1_CON) | 1095 | if (cls != ASN1_CTX || con != ASN1_CON) |
1096 | return 0; | 1096 | return 0; |
1097 | if (debug > 1) { | 1097 | if (debug > 1) { |
1098 | static const unsigned char *const pdus[] = { | 1098 | static const unsigned char *const pdus[] = { |
1099 | [SNMP_PDU_GET] = "get", | 1099 | [SNMP_PDU_GET] = "get", |
1100 | [SNMP_PDU_NEXT] = "get-next", | 1100 | [SNMP_PDU_NEXT] = "get-next", |
1101 | [SNMP_PDU_RESPONSE] = "response", | 1101 | [SNMP_PDU_RESPONSE] = "response", |
1102 | [SNMP_PDU_SET] = "set", | 1102 | [SNMP_PDU_SET] = "set", |
1103 | [SNMP_PDU_TRAP1] = "trapv1", | 1103 | [SNMP_PDU_TRAP1] = "trapv1", |
1104 | [SNMP_PDU_BULK] = "bulk", | 1104 | [SNMP_PDU_BULK] = "bulk", |
1105 | [SNMP_PDU_INFORM] = "inform", | 1105 | [SNMP_PDU_INFORM] = "inform", |
1106 | [SNMP_PDU_TRAP2] = "trapv2" | 1106 | [SNMP_PDU_TRAP2] = "trapv2" |
1107 | }; | 1107 | }; |
1108 | 1108 | ||
1109 | if (pdutype > SNMP_PDU_TRAP2) | 1109 | if (pdutype > SNMP_PDU_TRAP2) |
1110 | printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); | 1110 | printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); |
1111 | else | 1111 | else |
1112 | printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]); | 1112 | printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]); |
1113 | } | 1113 | } |
1114 | if (pdutype != SNMP_PDU_RESPONSE && | 1114 | if (pdutype != SNMP_PDU_RESPONSE && |
1115 | pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) | 1115 | pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) |
1116 | return 1; | 1116 | return 1; |
1117 | 1117 | ||
1118 | /* | 1118 | /* |
1119 | * Request header or v1 trap | 1119 | * Request header or v1 trap |
1120 | */ | 1120 | */ |
1121 | if (pdutype == SNMP_PDU_TRAP1) { | 1121 | if (pdutype == SNMP_PDU_TRAP1) { |
1122 | struct snmp_v1_trap trap; | 1122 | struct snmp_v1_trap trap; |
1123 | unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); | 1123 | unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); |
1124 | 1124 | ||
1125 | if (ret) { | 1125 | if (ret) { |
1126 | kfree(trap.id); | 1126 | kfree(trap.id); |
1127 | kfree((unsigned long *)trap.ip_address); | 1127 | kfree((unsigned long *)trap.ip_address); |
1128 | } else | 1128 | } else |
1129 | return ret; | 1129 | return ret; |
1130 | 1130 | ||
1131 | } else { | 1131 | } else { |
1132 | struct snmp_request req; | 1132 | struct snmp_request req; |
1133 | 1133 | ||
1134 | if (!snmp_request_decode(&ctx, &req)) | 1134 | if (!snmp_request_decode(&ctx, &req)) |
1135 | return 0; | 1135 | return 0; |
1136 | 1136 | ||
1137 | if (debug > 1) | 1137 | if (debug > 1) |
1138 | printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " | 1138 | printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " |
1139 | "error_index=%u\n", req.id, req.error_status, | 1139 | "error_index=%u\n", req.id, req.error_status, |
1140 | req.error_index); | 1140 | req.error_index); |
1141 | } | 1141 | } |
1142 | 1142 | ||
1143 | /* | 1143 | /* |
1144 | * Loop through objects, look for IP addresses to mangle. | 1144 | * Loop through objects, look for IP addresses to mangle. |
1145 | */ | 1145 | */ |
1146 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | 1146 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) |
1147 | return 0; | 1147 | return 0; |
1148 | 1148 | ||
1149 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 1149 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
1150 | return 0; | 1150 | return 0; |
1151 | 1151 | ||
1152 | while (!asn1_eoc_decode(&ctx, eoc)) { | 1152 | while (!asn1_eoc_decode(&ctx, eoc)) { |
1153 | unsigned int i; | 1153 | unsigned int i; |
1154 | 1154 | ||
1155 | if (!snmp_object_decode(&ctx, &obj)) { | 1155 | if (!snmp_object_decode(&ctx, &obj)) { |
1156 | if (obj) { | 1156 | if (obj) { |
1157 | kfree(obj->id); | 1157 | kfree(obj->id); |
1158 | kfree(obj); | 1158 | kfree(obj); |
1159 | } | 1159 | } |
1160 | return 0; | 1160 | return 0; |
1161 | } | 1161 | } |
1162 | 1162 | ||
1163 | if (debug > 1) { | 1163 | if (debug > 1) { |
1164 | printk(KERN_DEBUG "bsalg: object: "); | 1164 | printk(KERN_DEBUG "bsalg: object: "); |
1165 | for (i = 0; i < obj->id_len; i++) { | 1165 | for (i = 0; i < obj->id_len; i++) { |
1166 | if (i > 0) | 1166 | if (i > 0) |
1167 | printk("."); | 1167 | printk("."); |
1168 | printk("%lu", obj->id[i]); | 1168 | printk("%lu", obj->id[i]); |
1169 | } | 1169 | } |
1170 | printk(": type=%u\n", obj->type); | 1170 | printk(": type=%u\n", obj->type); |
1171 | 1171 | ||
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | if (obj->type == SNMP_IPADDR) | 1174 | if (obj->type == SNMP_IPADDR) |
1175 | mangle_address(ctx.begin, ctx.pointer - 4 , map, check); | 1175 | mangle_address(ctx.begin, ctx.pointer - 4 , map, check); |
1176 | 1176 | ||
1177 | kfree(obj->id); | 1177 | kfree(obj->id); |
1178 | kfree(obj); | 1178 | kfree(obj); |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | if (!asn1_eoc_decode(&ctx, eoc)) | 1181 | if (!asn1_eoc_decode(&ctx, eoc)) |
1182 | return 0; | 1182 | return 0; |
1183 | 1183 | ||
1184 | return 1; | 1184 | return 1; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /***************************************************************************** | 1187 | /***************************************************************************** |
1188 | * | 1188 | * |
1189 | * NAT routines. | 1189 | * NAT routines. |
1190 | * | 1190 | * |
1191 | *****************************************************************************/ | 1191 | *****************************************************************************/ |
1192 | 1192 | ||
1193 | /* | 1193 | /* |
1194 | * SNMP translation routine. | 1194 | * SNMP translation routine. |
1195 | */ | 1195 | */ |
1196 | static int snmp_translate(struct nf_conn *ct, | 1196 | static int snmp_translate(struct nf_conn *ct, |
1197 | enum ip_conntrack_info ctinfo, | 1197 | enum ip_conntrack_info ctinfo, |
1198 | struct sk_buff *skb) | 1198 | struct sk_buff *skb) |
1199 | { | 1199 | { |
1200 | struct iphdr *iph = ip_hdr(skb); | 1200 | struct iphdr *iph = ip_hdr(skb); |
1201 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); | 1201 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); |
1202 | u_int16_t udplen = ntohs(udph->len); | 1202 | u_int16_t udplen = ntohs(udph->len); |
1203 | u_int16_t paylen = udplen - sizeof(struct udphdr); | 1203 | u_int16_t paylen = udplen - sizeof(struct udphdr); |
1204 | int dir = CTINFO2DIR(ctinfo); | 1204 | int dir = CTINFO2DIR(ctinfo); |
1205 | struct oct1_map map; | 1205 | struct oct1_map map; |
1206 | 1206 | ||
1207 | /* | 1207 | /* |
1208 | * Determine mappping for application layer addresses based | 1208 | * Determine mappping for application layer addresses based |
1209 | * on NAT manipulations for the packet. | 1209 | * on NAT manipulations for the packet. |
1210 | */ | 1210 | */ |
1211 | if (dir == IP_CT_DIR_ORIGINAL) { | 1211 | if (dir == IP_CT_DIR_ORIGINAL) { |
1212 | /* SNAT traps */ | 1212 | /* SNAT traps */ |
1213 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); | 1213 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); |
1214 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | 1214 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); |
1215 | } else { | 1215 | } else { |
1216 | /* DNAT replies */ | 1216 | /* DNAT replies */ |
1217 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); | 1217 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); |
1218 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | 1218 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | if (map.from == map.to) | 1221 | if (map.from == map.to) |
1222 | return NF_ACCEPT; | 1222 | return NF_ACCEPT; |
1223 | 1223 | ||
1224 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), | 1224 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), |
1225 | paylen, &map, &udph->check)) { | 1225 | paylen, &map, &udph->check)) { |
1226 | if (net_ratelimit()) | 1226 | if (net_ratelimit()) |
1227 | printk(KERN_WARNING "bsalg: parser failed\n"); | 1227 | printk(KERN_WARNING "bsalg: parser failed\n"); |
1228 | return NF_DROP; | 1228 | return NF_DROP; |
1229 | } | 1229 | } |
1230 | return NF_ACCEPT; | 1230 | return NF_ACCEPT; |
1231 | } | 1231 | } |
1232 | 1232 | ||
/* We don't actually set up expectations, just adjust internal IP
 * addresses if this is being NATted */
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct,
		enum ip_conntrack_info ctinfo)
{
	int dir = CTINFO2DIR(ctinfo);
	unsigned int ret;
	const struct iphdr *iph = ip_hdr(skb);
	/* UDP header starts right after the (variable length) IP header. */
	const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);

	/* SNMP replies and originating SNMP traps get mangled */
	if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
		return NF_ACCEPT;
	if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	/* No NAT? Then there is nothing to fix up in the payload. */
	if (!(ct->status & IPS_NAT_MASK))
		return NF_ACCEPT;

	/*
	 * Make sure the packet length is ok. So far, we were only guaranteed
	 * to have a valid length IP header plus 8 bytes, which means we have
	 * enough room for a UDP header. Just verify the UDP length field so we
	 * can mess around with the payload.
	 */
	if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
			       &iph->saddr, &iph->daddr);
		return NF_DROP;
	}

	/* The whole payload gets rewritten in place, so the skb must be
	 * linear and writable up to its full length. */
	if (!skb_make_writable(skb, skb->len))
		return NF_DROP;

	/* NOTE(review): snmp_translate() is serialized under snmp_lock —
	 * presumably it uses shared parser state; confirm before removing. */
	spin_lock_bh(&snmp_lock);
	ret = snmp_translate(ct, ctinfo, skb);
	spin_unlock_bh(&snmp_lock);
	return ret;
}
1275 | 1275 | ||
/* Shared policy for both helpers: this module never creates
 * expectations, so max_expected is zero. */
static const struct nf_conntrack_expect_policy snmp_exp_policy = {
	.max_expected = 0,
	.timeout = 180,
};

/* Helper bound to UDP source port 161: mangles SNMP request/reply
 * payloads on NATted connections. */
static struct nf_conntrack_helper snmp_helper __read_mostly = {
	.me = THIS_MODULE,
	.help = help,
	.expect_policy = &snmp_exp_policy,
	.name = "snmp",
	.tuple.src.l3num = AF_INET,
	.tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
	.tuple.dst.protonum = IPPROTO_UDP,
};

/* Helper bound to UDP source port 162: same help() routine, covering
 * SNMP traps. */
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
	.me = THIS_MODULE,
	.help = help,
	.expect_policy = &snmp_exp_policy,
	.name = "snmp_trap",
	.tuple.src.l3num = AF_INET,
	.tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
	.tuple.dst.protonum = IPPROTO_UDP,
};
1300 | 1300 | ||
1301 | /***************************************************************************** | 1301 | /***************************************************************************** |
1302 | * | 1302 | * |
1303 | * Module stuff. | 1303 | * Module stuff. |
1304 | * | 1304 | * |
1305 | *****************************************************************************/ | 1305 | *****************************************************************************/ |
1306 | 1306 | ||
1307 | static int __init nf_nat_snmp_basic_init(void) | 1307 | static int __init nf_nat_snmp_basic_init(void) |
1308 | { | 1308 | { |
1309 | int ret = 0; | 1309 | int ret = 0; |
1310 | 1310 | ||
1311 | ret = nf_conntrack_helper_register(&snmp_helper); | 1311 | ret = nf_conntrack_helper_register(&snmp_helper); |
1312 | if (ret < 0) | 1312 | if (ret < 0) |
1313 | return ret; | 1313 | return ret; |
1314 | ret = nf_conntrack_helper_register(&snmp_trap_helper); | 1314 | ret = nf_conntrack_helper_register(&snmp_trap_helper); |
1315 | if (ret < 0) { | 1315 | if (ret < 0) { |
1316 | nf_conntrack_helper_unregister(&snmp_helper); | 1316 | nf_conntrack_helper_unregister(&snmp_helper); |
1317 | return ret; | 1317 | return ret; |
1318 | } | 1318 | } |
1319 | return ret; | 1319 | return ret; |
1320 | } | 1320 | } |
1321 | 1321 | ||
/* Module exit: drop both helper registrations made at init time. */
static void __exit nf_nat_snmp_basic_fini(void)
{
	nf_conntrack_helper_unregister(&snmp_helper);
	nf_conntrack_helper_unregister(&snmp_trap_helper);
}
1327 | 1327 | ||
1328 | module_init(nf_nat_snmp_basic_init); | 1328 | module_init(nf_nat_snmp_basic_init); |
1329 | module_exit(nf_nat_snmp_basic_fini); | 1329 | module_exit(nf_nat_snmp_basic_fini); |
1330 | 1330 | ||
1331 | module_param(debug, int, 0600); | 1331 | module_param(debug, int, 0600); |
1332 | 1332 |
net/ipv4/netfilter/nf_nat_standalone.c
1 | /* (C) 1999-2001 Paul `Rusty' Russell | 1 | /* (C) 1999-2001 Paul `Rusty' Russell |
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | 2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/icmp.h> | 9 | #include <linux/icmp.h> |
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
12 | #include <linux/netfilter.h> | 12 | #include <linux/netfilter.h> |
13 | #include <linux/netfilter_ipv4.h> | 13 | #include <linux/netfilter_ipv4.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <net/ip.h> | 17 | #include <net/ip.h> |
18 | #include <net/checksum.h> | 18 | #include <net/checksum.h> |
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | 20 | ||
21 | #include <net/netfilter/nf_conntrack.h> | 21 | #include <net/netfilter/nf_conntrack.h> |
22 | #include <net/netfilter/nf_conntrack_core.h> | 22 | #include <net/netfilter/nf_conntrack_core.h> |
23 | #include <net/netfilter/nf_conntrack_extend.h> | 23 | #include <net/netfilter/nf_conntrack_extend.h> |
24 | #include <net/netfilter/nf_nat.h> | 24 | #include <net/netfilter/nf_nat.h> |
25 | #include <net/netfilter/nf_nat_rule.h> | 25 | #include <net/netfilter/nf_nat_rule.h> |
26 | #include <net/netfilter/nf_nat_protocol.h> | 26 | #include <net/netfilter/nf_nat_protocol.h> |
27 | #include <net/netfilter/nf_nat_core.h> | 27 | #include <net/netfilter/nf_nat_core.h> |
28 | #include <net/netfilter/nf_nat_helper.h> | 28 | #include <net/netfilter/nf_nat_helper.h> |
29 | #include <linux/netfilter_ipv4/ip_tables.h> | 29 | #include <linux/netfilter_ipv4/ip_tables.h> |
30 | 30 | ||
#ifdef CONFIG_XFRM
/* True for transport protocols whose conntrack tuple carries port
 * numbers.  All of them alias the same union slot, which is why the
 * callers may read u.tcp.port regardless of the actual protocol. */
static inline bool nat_decode_proto_has_ports(u_int8_t protonum)
{
	return protonum == IPPROTO_TCP ||
	       protonum == IPPROTO_UDP ||
	       protonum == IPPROTO_UDPLITE ||
	       protonum == IPPROTO_DCCP ||
	       protonum == IPPROTO_SCTP;
}

/*
 * Fill the IPv4 flow key (fl) with the NATted addresses/ports of the
 * packet's connection so that xfrm policy lookups match what is
 * actually on the wire.  No-op for untracked packets.
 */
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	const struct nf_conntrack_tuple *t;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;
	dir = CTINFO2DIR(ctinfo);
	t = &ct->tuplehash[dir].tuple;

	/* In the original direction a rewritten destination means DNAT;
	 * in the reply direction it means SNAT. */
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	if (ct->status & statusbit) {
		fl->fl4_dst = t->dst.u3.ip;
		if (nat_decode_proto_has_ports(t->dst.protonum))
			fl->fl_ip_dport = t->dst.u.tcp.port;
	}

	/* Flip to the opposite NAT bit to handle the source side. */
	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl->fl4_src = t->src.u3.ip;
		if (nat_decode_proto_has_ports(t->dst.protonum))
			fl->fl_ip_sport = t->src.u.tcp.port;
	}
}
#endif
74 | 74 | ||
/*
 * Core NAT hook: runs for every hook point this module registers.
 * Sets up a NAT binding for new connections (via the nat table rules,
 * or a null binding on LOCAL_IN), translates related ICMP errors, and
 * finally rewrites the packet according to the binding.
 */
static unsigned int
nf_nat_fn(unsigned int hooknum,
	  struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* We never see fragments: conntrack defrags on pre-routing
	   and local-out, and nf_nat_out protects post-routing. */
	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track? It's not due to stress, or conntrack would
	   have dropped it. Hence it's the user's responsibilty to
	   packet filter it out, or implement conntrack/NAT for that
	   protocol. 8) --RR */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (ct == &nf_conntrack_untracked)
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		/* Attach the NAT extension now; atomic context, may fail. */
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED+IP_CT_IS_REPLY:
		/* ICMP errors embed the original packet; translate the
		 * embedded header instead of setting up a new binding. */
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
							   hooknum, skb))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:

		/* Seen it before? This can happen for loopback, retrans,
		   or local packets.. */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			if (hooknum == NF_INET_LOCAL_IN)
				/* LOCAL_IN hook doesn't have a chain! */
				ret = alloc_null_binding(ct, hooknum);
			else
				ret = nf_nat_rule_find(skb, hooknum, in, out,
						       ct);

			if (ret != NF_ACCEPT)
				return ret;
		} else
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
	}

	/* Apply the (now initialized) binding to this packet. */
	return nf_nat_packet(ct, ctinfo, hooknum, skb);
}
157 | 157 | ||
158 | static unsigned int | 158 | static unsigned int |
159 | nf_nat_in(unsigned int hooknum, | 159 | nf_nat_in(unsigned int hooknum, |
160 | struct sk_buff *skb, | 160 | struct sk_buff *skb, |
161 | const struct net_device *in, | 161 | const struct net_device *in, |
162 | const struct net_device *out, | 162 | const struct net_device *out, |
163 | int (*okfn)(struct sk_buff *)) | 163 | int (*okfn)(struct sk_buff *)) |
164 | { | 164 | { |
165 | unsigned int ret; | 165 | unsigned int ret; |
166 | __be32 daddr = ip_hdr(skb)->daddr; | 166 | __be32 daddr = ip_hdr(skb)->daddr; |
167 | 167 | ||
168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); | 168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
169 | if (ret != NF_DROP && ret != NF_STOLEN && | 169 | if (ret != NF_DROP && ret != NF_STOLEN && |
170 | daddr != ip_hdr(skb)->daddr) | 170 | daddr != ip_hdr(skb)->daddr) |
171 | skb_dst_drop(skb); | 171 | skb_dst_drop(skb); |
172 | 172 | ||
173 | return ret; | 173 | return ret; |
174 | } | 174 | } |
175 | 175 | ||
/*
 * POST_ROUTING wrapper around nf_nat_fn() (source NAT).  With XFRM
 * enabled, a packet whose tuple was rewritten is rerouted through the
 * IPsec stack so that policies match the translated addresses/ports.
 */
static unsigned int
nf_nat_out(unsigned int hooknum,
	   struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
#endif
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* Source address or port differs from what the peer will
		 * see as destination => SNAT happened; re-run xfrm lookup. */
		if ((ct->tuplehash[dir].tuple.src.u3.ip !=
		     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
		    (ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all)
		   )
			return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
	}
#endif
	return ret;
}
210 | 210 | ||
/*
 * LOCAL_OUT wrapper around nf_nat_fn() (destination NAT for locally
 * generated packets).  If DNAT changed the destination address, the
 * packet must be rerouted; if only the port changed, an xfrm re-lookup
 * is sufficient.
 */
static unsigned int
nf_nat_local_fn(unsigned int hooknum,
		struct sk_buff *skb,
		const struct net_device *in,
		const struct net_device *out,
		int (*okfn)(struct sk_buff *))
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* Destination address was rewritten => reroute. */
		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			if (ip_route_me_harder(skb, RTN_UNSPEC))
				ret = NF_DROP;
		}
#ifdef CONFIG_XFRM
		/* Only the port changed => xfrm policy re-lookup. */
		else if (ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all)
			if (ip_xfrm_me_harder(skb))
				ret = NF_DROP;
#endif
	}
	return ret;
}
246 | 246 | ||
/* We must be after connection tracking and before packet filtering. */

static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
	/* Before packet filtering, change destination (DNAT on input) */
	{
		.hook = nf_nat_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_NAT_DST,
	},
	/* After packet filtering, change source (SNAT on output) */
	{
		.hook = nf_nat_out,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_NAT_SRC,
	},
	/* Before packet filtering, change destination (locally
	 * generated traffic) */
	{
		.hook = nf_nat_local_fn,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_NAT_SRC,
	},
	/* After packet filtering, change source (locally destined
	 * traffic; handled by nf_nat_fn directly, no chain exists) */
	{
		.hook = nf_nat_fn,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = NF_IP_PRI_NAT_SRC,
	},
};
283 | 283 | ||
284 | static int __init nf_nat_standalone_init(void) | 284 | static int __init nf_nat_standalone_init(void) |
285 | { | 285 | { |
286 | int ret = 0; | 286 | int ret = 0; |
287 | 287 | ||
288 | need_ipv4_conntrack(); | 288 | need_ipv4_conntrack(); |
289 | 289 | ||
290 | #ifdef CONFIG_XFRM | 290 | #ifdef CONFIG_XFRM |
291 | BUG_ON(ip_nat_decode_session != NULL); | 291 | BUG_ON(ip_nat_decode_session != NULL); |
292 | rcu_assign_pointer(ip_nat_decode_session, nat_decode_session); | 292 | rcu_assign_pointer(ip_nat_decode_session, nat_decode_session); |
293 | #endif | 293 | #endif |
294 | ret = nf_nat_rule_init(); | 294 | ret = nf_nat_rule_init(); |
295 | if (ret < 0) { | 295 | if (ret < 0) { |
296 | printk("nf_nat_init: can't setup rules.\n"); | 296 | pr_err("nf_nat_init: can't setup rules.\n"); |
297 | goto cleanup_decode_session; | 297 | goto cleanup_decode_session; |
298 | } | 298 | } |
299 | ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); | 299 | ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); |
300 | if (ret < 0) { | 300 | if (ret < 0) { |
301 | printk("nf_nat_init: can't register hooks.\n"); | 301 | pr_err("nf_nat_init: can't register hooks.\n"); |
302 | goto cleanup_rule_init; | 302 | goto cleanup_rule_init; |
303 | } | 303 | } |
304 | return ret; | 304 | return ret; |
305 | 305 | ||
306 | cleanup_rule_init: | 306 | cleanup_rule_init: |
307 | nf_nat_rule_cleanup(); | 307 | nf_nat_rule_cleanup(); |
308 | cleanup_decode_session: | 308 | cleanup_decode_session: |
309 | #ifdef CONFIG_XFRM | 309 | #ifdef CONFIG_XFRM |
310 | rcu_assign_pointer(ip_nat_decode_session, NULL); | 310 | rcu_assign_pointer(ip_nat_decode_session, NULL); |
311 | synchronize_net(); | 311 | synchronize_net(); |
312 | #endif | 312 | #endif |
313 | return ret; | 313 | return ret; |
314 | } | 314 | } |
315 | 315 | ||
/* Module exit: reverse of nf_nat_standalone_init() — unhook, drop the
 * rule table, and clear the xfrm callback (synchronize_net() waits out
 * any in-flight RCU readers of the pointer). */
static void __exit nf_nat_standalone_fini(void)
{
	nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
	nf_nat_rule_cleanup();
#ifdef CONFIG_XFRM
	rcu_assign_pointer(ip_nat_decode_session, NULL);
	synchronize_net();
#endif
	/* Conntrack caches are unregistered in nf_conntrack_cleanup */
}
326 | 326 | ||
327 | module_init(nf_nat_standalone_init); | 327 | module_init(nf_nat_standalone_init); |
328 | module_exit(nf_nat_standalone_fini); | 328 | module_exit(nf_nat_standalone_fini); |
329 | 329 | ||
330 | MODULE_LICENSE("GPL"); | 330 | MODULE_LICENSE("GPL"); |
331 | MODULE_ALIAS("ip_nat"); | 331 | MODULE_ALIAS("ip_nat"); |
332 | 332 |
net/ipv6/netfilter/ip6_tables.c
1 | /* | 1 | /* |
2 | * Packet matching code. | 2 | * Packet matching code. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/poison.h> | 19 | #include <linux/poison.h> |
20 | #include <linux/icmpv6.h> | 20 | #include <linux/icmpv6.h> |
21 | #include <net/ipv6.h> | 21 | #include <net/ipv6.h> |
22 | #include <net/compat.h> | 22 | #include <net/compat.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
27 | #include <linux/cpumask.h> | 27 | #include <linux/cpumask.h> |
28 | 28 | ||
29 | #include <linux/netfilter_ipv6/ip6_tables.h> | 29 | #include <linux/netfilter_ipv6/ip6_tables.h> |
30 | #include <linux/netfilter/x_tables.h> | 30 | #include <linux/netfilter/x_tables.h> |
31 | #include <net/netfilter/nf_log.h> | 31 | #include <net/netfilter/nf_log.h> |
32 | #include "../../netfilter/xt_repldata.h" | 32 | #include "../../netfilter/xt_repldata.h" |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 35 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
36 | MODULE_DESCRIPTION("IPv6 packet filter"); | 36 | MODULE_DESCRIPTION("IPv6 packet filter"); |
37 | 37 | ||
38 | /*#define DEBUG_IP_FIREWALL*/ | 38 | /*#define DEBUG_IP_FIREWALL*/ |
39 | /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ | 39 | /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ |
40 | /*#define DEBUG_IP_FIREWALL_USER*/ | 40 | /*#define DEBUG_IP_FIREWALL_USER*/ |
41 | 41 | ||
42 | #ifdef DEBUG_IP_FIREWALL | 42 | #ifdef DEBUG_IP_FIREWALL |
43 | #define dprintf(format, args...) pr_info(format , ## args) | 43 | #define dprintf(format, args...) pr_info(format , ## args) |
44 | #else | 44 | #else |
45 | #define dprintf(format, args...) | 45 | #define dprintf(format, args...) |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #ifdef DEBUG_IP_FIREWALL_USER | 48 | #ifdef DEBUG_IP_FIREWALL_USER |
49 | #define duprintf(format, args...) pr_info(format , ## args) | 49 | #define duprintf(format, args...) pr_info(format , ## args) |
50 | #else | 50 | #else |
51 | #define duprintf(format, args...) | 51 | #define duprintf(format, args...) |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #ifdef CONFIG_NETFILTER_DEBUG | 54 | #ifdef CONFIG_NETFILTER_DEBUG |
55 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) | 55 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) |
56 | #else | 56 | #else |
57 | #define IP_NF_ASSERT(x) | 57 | #define IP_NF_ASSERT(x) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #if 0 | 60 | #if 0 |
61 | /* All the better to debug you with... */ | 61 | /* All the better to debug you with... */ |
62 | #define static | 62 | #define static |
63 | #define inline | 63 | #define inline |
64 | #endif | 64 | #endif |
65 | 65 | ||
/* Allocate and pre-fill an initial ip6_tables replacement blob.
 * NOTE(review): "info" looks unused here, but the
 * xt_alloc_initial_table() macro (xt_repldata.h) references it in its
 * expansion — confirm before "fixing" the apparent unused parameter. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71 | 71 | ||
72 | /* | 72 | /* |
73 | We keep a set of rules for each CPU, so we can avoid write-locking | 73 | We keep a set of rules for each CPU, so we can avoid write-locking |
74 | them in the softirq when updating the counters and therefore | 74 | them in the softirq when updating the counters and therefore |
75 | only need to read-lock in the softirq; doing a write_lock_bh() in user | 75 | only need to read-lock in the softirq; doing a write_lock_bh() in user |
76 | context stops packets coming through and allows user context to read | 76 | context stops packets coming through and allows user context to read |
77 | the counters or update the rules. | 77 | the counters or update the rules. |
78 | 78 | ||
79 | Hence the start of any table is given by get_table() below. */ | 79 | Hence the start of any table is given by get_table() below. */ |
80 | 80 | ||
/* Check for an extension */
/* Returns non-zero iff nexthdr names one of the IPv6 extension headers
 * (hop-by-hop, routing, fragment, ESP, AH, no-next-header, dest opts). */
int
ip6t_ext_hdr(u8 nexthdr)
{
	switch (nexthdr) {
	case IPPROTO_HOPOPTS:
	case IPPROTO_ROUTING:
	case IPPROTO_FRAGMENT:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_NONE:
	case IPPROTO_DSTOPTS:
		return 1;
	default:
		return 0;
	}
}
93 | 93 | ||
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/* Checks skb against the address/interface/protocol selectors of one
 * rule.  On a protocol match, *protoff is set to the transport header
 * offset and *fragoff to the fragment offset; *hotdrop is set when the
 * packet is malformed enough that it must be dropped outright. */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the comparison result with the rule's inversion flag: true means
 * "mismatch" for a normal selector, "match" for an inverted (!) one. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Header chain unparseable; only hotdrop when the
			 * packet is not a non-first fragment (_frag_off == 0),
			 * i.e. the error is not merely a missing upper header. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
176 | 176 | ||
177 | /* should be ip6 safe */ | 177 | /* should be ip6 safe */ |
178 | static bool | 178 | static bool |
179 | ip6_checkentry(const struct ip6t_ip6 *ipv6) | 179 | ip6_checkentry(const struct ip6t_ip6 *ipv6) |
180 | { | 180 | { |
181 | if (ipv6->flags & ~IP6T_F_MASK) { | 181 | if (ipv6->flags & ~IP6T_F_MASK) { |
182 | duprintf("Unknown flag bits set: %08X\n", | 182 | duprintf("Unknown flag bits set: %08X\n", |
183 | ipv6->flags & ~IP6T_F_MASK); | 183 | ipv6->flags & ~IP6T_F_MASK); |
184 | return false; | 184 | return false; |
185 | } | 185 | } |
186 | if (ipv6->invflags & ~IP6T_INV_MASK) { | 186 | if (ipv6->invflags & ~IP6T_INV_MASK) { |
187 | duprintf("Unknown invflag bits set: %08X\n", | 187 | duprintf("Unknown invflag bits set: %08X\n", |
188 | ipv6->invflags & ~IP6T_INV_MASK); | 188 | ipv6->invflags & ~IP6T_INV_MASK); |
189 | return false; | 189 | return false; |
190 | } | 190 | } |
191 | return true; | 191 | return true; |
192 | } | 192 | } |
193 | 193 | ||
194 | static unsigned int | 194 | static unsigned int |
195 | ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) | 195 | ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) |
196 | { | 196 | { |
197 | if (net_ratelimit()) | 197 | if (net_ratelimit()) |
198 | pr_info("error: `%s'\n", (const char *)par->targinfo); | 198 | pr_info("error: `%s'\n", (const char *)par->targinfo); |
199 | 199 | ||
200 | return NF_DROP; | 200 | return NF_DROP; |
201 | } | 201 | } |
202 | 202 | ||
/* Return the rule located 'offset' bytes into the ruleset blob 'base'. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *blob = base;

	return (struct ip6t_entry *)(blob + offset);
}
208 | 208 | ||
209 | /* All zeroes == unconditional rule. */ | 209 | /* All zeroes == unconditional rule. */ |
210 | /* Mildly perf critical (only if packet tracing is on) */ | 210 | /* Mildly perf critical (only if packet tracing is on) */ |
211 | static inline bool unconditional(const struct ip6t_ip6 *ipv6) | 211 | static inline bool unconditional(const struct ip6t_ip6 *ipv6) |
212 | { | 212 | { |
213 | static const struct ip6t_ip6 uncond; | 213 | static const struct ip6t_ip6 uncond; |
214 | 214 | ||
215 | return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; | 215 | return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; |
216 | } | 216 | } |
217 | 217 | ||
/* const-correct accessor for a rule's target: casts away const only to
 * reuse ip6t_get_target(); the result is re-qualified const for callers
 * that must not modify the entry. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
223 | 223 | ||
224 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 224 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
225 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 225 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
/* This cries for unification! */
/* Netfilter hook number -> user-visible chain name, used only when
 * building TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};
234 | 234 | ||
/* Indexes into comments[] below: which annotation a TRACE line carries
 * (ordinary rule match, chain return, or chain policy). */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};
240 | 240 | ||
/* TRACE annotation strings, indexed by enum nf_ip_trace_comments. */
static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};
246 | 246 | ||
/* Fixed logging parameters for TRACE output.  level 4 corresponds to
 * the syslog warning severity; NF_LOG_MASK requests all optional log
 * detail flags (presumably the maximal output -- confirm against the
 * nf_log flag definitions). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
256 | 256 | ||
/* Mildly perf critical (only if packet tracing is on) */
/* Walk helper for trace_packet(): called for each rule 's' in the chain
 * until the matched rule 'e' is found.  Tracks the current chain name
 * (updated at each ERROR target, which marks a user-chain head) and the
 * 1-based rule number within that chain.  Returns 1 once 'e' is reached,
 * 0 to continue scanning. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IP6T_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* A built-in chain's tail is its policy; a user
			 * chain's tail is an implicit return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
288 | 288 | ||
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet, identifying where rule 'e' sits in table 'private'.  Runs on
 * the per-CPU copy of the ruleset; caller holds the xt_info read lock. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	/* Defaults: the hook's own chain, annotated as a plain rule match. */
	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Scan from the hook entry point until 'e' is located, letting the
	 * helper track the enclosing chain name and rule number. */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
318 | #endif | 318 | #endif |
319 | 319 | ||
320 | static inline __pure struct ip6t_entry * | 320 | static inline __pure struct ip6t_entry * |
321 | ip6t_next_entry(const struct ip6t_entry *entry) | 321 | ip6t_next_entry(const struct ip6t_entry *entry) |
322 | { | 322 | { |
323 | return (void *)entry + entry->next_offset; | 323 | return (void *)entry + entry->next_offset; |
324 | } | 324 | } |
325 | 325 | ||
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main ip6tables packet-processing loop: walks the rules of 'table' for
 * the given hook on the current CPU's ruleset copy, evaluating matches
 * and targets until a final verdict is reached.  Jumps to user chains
 * are tracked on a per-CPU jumpstack so RETURN can pop back. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	xt_info_rdlock_bh();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = &private->stackptr[cpu];
	/* Remember the stack depth so re-entrant invocations (e.g. from a
	 * target that re-injects packets) can be unwound on exit. */
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct ip6t_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* IP selector matched; now run each extension match. */
		xt_ematch_foreach(ematch, e) {
			acpar.match = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					/* Negative verdicts encode NF_* codes
					 * as -(code)-1. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0)
					/* RETURN from a base chain: fall
					 * through to the hook's underflow
					 * (policy) rule. */
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* Non-tail-call jumps push a return address unless
			 * the rule used goto (IP6T_F_GOTO). */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_info_rdunlock_bh();
	*stackptr = origptr;

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
457 | 457 | ||
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of the rule graph from each valid hook entry point.
 * Uses e->counters.pcnt as a temporary back-pointer (restored to 0 on
 * unwind) and bit NF_INET_NUMHOOKS of e->comefrom as an on-path marker:
 * revisiting a rule with that bit still set means a cycle. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ip6t_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				/* Rule is already on the current DFS path:
				 * the ruleset contains a loop. */
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Clear the on-path marker while
					 * unwinding. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
569 | 569 | ||
570 | static void cleanup_match(struct ip6t_entry_match *m, struct net *net) | 570 | static void cleanup_match(struct ip6t_entry_match *m, struct net *net) |
571 | { | 571 | { |
572 | struct xt_mtdtor_param par; | 572 | struct xt_mtdtor_param par; |
573 | 573 | ||
574 | par.net = net; | 574 | par.net = net; |
575 | par.match = m->u.kernel.match; | 575 | par.match = m->u.kernel.match; |
576 | par.matchinfo = m->data; | 576 | par.matchinfo = m->data; |
577 | par.family = NFPROTO_IPV6; | 577 | par.family = NFPROTO_IPV6; |
578 | if (par.match->destroy != NULL) | 578 | if (par.match->destroy != NULL) |
579 | par.match->destroy(&par); | 579 | par.match->destroy(&par); |
580 | module_put(par.match->me); | 580 | module_put(par.match->me); |
581 | } | 581 | } |
582 | 582 | ||
583 | static int | 583 | static int |
584 | check_entry(const struct ip6t_entry *e, const char *name) | 584 | check_entry(const struct ip6t_entry *e, const char *name) |
585 | { | 585 | { |
586 | const struct ip6t_entry_target *t; | 586 | const struct ip6t_entry_target *t; |
587 | 587 | ||
588 | if (!ip6_checkentry(&e->ipv6)) { | 588 | if (!ip6_checkentry(&e->ipv6)) { |
589 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | 589 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); |
590 | return -EINVAL; | 590 | return -EINVAL; |
591 | } | 591 | } |
592 | 592 | ||
593 | if (e->target_offset + sizeof(struct ip6t_entry_target) > | 593 | if (e->target_offset + sizeof(struct ip6t_entry_target) > |
594 | e->next_offset) | 594 | e->next_offset) |
595 | return -EINVAL; | 595 | return -EINVAL; |
596 | 596 | ||
597 | t = ip6t_get_target_c(e); | 597 | t = ip6t_get_target_c(e); |
598 | if (e->target_offset + t->u.target_size > e->next_offset) | 598 | if (e->target_offset + t->u.target_size > e->next_offset) |
599 | return -EINVAL; | 599 | return -EINVAL; |
600 | 600 | ||
601 | return 0; | 601 | return 0; |
602 | } | 602 | } |
603 | 603 | ||
604 | static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par) | 604 | static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par) |
605 | { | 605 | { |
606 | const struct ip6t_ip6 *ipv6 = par->entryinfo; | 606 | const struct ip6t_ip6 *ipv6 = par->entryinfo; |
607 | int ret; | 607 | int ret; |
608 | 608 | ||
609 | par->match = m->u.kernel.match; | 609 | par->match = m->u.kernel.match; |
610 | par->matchinfo = m->data; | 610 | par->matchinfo = m->data; |
611 | 611 | ||
612 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), | 612 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), |
613 | ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); | 613 | ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); |
614 | if (ret < 0) { | 614 | if (ret < 0) { |
615 | duprintf("ip_tables: check failed for `%s'.\n", | 615 | duprintf("ip_tables: check failed for `%s'.\n", |
616 | par.match->name); | 616 | par.match->name); |
617 | return ret; | 617 | return ret; |
618 | } | 618 | } |
619 | return 0; | 619 | return 0; |
620 | } | 620 | } |
621 | 621 | ||
622 | static int | 622 | static int |
623 | find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par) | 623 | find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par) |
624 | { | 624 | { |
625 | struct xt_match *match; | 625 | struct xt_match *match; |
626 | int ret; | 626 | int ret; |
627 | 627 | ||
628 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, | 628 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
629 | m->u.user.revision); | 629 | m->u.user.revision); |
630 | if (IS_ERR(match)) { | 630 | if (IS_ERR(match)) { |
631 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 631 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
632 | return PTR_ERR(match); | 632 | return PTR_ERR(match); |
633 | } | 633 | } |
634 | m->u.kernel.match = match; | 634 | m->u.kernel.match = match; |
635 | 635 | ||
636 | ret = check_match(m, par); | 636 | ret = check_match(m, par); |
637 | if (ret) | 637 | if (ret) |
638 | goto err; | 638 | goto err; |
639 | 639 | ||
640 | return 0; | 640 | return 0; |
641 | err: | 641 | err: |
642 | module_put(m->u.kernel.match->me); | 642 | module_put(m->u.kernel.match->me); |
643 | return ret; | 643 | return ret; |
644 | } | 644 | } |
645 | 645 | ||
646 | static int check_target(struct ip6t_entry *e, struct net *net, const char *name) | 646 | static int check_target(struct ip6t_entry *e, struct net *net, const char *name) |
647 | { | 647 | { |
648 | struct ip6t_entry_target *t = ip6t_get_target(e); | 648 | struct ip6t_entry_target *t = ip6t_get_target(e); |
649 | struct xt_tgchk_param par = { | 649 | struct xt_tgchk_param par = { |
650 | .net = net, | 650 | .net = net, |
651 | .table = name, | 651 | .table = name, |
652 | .entryinfo = e, | 652 | .entryinfo = e, |
653 | .target = t->u.kernel.target, | 653 | .target = t->u.kernel.target, |
654 | .targinfo = t->data, | 654 | .targinfo = t->data, |
655 | .hook_mask = e->comefrom, | 655 | .hook_mask = e->comefrom, |
656 | .family = NFPROTO_IPV6, | 656 | .family = NFPROTO_IPV6, |
657 | }; | 657 | }; |
658 | int ret; | 658 | int ret; |
659 | 659 | ||
660 | t = ip6t_get_target(e); | 660 | t = ip6t_get_target(e); |
661 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), | 661 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), |
662 | e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); | 662 | e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); |
663 | if (ret < 0) { | 663 | if (ret < 0) { |
664 | duprintf("ip_tables: check failed for `%s'.\n", | 664 | duprintf("ip_tables: check failed for `%s'.\n", |
665 | t->u.kernel.target->name); | 665 | t->u.kernel.target->name); |
666 | return ret; | 666 | return ret; |
667 | } | 667 | } |
668 | return 0; | 668 | return 0; |
669 | } | 669 | } |
670 | 670 | ||
/*
 * Fully validate one rule: run every match's checkentry hook, resolve
 * and check the target, taking a module reference for each extension.
 * On any failure, all references taken so far are dropped again.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): @size appears unused here — presumably kept for
 * signature symmetry with the other table walkers; confirm.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* Structural sanity first (offsets and sizes). */
	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts the matches successfully checked so far, so the error
	 * path knows exactly how many to unwind. */
	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	/* Resolve the target extension, loading its module if needed. */
	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the first j matches — the ones that passed check. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
723 | 723 | ||
724 | static bool check_underflow(const struct ip6t_entry *e) | 724 | static bool check_underflow(const struct ip6t_entry *e) |
725 | { | 725 | { |
726 | const struct ip6t_entry_target *t; | 726 | const struct ip6t_entry_target *t; |
727 | unsigned int verdict; | 727 | unsigned int verdict; |
728 | 728 | ||
729 | if (!unconditional(&e->ipv6)) | 729 | if (!unconditional(&e->ipv6)) |
730 | return false; | 730 | return false; |
731 | t = ip6t_get_target_c(e); | 731 | t = ip6t_get_target_c(e); |
732 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 732 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
733 | return false; | 733 | return false; |
734 | verdict = ((struct ip6t_standard_target *)t)->verdict; | 734 | verdict = ((struct ip6t_standard_target *)t)->verdict; |
735 | verdict = -verdict - 1; | 735 | verdict = -verdict - 1; |
736 | return verdict == NF_DROP || verdict == NF_ACCEPT; | 736 | return verdict == NF_DROP || verdict == NF_ACCEPT; |
737 | } | 737 | } |
738 | 738 | ||
/*
 * Validate the size/alignment of one entry and record any hook entry
 * points and underflows found at this offset into @newinfo.
 *
 * @e:            entry being checked
 * @newinfo:      table info under construction; hook_entry[]/underflow[]
 *                are filled in as matching offsets are encountered
 * @base, @limit: bounds of the blob the entries live in
 * @hook_entries: user-supplied per-hook start offsets
 * @underflows:   user-supplied per-hook underflow offsets
 * @valid_hooks:  bitmask of hooks this table actually uses
 *
 * Returns 0 on success, -EINVAL on a malformed entry.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	/* Entry must be properly aligned and must leave room for itself
	 * before the end of the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover at least the entry plus a target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/* Underflows must be bare ACCEPT/DROP policies. */
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
785 | 785 | ||
786 | static void cleanup_entry(struct ip6t_entry *e, struct net *net) | 786 | static void cleanup_entry(struct ip6t_entry *e, struct net *net) |
787 | { | 787 | { |
788 | struct xt_tgdtor_param par; | 788 | struct xt_tgdtor_param par; |
789 | struct ip6t_entry_target *t; | 789 | struct ip6t_entry_target *t; |
790 | struct xt_entry_match *ematch; | 790 | struct xt_entry_match *ematch; |
791 | 791 | ||
792 | /* Cleanup all matches */ | 792 | /* Cleanup all matches */ |
793 | xt_ematch_foreach(ematch, e) | 793 | xt_ematch_foreach(ematch, e) |
794 | cleanup_match(ematch, net); | 794 | cleanup_match(ematch, net); |
795 | t = ip6t_get_target(e); | 795 | t = ip6t_get_target(e); |
796 | 796 | ||
797 | par.net = net; | 797 | par.net = net; |
798 | par.target = t->u.kernel.target; | 798 | par.target = t->u.kernel.target; |
799 | par.targinfo = t->data; | 799 | par.targinfo = t->data; |
800 | par.family = NFPROTO_IPV6; | 800 | par.family = NFPROTO_IPV6; |
801 | if (par.target->destroy != NULL) | 801 | if (par.target->destroy != NULL) |
802 | par.target->destroy(&par); | 802 | par.target->destroy(&par); |
803 | module_put(par.target->me); | 803 | module_put(par.target->me); |
804 | } | 804 | } |
805 | 805 | ||
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each ERROR target marks a user-defined chain; their
		 * count bounds the jump stack needed at run time. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Reject rule sets containing jump loops. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that were fully checked. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
894 | 894 | ||
/*
 * Snapshot every rule's packet/byte counters into @counters: the
 * current CPU's values are copied in (SET), then every other CPU's
 * per-CPU copy is added on top under that CPU's xt_info write lock.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for (each = 0, 0; 0; ) ; /* (placeholder removed) */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		/* Exclude concurrent updaters of this CPU's copy. */
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
935 | 935 | ||
936 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 936 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
937 | { | 937 | { |
938 | unsigned int countersize; | 938 | unsigned int countersize; |
939 | struct xt_counters *counters; | 939 | struct xt_counters *counters; |
940 | const struct xt_table_info *private = table->private; | 940 | const struct xt_table_info *private = table->private; |
941 | 941 | ||
942 | /* We need atomic snapshot of counters: rest doesn't change | 942 | /* We need atomic snapshot of counters: rest doesn't change |
943 | (other than comefrom, which userspace doesn't care | 943 | (other than comefrom, which userspace doesn't care |
944 | about). */ | 944 | about). */ |
945 | countersize = sizeof(struct xt_counters) * private->number; | 945 | countersize = sizeof(struct xt_counters) * private->number; |
946 | counters = vmalloc_node(countersize, numa_node_id()); | 946 | counters = vmalloc_node(countersize, numa_node_id()); |
947 | 947 | ||
948 | if (counters == NULL) | 948 | if (counters == NULL) |
949 | return ERR_PTR(-ENOMEM); | 949 | return ERR_PTR(-ENOMEM); |
950 | 950 | ||
951 | get_counters(private, counters); | 951 | get_counters(private, counters); |
952 | 952 | ||
953 | return counters; | 953 | return counters; |
954 | } | 954 | } |
955 | 955 | ||
/*
 * Copy the table's rules back to userspace, then patch up each entry:
 * the counters are replaced with a fresh snapshot, and the kernel-side
 * match/target pointers are overwritten with the user-visible
 * extension names.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite this entry's counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fixup for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1029 | 1029 | ||
1030 | #ifdef CONFIG_COMPAT | 1030 | #ifdef CONFIG_COMPAT |
1031 | static void compat_standard_from_user(void *dst, const void *src) | 1031 | static void compat_standard_from_user(void *dst, const void *src) |
1032 | { | 1032 | { |
1033 | int v = *(compat_int_t *)src; | 1033 | int v = *(compat_int_t *)src; |
1034 | 1034 | ||
1035 | if (v > 0) | 1035 | if (v > 0) |
1036 | v += xt_compat_calc_jump(AF_INET6, v); | 1036 | v += xt_compat_calc_jump(AF_INET6, v); |
1037 | memcpy(dst, &v, sizeof(v)); | 1037 | memcpy(dst, &v, sizeof(v)); |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | static int compat_standard_to_user(void __user *dst, const void *src) | 1040 | static int compat_standard_to_user(void __user *dst, const void *src) |
1041 | { | 1041 | { |
1042 | compat_int_t cv = *(int *)src; | 1042 | compat_int_t cv = *(int *)src; |
1043 | 1043 | ||
1044 | if (cv > 0) | 1044 | if (cv > 0) |
1045 | cv -= xt_compat_calc_jump(AF_INET6, cv); | 1045 | cv -= xt_compat_calc_jump(AF_INET6, cv); |
1046 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; | 1046 | return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; |
1047 | } | 1047 | } |
1048 | 1048 | ||
/*
 * Compute how much smaller this entry is in compat (32-bit) layout,
 * shrink newinfo->size accordingly, record the per-entry delta for
 * later jump translation, and shift any hook entry points / underflows
 * that lie beyond this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Native entry header is larger than the compat header. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	/* Each match/target may also shrink in compat layout. */
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	/* Remember this entry's cumulative delta for jump fixups. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1079 | 1079 | ||
/*
 * Build the compat (32-bit ABI) view of an existing table: copy the hook
 * layout from @info into @newinfo and let compat_calc_entry() shrink the
 * per-entry sizes/offsets to their compat equivalents.
 * Returns 0 on success or the first error from compat_calc_entry().
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	/* Any per-cpu copy will do -- they all contain the same rules. */
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1101 | #endif | 1101 | #endif |
1102 | 1102 | ||
1103 | static int get_info(struct net *net, void __user *user, | 1103 | static int get_info(struct net *net, void __user *user, |
1104 | const int *len, int compat) | 1104 | const int *len, int compat) |
1105 | { | 1105 | { |
1106 | char name[IP6T_TABLE_MAXNAMELEN]; | 1106 | char name[IP6T_TABLE_MAXNAMELEN]; |
1107 | struct xt_table *t; | 1107 | struct xt_table *t; |
1108 | int ret; | 1108 | int ret; |
1109 | 1109 | ||
1110 | if (*len != sizeof(struct ip6t_getinfo)) { | 1110 | if (*len != sizeof(struct ip6t_getinfo)) { |
1111 | duprintf("length %u != %zu\n", *len, | 1111 | duprintf("length %u != %zu\n", *len, |
1112 | sizeof(struct ip6t_getinfo)); | 1112 | sizeof(struct ip6t_getinfo)); |
1113 | return -EINVAL; | 1113 | return -EINVAL; |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | if (copy_from_user(name, user, sizeof(name)) != 0) | 1116 | if (copy_from_user(name, user, sizeof(name)) != 0) |
1117 | return -EFAULT; | 1117 | return -EFAULT; |
1118 | 1118 | ||
1119 | name[IP6T_TABLE_MAXNAMELEN-1] = '\0'; | 1119 | name[IP6T_TABLE_MAXNAMELEN-1] = '\0'; |
1120 | #ifdef CONFIG_COMPAT | 1120 | #ifdef CONFIG_COMPAT |
1121 | if (compat) | 1121 | if (compat) |
1122 | xt_compat_lock(AF_INET6); | 1122 | xt_compat_lock(AF_INET6); |
1123 | #endif | 1123 | #endif |
1124 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), | 1124 | t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), |
1125 | "ip6table_%s", name); | 1125 | "ip6table_%s", name); |
1126 | if (t && !IS_ERR(t)) { | 1126 | if (t && !IS_ERR(t)) { |
1127 | struct ip6t_getinfo info; | 1127 | struct ip6t_getinfo info; |
1128 | const struct xt_table_info *private = t->private; | 1128 | const struct xt_table_info *private = t->private; |
1129 | #ifdef CONFIG_COMPAT | 1129 | #ifdef CONFIG_COMPAT |
1130 | struct xt_table_info tmp; | 1130 | struct xt_table_info tmp; |
1131 | 1131 | ||
1132 | if (compat) { | 1132 | if (compat) { |
1133 | ret = compat_table_info(private, &tmp); | 1133 | ret = compat_table_info(private, &tmp); |
1134 | xt_compat_flush_offsets(AF_INET6); | 1134 | xt_compat_flush_offsets(AF_INET6); |
1135 | private = &tmp; | 1135 | private = &tmp; |
1136 | } | 1136 | } |
1137 | #endif | 1137 | #endif |
1138 | info.valid_hooks = t->valid_hooks; | 1138 | info.valid_hooks = t->valid_hooks; |
1139 | memcpy(info.hook_entry, private->hook_entry, | 1139 | memcpy(info.hook_entry, private->hook_entry, |
1140 | sizeof(info.hook_entry)); | 1140 | sizeof(info.hook_entry)); |
1141 | memcpy(info.underflow, private->underflow, | 1141 | memcpy(info.underflow, private->underflow, |
1142 | sizeof(info.underflow)); | 1142 | sizeof(info.underflow)); |
1143 | info.num_entries = private->number; | 1143 | info.num_entries = private->number; |
1144 | info.size = private->size; | 1144 | info.size = private->size; |
1145 | strcpy(info.name, name); | 1145 | strcpy(info.name, name); |
1146 | 1146 | ||
1147 | if (copy_to_user(user, &info, *len) != 0) | 1147 | if (copy_to_user(user, &info, *len) != 0) |
1148 | ret = -EFAULT; | 1148 | ret = -EFAULT; |
1149 | else | 1149 | else |
1150 | ret = 0; | 1150 | ret = 0; |
1151 | 1151 | ||
1152 | xt_table_unlock(t); | 1152 | xt_table_unlock(t); |
1153 | module_put(t->me); | 1153 | module_put(t->me); |
1154 | } else | 1154 | } else |
1155 | ret = t ? PTR_ERR(t) : -ENOENT; | 1155 | ret = t ? PTR_ERR(t) : -ENOENT; |
1156 | #ifdef CONFIG_COMPAT | 1156 | #ifdef CONFIG_COMPAT |
1157 | if (compat) | 1157 | if (compat) |
1158 | xt_compat_unlock(AF_INET6); | 1158 | xt_compat_unlock(AF_INET6); |
1159 | #endif | 1159 | #endif |
1160 | return ret; | 1160 | return ret; |
1161 | } | 1161 | } |
1162 | 1162 | ||
/*
 * IP6T_SO_GET_ENTRIES handler: copy the table's rule blob to userspace.
 * The caller must have supplied the exact blob size (learned from a prior
 * get_info() call); -EAGAIN is returned if the table changed size in the
 * meantime so the caller can retry.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus exactly the advertised blob. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced since get_info(); retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1202 | 1202 | ||
/*
 * Common backend for (compat_)do_replace(): atomically swap @newinfo in
 * as the named table's rule set, return the old counters to userspace at
 * @counters_ptr, and tear down the old rules.
 *
 * On success, ownership of @newinfo passes to the table; on any error
 * path the caller still owns (and must free) @newinfo.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/*
	 * Drop one module reference for each condition that holds: the old
	 * set held extra user rules, and/or the new set is back down to (or
	 * below) the built-in rule count.  Both true => two puts.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* Table is already swapped; a copy failure only affects ret. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1276 | 1276 | ||
1277 | static int | 1277 | static int |
1278 | do_replace(struct net *net, const void __user *user, unsigned int len) | 1278 | do_replace(struct net *net, const void __user *user, unsigned int len) |
1279 | { | 1279 | { |
1280 | int ret; | 1280 | int ret; |
1281 | struct ip6t_replace tmp; | 1281 | struct ip6t_replace tmp; |
1282 | struct xt_table_info *newinfo; | 1282 | struct xt_table_info *newinfo; |
1283 | void *loc_cpu_entry; | 1283 | void *loc_cpu_entry; |
1284 | struct ip6t_entry *iter; | 1284 | struct ip6t_entry *iter; |
1285 | 1285 | ||
1286 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 1286 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
1287 | return -EFAULT; | 1287 | return -EFAULT; |
1288 | 1288 | ||
1289 | /* overflow check */ | 1289 | /* overflow check */ |
1290 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | 1290 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) |
1291 | return -ENOMEM; | 1291 | return -ENOMEM; |
1292 | 1292 | ||
1293 | newinfo = xt_alloc_table_info(tmp.size); | 1293 | newinfo = xt_alloc_table_info(tmp.size); |
1294 | if (!newinfo) | 1294 | if (!newinfo) |
1295 | return -ENOMEM; | 1295 | return -ENOMEM; |
1296 | 1296 | ||
1297 | /* choose the copy that is on our node/cpu */ | 1297 | /* choose the copy that is on our node/cpu */ |
1298 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 1298 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
1299 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), | 1299 | if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), |
1300 | tmp.size) != 0) { | 1300 | tmp.size) != 0) { |
1301 | ret = -EFAULT; | 1301 | ret = -EFAULT; |
1302 | goto free_newinfo; | 1302 | goto free_newinfo; |
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); | 1305 | ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); |
1306 | if (ret != 0) | 1306 | if (ret != 0) |
1307 | goto free_newinfo; | 1307 | goto free_newinfo; |
1308 | 1308 | ||
1309 | duprintf("ip_tables: Translated table\n"); | 1309 | duprintf("ip_tables: Translated table\n"); |
1310 | 1310 | ||
1311 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1311 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1312 | tmp.num_counters, tmp.counters); | 1312 | tmp.num_counters, tmp.counters); |
1313 | if (ret) | 1313 | if (ret) |
1314 | goto free_newinfo_untrans; | 1314 | goto free_newinfo_untrans; |
1315 | return 0; | 1315 | return 0; |
1316 | 1316 | ||
1317 | free_newinfo_untrans: | 1317 | free_newinfo_untrans: |
1318 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) | 1318 | xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) |
1319 | cleanup_entry(iter, net); | 1319 | cleanup_entry(iter, net); |
1320 | free_newinfo: | 1320 | free_newinfo: |
1321 | xt_free_table_info(newinfo); | 1321 | xt_free_table_info(newinfo); |
1322 | return ret; | 1322 | return ret; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | static int | 1325 | static int |
1326 | do_add_counters(struct net *net, const void __user *user, unsigned int len, | 1326 | do_add_counters(struct net *net, const void __user *user, unsigned int len, |
1327 | int compat) | 1327 | int compat) |
1328 | { | 1328 | { |
1329 | unsigned int i, curcpu; | 1329 | unsigned int i, curcpu; |
1330 | struct xt_counters_info tmp; | 1330 | struct xt_counters_info tmp; |
1331 | struct xt_counters *paddc; | 1331 | struct xt_counters *paddc; |
1332 | unsigned int num_counters; | 1332 | unsigned int num_counters; |
1333 | char *name; | 1333 | char *name; |
1334 | int size; | 1334 | int size; |
1335 | void *ptmp; | 1335 | void *ptmp; |
1336 | struct xt_table *t; | 1336 | struct xt_table *t; |
1337 | const struct xt_table_info *private; | 1337 | const struct xt_table_info *private; |
1338 | int ret = 0; | 1338 | int ret = 0; |
1339 | const void *loc_cpu_entry; | 1339 | const void *loc_cpu_entry; |
1340 | struct ip6t_entry *iter; | 1340 | struct ip6t_entry *iter; |
1341 | #ifdef CONFIG_COMPAT | 1341 | #ifdef CONFIG_COMPAT |
1342 | struct compat_xt_counters_info compat_tmp; | 1342 | struct compat_xt_counters_info compat_tmp; |
1343 | 1343 | ||
1344 | if (compat) { | 1344 | if (compat) { |
1345 | ptmp = &compat_tmp; | 1345 | ptmp = &compat_tmp; |
1346 | size = sizeof(struct compat_xt_counters_info); | 1346 | size = sizeof(struct compat_xt_counters_info); |
1347 | } else | 1347 | } else |
1348 | #endif | 1348 | #endif |
1349 | { | 1349 | { |
1350 | ptmp = &tmp; | 1350 | ptmp = &tmp; |
1351 | size = sizeof(struct xt_counters_info); | 1351 | size = sizeof(struct xt_counters_info); |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | if (copy_from_user(ptmp, user, size) != 0) | 1354 | if (copy_from_user(ptmp, user, size) != 0) |
1355 | return -EFAULT; | 1355 | return -EFAULT; |
1356 | 1356 | ||
1357 | #ifdef CONFIG_COMPAT | 1357 | #ifdef CONFIG_COMPAT |
1358 | if (compat) { | 1358 | if (compat) { |
1359 | num_counters = compat_tmp.num_counters; | 1359 | num_counters = compat_tmp.num_counters; |
1360 | name = compat_tmp.name; | 1360 | name = compat_tmp.name; |
1361 | } else | 1361 | } else |
1362 | #endif | 1362 | #endif |
1363 | { | 1363 | { |
1364 | num_counters = tmp.num_counters; | 1364 | num_counters = tmp.num_counters; |
1365 | name = tmp.name; | 1365 | name = tmp.name; |
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | if (len != size + num_counters * sizeof(struct xt_counters)) | 1368 | if (len != size + num_counters * sizeof(struct xt_counters)) |
1369 | return -EINVAL; | 1369 | return -EINVAL; |
1370 | 1370 | ||
1371 | paddc = vmalloc_node(len - size, numa_node_id()); | 1371 | paddc = vmalloc_node(len - size, numa_node_id()); |
1372 | if (!paddc) | 1372 | if (!paddc) |
1373 | return -ENOMEM; | 1373 | return -ENOMEM; |
1374 | 1374 | ||
1375 | if (copy_from_user(paddc, user + size, len - size) != 0) { | 1375 | if (copy_from_user(paddc, user + size, len - size) != 0) { |
1376 | ret = -EFAULT; | 1376 | ret = -EFAULT; |
1377 | goto free; | 1377 | goto free; |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | t = xt_find_table_lock(net, AF_INET6, name); | 1380 | t = xt_find_table_lock(net, AF_INET6, name); |
1381 | if (!t || IS_ERR(t)) { | 1381 | if (!t || IS_ERR(t)) { |
1382 | ret = t ? PTR_ERR(t) : -ENOENT; | 1382 | ret = t ? PTR_ERR(t) : -ENOENT; |
1383 | goto free; | 1383 | goto free; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | 1386 | ||
1387 | local_bh_disable(); | 1387 | local_bh_disable(); |
1388 | private = t->private; | 1388 | private = t->private; |
1389 | if (private->number != num_counters) { | 1389 | if (private->number != num_counters) { |
1390 | ret = -EINVAL; | 1390 | ret = -EINVAL; |
1391 | goto unlock_up_free; | 1391 | goto unlock_up_free; |
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | i = 0; | 1394 | i = 0; |
1395 | /* Choose the copy that is on our node */ | 1395 | /* Choose the copy that is on our node */ |
1396 | curcpu = smp_processor_id(); | 1396 | curcpu = smp_processor_id(); |
1397 | xt_info_wrlock(curcpu); | 1397 | xt_info_wrlock(curcpu); |
1398 | loc_cpu_entry = private->entries[curcpu]; | 1398 | loc_cpu_entry = private->entries[curcpu]; |
1399 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { | 1399 | xt_entry_foreach(iter, loc_cpu_entry, private->size) { |
1400 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); | 1400 | ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); |
1401 | ++i; | 1401 | ++i; |
1402 | } | 1402 | } |
1403 | xt_info_wrunlock(curcpu); | 1403 | xt_info_wrunlock(curcpu); |
1404 | 1404 | ||
1405 | unlock_up_free: | 1405 | unlock_up_free: |
1406 | local_bh_enable(); | 1406 | local_bh_enable(); |
1407 | xt_table_unlock(t); | 1407 | xt_table_unlock(t); |
1408 | module_put(t->me); | 1408 | module_put(t->me); |
1409 | free: | 1409 | free: |
1410 | vfree(paddc); | 1410 | vfree(paddc); |
1411 | 1411 | ||
1412 | return ret; | 1412 | return ret; |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | #ifdef CONFIG_COMPAT | 1415 | #ifdef CONFIG_COMPAT |
/*
 * 32-bit ABI image of struct ip6t_replace, as received from compat
 * userspace by compat_do_replace().  Field meanings mirror the native
 * structure; only the pointer width differs.
 */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];	/* table to replace */
	u32 valid_hooks;			/* bitmask of hooked chains */
	u32 num_entries;			/* number of rules following */
	u32 size;				/* byte size of entries[] blob */
	u32 hook_entry[NF_INET_NUMHOOKS];	/* per-hook start offsets */
	u32 underflow[NF_INET_NUMHOOKS];	/* per-hook underflow offsets */
	u32 num_counters;			/* old-counter array length */
	compat_uptr_t counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];	/* the rules themselves */
};
1427 | 1427 | ||
/*
 * Copy one native rule out to compat userspace: write the (larger)
 * native entry header, overlay counter slot @i, then let the xt helpers
 * shrink each match and the target to their compat layouts.  *dstptr and
 * *size track the output cursor; the entry's internal offsets are
 * rewritten to account for the accumulated size difference.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	/* Advance by the compat header size; track how much we shrank. */
	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1466 | 1466 | ||
1467 | static int | 1467 | static int |
1468 | compat_find_calc_match(struct ip6t_entry_match *m, | 1468 | compat_find_calc_match(struct ip6t_entry_match *m, |
1469 | const char *name, | 1469 | const char *name, |
1470 | const struct ip6t_ip6 *ipv6, | 1470 | const struct ip6t_ip6 *ipv6, |
1471 | unsigned int hookmask, | 1471 | unsigned int hookmask, |
1472 | int *size) | 1472 | int *size) |
1473 | { | 1473 | { |
1474 | struct xt_match *match; | 1474 | struct xt_match *match; |
1475 | 1475 | ||
1476 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, | 1476 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
1477 | m->u.user.revision); | 1477 | m->u.user.revision); |
1478 | if (IS_ERR(match)) { | 1478 | if (IS_ERR(match)) { |
1479 | duprintf("compat_check_calc_match: `%s' not found\n", | 1479 | duprintf("compat_check_calc_match: `%s' not found\n", |
1480 | m->u.user.name); | 1480 | m->u.user.name); |
1481 | return PTR_ERR(match); | 1481 | return PTR_ERR(match); |
1482 | } | 1482 | } |
1483 | m->u.kernel.match = match; | 1483 | m->u.kernel.match = match; |
1484 | *size += xt_compat_match_offset(match); | 1484 | *size += xt_compat_match_offset(match); |
1485 | return 0; | 1485 | return 0; |
1486 | } | 1486 | } |
1487 | 1487 | ||
/*
 * Release the module references taken on every match and on the target
 * of a compat entry (the inverse of the lookups done while checking it).
 */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct ip6t_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1499 | 1499 | ||
/*
 * Validate one compat entry inside the userspace blob: alignment and
 * bounds, minimum size, basic sanity (check_entry), then resolve every
 * match and the target (taking module references), record the native-vs-
 * compat size delta for this entry with xt_compat_add_offset(), and note
 * any hook entry / underflow that lands on this entry.
 *
 * On error, all module references taken so far are dropped: @j counts
 * the matches successfully resolved, so the unwind loop puts exactly
 * those and stops.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates this entry's native-minus-compat size delta. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Put only the j matches that were successfully resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1587 | 1587 | ||
1588 | static int | 1588 | static int |
1589 | compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, | 1589 | compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, |
1590 | unsigned int *size, const char *name, | 1590 | unsigned int *size, const char *name, |
1591 | struct xt_table_info *newinfo, unsigned char *base) | 1591 | struct xt_table_info *newinfo, unsigned char *base) |
1592 | { | 1592 | { |
1593 | struct ip6t_entry_target *t; | 1593 | struct ip6t_entry_target *t; |
1594 | struct xt_target *target; | 1594 | struct xt_target *target; |
1595 | struct ip6t_entry *de; | 1595 | struct ip6t_entry *de; |
1596 | unsigned int origsize; | 1596 | unsigned int origsize; |
1597 | int ret, h; | 1597 | int ret, h; |
1598 | struct xt_entry_match *ematch; | 1598 | struct xt_entry_match *ematch; |
1599 | 1599 | ||
1600 | ret = 0; | 1600 | ret = 0; |
1601 | origsize = *size; | 1601 | origsize = *size; |
1602 | de = (struct ip6t_entry *)*dstptr; | 1602 | de = (struct ip6t_entry *)*dstptr; |
1603 | memcpy(de, e, sizeof(struct ip6t_entry)); | 1603 | memcpy(de, e, sizeof(struct ip6t_entry)); |
1604 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); | 1604 | memcpy(&de->counters, &e->counters, sizeof(e->counters)); |
1605 | 1605 | ||
1606 | *dstptr += sizeof(struct ip6t_entry); | 1606 | *dstptr += sizeof(struct ip6t_entry); |
1607 | *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); | 1607 | *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); |
1608 | 1608 | ||
1609 | xt_ematch_foreach(ematch, e) { | 1609 | xt_ematch_foreach(ematch, e) { |
1610 | ret = xt_compat_match_from_user(ematch, dstptr, size); | 1610 | ret = xt_compat_match_from_user(ematch, dstptr, size); |
1611 | if (ret != 0) | 1611 | if (ret != 0) |
1612 | return ret; | 1612 | return ret; |
1613 | } | 1613 | } |
1614 | de->target_offset = e->target_offset - (origsize - *size); | 1614 | de->target_offset = e->target_offset - (origsize - *size); |
1615 | t = compat_ip6t_get_target(e); | 1615 | t = compat_ip6t_get_target(e); |
1616 | target = t->u.kernel.target; | 1616 | target = t->u.kernel.target; |
1617 | xt_compat_target_from_user(t, dstptr, size); | 1617 | xt_compat_target_from_user(t, dstptr, size); |
1618 | 1618 | ||
1619 | de->next_offset = e->next_offset - (origsize - *size); | 1619 | de->next_offset = e->next_offset - (origsize - *size); |
1620 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 1620 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
1621 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) | 1621 | if ((unsigned char *)de - base < newinfo->hook_entry[h]) |
1622 | newinfo->hook_entry[h] -= origsize - *size; | 1622 | newinfo->hook_entry[h] -= origsize - *size; |
1623 | if ((unsigned char *)de - base < newinfo->underflow[h]) | 1623 | if ((unsigned char *)de - base < newinfo->underflow[h]) |
1624 | newinfo->underflow[h] -= origsize - *size; | 1624 | newinfo->underflow[h] -= origsize - *size; |
1625 | } | 1625 | } |
1626 | return ret; | 1626 | return ret; |
1627 | } | 1627 | } |
1628 | 1628 | ||
1629 | static int compat_check_entry(struct ip6t_entry *e, struct net *net, | 1629 | static int compat_check_entry(struct ip6t_entry *e, struct net *net, |
1630 | const char *name) | 1630 | const char *name) |
1631 | { | 1631 | { |
1632 | unsigned int j; | 1632 | unsigned int j; |
1633 | int ret = 0; | 1633 | int ret = 0; |
1634 | struct xt_mtchk_param mtpar; | 1634 | struct xt_mtchk_param mtpar; |
1635 | struct xt_entry_match *ematch; | 1635 | struct xt_entry_match *ematch; |
1636 | 1636 | ||
1637 | j = 0; | 1637 | j = 0; |
1638 | mtpar.net = net; | 1638 | mtpar.net = net; |
1639 | mtpar.table = name; | 1639 | mtpar.table = name; |
1640 | mtpar.entryinfo = &e->ipv6; | 1640 | mtpar.entryinfo = &e->ipv6; |
1641 | mtpar.hook_mask = e->comefrom; | 1641 | mtpar.hook_mask = e->comefrom; |
1642 | mtpar.family = NFPROTO_IPV6; | 1642 | mtpar.family = NFPROTO_IPV6; |
1643 | xt_ematch_foreach(ematch, e) { | 1643 | xt_ematch_foreach(ematch, e) { |
1644 | ret = check_match(ematch, &mtpar); | 1644 | ret = check_match(ematch, &mtpar); |
1645 | if (ret != 0) | 1645 | if (ret != 0) |
1646 | goto cleanup_matches; | 1646 | goto cleanup_matches; |
1647 | ++j; | 1647 | ++j; |
1648 | } | 1648 | } |
1649 | 1649 | ||
1650 | ret = check_target(e, net, name); | 1650 | ret = check_target(e, net, name); |
1651 | if (ret) | 1651 | if (ret) |
1652 | goto cleanup_matches; | 1652 | goto cleanup_matches; |
1653 | return 0; | 1653 | return 0; |
1654 | 1654 | ||
1655 | cleanup_matches: | 1655 | cleanup_matches: |
1656 | xt_ematch_foreach(ematch, e) { | 1656 | xt_ematch_foreach(ematch, e) { |
1657 | if (j-- == 0) | 1657 | if (j-- == 0) |
1658 | break; | 1658 | break; |
1659 | cleanup_match(ematch, net); | 1659 | cleanup_match(ematch, net); |
1660 | } | 1660 | } |
1661 | return ret; | 1661 | return ret; |
1662 | } | 1662 | } |
1663 | 1663 | ||
/*
 * Translate a rule blob in compat (32-bit) layout into native layout.
 *
 * Two passes run under the per-family compat lock: first every compat
 * entry is size/offset-checked and its extensions are looked up
 * (check_compat_entry_size_and_hooks); then each entry is converted
 * into a freshly allocated native xt_table_info
 * (compat_copy_entry_from_user).  After unlocking, the chains are
 * loop-checked (mark_source_chains) and every entry's extensions are
 * ->check'ed; finally the translated blob is duplicated to every other
 * possible CPU.
 *
 * On success *pinfo/*pentry0 are replaced by the native table and the
 * old info is freed.  On failure every extension reference taken so far
 * is dropped (compat_release_entry / cleanup_entry).
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* number of entries whose extension refs we hold */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* 'size' now holds the native (usually larger) blob size. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* number of entries that passed compat_check_entry() */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		/*
		 * NOTE(review): this walk is over the compat blob at
		 * entry0 (total_size bytes) but is bounded by
		 * newinfo->size, the native size -- verify it cannot
		 * run past the compat buffer.
		 */
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Drop the extension refs taken for the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1817 | 1817 | ||
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit user space: copy in the compat
 * replace header and rule blob, translate it to native layout and swap
 * it into place via __do_replace().
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with a larger native blob. */
	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	/* Translated but not installed: destroy every entry first. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1870 | 1870 | ||
1871 | static int | 1871 | static int |
1872 | compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, | 1872 | compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, |
1873 | unsigned int len) | 1873 | unsigned int len) |
1874 | { | 1874 | { |
1875 | int ret; | 1875 | int ret; |
1876 | 1876 | ||
1877 | if (!capable(CAP_NET_ADMIN)) | 1877 | if (!capable(CAP_NET_ADMIN)) |
1878 | return -EPERM; | 1878 | return -EPERM; |
1879 | 1879 | ||
1880 | switch (cmd) { | 1880 | switch (cmd) { |
1881 | case IP6T_SO_SET_REPLACE: | 1881 | case IP6T_SO_SET_REPLACE: |
1882 | ret = compat_do_replace(sock_net(sk), user, len); | 1882 | ret = compat_do_replace(sock_net(sk), user, len); |
1883 | break; | 1883 | break; |
1884 | 1884 | ||
1885 | case IP6T_SO_SET_ADD_COUNTERS: | 1885 | case IP6T_SO_SET_ADD_COUNTERS: |
1886 | ret = do_add_counters(sock_net(sk), user, len, 1); | 1886 | ret = do_add_counters(sock_net(sk), user, len, 1); |
1887 | break; | 1887 | break; |
1888 | 1888 | ||
1889 | default: | 1889 | default: |
1890 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); | 1890 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); |
1891 | ret = -EINVAL; | 1891 | ret = -EINVAL; |
1892 | } | 1892 | } |
1893 | 1893 | ||
1894 | return ret; | 1894 | return ret; |
1895 | } | 1895 | } |
1896 | 1896 | ||
1897 | struct compat_ip6t_get_entries { | 1897 | struct compat_ip6t_get_entries { |
1898 | char name[IP6T_TABLE_MAXNAMELEN]; | 1898 | char name[IP6T_TABLE_MAXNAMELEN]; |
1899 | compat_uint_t size; | 1899 | compat_uint_t size; |
1900 | struct compat_ip6t_entry entrytable[0]; | 1900 | struct compat_ip6t_entry entrytable[0]; |
1901 | }; | 1901 | }; |
1902 | 1902 | ||
/*
 * Dump the whole table -- entries plus a freshly allocated counter
 * snapshot -- into a userspace buffer in compat (32-bit) layout.
 * Returns 0 or a negative errno from alloc_counters() / per-entry
 * conversion.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	/* i indexes the matching counter slot for each entry. */
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1937 | 1937 | ||
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit user space: validate the
 * user-supplied length against the table's compat size, then dump the
 * table with compat_copy_entries_to_user() under the compat lock.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		/* Compute the table's size in compat layout. */
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size mismatch: userspace must refetch and retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1984 | 1984 | ||
1985 | static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); | 1985 | static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); |
1986 | 1986 | ||
1987 | static int | 1987 | static int |
1988 | compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 1988 | compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
1989 | { | 1989 | { |
1990 | int ret; | 1990 | int ret; |
1991 | 1991 | ||
1992 | if (!capable(CAP_NET_ADMIN)) | 1992 | if (!capable(CAP_NET_ADMIN)) |
1993 | return -EPERM; | 1993 | return -EPERM; |
1994 | 1994 | ||
1995 | switch (cmd) { | 1995 | switch (cmd) { |
1996 | case IP6T_SO_GET_INFO: | 1996 | case IP6T_SO_GET_INFO: |
1997 | ret = get_info(sock_net(sk), user, len, 1); | 1997 | ret = get_info(sock_net(sk), user, len, 1); |
1998 | break; | 1998 | break; |
1999 | case IP6T_SO_GET_ENTRIES: | 1999 | case IP6T_SO_GET_ENTRIES: |
2000 | ret = compat_get_entries(sock_net(sk), user, len); | 2000 | ret = compat_get_entries(sock_net(sk), user, len); |
2001 | break; | 2001 | break; |
2002 | default: | 2002 | default: |
2003 | ret = do_ip6t_get_ctl(sk, cmd, user, len); | 2003 | ret = do_ip6t_get_ctl(sk, cmd, user, len); |
2004 | } | 2004 | } |
2005 | return ret; | 2005 | return ret; |
2006 | } | 2006 | } |
2007 | #endif | 2007 | #endif |
2008 | 2008 | ||
2009 | static int | 2009 | static int |
2010 | do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 2010 | do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
2011 | { | 2011 | { |
2012 | int ret; | 2012 | int ret; |
2013 | 2013 | ||
2014 | if (!capable(CAP_NET_ADMIN)) | 2014 | if (!capable(CAP_NET_ADMIN)) |
2015 | return -EPERM; | 2015 | return -EPERM; |
2016 | 2016 | ||
2017 | switch (cmd) { | 2017 | switch (cmd) { |
2018 | case IP6T_SO_SET_REPLACE: | 2018 | case IP6T_SO_SET_REPLACE: |
2019 | ret = do_replace(sock_net(sk), user, len); | 2019 | ret = do_replace(sock_net(sk), user, len); |
2020 | break; | 2020 | break; |
2021 | 2021 | ||
2022 | case IP6T_SO_SET_ADD_COUNTERS: | 2022 | case IP6T_SO_SET_ADD_COUNTERS: |
2023 | ret = do_add_counters(sock_net(sk), user, len, 0); | 2023 | ret = do_add_counters(sock_net(sk), user, len, 0); |
2024 | break; | 2024 | break; |
2025 | 2025 | ||
2026 | default: | 2026 | default: |
2027 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); | 2027 | duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); |
2028 | ret = -EINVAL; | 2028 | ret = -EINVAL; |
2029 | } | 2029 | } |
2030 | 2030 | ||
2031 | return ret; | 2031 | return ret; |
2032 | } | 2032 | } |
2033 | 2033 | ||
2034 | static int | 2034 | static int |
2035 | do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | 2035 | do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) |
2036 | { | 2036 | { |
2037 | int ret; | 2037 | int ret; |
2038 | 2038 | ||
2039 | if (!capable(CAP_NET_ADMIN)) | 2039 | if (!capable(CAP_NET_ADMIN)) |
2040 | return -EPERM; | 2040 | return -EPERM; |
2041 | 2041 | ||
2042 | switch (cmd) { | 2042 | switch (cmd) { |
2043 | case IP6T_SO_GET_INFO: | 2043 | case IP6T_SO_GET_INFO: |
2044 | ret = get_info(sock_net(sk), user, len, 0); | 2044 | ret = get_info(sock_net(sk), user, len, 0); |
2045 | break; | 2045 | break; |
2046 | 2046 | ||
2047 | case IP6T_SO_GET_ENTRIES: | 2047 | case IP6T_SO_GET_ENTRIES: |
2048 | ret = get_entries(sock_net(sk), user, len); | 2048 | ret = get_entries(sock_net(sk), user, len); |
2049 | break; | 2049 | break; |
2050 | 2050 | ||
2051 | case IP6T_SO_GET_REVISION_MATCH: | 2051 | case IP6T_SO_GET_REVISION_MATCH: |
2052 | case IP6T_SO_GET_REVISION_TARGET: { | 2052 | case IP6T_SO_GET_REVISION_TARGET: { |
2053 | struct ip6t_get_revision rev; | 2053 | struct ip6t_get_revision rev; |
2054 | int target; | 2054 | int target; |
2055 | 2055 | ||
2056 | if (*len != sizeof(rev)) { | 2056 | if (*len != sizeof(rev)) { |
2057 | ret = -EINVAL; | 2057 | ret = -EINVAL; |
2058 | break; | 2058 | break; |
2059 | } | 2059 | } |
2060 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { | 2060 | if (copy_from_user(&rev, user, sizeof(rev)) != 0) { |
2061 | ret = -EFAULT; | 2061 | ret = -EFAULT; |
2062 | break; | 2062 | break; |
2063 | } | 2063 | } |
2064 | 2064 | ||
2065 | if (cmd == IP6T_SO_GET_REVISION_TARGET) | 2065 | if (cmd == IP6T_SO_GET_REVISION_TARGET) |
2066 | target = 1; | 2066 | target = 1; |
2067 | else | 2067 | else |
2068 | target = 0; | 2068 | target = 0; |
2069 | 2069 | ||
2070 | try_then_request_module(xt_find_revision(AF_INET6, rev.name, | 2070 | try_then_request_module(xt_find_revision(AF_INET6, rev.name, |
2071 | rev.revision, | 2071 | rev.revision, |
2072 | target, &ret), | 2072 | target, &ret), |
2073 | "ip6t_%s", rev.name); | 2073 | "ip6t_%s", rev.name); |
2074 | break; | 2074 | break; |
2075 | } | 2075 | } |
2076 | 2076 | ||
2077 | default: | 2077 | default: |
2078 | duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); | 2078 | duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); |
2079 | ret = -EINVAL; | 2079 | ret = -EINVAL; |
2080 | } | 2080 | } |
2081 | 2081 | ||
2082 | return ret; | 2082 | return ret; |
2083 | } | 2083 | } |
2084 | 2084 | ||
2085 | struct xt_table *ip6t_register_table(struct net *net, | 2085 | struct xt_table *ip6t_register_table(struct net *net, |
2086 | const struct xt_table *table, | 2086 | const struct xt_table *table, |
2087 | const struct ip6t_replace *repl) | 2087 | const struct ip6t_replace *repl) |
2088 | { | 2088 | { |
2089 | int ret; | 2089 | int ret; |
2090 | struct xt_table_info *newinfo; | 2090 | struct xt_table_info *newinfo; |
2091 | struct xt_table_info bootstrap = {0}; | 2091 | struct xt_table_info bootstrap = {0}; |
2092 | void *loc_cpu_entry; | 2092 | void *loc_cpu_entry; |
2093 | struct xt_table *new_table; | 2093 | struct xt_table *new_table; |
2094 | 2094 | ||
2095 | newinfo = xt_alloc_table_info(repl->size); | 2095 | newinfo = xt_alloc_table_info(repl->size); |
2096 | if (!newinfo) { | 2096 | if (!newinfo) { |
2097 | ret = -ENOMEM; | 2097 | ret = -ENOMEM; |
2098 | goto out; | 2098 | goto out; |
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | /* choose the copy on our node/cpu, but dont care about preemption */ | 2101 | /* choose the copy on our node/cpu, but dont care about preemption */ |
2102 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; | 2102 | loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; |
2103 | memcpy(loc_cpu_entry, repl->entries, repl->size); | 2103 | memcpy(loc_cpu_entry, repl->entries, repl->size); |
2104 | 2104 | ||
2105 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); | 2105 | ret = translate_table(net, newinfo, loc_cpu_entry, repl); |
2106 | if (ret != 0) | 2106 | if (ret != 0) |
2107 | goto out_free; | 2107 | goto out_free; |
2108 | 2108 | ||
2109 | new_table = xt_register_table(net, table, &bootstrap, newinfo); | 2109 | new_table = xt_register_table(net, table, &bootstrap, newinfo); |
2110 | if (IS_ERR(new_table)) { | 2110 | if (IS_ERR(new_table)) { |
2111 | ret = PTR_ERR(new_table); | 2111 | ret = PTR_ERR(new_table); |
2112 | goto out_free; | 2112 | goto out_free; |
2113 | } | 2113 | } |
2114 | return new_table; | 2114 | return new_table; |
2115 | 2115 | ||
2116 | out_free: | 2116 | out_free: |
2117 | xt_free_table_info(newinfo); | 2117 | xt_free_table_info(newinfo); |
2118 | out: | 2118 | out: |
2119 | return ERR_PTR(ret); | 2119 | return ERR_PTR(ret); |
2120 | } | 2120 | } |
2121 | 2121 | ||
/*
 * Tear down a table set up by ip6t_register_table(): unregister it from
 * x_tables, run cleanup_entry() (extension teardown) on every rule and
 * free the table info.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/*
	 * NOTE(review): presumably balances a module reference taken
	 * when user rules were added beyond the initial set -- confirm
	 * against the replace path.
	 */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2139 | 2139 | ||
2140 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ | 2140 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ |
2141 | static inline bool | 2141 | static inline bool |
2142 | icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, | 2142 | icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, |
2143 | u_int8_t type, u_int8_t code, | 2143 | u_int8_t type, u_int8_t code, |
2144 | bool invert) | 2144 | bool invert) |
2145 | { | 2145 | { |
2146 | return (type == test_type && code >= min_code && code <= max_code) | 2146 | return (type == test_type && code >= min_code && code <= max_code) |
2147 | ^ invert; | 2147 | ^ invert; |
2148 | } | 2148 | } |
2149 | 2149 | ||
2150 | static bool | 2150 | static bool |
2151 | icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) | 2151 | icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) |
2152 | { | 2152 | { |
2153 | const struct icmp6hdr *ic; | 2153 | const struct icmp6hdr *ic; |
2154 | struct icmp6hdr _icmph; | 2154 | struct icmp6hdr _icmph; |
2155 | const struct ip6t_icmp *icmpinfo = par->matchinfo; | 2155 | const struct ip6t_icmp *icmpinfo = par->matchinfo; |
2156 | 2156 | ||
2157 | /* Must not be a fragment. */ | 2157 | /* Must not be a fragment. */ |
2158 | if (par->fragoff != 0) | 2158 | if (par->fragoff != 0) |
2159 | return false; | 2159 | return false; |
2160 | 2160 | ||
2161 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); | 2161 | ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); |
2162 | if (ic == NULL) { | 2162 | if (ic == NULL) { |
2163 | /* We've been asked to examine this packet, and we | 2163 | /* We've been asked to examine this packet, and we |
2164 | * can't. Hence, no choice but to drop. | 2164 | * can't. Hence, no choice but to drop. |
2165 | */ | 2165 | */ |
2166 | duprintf("Dropping evil ICMP tinygram.\n"); | 2166 | duprintf("Dropping evil ICMP tinygram.\n"); |
2167 | par->hotdrop = true; | 2167 | par->hotdrop = true; |
2168 | return false; | 2168 | return false; |
2169 | } | 2169 | } |
2170 | 2170 | ||
2171 | return icmp6_type_code_match(icmpinfo->type, | 2171 | return icmp6_type_code_match(icmpinfo->type, |
2172 | icmpinfo->code[0], | 2172 | icmpinfo->code[0], |
2173 | icmpinfo->code[1], | 2173 | icmpinfo->code[1], |
2174 | ic->icmp6_type, ic->icmp6_code, | 2174 | ic->icmp6_type, ic->icmp6_code, |
2175 | !!(icmpinfo->invflags&IP6T_ICMP_INV)); | 2175 | !!(icmpinfo->invflags&IP6T_ICMP_INV)); |
2176 | } | 2176 | } |
2177 | 2177 | ||
2178 | /* Called when user tries to insert an entry of this type. */ | 2178 | /* Called when user tries to insert an entry of this type. */ |
2179 | static int icmp6_checkentry(const struct xt_mtchk_param *par) | 2179 | static int icmp6_checkentry(const struct xt_mtchk_param *par) |
2180 | { | 2180 | { |
2181 | const struct ip6t_icmp *icmpinfo = par->matchinfo; | 2181 | const struct ip6t_icmp *icmpinfo = par->matchinfo; |
2182 | 2182 | ||
2183 | /* Must specify no unknown invflags */ | 2183 | /* Must specify no unknown invflags */ |
2184 | return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; | 2184 | return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; |
2185 | } | 2185 | } |
2186 | 2186 | ||
2187 | /* The built-in targets: standard (NULL) and error. */ | 2187 | /* The built-in targets: standard (NULL) and error. */ |
2188 | static struct xt_target ip6t_builtin_tg[] __read_mostly = { | 2188 | static struct xt_target ip6t_builtin_tg[] __read_mostly = { |
2189 | { | 2189 | { |
2190 | .name = IP6T_STANDARD_TARGET, | 2190 | .name = IP6T_STANDARD_TARGET, |
2191 | .targetsize = sizeof(int), | 2191 | .targetsize = sizeof(int), |
2192 | .family = NFPROTO_IPV6, | 2192 | .family = NFPROTO_IPV6, |
2193 | #ifdef CONFIG_COMPAT | 2193 | #ifdef CONFIG_COMPAT |
2194 | .compatsize = sizeof(compat_int_t), | 2194 | .compatsize = sizeof(compat_int_t), |
2195 | .compat_from_user = compat_standard_from_user, | 2195 | .compat_from_user = compat_standard_from_user, |
2196 | .compat_to_user = compat_standard_to_user, | 2196 | .compat_to_user = compat_standard_to_user, |
2197 | #endif | 2197 | #endif |
2198 | }, | 2198 | }, |
2199 | { | 2199 | { |
2200 | .name = IP6T_ERROR_TARGET, | 2200 | .name = IP6T_ERROR_TARGET, |
2201 | .target = ip6t_error, | 2201 | .target = ip6t_error, |
2202 | .targetsize = IP6T_FUNCTION_MAXNAMELEN, | 2202 | .targetsize = IP6T_FUNCTION_MAXNAMELEN, |
2203 | .family = NFPROTO_IPV6, | 2203 | .family = NFPROTO_IPV6, |
2204 | }, | 2204 | }, |
2205 | }; | 2205 | }; |
2206 | 2206 | ||
2207 | static struct nf_sockopt_ops ip6t_sockopts = { | 2207 | static struct nf_sockopt_ops ip6t_sockopts = { |
2208 | .pf = PF_INET6, | 2208 | .pf = PF_INET6, |
2209 | .set_optmin = IP6T_BASE_CTL, | 2209 | .set_optmin = IP6T_BASE_CTL, |
2210 | .set_optmax = IP6T_SO_SET_MAX+1, | 2210 | .set_optmax = IP6T_SO_SET_MAX+1, |
2211 | .set = do_ip6t_set_ctl, | 2211 | .set = do_ip6t_set_ctl, |
2212 | #ifdef CONFIG_COMPAT | 2212 | #ifdef CONFIG_COMPAT |
2213 | .compat_set = compat_do_ip6t_set_ctl, | 2213 | .compat_set = compat_do_ip6t_set_ctl, |
2214 | #endif | 2214 | #endif |
2215 | .get_optmin = IP6T_BASE_CTL, | 2215 | .get_optmin = IP6T_BASE_CTL, |
2216 | .get_optmax = IP6T_SO_GET_MAX+1, | 2216 | .get_optmax = IP6T_SO_GET_MAX+1, |
2217 | .get = do_ip6t_get_ctl, | 2217 | .get = do_ip6t_get_ctl, |
2218 | #ifdef CONFIG_COMPAT | 2218 | #ifdef CONFIG_COMPAT |
2219 | .compat_get = compat_do_ip6t_get_ctl, | 2219 | .compat_get = compat_do_ip6t_get_ctl, |
2220 | #endif | 2220 | #endif |
2221 | .owner = THIS_MODULE, | 2221 | .owner = THIS_MODULE, |
2222 | }; | 2222 | }; |
2223 | 2223 | ||
2224 | static struct xt_match ip6t_builtin_mt[] __read_mostly = { | 2224 | static struct xt_match ip6t_builtin_mt[] __read_mostly = { |
2225 | { | 2225 | { |
2226 | .name = "icmp6", | 2226 | .name = "icmp6", |
2227 | .match = icmp6_match, | 2227 | .match = icmp6_match, |
2228 | .matchsize = sizeof(struct ip6t_icmp), | 2228 | .matchsize = sizeof(struct ip6t_icmp), |
2229 | .checkentry = icmp6_checkentry, | 2229 | .checkentry = icmp6_checkentry, |
2230 | .proto = IPPROTO_ICMPV6, | 2230 | .proto = IPPROTO_ICMPV6, |
2231 | .family = NFPROTO_IPV6, | 2231 | .family = NFPROTO_IPV6, |
2232 | }, | 2232 | }, |
2233 | }; | 2233 | }; |
2234 | 2234 | ||
2235 | static int __net_init ip6_tables_net_init(struct net *net) | 2235 | static int __net_init ip6_tables_net_init(struct net *net) |
2236 | { | 2236 | { |
2237 | return xt_proto_init(net, NFPROTO_IPV6); | 2237 | return xt_proto_init(net, NFPROTO_IPV6); |
2238 | } | 2238 | } |
2239 | 2239 | ||
2240 | static void __net_exit ip6_tables_net_exit(struct net *net) | 2240 | static void __net_exit ip6_tables_net_exit(struct net *net) |
2241 | { | 2241 | { |
2242 | xt_proto_fini(net, NFPROTO_IPV6); | 2242 | xt_proto_fini(net, NFPROTO_IPV6); |
2243 | } | 2243 | } |
2244 | 2244 | ||
2245 | static struct pernet_operations ip6_tables_net_ops = { | 2245 | static struct pernet_operations ip6_tables_net_ops = { |
2246 | .init = ip6_tables_net_init, | 2246 | .init = ip6_tables_net_init, |
2247 | .exit = ip6_tables_net_exit, | 2247 | .exit = ip6_tables_net_exit, |
2248 | }; | 2248 | }; |
2249 | 2249 | ||
2250 | static int __init ip6_tables_init(void) | 2250 | static int __init ip6_tables_init(void) |
2251 | { | 2251 | { |
2252 | int ret; | 2252 | int ret; |
2253 | 2253 | ||
2254 | ret = register_pernet_subsys(&ip6_tables_net_ops); | 2254 | ret = register_pernet_subsys(&ip6_tables_net_ops); |
2255 | if (ret < 0) | 2255 | if (ret < 0) |
2256 | goto err1; | 2256 | goto err1; |
2257 | 2257 | ||
2258 | /* No one else will be downing sem now, so we won't sleep */ | 2258 | /* No one else will be downing sem now, so we won't sleep */ |
2259 | ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2259 | ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2260 | if (ret < 0) | 2260 | if (ret < 0) |
2261 | goto err2; | 2261 | goto err2; |
2262 | ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2262 | ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2263 | if (ret < 0) | 2263 | if (ret < 0) |
2264 | goto err4; | 2264 | goto err4; |
2265 | 2265 | ||
2266 | /* Register setsockopt */ | 2266 | /* Register setsockopt */ |
2267 | ret = nf_register_sockopt(&ip6t_sockopts); | 2267 | ret = nf_register_sockopt(&ip6t_sockopts); |
2268 | if (ret < 0) | 2268 | if (ret < 0) |
2269 | goto err5; | 2269 | goto err5; |
2270 | 2270 | ||
2271 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); | 2271 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2272 | return 0; | 2272 | return 0; |
2273 | 2273 | ||
2274 | err5: | 2274 | err5: |
2275 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2275 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2276 | err4: | 2276 | err4: |
2277 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2277 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2278 | err2: | 2278 | err2: |
2279 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2279 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2280 | err1: | 2280 | err1: |
2281 | return ret; | 2281 | return ret; |
2282 | } | 2282 | } |
2283 | 2283 | ||
2284 | static void __exit ip6_tables_fini(void) | 2284 | static void __exit ip6_tables_fini(void) |
2285 | { | 2285 | { |
2286 | nf_unregister_sockopt(&ip6t_sockopts); | 2286 | nf_unregister_sockopt(&ip6t_sockopts); |
2287 | 2287 | ||
2288 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); | 2288 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2289 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); | 2289 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2290 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2290 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2291 | } | 2291 | } |
2292 | 2292 | ||
2293 | /* | 2293 | /* |
2294 | * find the offset to specified header or the protocol number of last header | 2294 | * find the offset to specified header or the protocol number of last header |
2295 | * if target < 0. "last header" is transport protocol header, ESP, or | 2295 | * if target < 0. "last header" is transport protocol header, ESP, or |
2296 | * "No next header". | 2296 | * "No next header". |
2297 | * | 2297 | * |
2298 | * If target header is found, its offset is set in *offset and return protocol | 2298 | * If target header is found, its offset is set in *offset and return protocol |
2299 | * number. Otherwise, return -1. | 2299 | * number. Otherwise, return -1. |
2300 | * | 2300 | * |
2301 | * If the first fragment doesn't contain the final protocol header or | 2301 | * If the first fragment doesn't contain the final protocol header or |
2302 | * NEXTHDR_NONE it is considered invalid. | 2302 | * NEXTHDR_NONE it is considered invalid. |
2303 | * | 2303 | * |
2304 | * Note that non-1st fragment is special case that "the protocol number | 2304 | * Note that non-1st fragment is special case that "the protocol number |
2305 | * of last header" is "next header" field in Fragment header. In this case, | 2305 | * of last header" is "next header" field in Fragment header. In this case, |
2306 | * *offset is meaningless and fragment offset is stored in *fragoff if fragoff | 2306 | * *offset is meaningless and fragment offset is stored in *fragoff if fragoff |
2307 | * isn't NULL. | 2307 | * isn't NULL. |
2308 | * | 2308 | * |
2309 | */ | 2309 | */ |
2310 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | 2310 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, |
2311 | int target, unsigned short *fragoff) | 2311 | int target, unsigned short *fragoff) |
2312 | { | 2312 | { |
2313 | unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); | 2313 | unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); |
2314 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 2314 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
2315 | unsigned int len = skb->len - start; | 2315 | unsigned int len = skb->len - start; |
2316 | 2316 | ||
2317 | if (fragoff) | 2317 | if (fragoff) |
2318 | *fragoff = 0; | 2318 | *fragoff = 0; |
2319 | 2319 | ||
2320 | while (nexthdr != target) { | 2320 | while (nexthdr != target) { |
2321 | struct ipv6_opt_hdr _hdr, *hp; | 2321 | struct ipv6_opt_hdr _hdr, *hp; |
2322 | unsigned int hdrlen; | 2322 | unsigned int hdrlen; |
2323 | 2323 | ||
2324 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { | 2324 | if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { |
2325 | if (target < 0) | 2325 | if (target < 0) |
2326 | break; | 2326 | break; |
2327 | return -ENOENT; | 2327 | return -ENOENT; |
2328 | } | 2328 | } |
2329 | 2329 | ||
2330 | hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); | 2330 | hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); |
2331 | if (hp == NULL) | 2331 | if (hp == NULL) |
2332 | return -EBADMSG; | 2332 | return -EBADMSG; |
2333 | if (nexthdr == NEXTHDR_FRAGMENT) { | 2333 | if (nexthdr == NEXTHDR_FRAGMENT) { |
2334 | unsigned short _frag_off; | 2334 | unsigned short _frag_off; |
2335 | __be16 *fp; | 2335 | __be16 *fp; |
2336 | fp = skb_header_pointer(skb, | 2336 | fp = skb_header_pointer(skb, |
2337 | start+offsetof(struct frag_hdr, | 2337 | start+offsetof(struct frag_hdr, |
2338 | frag_off), | 2338 | frag_off), |
2339 | sizeof(_frag_off), | 2339 | sizeof(_frag_off), |
2340 | &_frag_off); | 2340 | &_frag_off); |
2341 | if (fp == NULL) | 2341 | if (fp == NULL) |
2342 | return -EBADMSG; | 2342 | return -EBADMSG; |
2343 | 2343 | ||
2344 | _frag_off = ntohs(*fp) & ~0x7; | 2344 | _frag_off = ntohs(*fp) & ~0x7; |
2345 | if (_frag_off) { | 2345 | if (_frag_off) { |
2346 | if (target < 0 && | 2346 | if (target < 0 && |
2347 | ((!ipv6_ext_hdr(hp->nexthdr)) || | 2347 | ((!ipv6_ext_hdr(hp->nexthdr)) || |
2348 | hp->nexthdr == NEXTHDR_NONE)) { | 2348 | hp->nexthdr == NEXTHDR_NONE)) { |
2349 | if (fragoff) | 2349 | if (fragoff) |
2350 | *fragoff = _frag_off; | 2350 | *fragoff = _frag_off; |
2351 | return hp->nexthdr; | 2351 | return hp->nexthdr; |
2352 | } | 2352 | } |
2353 | return -ENOENT; | 2353 | return -ENOENT; |
2354 | } | 2354 | } |
2355 | hdrlen = 8; | 2355 | hdrlen = 8; |
2356 | } else if (nexthdr == NEXTHDR_AUTH) | 2356 | } else if (nexthdr == NEXTHDR_AUTH) |
2357 | hdrlen = (hp->hdrlen + 2) << 2; | 2357 | hdrlen = (hp->hdrlen + 2) << 2; |
2358 | else | 2358 | else |
2359 | hdrlen = ipv6_optlen(hp); | 2359 | hdrlen = ipv6_optlen(hp); |
2360 | 2360 | ||
2361 | nexthdr = hp->nexthdr; | 2361 | nexthdr = hp->nexthdr; |
2362 | len -= hdrlen; | 2362 | len -= hdrlen; |
2363 | start += hdrlen; | 2363 | start += hdrlen; |
2364 | } | 2364 | } |
2365 | 2365 | ||
2366 | *offset = start; | 2366 | *offset = start; |
2367 | return nexthdr; | 2367 | return nexthdr; |
2368 | } | 2368 | } |
2369 | 2369 | ||
2370 | EXPORT_SYMBOL(ip6t_register_table); | 2370 | EXPORT_SYMBOL(ip6t_register_table); |
2371 | EXPORT_SYMBOL(ip6t_unregister_table); | 2371 | EXPORT_SYMBOL(ip6t_unregister_table); |
2372 | EXPORT_SYMBOL(ip6t_do_table); | 2372 | EXPORT_SYMBOL(ip6t_do_table); |
2373 | EXPORT_SYMBOL(ip6t_ext_hdr); | 2373 | EXPORT_SYMBOL(ip6t_ext_hdr); |
2374 | EXPORT_SYMBOL(ipv6_find_hdr); | 2374 | EXPORT_SYMBOL(ipv6_find_hdr); |
2375 | 2375 | ||
2376 | module_init(ip6_tables_init); | 2376 | module_init(ip6_tables_init); |
2377 | module_exit(ip6_tables_fini); | 2377 | module_exit(ip6_tables_fini); |
2378 | 2378 |
net/ipv6/netfilter/ip6table_filter.c
1 | /* | 1 | /* |
2 | * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. | 2 | * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 4 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/netfilter_ipv6/ip6_tables.h> | 14 | #include <linux/netfilter_ipv6/ip6_tables.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | 16 | ||
17 | MODULE_LICENSE("GPL"); | 17 | MODULE_LICENSE("GPL"); |
18 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 18 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
19 | MODULE_DESCRIPTION("ip6tables filter table"); | 19 | MODULE_DESCRIPTION("ip6tables filter table"); |
20 | 20 | ||
21 | #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ | 21 | #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ |
22 | (1 << NF_INET_FORWARD) | \ | 22 | (1 << NF_INET_FORWARD) | \ |
23 | (1 << NF_INET_LOCAL_OUT)) | 23 | (1 << NF_INET_LOCAL_OUT)) |
24 | 24 | ||
25 | static const struct xt_table packet_filter = { | 25 | static const struct xt_table packet_filter = { |
26 | .name = "filter", | 26 | .name = "filter", |
27 | .valid_hooks = FILTER_VALID_HOOKS, | 27 | .valid_hooks = FILTER_VALID_HOOKS, |
28 | .me = THIS_MODULE, | 28 | .me = THIS_MODULE, |
29 | .af = NFPROTO_IPV6, | 29 | .af = NFPROTO_IPV6, |
30 | .priority = NF_IP6_PRI_FILTER, | 30 | .priority = NF_IP6_PRI_FILTER, |
31 | }; | 31 | }; |
32 | 32 | ||
33 | /* The work comes in here from netfilter.c. */ | 33 | /* The work comes in here from netfilter.c. */ |
34 | static unsigned int | 34 | static unsigned int |
35 | ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, | 35 | ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, |
36 | const struct net_device *in, const struct net_device *out, | 36 | const struct net_device *in, const struct net_device *out, |
37 | int (*okfn)(struct sk_buff *)) | 37 | int (*okfn)(struct sk_buff *)) |
38 | { | 38 | { |
39 | const struct net *net = dev_net((in != NULL) ? in : out); | 39 | const struct net *net = dev_net((in != NULL) ? in : out); |
40 | 40 | ||
41 | return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter); | 41 | return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter); |
42 | } | 42 | } |
43 | 43 | ||
44 | static struct nf_hook_ops *filter_ops __read_mostly; | 44 | static struct nf_hook_ops *filter_ops __read_mostly; |
45 | 45 | ||
46 | /* Default to forward because I got too much mail already. */ | 46 | /* Default to forward because I got too much mail already. */ |
47 | static int forward = NF_ACCEPT; | 47 | static int forward = NF_ACCEPT; |
48 | module_param(forward, bool, 0000); | 48 | module_param(forward, bool, 0000); |
49 | 49 | ||
50 | static int __net_init ip6table_filter_net_init(struct net *net) | 50 | static int __net_init ip6table_filter_net_init(struct net *net) |
51 | { | 51 | { |
52 | struct ip6t_replace *repl; | 52 | struct ip6t_replace *repl; |
53 | 53 | ||
54 | repl = ip6t_alloc_initial_table(&packet_filter); | 54 | repl = ip6t_alloc_initial_table(&packet_filter); |
55 | if (repl == NULL) | 55 | if (repl == NULL) |
56 | return -ENOMEM; | 56 | return -ENOMEM; |
57 | /* Entry 1 is the FORWARD hook */ | 57 | /* Entry 1 is the FORWARD hook */ |
58 | ((struct ip6t_standard *)repl->entries)[1].target.verdict = | 58 | ((struct ip6t_standard *)repl->entries)[1].target.verdict = |
59 | -forward - 1; | 59 | -forward - 1; |
60 | 60 | ||
61 | net->ipv6.ip6table_filter = | 61 | net->ipv6.ip6table_filter = |
62 | ip6t_register_table(net, &packet_filter, repl); | 62 | ip6t_register_table(net, &packet_filter, repl); |
63 | kfree(repl); | 63 | kfree(repl); |
64 | if (IS_ERR(net->ipv6.ip6table_filter)) | 64 | if (IS_ERR(net->ipv6.ip6table_filter)) |
65 | return PTR_ERR(net->ipv6.ip6table_filter); | 65 | return PTR_ERR(net->ipv6.ip6table_filter); |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | static void __net_exit ip6table_filter_net_exit(struct net *net) | 69 | static void __net_exit ip6table_filter_net_exit(struct net *net) |
70 | { | 70 | { |
71 | ip6t_unregister_table(net, net->ipv6.ip6table_filter); | 71 | ip6t_unregister_table(net, net->ipv6.ip6table_filter); |
72 | } | 72 | } |
73 | 73 | ||
74 | static struct pernet_operations ip6table_filter_net_ops = { | 74 | static struct pernet_operations ip6table_filter_net_ops = { |
75 | .init = ip6table_filter_net_init, | 75 | .init = ip6table_filter_net_init, |
76 | .exit = ip6table_filter_net_exit, | 76 | .exit = ip6table_filter_net_exit, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static int __init ip6table_filter_init(void) | 79 | static int __init ip6table_filter_init(void) |
80 | { | 80 | { |
81 | int ret; | 81 | int ret; |
82 | 82 | ||
83 | if (forward < 0 || forward > NF_MAX_VERDICT) { | 83 | if (forward < 0 || forward > NF_MAX_VERDICT) { |
84 | printk("iptables forward must be 0 or 1\n"); | 84 | pr_err("iptables forward must be 0 or 1\n"); |
85 | return -EINVAL; | 85 | return -EINVAL; |
86 | } | 86 | } |
87 | 87 | ||
88 | ret = register_pernet_subsys(&ip6table_filter_net_ops); | 88 | ret = register_pernet_subsys(&ip6table_filter_net_ops); |
89 | if (ret < 0) | 89 | if (ret < 0) |
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | /* Register hooks */ | 92 | /* Register hooks */ |
93 | filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook); | 93 | filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook); |
94 | if (IS_ERR(filter_ops)) { | 94 | if (IS_ERR(filter_ops)) { |
95 | ret = PTR_ERR(filter_ops); | 95 | ret = PTR_ERR(filter_ops); |
96 | goto cleanup_table; | 96 | goto cleanup_table; |
97 | } | 97 | } |
98 | 98 | ||
99 | return ret; | 99 | return ret; |
100 | 100 | ||
101 | cleanup_table: | 101 | cleanup_table: |
102 | unregister_pernet_subsys(&ip6table_filter_net_ops); | 102 | unregister_pernet_subsys(&ip6table_filter_net_ops); |
103 | return ret; | 103 | return ret; |
104 | } | 104 | } |
105 | 105 | ||
106 | static void __exit ip6table_filter_fini(void) | 106 | static void __exit ip6table_filter_fini(void) |
107 | { | 107 | { |
108 | xt_hook_unlink(&packet_filter, filter_ops); | 108 | xt_hook_unlink(&packet_filter, filter_ops); |
109 | unregister_pernet_subsys(&ip6table_filter_net_ops); | 109 | unregister_pernet_subsys(&ip6table_filter_net_ops); |
110 | } | 110 | } |
111 | 111 | ||
112 | module_init(ip6table_filter_init); | 112 | module_init(ip6table_filter_init); |
113 | module_exit(ip6table_filter_fini); | 113 | module_exit(ip6table_filter_fini); |
114 | 114 |
net/ipv6/netfilter/ip6table_mangle.c
1 | /* | 1 | /* |
2 | * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6 | 2 | * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6 |
3 | * | 3 | * |
4 | * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> | 4 | * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> |
5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> | 5 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/netfilter_ipv6/ip6_tables.h> | 12 | #include <linux/netfilter_ipv6/ip6_tables.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | 14 | ||
15 | MODULE_LICENSE("GPL"); | 15 | MODULE_LICENSE("GPL"); |
16 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 16 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
17 | MODULE_DESCRIPTION("ip6tables mangle table"); | 17 | MODULE_DESCRIPTION("ip6tables mangle table"); |
18 | 18 | ||
19 | #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ | 19 | #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ |
20 | (1 << NF_INET_LOCAL_IN) | \ | 20 | (1 << NF_INET_LOCAL_IN) | \ |
21 | (1 << NF_INET_FORWARD) | \ | 21 | (1 << NF_INET_FORWARD) | \ |
22 | (1 << NF_INET_LOCAL_OUT) | \ | 22 | (1 << NF_INET_LOCAL_OUT) | \ |
23 | (1 << NF_INET_POST_ROUTING)) | 23 | (1 << NF_INET_POST_ROUTING)) |
24 | 24 | ||
25 | static const struct xt_table packet_mangler = { | 25 | static const struct xt_table packet_mangler = { |
26 | .name = "mangle", | 26 | .name = "mangle", |
27 | .valid_hooks = MANGLE_VALID_HOOKS, | 27 | .valid_hooks = MANGLE_VALID_HOOKS, |
28 | .me = THIS_MODULE, | 28 | .me = THIS_MODULE, |
29 | .af = NFPROTO_IPV6, | 29 | .af = NFPROTO_IPV6, |
30 | .priority = NF_IP6_PRI_MANGLE, | 30 | .priority = NF_IP6_PRI_MANGLE, |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static unsigned int | 33 | static unsigned int |
34 | ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) | 34 | ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) |
35 | { | 35 | { |
36 | unsigned int ret; | 36 | unsigned int ret; |
37 | struct in6_addr saddr, daddr; | 37 | struct in6_addr saddr, daddr; |
38 | u_int8_t hop_limit; | 38 | u_int8_t hop_limit; |
39 | u_int32_t flowlabel, mark; | 39 | u_int32_t flowlabel, mark; |
40 | 40 | ||
41 | #if 0 | 41 | #if 0 |
42 | /* root is playing with raw sockets. */ | 42 | /* root is playing with raw sockets. */ |
43 | if (skb->len < sizeof(struct iphdr) || | 43 | if (skb->len < sizeof(struct iphdr) || |
44 | ip_hdrlen(skb) < sizeof(struct iphdr)) { | 44 | ip_hdrlen(skb) < sizeof(struct iphdr)) { |
45 | if (net_ratelimit()) | 45 | if (net_ratelimit()) |
46 | printk("ip6t_hook: happy cracking.\n"); | 46 | pr_warning("ip6t_hook: happy cracking.\n"); |
47 | return NF_ACCEPT; | 47 | return NF_ACCEPT; |
48 | } | 48 | } |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | /* save source/dest address, mark, hoplimit, flowlabel, priority, */ | 51 | /* save source/dest address, mark, hoplimit, flowlabel, priority, */ |
52 | memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); | 52 | memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); |
53 | memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); | 53 | memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); |
54 | mark = skb->mark; | 54 | mark = skb->mark; |
55 | hop_limit = ipv6_hdr(skb)->hop_limit; | 55 | hop_limit = ipv6_hdr(skb)->hop_limit; |
56 | 56 | ||
57 | /* flowlabel and prio (includes version, which shouldn't change either) */ | 57 | /* flowlabel and prio (includes version, which shouldn't change either) */ |
58 | flowlabel = *((u_int32_t *)ipv6_hdr(skb)); | 58 | flowlabel = *((u_int32_t *)ipv6_hdr(skb)); |
59 | 59 | ||
60 | ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, | 60 | ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, |
61 | dev_net(out)->ipv6.ip6table_mangle); | 61 | dev_net(out)->ipv6.ip6table_mangle); |
62 | 62 | ||
63 | if (ret != NF_DROP && ret != NF_STOLEN && | 63 | if (ret != NF_DROP && ret != NF_STOLEN && |
64 | (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || | 64 | (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || |
65 | memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || | 65 | memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || |
66 | skb->mark != mark || | 66 | skb->mark != mark || |
67 | ipv6_hdr(skb)->hop_limit != hop_limit)) | 67 | ipv6_hdr(skb)->hop_limit != hop_limit)) |
68 | return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; | 68 | return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; |
69 | 69 | ||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | /* The work comes in here from netfilter.c. */ | 73 | /* The work comes in here from netfilter.c. */ |
74 | static unsigned int | 74 | static unsigned int |
75 | ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb, | 75 | ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb, |
76 | const struct net_device *in, const struct net_device *out, | 76 | const struct net_device *in, const struct net_device *out, |
77 | int (*okfn)(struct sk_buff *)) | 77 | int (*okfn)(struct sk_buff *)) |
78 | { | 78 | { |
79 | if (hook == NF_INET_LOCAL_OUT) | 79 | if (hook == NF_INET_LOCAL_OUT) |
80 | return ip6t_mangle_out(skb, out); | 80 | return ip6t_mangle_out(skb, out); |
81 | if (hook == NF_INET_POST_ROUTING) | 81 | if (hook == NF_INET_POST_ROUTING) |
82 | return ip6t_do_table(skb, hook, in, out, | 82 | return ip6t_do_table(skb, hook, in, out, |
83 | dev_net(out)->ipv6.ip6table_mangle); | 83 | dev_net(out)->ipv6.ip6table_mangle); |
84 | /* INPUT/FORWARD */ | 84 | /* INPUT/FORWARD */ |
85 | return ip6t_do_table(skb, hook, in, out, | 85 | return ip6t_do_table(skb, hook, in, out, |
86 | dev_net(in)->ipv6.ip6table_mangle); | 86 | dev_net(in)->ipv6.ip6table_mangle); |
87 | } | 87 | } |
88 | 88 | ||
89 | static struct nf_hook_ops *mangle_ops __read_mostly; | 89 | static struct nf_hook_ops *mangle_ops __read_mostly; |
90 | static int __net_init ip6table_mangle_net_init(struct net *net) | 90 | static int __net_init ip6table_mangle_net_init(struct net *net) |
91 | { | 91 | { |
92 | struct ip6t_replace *repl; | 92 | struct ip6t_replace *repl; |
93 | 93 | ||
94 | repl = ip6t_alloc_initial_table(&packet_mangler); | 94 | repl = ip6t_alloc_initial_table(&packet_mangler); |
95 | if (repl == NULL) | 95 | if (repl == NULL) |
96 | return -ENOMEM; | 96 | return -ENOMEM; |
97 | net->ipv6.ip6table_mangle = | 97 | net->ipv6.ip6table_mangle = |
98 | ip6t_register_table(net, &packet_mangler, repl); | 98 | ip6t_register_table(net, &packet_mangler, repl); |
99 | kfree(repl); | 99 | kfree(repl); |
100 | if (IS_ERR(net->ipv6.ip6table_mangle)) | 100 | if (IS_ERR(net->ipv6.ip6table_mangle)) |
101 | return PTR_ERR(net->ipv6.ip6table_mangle); | 101 | return PTR_ERR(net->ipv6.ip6table_mangle); |
102 | return 0; | 102 | return 0; |
103 | } | 103 | } |
104 | 104 | ||
105 | static void __net_exit ip6table_mangle_net_exit(struct net *net) | 105 | static void __net_exit ip6table_mangle_net_exit(struct net *net) |
106 | { | 106 | { |
107 | ip6t_unregister_table(net, net->ipv6.ip6table_mangle); | 107 | ip6t_unregister_table(net, net->ipv6.ip6table_mangle); |
108 | } | 108 | } |
109 | 109 | ||
110 | static struct pernet_operations ip6table_mangle_net_ops = { | 110 | static struct pernet_operations ip6table_mangle_net_ops = { |
111 | .init = ip6table_mangle_net_init, | 111 | .init = ip6table_mangle_net_init, |
112 | .exit = ip6table_mangle_net_exit, | 112 | .exit = ip6table_mangle_net_exit, |
113 | }; | 113 | }; |
114 | 114 | ||
115 | static int __init ip6table_mangle_init(void) | 115 | static int __init ip6table_mangle_init(void) |
116 | { | 116 | { |
117 | int ret; | 117 | int ret; |
118 | 118 | ||
119 | ret = register_pernet_subsys(&ip6table_mangle_net_ops); | 119 | ret = register_pernet_subsys(&ip6table_mangle_net_ops); |
120 | if (ret < 0) | 120 | if (ret < 0) |
121 | return ret; | 121 | return ret; |
122 | 122 | ||
123 | /* Register hooks */ | 123 | /* Register hooks */ |
124 | mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook); | 124 | mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook); |
125 | if (IS_ERR(mangle_ops)) { | 125 | if (IS_ERR(mangle_ops)) { |
126 | ret = PTR_ERR(mangle_ops); | 126 | ret = PTR_ERR(mangle_ops); |
127 | goto cleanup_table; | 127 | goto cleanup_table; |
128 | } | 128 | } |
129 | 129 | ||
130 | return ret; | 130 | return ret; |
131 | 131 | ||
132 | cleanup_table: | 132 | cleanup_table: |
133 | unregister_pernet_subsys(&ip6table_mangle_net_ops); | 133 | unregister_pernet_subsys(&ip6table_mangle_net_ops); |
134 | return ret; | 134 | return ret; |
135 | } | 135 | } |
136 | 136 | ||
/* Module exit: detach hooks first, then drop the per-netns tables. */
static void __exit ip6table_mangle_fini(void)
{
	xt_hook_unlink(&packet_mangler, mangle_ops);
	unregister_pernet_subsys(&ip6table_mangle_net_ops);
}

module_init(ip6table_mangle_init);
module_exit(ip6table_mangle_fini);
145 | 145 |
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
1 | /* | 1 | /* |
2 | * Copyright (C)2004 USAGI/WIDE Project | 2 | * Copyright (C)2004 USAGI/WIDE Project |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * Author: | 8 | * Author: |
9 | * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> | 9 | * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/ipv6.h> | 13 | #include <linux/ipv6.h> |
14 | #include <linux/in6.h> | 14 | #include <linux/in6.h> |
15 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
18 | #include <linux/icmp.h> | 18 | #include <linux/icmp.h> |
19 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
20 | #include <net/ipv6.h> | 20 | #include <net/ipv6.h> |
21 | #include <net/inet_frag.h> | 21 | #include <net/inet_frag.h> |
22 | 22 | ||
23 | #include <linux/netfilter_bridge.h> | 23 | #include <linux/netfilter_bridge.h> |
24 | #include <linux/netfilter_ipv6.h> | 24 | #include <linux/netfilter_ipv6.h> |
25 | #include <net/netfilter/nf_conntrack.h> | 25 | #include <net/netfilter/nf_conntrack.h> |
26 | #include <net/netfilter/nf_conntrack_helper.h> | 26 | #include <net/netfilter/nf_conntrack_helper.h> |
27 | #include <net/netfilter/nf_conntrack_l4proto.h> | 27 | #include <net/netfilter/nf_conntrack_l4proto.h> |
28 | #include <net/netfilter/nf_conntrack_l3proto.h> | 28 | #include <net/netfilter/nf_conntrack_l3proto.h> |
29 | #include <net/netfilter/nf_conntrack_core.h> | 29 | #include <net/netfilter/nf_conntrack_core.h> |
30 | #include <net/netfilter/nf_conntrack_zones.h> | 30 | #include <net/netfilter/nf_conntrack_zones.h> |
31 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> | 31 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
32 | #include <net/netfilter/nf_log.h> | 32 | #include <net/netfilter/nf_log.h> |
33 | 33 | ||
34 | static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 34 | static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
35 | struct nf_conntrack_tuple *tuple) | 35 | struct nf_conntrack_tuple *tuple) |
36 | { | 36 | { |
37 | const u_int32_t *ap; | 37 | const u_int32_t *ap; |
38 | u_int32_t _addrs[8]; | 38 | u_int32_t _addrs[8]; |
39 | 39 | ||
40 | ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), | 40 | ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), |
41 | sizeof(_addrs), _addrs); | 41 | sizeof(_addrs), _addrs); |
42 | if (ap == NULL) | 42 | if (ap == NULL) |
43 | return false; | 43 | return false; |
44 | 44 | ||
45 | memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); | 45 | memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); |
46 | memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); | 46 | memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); |
47 | 47 | ||
48 | return true; | 48 | return true; |
49 | } | 49 | } |
50 | 50 | ||
51 | static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, | 51 | static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, |
52 | const struct nf_conntrack_tuple *orig) | 52 | const struct nf_conntrack_tuple *orig) |
53 | { | 53 | { |
54 | memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6)); | 54 | memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6)); |
55 | memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6)); | 55 | memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6)); |
56 | 56 | ||
57 | return true; | 57 | return true; |
58 | } | 58 | } |
59 | 59 | ||
/* /proc display helper: print a tuple's addresses into the seq_file. */
static int ipv6_print_tuple(struct seq_file *s,
			    const struct nf_conntrack_tuple *tuple)
{
	return seq_printf(s, "src=%pI6 dst=%pI6 ",
			  tuple->src.u3.ip6, tuple->dst.u3.ip6);
}
66 | 66 | ||
/*
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
 *
 * This function parses (probably truncated) exthdr set "hdr"
 * of length "len". "nexthdrp" initially points to some place,
 * where type of the first header can be found.
 *
 * It skips all well-known exthdrs, and returns pointer to the start
 * of unparsable area i.e. the first header with unknown type.
 * if success, *nexthdr is updated by type/protocol of this header.
 *
 * NOTES: - it may return pointer pointing beyond end of packet,
 *	    if the last recognized header is truncated in the middle.
 *	  - if packet is truncated, so that all parsed headers are skipped,
 *	    it returns -1.
 *	  - if packet is fragmented, return pointer of the fragment header.
 *	  - ESP is unparsable for now and considered like
 *	    normal payload protocol.
 *	  - Note also special handling of AUTH header. Thanks to IPsec wizards.
 */

static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
				  u8 *nexthdrp, int len)
{
	u8 nexthdr = *nexthdrp;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		/* Not even an option header left: truncated packet. */
		if (len < (int)sizeof(struct ipv6_opt_hdr))
			return -1;
		/* NEXTHDR_NONE: no payload follows; stop here. */
		if (nexthdr == NEXTHDR_NONE)
			break;
		/* Fragment header: return its offset to the caller. */
		if (nexthdr == NEXTHDR_FRAGMENT)
			break;
		/* The length check above guarantees this copy succeeds. */
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		/* AUTH header length is counted in 4-byte units (RFC 4302). */
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdr.hdrlen+2)<<2;
		else
			hdrlen = ipv6_optlen(&hdr);

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
118 | 118 | ||
/*
 * Locate the layer-4 header: skip IPv6 extension headers and report
 * the upper-layer protocol number and its offset within the packet.
 *
 * Returns NF_ACCEPT on success (filling *dataoff / *protonum), or
 * -NF_ACCEPT if the packet is too mangled to parse.
 */
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    unsigned int *dataoff, u_int8_t *protonum)
{
	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
	unsigned char pnum;
	int protoff;

	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
			  &pnum, sizeof(pnum)) != 0) {
		pr_debug("ip6_conntrack_core: can't get nexthdr\n");
		return -NF_ACCEPT;
	}
	protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, skb->len - extoff);
	/*
	 * (protoff == skb->len) means the packet has no data except the
	 * IPv6 & ext headers, but it's tracked anyway. - YK
	 */
	if ((protoff < 0) || (protoff > skb->len)) {
		pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
		return -NF_ACCEPT;
	}

	*dataoff = protoff;
	*protonum = pnum;
	return NF_ACCEPT;
}
145 | 145 | ||
/*
 * POST_ROUTING/LOCAL_IN hook: run the connection's protocol helper (if
 * any) on the outgoing packet, then confirm the conntrack entry.
 *
 * Packets the helper rejects are logged and dropped with the helper's
 * verdict; everything else falls through to nf_conntrack_confirm().
 */
static unsigned int ipv6_confirm(unsigned int hooknum,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	const struct nf_conn_help *help;
	const struct nf_conntrack_helper *helper;
	enum ip_conntrack_info ctinfo;
	unsigned int ret, protoff;
	unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	unsigned char pnum = ipv6_hdr(skb)->nexthdr;


	/* This is where we call the helper: as the packet goes out. */
	ct = nf_ct_get(skb, &ctinfo);
	/* Untracked packets and related replies get no helper treatment. */
	if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
		goto out;

	help = nfct_help(ct);
	if (!help)
		goto out;
	/* rcu_read_lock()ed by nf_hook_slow */
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	/* Find the transport header; helpers need the real payload offset. */
	protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
					 skb->len - extoff);
	if (protoff > skb->len || pnum == NEXTHDR_FRAGMENT) {
		pr_debug("proto header not found\n");
		return NF_ACCEPT;
	}

	ret = helper->help(skb, protoff, ct, ctinfo);
	if (ret != NF_ACCEPT) {
		/* Log the drop so the helper rejection is visible. */
		nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
			      "nf_ct_%s: dropping packet", helper->name);
		return ret;
	}
out:
	/* We've seen it coming out the other side: confirm it */
	return nf_conntrack_confirm(skb);
}
191 | 191 | ||
192 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | 192 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, |
193 | struct sk_buff *skb) | 193 | struct sk_buff *skb) |
194 | { | 194 | { |
195 | u16 zone = NF_CT_DEFAULT_ZONE; | 195 | u16 zone = NF_CT_DEFAULT_ZONE; |
196 | 196 | ||
197 | if (skb->nfct) | 197 | if (skb->nfct) |
198 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); | 198 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); |
199 | 199 | ||
200 | #ifdef CONFIG_BRIDGE_NETFILTER | 200 | #ifdef CONFIG_BRIDGE_NETFILTER |
201 | if (skb->nf_bridge && | 201 | if (skb->nf_bridge && |
202 | skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) | 202 | skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) |
203 | return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone; | 203 | return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone; |
204 | #endif | 204 | #endif |
205 | if (hooknum == NF_INET_PRE_ROUTING) | 205 | if (hooknum == NF_INET_PRE_ROUTING) |
206 | return IP6_DEFRAG_CONNTRACK_IN + zone; | 206 | return IP6_DEFRAG_CONNTRACK_IN + zone; |
207 | else | 207 | else |
208 | return IP6_DEFRAG_CONNTRACK_OUT + zone; | 208 | return IP6_DEFRAG_CONNTRACK_OUT + zone; |
209 | 209 | ||
210 | } | 210 | } |
211 | 211 | ||
/*
 * Defragmentation hook: queue IPv6 fragments and reinject the packet
 * once reassembly completes.  Runs before conntrack proper.
 */
static unsigned int ipv6_defrag(unsigned int hooknum,
				struct sk_buff *skb,
				const struct net_device *in,
				const struct net_device *out,
				int (*okfn)(struct sk_buff *))
{
	struct sk_buff *reasm;

	/* Previously seen (loopback)? */
	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
		return NF_ACCEPT;

	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
	/* queued */
	if (reasm == NULL)
		return NF_STOLEN;

	/* error occurred or not fragmented */
	if (reasm == skb)
		return NF_ACCEPT;

	/* Reassembly finished: reinject the full packet into this hook. */
	nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
			   (struct net_device *)out, okfn);

	return NF_STOLEN;
}
238 | 238 | ||
/*
 * Common conntrack entry point for both PRE_ROUTING and LOCAL_OUT.
 * For fragments, tracking is done on the reassembled skb and the
 * resulting conntrack reference is shared back onto the fragment.
 */
static unsigned int __ipv6_conntrack_in(struct net *net,
					unsigned int hooknum,
					struct sk_buff *skb,
					int (*okfn)(struct sk_buff *))
{
	struct sk_buff *reasm = skb->nfct_reasm;

	/* This packet is fragmented and has reassembled packet. */
	if (reasm) {
		/* Reassembled packet isn't parsed yet ? */
		if (!reasm->nfct) {
			unsigned int ret;

			ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
			if (ret != NF_ACCEPT)
				return ret;
		}
		/* Copy the tracking state from the reassembled skb. */
		nf_conntrack_get(reasm->nfct);
		skb->nfct = reasm->nfct;
		skb->nfctinfo = reasm->nfctinfo;
		return NF_ACCEPT;
	}

	return nf_conntrack_in(net, PF_INET6, hooknum, skb);
}
264 | 264 | ||
/* PRE_ROUTING hook: track incoming packets in the input device's netns. */
static unsigned int ipv6_conntrack_in(unsigned int hooknum,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net *net = dev_net(in);

	return __ipv6_conntrack_in(net, hooknum, skb, okfn);
}
273 | 273 | ||
/* LOCAL_OUT hook: track locally generated packets in the output netns. */
static unsigned int ipv6_conntrack_local(unsigned int hooknum,
					 struct sk_buff *skb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
{
	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct ipv6hdr)) {
		if (net_ratelimit())
			pr_notice("ipv6_conntrack_local: packet too short\n");
		return NF_ACCEPT;
	}
	return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
}
288 | 288 | ||
/*
 * Hook registrations: defrag runs before conntrack on both input and
 * output paths; confirmation runs last on POST_ROUTING/LOCAL_IN.
 */
static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
	{
		.hook		= ipv6_defrag,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
	},
	{
		.hook		= ipv6_conntrack_in,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP6_PRI_CONNTRACK,
	},
	{
		.hook		= ipv6_conntrack_local,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP6_PRI_CONNTRACK,
	},
	{
		.hook		= ipv6_defrag,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
	},
	{
		.hook		= ipv6_confirm,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP6_PRI_LAST,
	},
	{
		.hook		= ipv6_confirm,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP6_PRI_LAST-1,
	},
};
333 | 333 | ||
334 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 334 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
335 | 335 | ||
336 | #include <linux/netfilter/nfnetlink.h> | 336 | #include <linux/netfilter/nfnetlink.h> |
337 | #include <linux/netfilter/nfnetlink_conntrack.h> | 337 | #include <linux/netfilter/nfnetlink_conntrack.h> |
338 | 338 | ||
/*
 * Dump a tuple's IPv6 addresses as netlink attributes.
 * Returns 0 on success, -1 if the skb ran out of room (NLA_PUT jumps
 * to nla_put_failure on overflow).
 */
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
		&tuple->src.u3.ip6);
	NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
		&tuple->dst.u3.ip6);
	return 0;

nla_put_failure:
	return -1;
}
351 | 351 | ||
/* Validation policy: both address attributes must be full 128-bit values. */
static const struct nla_policy ipv6_nla_policy[CTA_IP_MAX+1] = {
	[CTA_IP_V6_SRC]	= { .len = sizeof(u_int32_t)*4 },
	[CTA_IP_V6_DST]	= { .len = sizeof(u_int32_t)*4 },
};
356 | 356 | ||
357 | static int ipv6_nlattr_to_tuple(struct nlattr *tb[], | 357 | static int ipv6_nlattr_to_tuple(struct nlattr *tb[], |
358 | struct nf_conntrack_tuple *t) | 358 | struct nf_conntrack_tuple *t) |
359 | { | 359 | { |
360 | if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) | 360 | if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) |
361 | return -EINVAL; | 361 | return -EINVAL; |
362 | 362 | ||
363 | memcpy(&t->src.u3.ip6, nla_data(tb[CTA_IP_V6_SRC]), | 363 | memcpy(&t->src.u3.ip6, nla_data(tb[CTA_IP_V6_SRC]), |
364 | sizeof(u_int32_t) * 4); | 364 | sizeof(u_int32_t) * 4); |
365 | memcpy(&t->dst.u3.ip6, nla_data(tb[CTA_IP_V6_DST]), | 365 | memcpy(&t->dst.u3.ip6, nla_data(tb[CTA_IP_V6_DST]), |
366 | sizeof(u_int32_t) * 4); | 366 | sizeof(u_int32_t) * 4); |
367 | 367 | ||
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
/* Size (in bytes) of the netlink payload described by ipv6_nla_policy. */
static int ipv6_nlattr_tuple_size(void)
{
	return nla_policy_len(ipv6_nla_policy, CTA_IP_MAX + 1);
}
375 | #endif | 375 | #endif |
376 | 376 | ||
/* Layer-3 protocol descriptor registered with the conntrack core. */
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
	.l3proto		= PF_INET6,
	.name			= "ipv6",
	.pkt_to_tuple		= ipv6_pkt_to_tuple,
	.invert_tuple		= ipv6_invert_tuple,
	.print_tuple		= ipv6_print_tuple,
	.get_l4proto		= ipv6_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.tuple_to_nlattr	= ipv6_tuple_to_nlattr,
	.nlattr_tuple_size	= ipv6_nlattr_tuple_size,
	.nlattr_to_tuple	= ipv6_nlattr_to_tuple,
	.nla_policy		= ipv6_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
	.ctl_table_path		= nf_net_netfilter_sysctl_path,
	.ctl_table		= nf_ct_ipv6_sysctl_table,
#endif
	.me			= THIS_MODULE,
};

MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
400 | 400 | ||
/*
 * Module init: register, in order, the frag6 reassembly engine, the
 * TCP/UDP/ICMPv6 layer-4 trackers, the IPv6 layer-3 tracker, and
 * finally the netfilter hooks.  Each failure unwinds everything
 * registered before it via the goto chain below.
 */
static int __init nf_conntrack_l3proto_ipv6_init(void)
{
	int ret = 0;

	need_conntrack();

	ret = nf_ct_frag6_init();
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't initialize frag6.\n");
		return ret;
	}
	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't register tcp.\n");
		goto cleanup_frag6;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't register udp.\n");
		goto cleanup_tcp;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't register icmpv6.\n");
		goto cleanup_udp;
	}

	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't register ipv6\n");
		goto cleanup_icmpv6;
	}

	ret = nf_register_hooks(ipv6_conntrack_ops,
				ARRAY_SIZE(ipv6_conntrack_ops));
	if (ret < 0) {
		pr_err("nf_conntrack_ipv6: can't register pre-routing defrag "
		       "hook.\n");
		goto cleanup_ipv6;
	}
	return ret;

 cleanup_ipv6:
	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
 cleanup_icmpv6:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
 cleanup_udp:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
 cleanup_tcp:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
 cleanup_frag6:
	nf_ct_frag6_cleanup();
	return ret;
}
457 | 457 | ||
/* Module exit: unwind init registrations in exact reverse order. */
static void __exit nf_conntrack_l3proto_ipv6_fini(void)
{
	/* Let in-flight RCU readers of hook/helper pointers drain first. */
	synchronize_net();
	nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
	nf_ct_frag6_cleanup();
}

module_init(nf_conntrack_l3proto_ipv6_init);
module_exit(nf_conntrack_l3proto_ipv6_fini);
471 | 471 |
net/netfilter/nf_conntrack_amanda.c
1 | /* Amanda extension for IP connection tracking | 1 | /* Amanda extension for IP connection tracking |
2 | * | 2 | * |
3 | * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca> | 3 | * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca> |
4 | * based on HW's ip_conntrack_irc.c as well as other modules | 4 | * based on HW's ip_conntrack_irc.c as well as other modules |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/textsearch.h> | 14 | #include <linux/textsearch.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/in.h> | 16 | #include <linux/in.h> |
17 | #include <linux/udp.h> | 17 | #include <linux/udp.h> |
18 | #include <linux/netfilter.h> | 18 | #include <linux/netfilter.h> |
19 | #include <linux/gfp.h> | 19 | #include <linux/gfp.h> |
20 | 20 | ||
21 | #include <net/netfilter/nf_conntrack.h> | 21 | #include <net/netfilter/nf_conntrack.h> |
22 | #include <net/netfilter/nf_conntrack_expect.h> | 22 | #include <net/netfilter/nf_conntrack_expect.h> |
23 | #include <net/netfilter/nf_conntrack_ecache.h> | 23 | #include <net/netfilter/nf_conntrack_ecache.h> |
24 | #include <net/netfilter/nf_conntrack_helper.h> | 24 | #include <net/netfilter/nf_conntrack_helper.h> |
25 | #include <linux/netfilter/nf_conntrack_amanda.h> | 25 | #include <linux/netfilter/nf_conntrack_amanda.h> |
26 | 26 | ||
27 | static unsigned int master_timeout __read_mostly = 300; | 27 | static unsigned int master_timeout __read_mostly = 300; |
28 | static char *ts_algo = "kmp"; | 28 | static char *ts_algo = "kmp"; |
29 | 29 | ||
30 | MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); | 30 | MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); |
31 | MODULE_DESCRIPTION("Amanda connection tracking module"); | 31 | MODULE_DESCRIPTION("Amanda connection tracking module"); |
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | MODULE_ALIAS("ip_conntrack_amanda"); | 33 | MODULE_ALIAS("ip_conntrack_amanda"); |
34 | MODULE_ALIAS_NFCT_HELPER("amanda"); | 34 | MODULE_ALIAS_NFCT_HELPER("amanda"); |
35 | 35 | ||
36 | module_param(master_timeout, uint, 0600); | 36 | module_param(master_timeout, uint, 0600); |
37 | MODULE_PARM_DESC(master_timeout, "timeout for the master connection"); | 37 | MODULE_PARM_DESC(master_timeout, "timeout for the master connection"); |
38 | module_param(ts_algo, charp, 0400); | 38 | module_param(ts_algo, charp, 0400); |
39 | MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)"); | 39 | MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)"); |
40 | 40 | ||
41 | unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, | 41 | unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, |
42 | enum ip_conntrack_info ctinfo, | 42 | enum ip_conntrack_info ctinfo, |
43 | unsigned int matchoff, | 43 | unsigned int matchoff, |
44 | unsigned int matchlen, | 44 | unsigned int matchlen, |
45 | struct nf_conntrack_expect *exp) | 45 | struct nf_conntrack_expect *exp) |
46 | __read_mostly; | 46 | __read_mostly; |
47 | EXPORT_SYMBOL_GPL(nf_nat_amanda_hook); | 47 | EXPORT_SYMBOL_GPL(nf_nat_amanda_hook); |
48 | 48 | ||
49 | enum amanda_strings { | 49 | enum amanda_strings { |
50 | SEARCH_CONNECT, | 50 | SEARCH_CONNECT, |
51 | SEARCH_NEWLINE, | 51 | SEARCH_NEWLINE, |
52 | SEARCH_DATA, | 52 | SEARCH_DATA, |
53 | SEARCH_MESG, | 53 | SEARCH_MESG, |
54 | SEARCH_INDEX, | 54 | SEARCH_INDEX, |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static struct { | 57 | static struct { |
58 | const char *string; | 58 | const char *string; |
59 | size_t len; | 59 | size_t len; |
60 | struct ts_config *ts; | 60 | struct ts_config *ts; |
61 | } search[] __read_mostly = { | 61 | } search[] __read_mostly = { |
62 | [SEARCH_CONNECT] = { | 62 | [SEARCH_CONNECT] = { |
63 | .string = "CONNECT ", | 63 | .string = "CONNECT ", |
64 | .len = 8, | 64 | .len = 8, |
65 | }, | 65 | }, |
66 | [SEARCH_NEWLINE] = { | 66 | [SEARCH_NEWLINE] = { |
67 | .string = "\n", | 67 | .string = "\n", |
68 | .len = 1, | 68 | .len = 1, |
69 | }, | 69 | }, |
70 | [SEARCH_DATA] = { | 70 | [SEARCH_DATA] = { |
71 | .string = "DATA ", | 71 | .string = "DATA ", |
72 | .len = 5, | 72 | .len = 5, |
73 | }, | 73 | }, |
74 | [SEARCH_MESG] = { | 74 | [SEARCH_MESG] = { |
75 | .string = "MESG ", | 75 | .string = "MESG ", |
76 | .len = 5, | 76 | .len = 5, |
77 | }, | 77 | }, |
78 | [SEARCH_INDEX] = { | 78 | [SEARCH_INDEX] = { |
79 | .string = "INDEX ", | 79 | .string = "INDEX ", |
80 | .len = 6, | 80 | .len = 6, |
81 | }, | 81 | }, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static int amanda_help(struct sk_buff *skb, | 84 | static int amanda_help(struct sk_buff *skb, |
85 | unsigned int protoff, | 85 | unsigned int protoff, |
86 | struct nf_conn *ct, | 86 | struct nf_conn *ct, |
87 | enum ip_conntrack_info ctinfo) | 87 | enum ip_conntrack_info ctinfo) |
88 | { | 88 | { |
89 | struct ts_state ts; | 89 | struct ts_state ts; |
90 | struct nf_conntrack_expect *exp; | 90 | struct nf_conntrack_expect *exp; |
91 | struct nf_conntrack_tuple *tuple; | 91 | struct nf_conntrack_tuple *tuple; |
92 | unsigned int dataoff, start, stop, off, i; | 92 | unsigned int dataoff, start, stop, off, i; |
93 | char pbuf[sizeof("65535")], *tmp; | 93 | char pbuf[sizeof("65535")], *tmp; |
94 | u_int16_t len; | 94 | u_int16_t len; |
95 | __be16 port; | 95 | __be16 port; |
96 | int ret = NF_ACCEPT; | 96 | int ret = NF_ACCEPT; |
97 | typeof(nf_nat_amanda_hook) nf_nat_amanda; | 97 | typeof(nf_nat_amanda_hook) nf_nat_amanda; |
98 | 98 | ||
99 | /* Only look at packets from the Amanda server */ | 99 | /* Only look at packets from the Amanda server */ |
100 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | 100 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
101 | return NF_ACCEPT; | 101 | return NF_ACCEPT; |
102 | 102 | ||
103 | /* increase the UDP timeout of the master connection as replies from | 103 | /* increase the UDP timeout of the master connection as replies from |
104 | * Amanda clients to the server can be quite delayed */ | 104 | * Amanda clients to the server can be quite delayed */ |
105 | nf_ct_refresh(ct, skb, master_timeout * HZ); | 105 | nf_ct_refresh(ct, skb, master_timeout * HZ); |
106 | 106 | ||
107 | /* No data? */ | 107 | /* No data? */ |
108 | dataoff = protoff + sizeof(struct udphdr); | 108 | dataoff = protoff + sizeof(struct udphdr); |
109 | if (dataoff >= skb->len) { | 109 | if (dataoff >= skb->len) { |
110 | if (net_ratelimit()) | 110 | if (net_ratelimit()) |
111 | printk("amanda_help: skblen = %u\n", skb->len); | 111 | printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len); |
112 | return NF_ACCEPT; | 112 | return NF_ACCEPT; |
113 | } | 113 | } |
114 | 114 | ||
115 | memset(&ts, 0, sizeof(ts)); | 115 | memset(&ts, 0, sizeof(ts)); |
116 | start = skb_find_text(skb, dataoff, skb->len, | 116 | start = skb_find_text(skb, dataoff, skb->len, |
117 | search[SEARCH_CONNECT].ts, &ts); | 117 | search[SEARCH_CONNECT].ts, &ts); |
118 | if (start == UINT_MAX) | 118 | if (start == UINT_MAX) |
119 | goto out; | 119 | goto out; |
120 | start += dataoff + search[SEARCH_CONNECT].len; | 120 | start += dataoff + search[SEARCH_CONNECT].len; |
121 | 121 | ||
122 | memset(&ts, 0, sizeof(ts)); | 122 | memset(&ts, 0, sizeof(ts)); |
123 | stop = skb_find_text(skb, start, skb->len, | 123 | stop = skb_find_text(skb, start, skb->len, |
124 | search[SEARCH_NEWLINE].ts, &ts); | 124 | search[SEARCH_NEWLINE].ts, &ts); |
125 | if (stop == UINT_MAX) | 125 | if (stop == UINT_MAX) |
126 | goto out; | 126 | goto out; |
127 | stop += start; | 127 | stop += start; |
128 | 128 | ||
129 | for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { | 129 | for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { |
130 | memset(&ts, 0, sizeof(ts)); | 130 | memset(&ts, 0, sizeof(ts)); |
131 | off = skb_find_text(skb, start, stop, search[i].ts, &ts); | 131 | off = skb_find_text(skb, start, stop, search[i].ts, &ts); |
132 | if (off == UINT_MAX) | 132 | if (off == UINT_MAX) |
133 | continue; | 133 | continue; |
134 | off += start + search[i].len; | 134 | off += start + search[i].len; |
135 | 135 | ||
136 | len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off); | 136 | len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off); |
137 | if (skb_copy_bits(skb, off, pbuf, len)) | 137 | if (skb_copy_bits(skb, off, pbuf, len)) |
138 | break; | 138 | break; |
139 | pbuf[len] = '\0'; | 139 | pbuf[len] = '\0'; |
140 | 140 | ||
141 | port = htons(simple_strtoul(pbuf, &tmp, 10)); | 141 | port = htons(simple_strtoul(pbuf, &tmp, 10)); |
142 | len = tmp - pbuf; | 142 | len = tmp - pbuf; |
143 | if (port == 0 || len > 5) | 143 | if (port == 0 || len > 5) |
144 | break; | 144 | break; |
145 | 145 | ||
146 | exp = nf_ct_expect_alloc(ct); | 146 | exp = nf_ct_expect_alloc(ct); |
147 | if (exp == NULL) { | 147 | if (exp == NULL) { |
148 | ret = NF_DROP; | 148 | ret = NF_DROP; |
149 | goto out; | 149 | goto out; |
150 | } | 150 | } |
151 | tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 151 | tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
152 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, | 152 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, |
153 | nf_ct_l3num(ct), | 153 | nf_ct_l3num(ct), |
154 | &tuple->src.u3, &tuple->dst.u3, | 154 | &tuple->src.u3, &tuple->dst.u3, |
155 | IPPROTO_TCP, NULL, &port); | 155 | IPPROTO_TCP, NULL, &port); |
156 | 156 | ||
157 | nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); | 157 | nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); |
158 | if (nf_nat_amanda && ct->status & IPS_NAT_MASK) | 158 | if (nf_nat_amanda && ct->status & IPS_NAT_MASK) |
159 | ret = nf_nat_amanda(skb, ctinfo, off - dataoff, | 159 | ret = nf_nat_amanda(skb, ctinfo, off - dataoff, |
160 | len, exp); | 160 | len, exp); |
161 | else if (nf_ct_expect_related(exp) != 0) | 161 | else if (nf_ct_expect_related(exp) != 0) |
162 | ret = NF_DROP; | 162 | ret = NF_DROP; |
163 | nf_ct_expect_put(exp); | 163 | nf_ct_expect_put(exp); |
164 | } | 164 | } |
165 | 165 | ||
166 | out: | 166 | out: |
167 | return ret; | 167 | return ret; |
168 | } | 168 | } |
169 | 169 | ||
170 | static const struct nf_conntrack_expect_policy amanda_exp_policy = { | 170 | static const struct nf_conntrack_expect_policy amanda_exp_policy = { |
171 | .max_expected = 3, | 171 | .max_expected = 3, |
172 | .timeout = 180, | 172 | .timeout = 180, |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { | 175 | static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { |
176 | { | 176 | { |
177 | .name = "amanda", | 177 | .name = "amanda", |
178 | .me = THIS_MODULE, | 178 | .me = THIS_MODULE, |
179 | .help = amanda_help, | 179 | .help = amanda_help, |
180 | .tuple.src.l3num = AF_INET, | 180 | .tuple.src.l3num = AF_INET, |
181 | .tuple.src.u.udp.port = cpu_to_be16(10080), | 181 | .tuple.src.u.udp.port = cpu_to_be16(10080), |
182 | .tuple.dst.protonum = IPPROTO_UDP, | 182 | .tuple.dst.protonum = IPPROTO_UDP, |
183 | .expect_policy = &amanda_exp_policy, | 183 | .expect_policy = &amanda_exp_policy, |
184 | }, | 184 | }, |
185 | { | 185 | { |
186 | .name = "amanda", | 186 | .name = "amanda", |
187 | .me = THIS_MODULE, | 187 | .me = THIS_MODULE, |
188 | .help = amanda_help, | 188 | .help = amanda_help, |
189 | .tuple.src.l3num = AF_INET6, | 189 | .tuple.src.l3num = AF_INET6, |
190 | .tuple.src.u.udp.port = cpu_to_be16(10080), | 190 | .tuple.src.u.udp.port = cpu_to_be16(10080), |
191 | .tuple.dst.protonum = IPPROTO_UDP, | 191 | .tuple.dst.protonum = IPPROTO_UDP, |
192 | .expect_policy = &amanda_exp_policy, | 192 | .expect_policy = &amanda_exp_policy, |
193 | }, | 193 | }, |
194 | }; | 194 | }; |
195 | 195 | ||
196 | static void __exit nf_conntrack_amanda_fini(void) | 196 | static void __exit nf_conntrack_amanda_fini(void) |
197 | { | 197 | { |
198 | int i; | 198 | int i; |
199 | 199 | ||
200 | nf_conntrack_helper_unregister(&amanda_helper[0]); | 200 | nf_conntrack_helper_unregister(&amanda_helper[0]); |
201 | nf_conntrack_helper_unregister(&amanda_helper[1]); | 201 | nf_conntrack_helper_unregister(&amanda_helper[1]); |
202 | for (i = 0; i < ARRAY_SIZE(search); i++) | 202 | for (i = 0; i < ARRAY_SIZE(search); i++) |
203 | textsearch_destroy(search[i].ts); | 203 | textsearch_destroy(search[i].ts); |
204 | } | 204 | } |
205 | 205 | ||
206 | static int __init nf_conntrack_amanda_init(void) | 206 | static int __init nf_conntrack_amanda_init(void) |
207 | { | 207 | { |
208 | int ret, i; | 208 | int ret, i; |
209 | 209 | ||
210 | for (i = 0; i < ARRAY_SIZE(search); i++) { | 210 | for (i = 0; i < ARRAY_SIZE(search); i++) { |
211 | search[i].ts = textsearch_prepare(ts_algo, search[i].string, | 211 | search[i].ts = textsearch_prepare(ts_algo, search[i].string, |
212 | search[i].len, | 212 | search[i].len, |
213 | GFP_KERNEL, TS_AUTOLOAD); | 213 | GFP_KERNEL, TS_AUTOLOAD); |
214 | if (IS_ERR(search[i].ts)) { | 214 | if (IS_ERR(search[i].ts)) { |
215 | ret = PTR_ERR(search[i].ts); | 215 | ret = PTR_ERR(search[i].ts); |
216 | goto err1; | 216 | goto err1; |
217 | } | 217 | } |
218 | } | 218 | } |
219 | ret = nf_conntrack_helper_register(&amanda_helper[0]); | 219 | ret = nf_conntrack_helper_register(&amanda_helper[0]); |
220 | if (ret < 0) | 220 | if (ret < 0) |
221 | goto err1; | 221 | goto err1; |
222 | ret = nf_conntrack_helper_register(&amanda_helper[1]); | 222 | ret = nf_conntrack_helper_register(&amanda_helper[1]); |
223 | if (ret < 0) | 223 | if (ret < 0) |
224 | goto err2; | 224 | goto err2; |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | err2: | 227 | err2: |
228 | nf_conntrack_helper_unregister(&amanda_helper[0]); | 228 | nf_conntrack_helper_unregister(&amanda_helper[0]); |
229 | err1: | 229 | err1: |
230 | while (--i >= 0) | 230 | while (--i >= 0) |
231 | textsearch_destroy(search[i].ts); | 231 | textsearch_destroy(search[i].ts); |
232 | 232 | ||
233 | return ret; | 233 | return ret; |
234 | } | 234 | } |
235 | 235 | ||
236 | module_init(nf_conntrack_amanda_init); | 236 | module_init(nf_conntrack_amanda_init); |
237 | module_exit(nf_conntrack_amanda_fini); | 237 | module_exit(nf_conntrack_amanda_fini); |
238 | 238 |
net/netfilter/nf_conntrack_core.c
1 | /* Connection state tracking for netfilter. This is separated from, | 1 | /* Connection state tracking for netfilter. This is separated from, |
2 | but required by, the NAT layer; it can also be used by an iptables | 2 | but required by, the NAT layer; it can also be used by an iptables |
3 | extension. */ | 3 | extension. */ |
4 | 4 | ||
5 | /* (C) 1999-2001 Paul `Rusty' Russell | 5 | /* (C) 1999-2001 Paul `Rusty' Russell |
6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | 6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
7 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | 7 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/random.h> | 23 | #include <linux/random.h> |
24 | #include <linux/jhash.h> | 24 | #include <linux/jhash.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/percpu.h> | 26 | #include <linux/percpu.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/notifier.h> | 28 | #include <linux/notifier.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/socket.h> | 31 | #include <linux/socket.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/nsproxy.h> | 33 | #include <linux/nsproxy.h> |
34 | #include <linux/rculist_nulls.h> | 34 | #include <linux/rculist_nulls.h> |
35 | 35 | ||
36 | #include <net/netfilter/nf_conntrack.h> | 36 | #include <net/netfilter/nf_conntrack.h> |
37 | #include <net/netfilter/nf_conntrack_l3proto.h> | 37 | #include <net/netfilter/nf_conntrack_l3proto.h> |
38 | #include <net/netfilter/nf_conntrack_l4proto.h> | 38 | #include <net/netfilter/nf_conntrack_l4proto.h> |
39 | #include <net/netfilter/nf_conntrack_expect.h> | 39 | #include <net/netfilter/nf_conntrack_expect.h> |
40 | #include <net/netfilter/nf_conntrack_helper.h> | 40 | #include <net/netfilter/nf_conntrack_helper.h> |
41 | #include <net/netfilter/nf_conntrack_core.h> | 41 | #include <net/netfilter/nf_conntrack_core.h> |
42 | #include <net/netfilter/nf_conntrack_extend.h> | 42 | #include <net/netfilter/nf_conntrack_extend.h> |
43 | #include <net/netfilter/nf_conntrack_acct.h> | 43 | #include <net/netfilter/nf_conntrack_acct.h> |
44 | #include <net/netfilter/nf_conntrack_ecache.h> | 44 | #include <net/netfilter/nf_conntrack_ecache.h> |
45 | #include <net/netfilter/nf_conntrack_zones.h> | 45 | #include <net/netfilter/nf_conntrack_zones.h> |
46 | #include <net/netfilter/nf_nat.h> | 46 | #include <net/netfilter/nf_nat.h> |
47 | #include <net/netfilter/nf_nat_core.h> | 47 | #include <net/netfilter/nf_nat_core.h> |
48 | 48 | ||
49 | #define NF_CONNTRACK_VERSION "0.5.0" | 49 | #define NF_CONNTRACK_VERSION "0.5.0" |
50 | 50 | ||
51 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, | 51 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, |
52 | enum nf_nat_manip_type manip, | 52 | enum nf_nat_manip_type manip, |
53 | const struct nlattr *attr) __read_mostly; | 53 | const struct nlattr *attr) __read_mostly; |
54 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); | 54 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); |
55 | 55 | ||
56 | DEFINE_SPINLOCK(nf_conntrack_lock); | 56 | DEFINE_SPINLOCK(nf_conntrack_lock); |
57 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); | 57 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); |
58 | 58 | ||
59 | unsigned int nf_conntrack_htable_size __read_mostly; | 59 | unsigned int nf_conntrack_htable_size __read_mostly; |
60 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); | 60 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); |
61 | 61 | ||
62 | unsigned int nf_conntrack_max __read_mostly; | 62 | unsigned int nf_conntrack_max __read_mostly; |
63 | EXPORT_SYMBOL_GPL(nf_conntrack_max); | 63 | EXPORT_SYMBOL_GPL(nf_conntrack_max); |
64 | 64 | ||
65 | struct nf_conn nf_conntrack_untracked __read_mostly; | 65 | struct nf_conn nf_conntrack_untracked __read_mostly; |
66 | EXPORT_SYMBOL_GPL(nf_conntrack_untracked); | 66 | EXPORT_SYMBOL_GPL(nf_conntrack_untracked); |
67 | 67 | ||
68 | static int nf_conntrack_hash_rnd_initted; | 68 | static int nf_conntrack_hash_rnd_initted; |
69 | static unsigned int nf_conntrack_hash_rnd; | 69 | static unsigned int nf_conntrack_hash_rnd; |
70 | 70 | ||
71 | static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, | 71 | static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, |
72 | u16 zone, unsigned int size, unsigned int rnd) | 72 | u16 zone, unsigned int size, unsigned int rnd) |
73 | { | 73 | { |
74 | unsigned int n; | 74 | unsigned int n; |
75 | u_int32_t h; | 75 | u_int32_t h; |
76 | 76 | ||
77 | /* The direction must be ignored, so we hash everything up to the | 77 | /* The direction must be ignored, so we hash everything up to the |
78 | * destination ports (which is a multiple of 4) and treat the last | 78 | * destination ports (which is a multiple of 4) and treat the last |
79 | * three bytes manually. | 79 | * three bytes manually. |
80 | */ | 80 | */ |
81 | n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); | 81 | n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); |
82 | h = jhash2((u32 *)tuple, n, | 82 | h = jhash2((u32 *)tuple, n, |
83 | zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) | | 83 | zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) | |
84 | tuple->dst.protonum)); | 84 | tuple->dst.protonum)); |
85 | 85 | ||
86 | return ((u64)h * size) >> 32; | 86 | return ((u64)h * size) >> 32; |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, | 89 | static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, |
90 | const struct nf_conntrack_tuple *tuple) | 90 | const struct nf_conntrack_tuple *tuple) |
91 | { | 91 | { |
92 | return __hash_conntrack(tuple, zone, net->ct.htable_size, | 92 | return __hash_conntrack(tuple, zone, net->ct.htable_size, |
93 | nf_conntrack_hash_rnd); | 93 | nf_conntrack_hash_rnd); |
94 | } | 94 | } |
95 | 95 | ||
96 | bool | 96 | bool |
97 | nf_ct_get_tuple(const struct sk_buff *skb, | 97 | nf_ct_get_tuple(const struct sk_buff *skb, |
98 | unsigned int nhoff, | 98 | unsigned int nhoff, |
99 | unsigned int dataoff, | 99 | unsigned int dataoff, |
100 | u_int16_t l3num, | 100 | u_int16_t l3num, |
101 | u_int8_t protonum, | 101 | u_int8_t protonum, |
102 | struct nf_conntrack_tuple *tuple, | 102 | struct nf_conntrack_tuple *tuple, |
103 | const struct nf_conntrack_l3proto *l3proto, | 103 | const struct nf_conntrack_l3proto *l3proto, |
104 | const struct nf_conntrack_l4proto *l4proto) | 104 | const struct nf_conntrack_l4proto *l4proto) |
105 | { | 105 | { |
106 | memset(tuple, 0, sizeof(*tuple)); | 106 | memset(tuple, 0, sizeof(*tuple)); |
107 | 107 | ||
108 | tuple->src.l3num = l3num; | 108 | tuple->src.l3num = l3num; |
109 | if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) | 109 | if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) |
110 | return false; | 110 | return false; |
111 | 111 | ||
112 | tuple->dst.protonum = protonum; | 112 | tuple->dst.protonum = protonum; |
113 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; | 113 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; |
114 | 114 | ||
115 | return l4proto->pkt_to_tuple(skb, dataoff, tuple); | 115 | return l4proto->pkt_to_tuple(skb, dataoff, tuple); |
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); | 117 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); |
118 | 118 | ||
119 | bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, | 119 | bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, |
120 | u_int16_t l3num, struct nf_conntrack_tuple *tuple) | 120 | u_int16_t l3num, struct nf_conntrack_tuple *tuple) |
121 | { | 121 | { |
122 | struct nf_conntrack_l3proto *l3proto; | 122 | struct nf_conntrack_l3proto *l3proto; |
123 | struct nf_conntrack_l4proto *l4proto; | 123 | struct nf_conntrack_l4proto *l4proto; |
124 | unsigned int protoff; | 124 | unsigned int protoff; |
125 | u_int8_t protonum; | 125 | u_int8_t protonum; |
126 | int ret; | 126 | int ret; |
127 | 127 | ||
128 | rcu_read_lock(); | 128 | rcu_read_lock(); |
129 | 129 | ||
130 | l3proto = __nf_ct_l3proto_find(l3num); | 130 | l3proto = __nf_ct_l3proto_find(l3num); |
131 | ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); | 131 | ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); |
132 | if (ret != NF_ACCEPT) { | 132 | if (ret != NF_ACCEPT) { |
133 | rcu_read_unlock(); | 133 | rcu_read_unlock(); |
134 | return false; | 134 | return false; |
135 | } | 135 | } |
136 | 136 | ||
137 | l4proto = __nf_ct_l4proto_find(l3num, protonum); | 137 | l4proto = __nf_ct_l4proto_find(l3num, protonum); |
138 | 138 | ||
139 | ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, | 139 | ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, |
140 | l3proto, l4proto); | 140 | l3proto, l4proto); |
141 | 141 | ||
142 | rcu_read_unlock(); | 142 | rcu_read_unlock(); |
143 | return ret; | 143 | return ret; |
144 | } | 144 | } |
145 | EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); | 145 | EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); |
146 | 146 | ||
147 | bool | 147 | bool |
148 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | 148 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, |
149 | const struct nf_conntrack_tuple *orig, | 149 | const struct nf_conntrack_tuple *orig, |
150 | const struct nf_conntrack_l3proto *l3proto, | 150 | const struct nf_conntrack_l3proto *l3proto, |
151 | const struct nf_conntrack_l4proto *l4proto) | 151 | const struct nf_conntrack_l4proto *l4proto) |
152 | { | 152 | { |
153 | memset(inverse, 0, sizeof(*inverse)); | 153 | memset(inverse, 0, sizeof(*inverse)); |
154 | 154 | ||
155 | inverse->src.l3num = orig->src.l3num; | 155 | inverse->src.l3num = orig->src.l3num; |
156 | if (l3proto->invert_tuple(inverse, orig) == 0) | 156 | if (l3proto->invert_tuple(inverse, orig) == 0) |
157 | return false; | 157 | return false; |
158 | 158 | ||
159 | inverse->dst.dir = !orig->dst.dir; | 159 | inverse->dst.dir = !orig->dst.dir; |
160 | 160 | ||
161 | inverse->dst.protonum = orig->dst.protonum; | 161 | inverse->dst.protonum = orig->dst.protonum; |
162 | return l4proto->invert_tuple(inverse, orig); | 162 | return l4proto->invert_tuple(inverse, orig); |
163 | } | 163 | } |
164 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); | 164 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); |
165 | 165 | ||
166 | static void | 166 | static void |
167 | clean_from_lists(struct nf_conn *ct) | 167 | clean_from_lists(struct nf_conn *ct) |
168 | { | 168 | { |
169 | pr_debug("clean_from_lists(%p)\n", ct); | 169 | pr_debug("clean_from_lists(%p)\n", ct); |
170 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | 170 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); |
171 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); | 171 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); |
172 | 172 | ||
173 | /* Destroy all pending expectations */ | 173 | /* Destroy all pending expectations */ |
174 | nf_ct_remove_expectations(ct); | 174 | nf_ct_remove_expectations(ct); |
175 | } | 175 | } |
176 | 176 | ||
177 | static void | 177 | static void |
178 | destroy_conntrack(struct nf_conntrack *nfct) | 178 | destroy_conntrack(struct nf_conntrack *nfct) |
179 | { | 179 | { |
180 | struct nf_conn *ct = (struct nf_conn *)nfct; | 180 | struct nf_conn *ct = (struct nf_conn *)nfct; |
181 | struct net *net = nf_ct_net(ct); | 181 | struct net *net = nf_ct_net(ct); |
182 | struct nf_conntrack_l4proto *l4proto; | 182 | struct nf_conntrack_l4proto *l4proto; |
183 | 183 | ||
184 | pr_debug("destroy_conntrack(%p)\n", ct); | 184 | pr_debug("destroy_conntrack(%p)\n", ct); |
185 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); | 185 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); |
186 | NF_CT_ASSERT(!timer_pending(&ct->timeout)); | 186 | NF_CT_ASSERT(!timer_pending(&ct->timeout)); |
187 | 187 | ||
188 | /* To make sure we don't get any weird locking issues here: | 188 | /* To make sure we don't get any weird locking issues here: |
189 | * destroy_conntrack() MUST NOT be called with a write lock | 189 | * destroy_conntrack() MUST NOT be called with a write lock |
190 | * to nf_conntrack_lock!!! -HW */ | 190 | * to nf_conntrack_lock!!! -HW */ |
191 | rcu_read_lock(); | 191 | rcu_read_lock(); |
192 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); | 192 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); |
193 | if (l4proto && l4proto->destroy) | 193 | if (l4proto && l4proto->destroy) |
194 | l4proto->destroy(ct); | 194 | l4proto->destroy(ct); |
195 | 195 | ||
196 | rcu_read_unlock(); | 196 | rcu_read_unlock(); |
197 | 197 | ||
198 | spin_lock_bh(&nf_conntrack_lock); | 198 | spin_lock_bh(&nf_conntrack_lock); |
199 | /* Expectations will have been removed in clean_from_lists, | 199 | /* Expectations will have been removed in clean_from_lists, |
200 | * except TFTP can create an expectation on the first packet, | 200 | * except TFTP can create an expectation on the first packet, |
201 | * before connection is in the list, so we need to clean here, | 201 | * before connection is in the list, so we need to clean here, |
202 | * too. */ | 202 | * too. */ |
203 | nf_ct_remove_expectations(ct); | 203 | nf_ct_remove_expectations(ct); |
204 | 204 | ||
205 | /* We overload first tuple to link into unconfirmed list. */ | 205 | /* We overload first tuple to link into unconfirmed list. */ |
206 | if (!nf_ct_is_confirmed(ct)) { | 206 | if (!nf_ct_is_confirmed(ct)) { |
207 | BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); | 207 | BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); |
208 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | 208 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); |
209 | } | 209 | } |
210 | 210 | ||
211 | NF_CT_STAT_INC(net, delete); | 211 | NF_CT_STAT_INC(net, delete); |
212 | spin_unlock_bh(&nf_conntrack_lock); | 212 | spin_unlock_bh(&nf_conntrack_lock); |
213 | 213 | ||
214 | if (ct->master) | 214 | if (ct->master) |
215 | nf_ct_put(ct->master); | 215 | nf_ct_put(ct->master); |
216 | 216 | ||
217 | pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); | 217 | pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); |
218 | nf_conntrack_free(ct); | 218 | nf_conntrack_free(ct); |
219 | } | 219 | } |
220 | 220 | ||
/* Detach @ct from the conntrack hash table: destroy its helper binding,
 * then unlink it under nf_conntrack_lock.  Does not free the conntrack
 * itself; that happens when the last reference is dropped. */
void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
234 | 234 | ||
/* Timer callback for conntracks parked on the dying list (see
 * nf_ct_insert_dying_list): retry delivering the IPCT_DESTROY event.
 * If delivery fails again, re-arm the timer with a random backoff;
 * on success, mark the conntrack dying, unhash it and drop the
 * reference the dying list held. */
static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}
254 | 254 | ||
/* Park @ct on the per-netns dying list so that delivery of its
 * IPCT_DESTROY event can be retried from a timer (death_by_event).
 * The ORIGINAL-direction tuplehash node is reused as the list link,
 * mirroring how the unconfirmed list overloads the same node. */
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
271 | 271 | ||
/* Timer callback fired when a conntrack's timeout expires.  Tries to
 * deliver the IPCT_DESTROY event; if delivery fails, the conntrack is
 * moved to the dying list (keeping its reference) so delivery can be
 * retried later.  Otherwise it is unhashed and the timer's reference
 * dropped. */
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}
287 | 287 | ||
/*
 * Lockless lookup of the tuple hash entry matching @tuple in @zone,
 * using RCU-protected hlist_nulls chains.
 *
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 *   OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);
331 | 331 | ||
/* Find a connection corresponding to a tuple.
 *
 * Unlike __nf_conntrack_find(), this returns the entry with a reference
 * taken on the owning conntrack, and revalidates the tuple/zone after
 * grabbing the reference (the SLAB_DESTROY_BY_RCU cache may recycle an
 * object under us); on a mismatch the reference is dropped and the
 * lookup restarted. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = __nf_conntrack_find(net, zone, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;	/* dying or already freed: treat as not found */
		else {
			/* recheck after taking the reference; the object may
			 * have been recycled for a different tuple/zone */
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
361 | 361 | ||
362 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, | 362 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, |
363 | unsigned int hash, | 363 | unsigned int hash, |
364 | unsigned int repl_hash) | 364 | unsigned int repl_hash) |
365 | { | 365 | { |
366 | struct net *net = nf_ct_net(ct); | 366 | struct net *net = nf_ct_net(ct); |
367 | 367 | ||
368 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 368 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
369 | &net->ct.hash[hash]); | 369 | &net->ct.hash[hash]); |
370 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, | 370 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, |
371 | &net->ct.hash[repl_hash]); | 371 | &net->ct.hash[repl_hash]); |
372 | } | 372 | } |
373 | 373 | ||
374 | void nf_conntrack_hash_insert(struct nf_conn *ct) | 374 | void nf_conntrack_hash_insert(struct nf_conn *ct) |
375 | { | 375 | { |
376 | struct net *net = nf_ct_net(ct); | 376 | struct net *net = nf_ct_net(ct); |
377 | unsigned int hash, repl_hash; | 377 | unsigned int hash, repl_hash; |
378 | u16 zone; | 378 | u16 zone; |
379 | 379 | ||
380 | zone = nf_ct_zone(ct); | 380 | zone = nf_ct_zone(ct); |
381 | hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 381 | hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
382 | repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | 382 | repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
383 | 383 | ||
384 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | 384 | __nf_conntrack_hash_insert(ct, hash, repl_hash); |
385 | } | 385 | } |
386 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); | 386 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); |
387 | 387 | ||
/* Confirm a connection given skb; places it in hash table.
 *
 * Returns NF_ACCEPT on success (or for reply-direction packets, which
 * never confirm), NF_DROP if we lost a race and an equal tuple is
 * already hashed. */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction. Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns. But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means noone else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	/* Queue a NEW (or RELATED, for expected connections) event now
	 * that the conntrack is visible. */
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
475 | 475 | ||
/* Returns true if a connection corresponding to the tuple already
   exists (required for NAT), skipping @ignored_conntrack itself. */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
509 | 509 | ||
/* How many chain entries to scan before giving up on eviction. */
#define NF_CT_EVICTION_RANGE	8

/* Evict one non-ASSURED conntrack when the table is full, scanning up
 * to NF_CT_EVICTION_RANGE entries starting at bucket @hash.  Returns 1
 * if an entry was dropped, 0 otherwise.
 *
 * There's a small race here where we may free a just-assured
   connection. Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			/* keep the last non-assured entry in the chain */
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			/* Take a reference unless it is already dying or
			 * its refcount hit zero under us. */
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		/* chain exhausted: try the next bucket */
		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	/* Only the CPU that deactivates the timer may kill the entry. */
	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
559 | 559 | ||
560 | struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, | 560 | struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, |
561 | const struct nf_conntrack_tuple *orig, | 561 | const struct nf_conntrack_tuple *orig, |
562 | const struct nf_conntrack_tuple *repl, | 562 | const struct nf_conntrack_tuple *repl, |
563 | gfp_t gfp) | 563 | gfp_t gfp) |
564 | { | 564 | { |
565 | struct nf_conn *ct; | 565 | struct nf_conn *ct; |
566 | 566 | ||
567 | if (unlikely(!nf_conntrack_hash_rnd_initted)) { | 567 | if (unlikely(!nf_conntrack_hash_rnd_initted)) { |
568 | get_random_bytes(&nf_conntrack_hash_rnd, | 568 | get_random_bytes(&nf_conntrack_hash_rnd, |
569 | sizeof(nf_conntrack_hash_rnd)); | 569 | sizeof(nf_conntrack_hash_rnd)); |
570 | nf_conntrack_hash_rnd_initted = 1; | 570 | nf_conntrack_hash_rnd_initted = 1; |
571 | } | 571 | } |
572 | 572 | ||
573 | /* We don't want any race condition at early drop stage */ | 573 | /* We don't want any race condition at early drop stage */ |
574 | atomic_inc(&net->ct.count); | 574 | atomic_inc(&net->ct.count); |
575 | 575 | ||
576 | if (nf_conntrack_max && | 576 | if (nf_conntrack_max && |
577 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { | 577 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { |
578 | unsigned int hash = hash_conntrack(net, zone, orig); | 578 | unsigned int hash = hash_conntrack(net, zone, orig); |
579 | if (!early_drop(net, hash)) { | 579 | if (!early_drop(net, hash)) { |
580 | atomic_dec(&net->ct.count); | 580 | atomic_dec(&net->ct.count); |
581 | if (net_ratelimit()) | 581 | if (net_ratelimit()) |
582 | printk(KERN_WARNING | 582 | printk(KERN_WARNING |
583 | "nf_conntrack: table full, dropping" | 583 | "nf_conntrack: table full, dropping" |
584 | " packet.\n"); | 584 | " packet.\n"); |
585 | return ERR_PTR(-ENOMEM); | 585 | return ERR_PTR(-ENOMEM); |
586 | } | 586 | } |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | 589 | /* |
590 | * Do not use kmem_cache_zalloc(), as this cache uses | 590 | * Do not use kmem_cache_zalloc(), as this cache uses |
591 | * SLAB_DESTROY_BY_RCU. | 591 | * SLAB_DESTROY_BY_RCU. |
592 | */ | 592 | */ |
593 | ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); | 593 | ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); |
594 | if (ct == NULL) { | 594 | if (ct == NULL) { |
595 | pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); | 595 | pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); |
596 | atomic_dec(&net->ct.count); | 596 | atomic_dec(&net->ct.count); |
597 | return ERR_PTR(-ENOMEM); | 597 | return ERR_PTR(-ENOMEM); |
598 | } | 598 | } |
599 | /* | 599 | /* |
600 | * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next | 600 | * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next |
601 | * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. | 601 | * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. |
602 | */ | 602 | */ |
603 | memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, | 603 | memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, |
604 | sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); | 604 | sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); |
605 | spin_lock_init(&ct->lock); | 605 | spin_lock_init(&ct->lock); |
606 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; | 606 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; |
607 | ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; | 607 | ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; |
608 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; | 608 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; |
609 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; | 609 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; |
610 | /* Don't set timer yet: wait for confirmation */ | 610 | /* Don't set timer yet: wait for confirmation */ |
611 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); | 611 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); |
612 | #ifdef CONFIG_NET_NS | 612 | #ifdef CONFIG_NET_NS |
613 | ct->ct_net = net; | 613 | ct->ct_net = net; |
614 | #endif | 614 | #endif |
615 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 615 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
616 | if (zone) { | 616 | if (zone) { |
617 | struct nf_conntrack_zone *nf_ct_zone; | 617 | struct nf_conntrack_zone *nf_ct_zone; |
618 | 618 | ||
619 | nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); | 619 | nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); |
620 | if (!nf_ct_zone) | 620 | if (!nf_ct_zone) |
621 | goto out_free; | 621 | goto out_free; |
622 | nf_ct_zone->id = zone; | 622 | nf_ct_zone->id = zone; |
623 | } | 623 | } |
624 | #endif | 624 | #endif |
625 | /* | 625 | /* |
626 | * changes to lookup keys must be done before setting refcnt to 1 | 626 | * changes to lookup keys must be done before setting refcnt to 1 |
627 | */ | 627 | */ |
628 | smp_wmb(); | 628 | smp_wmb(); |
629 | atomic_set(&ct->ct_general.use, 1); | 629 | atomic_set(&ct->ct_general.use, 1); |
630 | return ct; | 630 | return ct; |
631 | 631 | ||
632 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 632 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
633 | out_free: | 633 | out_free: |
634 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | 634 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); |
635 | return ERR_PTR(-ENOMEM); | 635 | return ERR_PTR(-ENOMEM); |
636 | #endif | 636 | #endif |
637 | } | 637 | } |
638 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | 638 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); |
639 | 639 | ||
640 | void nf_conntrack_free(struct nf_conn *ct) | 640 | void nf_conntrack_free(struct nf_conn *ct) |
641 | { | 641 | { |
642 | struct net *net = nf_ct_net(ct); | 642 | struct net *net = nf_ct_net(ct); |
643 | 643 | ||
644 | nf_ct_ext_destroy(ct); | 644 | nf_ct_ext_destroy(ct); |
645 | atomic_dec(&net->ct.count); | 645 | atomic_dec(&net->ct.count); |
646 | nf_ct_ext_free(ct); | 646 | nf_ct_ext_free(ct); |
647 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | 647 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); |
648 | } | 648 | } |
649 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | 649 | EXPORT_SYMBOL_GPL(nf_conntrack_free); |
650 | 650 | ||
651 | /* Allocate a new conntrack: we return -ENOMEM if classification | 651 | /* Allocate a new conntrack: we return -ENOMEM if classification |
652 | failed due to stress. Otherwise it really is unclassifiable. */ | 652 | failed due to stress. Otherwise it really is unclassifiable. */ |
653 | static struct nf_conntrack_tuple_hash * | 653 | static struct nf_conntrack_tuple_hash * |
654 | init_conntrack(struct net *net, struct nf_conn *tmpl, | 654 | init_conntrack(struct net *net, struct nf_conn *tmpl, |
655 | const struct nf_conntrack_tuple *tuple, | 655 | const struct nf_conntrack_tuple *tuple, |
656 | struct nf_conntrack_l3proto *l3proto, | 656 | struct nf_conntrack_l3proto *l3proto, |
657 | struct nf_conntrack_l4proto *l4proto, | 657 | struct nf_conntrack_l4proto *l4proto, |
658 | struct sk_buff *skb, | 658 | struct sk_buff *skb, |
659 | unsigned int dataoff) | 659 | unsigned int dataoff) |
660 | { | 660 | { |
661 | struct nf_conn *ct; | 661 | struct nf_conn *ct; |
662 | struct nf_conn_help *help; | 662 | struct nf_conn_help *help; |
663 | struct nf_conntrack_tuple repl_tuple; | 663 | struct nf_conntrack_tuple repl_tuple; |
664 | struct nf_conntrack_ecache *ecache; | 664 | struct nf_conntrack_ecache *ecache; |
665 | struct nf_conntrack_expect *exp; | 665 | struct nf_conntrack_expect *exp; |
666 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; | 666 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; |
667 | 667 | ||
668 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { | 668 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { |
669 | pr_debug("Can't invert tuple.\n"); | 669 | pr_debug("Can't invert tuple.\n"); |
670 | return NULL; | 670 | return NULL; |
671 | } | 671 | } |
672 | 672 | ||
673 | ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC); | 673 | ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC); |
674 | if (IS_ERR(ct)) { | 674 | if (IS_ERR(ct)) { |
675 | pr_debug("Can't allocate conntrack.\n"); | 675 | pr_debug("Can't allocate conntrack.\n"); |
676 | return (struct nf_conntrack_tuple_hash *)ct; | 676 | return (struct nf_conntrack_tuple_hash *)ct; |
677 | } | 677 | } |
678 | 678 | ||
679 | if (!l4proto->new(ct, skb, dataoff)) { | 679 | if (!l4proto->new(ct, skb, dataoff)) { |
680 | nf_conntrack_free(ct); | 680 | nf_conntrack_free(ct); |
681 | pr_debug("init conntrack: can't track with proto module\n"); | 681 | pr_debug("init conntrack: can't track with proto module\n"); |
682 | return NULL; | 682 | return NULL; |
683 | } | 683 | } |
684 | 684 | ||
685 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); | 685 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); |
686 | 686 | ||
687 | ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; | 687 | ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; |
688 | nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, | 688 | nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, |
689 | ecache ? ecache->expmask : 0, | 689 | ecache ? ecache->expmask : 0, |
690 | GFP_ATOMIC); | 690 | GFP_ATOMIC); |
691 | 691 | ||
692 | spin_lock_bh(&nf_conntrack_lock); | 692 | spin_lock_bh(&nf_conntrack_lock); |
693 | exp = nf_ct_find_expectation(net, zone, tuple); | 693 | exp = nf_ct_find_expectation(net, zone, tuple); |
694 | if (exp) { | 694 | if (exp) { |
695 | pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", | 695 | pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", |
696 | ct, exp); | 696 | ct, exp); |
697 | /* Welcome, Mr. Bond. We've been expecting you... */ | 697 | /* Welcome, Mr. Bond. We've been expecting you... */ |
698 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | 698 | __set_bit(IPS_EXPECTED_BIT, &ct->status); |
699 | ct->master = exp->master; | 699 | ct->master = exp->master; |
700 | if (exp->helper) { | 700 | if (exp->helper) { |
701 | help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); | 701 | help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); |
702 | if (help) | 702 | if (help) |
703 | rcu_assign_pointer(help->helper, exp->helper); | 703 | rcu_assign_pointer(help->helper, exp->helper); |
704 | } | 704 | } |
705 | 705 | ||
706 | #ifdef CONFIG_NF_CONNTRACK_MARK | 706 | #ifdef CONFIG_NF_CONNTRACK_MARK |
707 | ct->mark = exp->master->mark; | 707 | ct->mark = exp->master->mark; |
708 | #endif | 708 | #endif |
709 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 709 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
710 | ct->secmark = exp->master->secmark; | 710 | ct->secmark = exp->master->secmark; |
711 | #endif | 711 | #endif |
712 | nf_conntrack_get(&ct->master->ct_general); | 712 | nf_conntrack_get(&ct->master->ct_general); |
713 | NF_CT_STAT_INC(net, expect_new); | 713 | NF_CT_STAT_INC(net, expect_new); |
714 | } else { | 714 | } else { |
715 | __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); | 715 | __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); |
716 | NF_CT_STAT_INC(net, new); | 716 | NF_CT_STAT_INC(net, new); |
717 | } | 717 | } |
718 | 718 | ||
719 | /* Overload tuple linked list to put us in unconfirmed list. */ | 719 | /* Overload tuple linked list to put us in unconfirmed list. */ |
720 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 720 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
721 | &net->ct.unconfirmed); | 721 | &net->ct.unconfirmed); |
722 | 722 | ||
723 | spin_unlock_bh(&nf_conntrack_lock); | 723 | spin_unlock_bh(&nf_conntrack_lock); |
724 | 724 | ||
725 | if (exp) { | 725 | if (exp) { |
726 | if (exp->expectfn) | 726 | if (exp->expectfn) |
727 | exp->expectfn(ct, exp); | 727 | exp->expectfn(ct, exp); |
728 | nf_ct_expect_put(exp); | 728 | nf_ct_expect_put(exp); |
729 | } | 729 | } |
730 | 730 | ||
731 | return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; | 731 | return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; |
732 | } | 732 | } |
733 | 733 | ||
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo.
 * Returns NULL if no tuple could be extracted from the packet, or an
 * ERR_PTR when a new conntrack entry could not be created. */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	/* Inherit the conntrack zone from the template entry, if any. */
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match; on miss, create a fresh (unconfirmed) entry */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	/* Publish the result on the skb for the rest of the hook chain. */
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
794 | 794 | ||
795 | unsigned int | 795 | unsigned int |
796 | nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, | 796 | nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, |
797 | struct sk_buff *skb) | 797 | struct sk_buff *skb) |
798 | { | 798 | { |
799 | struct nf_conn *ct, *tmpl = NULL; | 799 | struct nf_conn *ct, *tmpl = NULL; |
800 | enum ip_conntrack_info ctinfo; | 800 | enum ip_conntrack_info ctinfo; |
801 | struct nf_conntrack_l3proto *l3proto; | 801 | struct nf_conntrack_l3proto *l3proto; |
802 | struct nf_conntrack_l4proto *l4proto; | 802 | struct nf_conntrack_l4proto *l4proto; |
803 | unsigned int dataoff; | 803 | unsigned int dataoff; |
804 | u_int8_t protonum; | 804 | u_int8_t protonum; |
805 | int set_reply = 0; | 805 | int set_reply = 0; |
806 | int ret; | 806 | int ret; |
807 | 807 | ||
808 | if (skb->nfct) { | 808 | if (skb->nfct) { |
809 | /* Previously seen (loopback or untracked)? Ignore. */ | 809 | /* Previously seen (loopback or untracked)? Ignore. */ |
810 | tmpl = (struct nf_conn *)skb->nfct; | 810 | tmpl = (struct nf_conn *)skb->nfct; |
811 | if (!nf_ct_is_template(tmpl)) { | 811 | if (!nf_ct_is_template(tmpl)) { |
812 | NF_CT_STAT_INC_ATOMIC(net, ignore); | 812 | NF_CT_STAT_INC_ATOMIC(net, ignore); |
813 | return NF_ACCEPT; | 813 | return NF_ACCEPT; |
814 | } | 814 | } |
815 | skb->nfct = NULL; | 815 | skb->nfct = NULL; |
816 | } | 816 | } |
817 | 817 | ||
818 | /* rcu_read_lock()ed by nf_hook_slow */ | 818 | /* rcu_read_lock()ed by nf_hook_slow */ |
819 | l3proto = __nf_ct_l3proto_find(pf); | 819 | l3proto = __nf_ct_l3proto_find(pf); |
820 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), | 820 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), |
821 | &dataoff, &protonum); | 821 | &dataoff, &protonum); |
822 | if (ret <= 0) { | 822 | if (ret <= 0) { |
823 | pr_debug("not prepared to track yet or error occured\n"); | 823 | pr_debug("not prepared to track yet or error occured\n"); |
824 | NF_CT_STAT_INC_ATOMIC(net, error); | 824 | NF_CT_STAT_INC_ATOMIC(net, error); |
825 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 825 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
826 | ret = -ret; | 826 | ret = -ret; |
827 | goto out; | 827 | goto out; |
828 | } | 828 | } |
829 | 829 | ||
830 | l4proto = __nf_ct_l4proto_find(pf, protonum); | 830 | l4proto = __nf_ct_l4proto_find(pf, protonum); |
831 | 831 | ||
832 | /* It may be an special packet, error, unclean... | 832 | /* It may be an special packet, error, unclean... |
833 | * inverse of the return code tells to the netfilter | 833 | * inverse of the return code tells to the netfilter |
834 | * core what to do with the packet. */ | 834 | * core what to do with the packet. */ |
835 | if (l4proto->error != NULL) { | 835 | if (l4proto->error != NULL) { |
836 | ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, | 836 | ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, |
837 | pf, hooknum); | 837 | pf, hooknum); |
838 | if (ret <= 0) { | 838 | if (ret <= 0) { |
839 | NF_CT_STAT_INC_ATOMIC(net, error); | 839 | NF_CT_STAT_INC_ATOMIC(net, error); |
840 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 840 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
841 | ret = -ret; | 841 | ret = -ret; |
842 | goto out; | 842 | goto out; |
843 | } | 843 | } |
844 | } | 844 | } |
845 | 845 | ||
846 | ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, | 846 | ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, |
847 | l3proto, l4proto, &set_reply, &ctinfo); | 847 | l3proto, l4proto, &set_reply, &ctinfo); |
848 | if (!ct) { | 848 | if (!ct) { |
849 | /* Not valid part of a connection */ | 849 | /* Not valid part of a connection */ |
850 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 850 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
851 | ret = NF_ACCEPT; | 851 | ret = NF_ACCEPT; |
852 | goto out; | 852 | goto out; |
853 | } | 853 | } |
854 | 854 | ||
855 | if (IS_ERR(ct)) { | 855 | if (IS_ERR(ct)) { |
856 | /* Too stressed to deal. */ | 856 | /* Too stressed to deal. */ |
857 | NF_CT_STAT_INC_ATOMIC(net, drop); | 857 | NF_CT_STAT_INC_ATOMIC(net, drop); |
858 | ret = NF_DROP; | 858 | ret = NF_DROP; |
859 | goto out; | 859 | goto out; |
860 | } | 860 | } |
861 | 861 | ||
862 | NF_CT_ASSERT(skb->nfct); | 862 | NF_CT_ASSERT(skb->nfct); |
863 | 863 | ||
864 | ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); | 864 | ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); |
865 | if (ret <= 0) { | 865 | if (ret <= 0) { |
866 | /* Invalid: inverse of the return code tells | 866 | /* Invalid: inverse of the return code tells |
867 | * the netfilter core what to do */ | 867 | * the netfilter core what to do */ |
868 | pr_debug("nf_conntrack_in: Can't track with proto module\n"); | 868 | pr_debug("nf_conntrack_in: Can't track with proto module\n"); |
869 | nf_conntrack_put(skb->nfct); | 869 | nf_conntrack_put(skb->nfct); |
870 | skb->nfct = NULL; | 870 | skb->nfct = NULL; |
871 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 871 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
872 | if (ret == -NF_DROP) | 872 | if (ret == -NF_DROP) |
873 | NF_CT_STAT_INC_ATOMIC(net, drop); | 873 | NF_CT_STAT_INC_ATOMIC(net, drop); |
874 | ret = -ret; | 874 | ret = -ret; |
875 | goto out; | 875 | goto out; |
876 | } | 876 | } |
877 | 877 | ||
878 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) | 878 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) |
879 | nf_conntrack_event_cache(IPCT_REPLY, ct); | 879 | nf_conntrack_event_cache(IPCT_REPLY, ct); |
880 | out: | 880 | out: |
881 | if (tmpl) | 881 | if (tmpl) |
882 | nf_ct_put(tmpl); | 882 | nf_ct_put(tmpl); |
883 | 883 | ||
884 | return ret; | 884 | return ret; |
885 | } | 885 | } |
886 | EXPORT_SYMBOL_GPL(nf_conntrack_in); | 886 | EXPORT_SYMBOL_GPL(nf_conntrack_in); |
887 | 887 | ||
888 | bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | 888 | bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, |
889 | const struct nf_conntrack_tuple *orig) | 889 | const struct nf_conntrack_tuple *orig) |
890 | { | 890 | { |
891 | bool ret; | 891 | bool ret; |
892 | 892 | ||
893 | rcu_read_lock(); | 893 | rcu_read_lock(); |
894 | ret = nf_ct_invert_tuple(inverse, orig, | 894 | ret = nf_ct_invert_tuple(inverse, orig, |
895 | __nf_ct_l3proto_find(orig->src.l3num), | 895 | __nf_ct_l3proto_find(orig->src.l3num), |
896 | __nf_ct_l4proto_find(orig->src.l3num, | 896 | __nf_ct_l4proto_find(orig->src.l3num, |
897 | orig->dst.protonum)); | 897 | orig->dst.protonum)); |
898 | rcu_read_unlock(); | 898 | rcu_read_unlock(); |
899 | return ret; | 899 | return ret; |
900 | } | 900 | } |
901 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); | 901 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); |
902 | 902 | ||
/* Alter reply tuple (maybe alter helper). This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	/* Keep the currently assigned helper untouched for expected
	 * connections or entries that already registered expectations. */
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	/* The reply tuple changed, so re-run helper selection; the helper
	 * list is RCU-protected. */
	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
925 | 925 | ||
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		/* Stash the relative timeout; presumably armed when the
		 * entry is confirmed -- see __nf_conntrack_confirm. */
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		/* Per-direction packet/byte counters; present only when
		 * the accounting extension is attached to this entry. */
		acct = nf_conn_acct_find(ct);
		if (acct) {
			/* ct->lock protects the counter pair. */
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
968 | 968 | ||
/* Account the final packet (if do_acct) and kill the conntrack by
 * firing its timeout handler immediately.  Returns true if this caller
 * won the race to cancel the timer (and thus ran the handler itself). */
bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			/* ct->lock protects the counter pair. */
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}

	/* If we managed to deactivate the pending timer, invoke its
	 * handler by hand; otherwise the entry is already being reaped
	 * elsewhere. */
	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
994 | 994 | ||
#ifdef CONFIG_NF_CONNTRACK_ZONES
/* Conntrack extension descriptor carrying the zone of an entry. */
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif
1002 | 1002 | ||
1003 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 1003 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
1004 | 1004 | ||
1005 | #include <linux/netfilter/nfnetlink.h> | 1005 | #include <linux/netfilter/nfnetlink.h> |
1006 | #include <linux/netfilter/nfnetlink_conntrack.h> | 1006 | #include <linux/netfilter/nfnetlink_conntrack.h> |
1007 | #include <linux/mutex.h> | 1007 | #include <linux/mutex.h> |
1008 | 1008 | ||
/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	/* NLA_PUT_BE16() jumps to nla_put_failure when the skb runs out
	 * of tailroom. */
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1023 | 1023 | ||
/* Netlink attribute policy for port-based protocol tuples. */
const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1029 | 1029 | ||
1030 | int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], | 1030 | int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], |
1031 | struct nf_conntrack_tuple *t) | 1031 | struct nf_conntrack_tuple *t) |
1032 | { | 1032 | { |
1033 | if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) | 1033 | if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) |
1034 | return -EINVAL; | 1034 | return -EINVAL; |
1035 | 1035 | ||
1036 | t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); | 1036 | t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); |
1037 | t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); | 1037 | t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); |
1038 | 1038 | ||
1039 | return 0; | 1039 | return 0; |
1040 | } | 1040 | } |
1041 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); | 1041 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); |
1042 | 1042 | ||
/* Total attribute payload length described by nf_ct_port_nla_policy. */
int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1048 | #endif | 1048 | #endif |
1049 | 1049 | ||
1050 | /* Used by ipt_REJECT and ip6t_REJECT. */ | 1050 | /* Used by ipt_REJECT and ip6t_REJECT. */ |
1051 | static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) | 1051 | static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) |
1052 | { | 1052 | { |
1053 | struct nf_conn *ct; | 1053 | struct nf_conn *ct; |
1054 | enum ip_conntrack_info ctinfo; | 1054 | enum ip_conntrack_info ctinfo; |
1055 | 1055 | ||
1056 | /* This ICMP is in reverse direction to the packet which caused it */ | 1056 | /* This ICMP is in reverse direction to the packet which caused it */ |
1057 | ct = nf_ct_get(skb, &ctinfo); | 1057 | ct = nf_ct_get(skb, &ctinfo); |
1058 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | 1058 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
1059 | ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; | 1059 | ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; |
1060 | else | 1060 | else |
1061 | ctinfo = IP_CT_RELATED; | 1061 | ctinfo = IP_CT_RELATED; |
1062 | 1062 | ||
1063 | /* Attach to new skbuff, and increment count */ | 1063 | /* Attach to new skbuff, and increment count */ |
1064 | nskb->nfct = &ct->ct_general; | 1064 | nskb->nfct = &ct->ct_general; |
1065 | nskb->nfctinfo = ctinfo; | 1065 | nskb->nfctinfo = ctinfo; |
1066 | nf_conntrack_get(nskb->nfct); | 1066 | nf_conntrack_get(nskb->nfct); |
1067 | } | 1067 | } |
1068 | 1068 | ||
/* Bring out ya dead!
 *
 * Scan the conntrack hash (resuming at *bucket) for the first entry
 * matching @iter and return it with an extra reference held.  Matching
 * entries on the unconfirmed list are only flagged IPS_DYING, not
 * returned (NOTE(review): presumably because they are not in the hash
 * and cannot be handed back safely -- confirm).  Returns NULL once the
 * scan completes. */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	/* nf_conntrack_lock protects the hash and the unconfirmed list. */
	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	/* Pin the entry so the caller may use it after the lock drops. */
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}
1098 | 1098 | ||
1099 | void nf_ct_iterate_cleanup(struct net *net, | 1099 | void nf_ct_iterate_cleanup(struct net *net, |
1100 | int (*iter)(struct nf_conn *i, void *data), | 1100 | int (*iter)(struct nf_conn *i, void *data), |
1101 | void *data) | 1101 | void *data) |
1102 | { | 1102 | { |
1103 | struct nf_conn *ct; | 1103 | struct nf_conn *ct; |
1104 | unsigned int bucket = 0; | 1104 | unsigned int bucket = 0; |
1105 | 1105 | ||
1106 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { | 1106 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { |
1107 | /* Time to push up daises... */ | 1107 | /* Time to push up daises... */ |
1108 | if (del_timer(&ct->timeout)) | 1108 | if (del_timer(&ct->timeout)) |
1109 | death_by_timeout((unsigned long)ct); | 1109 | death_by_timeout((unsigned long)ct); |
1110 | /* ... else the timer will get him soon. */ | 1110 | /* ... else the timer will get him soon. */ |
1111 | 1111 | ||
1112 | nf_ct_put(ct); | 1112 | nf_ct_put(ct); |
1113 | } | 1113 | } |
1114 | } | 1114 | } |
1115 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); | 1115 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); |
1116 | 1116 | ||
/* Cookie handed to kill_report() while flushing the conntrack table. */
struct __nf_ct_flush_report {
	u32 pid;	/* netlink pid passed to nf_conntrack_event_report() */
	int report;	/* report flag passed to nf_conntrack_event_report() */
};
1121 | 1121 | ||
1122 | static int kill_report(struct nf_conn *i, void *data) | 1122 | static int kill_report(struct nf_conn *i, void *data) |
1123 | { | 1123 | { |
1124 | struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; | 1124 | struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; |
1125 | 1125 | ||
1126 | /* If we fail to deliver the event, death_by_timeout() will retry */ | 1126 | /* If we fail to deliver the event, death_by_timeout() will retry */ |
1127 | if (nf_conntrack_event_report(IPCT_DESTROY, i, | 1127 | if (nf_conntrack_event_report(IPCT_DESTROY, i, |
1128 | fr->pid, fr->report) < 0) | 1128 | fr->pid, fr->report) < 0) |
1129 | return 1; | 1129 | return 1; |
1130 | 1130 | ||
1131 | /* Avoid the delivery of the destroy event in death_by_timeout(). */ | 1131 | /* Avoid the delivery of the destroy event in death_by_timeout(). */ |
1132 | set_bit(IPS_DYING_BIT, &i->status); | 1132 | set_bit(IPS_DYING_BIT, &i->status); |
1133 | return 1; | 1133 | return 1; |
1134 | } | 1134 | } |
1135 | 1135 | ||
/* Iterator callback for nf_ct_iterate_cleanup() that matches every entry. */
static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}
1140 | 1140 | ||
1141 | void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size) | 1141 | void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size) |
1142 | { | 1142 | { |
1143 | if (vmalloced) | 1143 | if (vmalloced) |
1144 | vfree(hash); | 1144 | vfree(hash); |
1145 | else | 1145 | else |
1146 | free_pages((unsigned long)hash, | 1146 | free_pages((unsigned long)hash, |
1147 | get_order(sizeof(struct hlist_head) * size)); | 1147 | get_order(sizeof(struct hlist_head) * size)); |
1148 | } | 1148 | } |
1149 | EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); | 1149 | EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); |
1150 | 1150 | ||
1151 | void nf_conntrack_flush_report(struct net *net, u32 pid, int report) | 1151 | void nf_conntrack_flush_report(struct net *net, u32 pid, int report) |
1152 | { | 1152 | { |
1153 | struct __nf_ct_flush_report fr = { | 1153 | struct __nf_ct_flush_report fr = { |
1154 | .pid = pid, | 1154 | .pid = pid, |
1155 | .report = report, | 1155 | .report = report, |
1156 | }; | 1156 | }; |
1157 | nf_ct_iterate_cleanup(net, kill_report, &fr); | 1157 | nf_ct_iterate_cleanup(net, kill_report, &fr); |
1158 | } | 1158 | } |
1159 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); | 1159 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); |
1160 | 1160 | ||
/* Forcibly kill every entry on the per-netns dying list; used on the
 * netns cleanup path (see nf_conntrack_cleanup_net() below). */
static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	/* nf_conntrack_lock protects the dying list. */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
1175 | 1175 | ||
1176 | static void nf_conntrack_cleanup_init_net(void) | 1176 | static void nf_conntrack_cleanup_init_net(void) |
1177 | { | 1177 | { |
1178 | /* wait until all references to nf_conntrack_untracked are dropped */ | 1178 | /* wait until all references to nf_conntrack_untracked are dropped */ |
1179 | while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) | 1179 | while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) |
1180 | schedule(); | 1180 | schedule(); |
1181 | 1181 | ||
1182 | nf_conntrack_helper_fini(); | 1182 | nf_conntrack_helper_fini(); |
1183 | nf_conntrack_proto_fini(); | 1183 | nf_conntrack_proto_fini(); |
1184 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1184 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1185 | nf_ct_extend_unregister(&nf_ct_zone_extend); | 1185 | nf_ct_extend_unregister(&nf_ct_zone_extend); |
1186 | #endif | 1186 | #endif |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static void nf_conntrack_cleanup_net(struct net *net) | 1189 | static void nf_conntrack_cleanup_net(struct net *net) |
1190 | { | 1190 | { |
1191 | i_see_dead_people: | 1191 | i_see_dead_people: |
1192 | nf_ct_iterate_cleanup(net, kill_all, NULL); | 1192 | nf_ct_iterate_cleanup(net, kill_all, NULL); |
1193 | nf_ct_release_dying_list(net); | 1193 | nf_ct_release_dying_list(net); |
1194 | if (atomic_read(&net->ct.count) != 0) { | 1194 | if (atomic_read(&net->ct.count) != 0) { |
1195 | schedule(); | 1195 | schedule(); |
1196 | goto i_see_dead_people; | 1196 | goto i_see_dead_people; |
1197 | } | 1197 | } |
1198 | 1198 | ||
1199 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, | 1199 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, |
1200 | net->ct.htable_size); | 1200 | net->ct.htable_size); |
1201 | nf_conntrack_ecache_fini(net); | 1201 | nf_conntrack_ecache_fini(net); |
1202 | nf_conntrack_acct_fini(net); | 1202 | nf_conntrack_acct_fini(net); |
1203 | nf_conntrack_expect_fini(net); | 1203 | nf_conntrack_expect_fini(net); |
1204 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); | 1204 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); |
1205 | kfree(net->ct.slabname); | 1205 | kfree(net->ct.slabname); |
1206 | free_percpu(net->ct.stat); | 1206 | free_percpu(net->ct.stat); |
1207 | } | 1207 | } |
1208 | 1208 | ||
1209 | /* Mishearing the voices in his head, our hero wonders how he's | 1209 | /* Mishearing the voices in his head, our hero wonders how he's |
1210 | supposed to kill the mall. */ | 1210 | supposed to kill the mall. */ |
1211 | void nf_conntrack_cleanup(struct net *net) | 1211 | void nf_conntrack_cleanup(struct net *net) |
1212 | { | 1212 | { |
1213 | if (net_eq(net, &init_net)) | 1213 | if (net_eq(net, &init_net)) |
1214 | rcu_assign_pointer(ip_ct_attach, NULL); | 1214 | rcu_assign_pointer(ip_ct_attach, NULL); |
1215 | 1215 | ||
1216 | /* This makes sure all current packets have passed through | 1216 | /* This makes sure all current packets have passed through |
1217 | netfilter framework. Roll on, two-stage module | 1217 | netfilter framework. Roll on, two-stage module |
1218 | delete... */ | 1218 | delete... */ |
1219 | synchronize_net(); | 1219 | synchronize_net(); |
1220 | 1220 | ||
1221 | nf_conntrack_cleanup_net(net); | 1221 | nf_conntrack_cleanup_net(net); |
1222 | 1222 | ||
1223 | if (net_eq(net, &init_net)) { | 1223 | if (net_eq(net, &init_net)) { |
1224 | rcu_assign_pointer(nf_ct_destroy, NULL); | 1224 | rcu_assign_pointer(nf_ct_destroy, NULL); |
1225 | nf_conntrack_cleanup_init_net(); | 1225 | nf_conntrack_cleanup_init_net(); |
1226 | } | 1226 | } |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) | 1229 | void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) |
1230 | { | 1230 | { |
1231 | struct hlist_nulls_head *hash; | 1231 | struct hlist_nulls_head *hash; |
1232 | unsigned int nr_slots, i; | 1232 | unsigned int nr_slots, i; |
1233 | size_t sz; | 1233 | size_t sz; |
1234 | 1234 | ||
1235 | *vmalloced = 0; | 1235 | *vmalloced = 0; |
1236 | 1236 | ||
1237 | BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); | 1237 | BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); |
1238 | nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); | 1238 | nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); |
1239 | sz = nr_slots * sizeof(struct hlist_nulls_head); | 1239 | sz = nr_slots * sizeof(struct hlist_nulls_head); |
1240 | hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, | 1240 | hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, |
1241 | get_order(sz)); | 1241 | get_order(sz)); |
1242 | if (!hash) { | 1242 | if (!hash) { |
1243 | *vmalloced = 1; | 1243 | *vmalloced = 1; |
1244 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); | 1244 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); |
1245 | hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 1245 | hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | if (hash && nulls) | 1248 | if (hash && nulls) |
1249 | for (i = 0; i < nr_slots; i++) | 1249 | for (i = 0; i < nr_slots; i++) |
1250 | INIT_HLIST_NULLS_HEAD(&hash[i], i); | 1250 | INIT_HLIST_NULLS_HEAD(&hash[i], i); |
1251 | 1251 | ||
1252 | return hash; | 1252 | return hash; |
1253 | } | 1253 | } |
1254 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); | 1254 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); |
1255 | 1255 | ||
1256 | int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) | 1256 | int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) |
1257 | { | 1257 | { |
1258 | int i, bucket, vmalloced, old_vmalloced; | 1258 | int i, bucket, vmalloced, old_vmalloced; |
1259 | unsigned int hashsize, old_size; | 1259 | unsigned int hashsize, old_size; |
1260 | struct hlist_nulls_head *hash, *old_hash; | 1260 | struct hlist_nulls_head *hash, *old_hash; |
1261 | struct nf_conntrack_tuple_hash *h; | 1261 | struct nf_conntrack_tuple_hash *h; |
1262 | struct nf_conn *ct; | 1262 | struct nf_conn *ct; |
1263 | 1263 | ||
1264 | if (current->nsproxy->net_ns != &init_net) | 1264 | if (current->nsproxy->net_ns != &init_net) |
1265 | return -EOPNOTSUPP; | 1265 | return -EOPNOTSUPP; |
1266 | 1266 | ||
1267 | /* On boot, we can set this without any fancy locking. */ | 1267 | /* On boot, we can set this without any fancy locking. */ |
1268 | if (!nf_conntrack_htable_size) | 1268 | if (!nf_conntrack_htable_size) |
1269 | return param_set_uint(val, kp); | 1269 | return param_set_uint(val, kp); |
1270 | 1270 | ||
1271 | hashsize = simple_strtoul(val, NULL, 0); | 1271 | hashsize = simple_strtoul(val, NULL, 0); |
1272 | if (!hashsize) | 1272 | if (!hashsize) |
1273 | return -EINVAL; | 1273 | return -EINVAL; |
1274 | 1274 | ||
1275 | hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1); | 1275 | hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1); |
1276 | if (!hash) | 1276 | if (!hash) |
1277 | return -ENOMEM; | 1277 | return -ENOMEM; |
1278 | 1278 | ||
1279 | /* Lookups in the old hash might happen in parallel, which means we | 1279 | /* Lookups in the old hash might happen in parallel, which means we |
1280 | * might get false negatives during connection lookup. New connections | 1280 | * might get false negatives during connection lookup. New connections |
1281 | * created because of a false negative won't make it into the hash | 1281 | * created because of a false negative won't make it into the hash |
1282 | * though since that required taking the lock. | 1282 | * though since that required taking the lock. |
1283 | */ | 1283 | */ |
1284 | spin_lock_bh(&nf_conntrack_lock); | 1284 | spin_lock_bh(&nf_conntrack_lock); |
1285 | for (i = 0; i < init_net.ct.htable_size; i++) { | 1285 | for (i = 0; i < init_net.ct.htable_size; i++) { |
1286 | while (!hlist_nulls_empty(&init_net.ct.hash[i])) { | 1286 | while (!hlist_nulls_empty(&init_net.ct.hash[i])) { |
1287 | h = hlist_nulls_entry(init_net.ct.hash[i].first, | 1287 | h = hlist_nulls_entry(init_net.ct.hash[i].first, |
1288 | struct nf_conntrack_tuple_hash, hnnode); | 1288 | struct nf_conntrack_tuple_hash, hnnode); |
1289 | ct = nf_ct_tuplehash_to_ctrack(h); | 1289 | ct = nf_ct_tuplehash_to_ctrack(h); |
1290 | hlist_nulls_del_rcu(&h->hnnode); | 1290 | hlist_nulls_del_rcu(&h->hnnode); |
1291 | bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), | 1291 | bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), |
1292 | hashsize, | 1292 | hashsize, |
1293 | nf_conntrack_hash_rnd); | 1293 | nf_conntrack_hash_rnd); |
1294 | hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); | 1294 | hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); |
1295 | } | 1295 | } |
1296 | } | 1296 | } |
1297 | old_size = init_net.ct.htable_size; | 1297 | old_size = init_net.ct.htable_size; |
1298 | old_vmalloced = init_net.ct.hash_vmalloc; | 1298 | old_vmalloced = init_net.ct.hash_vmalloc; |
1299 | old_hash = init_net.ct.hash; | 1299 | old_hash = init_net.ct.hash; |
1300 | 1300 | ||
1301 | init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; | 1301 | init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; |
1302 | init_net.ct.hash_vmalloc = vmalloced; | 1302 | init_net.ct.hash_vmalloc = vmalloced; |
1303 | init_net.ct.hash = hash; | 1303 | init_net.ct.hash = hash; |
1304 | spin_unlock_bh(&nf_conntrack_lock); | 1304 | spin_unlock_bh(&nf_conntrack_lock); |
1305 | 1305 | ||
1306 | nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); | 1306 | nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); |
1307 | return 0; | 1307 | return 0; |
1308 | } | 1308 | } |
1309 | EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); | 1309 | EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); |
1310 | 1310 | ||
1311 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, | 1311 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, |
1312 | &nf_conntrack_htable_size, 0600); | 1312 | &nf_conntrack_htable_size, 0600); |
1313 | 1313 | ||
1314 | static int nf_conntrack_init_init_net(void) | 1314 | static int nf_conntrack_init_init_net(void) |
1315 | { | 1315 | { |
1316 | int max_factor = 8; | 1316 | int max_factor = 8; |
1317 | int ret; | 1317 | int ret; |
1318 | 1318 | ||
1319 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB | 1319 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB |
1320 | * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ | 1320 | * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ |
1321 | if (!nf_conntrack_htable_size) { | 1321 | if (!nf_conntrack_htable_size) { |
1322 | nf_conntrack_htable_size | 1322 | nf_conntrack_htable_size |
1323 | = (((totalram_pages << PAGE_SHIFT) / 16384) | 1323 | = (((totalram_pages << PAGE_SHIFT) / 16384) |
1324 | / sizeof(struct hlist_head)); | 1324 | / sizeof(struct hlist_head)); |
1325 | if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) | 1325 | if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) |
1326 | nf_conntrack_htable_size = 16384; | 1326 | nf_conntrack_htable_size = 16384; |
1327 | if (nf_conntrack_htable_size < 32) | 1327 | if (nf_conntrack_htable_size < 32) |
1328 | nf_conntrack_htable_size = 32; | 1328 | nf_conntrack_htable_size = 32; |
1329 | 1329 | ||
1330 | /* Use a max. factor of four by default to get the same max as | 1330 | /* Use a max. factor of four by default to get the same max as |
1331 | * with the old struct list_heads. When a table size is given | 1331 | * with the old struct list_heads. When a table size is given |
1332 | * we use the old value of 8 to avoid reducing the max. | 1332 | * we use the old value of 8 to avoid reducing the max. |
1333 | * entries. */ | 1333 | * entries. */ |
1334 | max_factor = 4; | 1334 | max_factor = 4; |
1335 | } | 1335 | } |
1336 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; | 1336 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; |
1337 | 1337 | ||
1338 | printk("nf_conntrack version %s (%u buckets, %d max)\n", | 1338 | printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", |
1339 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, | 1339 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, |
1340 | nf_conntrack_max); | 1340 | nf_conntrack_max); |
1341 | 1341 | ||
1342 | ret = nf_conntrack_proto_init(); | 1342 | ret = nf_conntrack_proto_init(); |
1343 | if (ret < 0) | 1343 | if (ret < 0) |
1344 | goto err_proto; | 1344 | goto err_proto; |
1345 | 1345 | ||
1346 | ret = nf_conntrack_helper_init(); | 1346 | ret = nf_conntrack_helper_init(); |
1347 | if (ret < 0) | 1347 | if (ret < 0) |
1348 | goto err_helper; | 1348 | goto err_helper; |
1349 | 1349 | ||
1350 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1350 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1351 | ret = nf_ct_extend_register(&nf_ct_zone_extend); | 1351 | ret = nf_ct_extend_register(&nf_ct_zone_extend); |
1352 | if (ret < 0) | 1352 | if (ret < 0) |
1353 | goto err_extend; | 1353 | goto err_extend; |
1354 | #endif | 1354 | #endif |
1355 | /* Set up fake conntrack: to never be deleted, not in any hashes */ | 1355 | /* Set up fake conntrack: to never be deleted, not in any hashes */ |
1356 | #ifdef CONFIG_NET_NS | 1356 | #ifdef CONFIG_NET_NS |
1357 | nf_conntrack_untracked.ct_net = &init_net; | 1357 | nf_conntrack_untracked.ct_net = &init_net; |
1358 | #endif | 1358 | #endif |
1359 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); | 1359 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); |
1360 | /* - and look it like as a confirmed connection */ | 1360 | /* - and look it like as a confirmed connection */ |
1361 | set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); | 1361 | set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); |
1362 | 1362 | ||
1363 | return 0; | 1363 | return 0; |
1364 | 1364 | ||
1365 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1365 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1366 | err_extend: | 1366 | err_extend: |
1367 | nf_conntrack_helper_fini(); | 1367 | nf_conntrack_helper_fini(); |
1368 | #endif | 1368 | #endif |
1369 | err_helper: | 1369 | err_helper: |
1370 | nf_conntrack_proto_fini(); | 1370 | nf_conntrack_proto_fini(); |
1371 | err_proto: | 1371 | err_proto: |
1372 | return ret; | 1372 | return ret; |
1373 | } | 1373 | } |
1374 | 1374 | ||
1375 | /* | 1375 | /* |
1376 | * We need to use special "null" values, not used in hash table | 1376 | * We need to use special "null" values, not used in hash table |
1377 | */ | 1377 | */ |
1378 | #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) | 1378 | #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) |
1379 | #define DYING_NULLS_VAL ((1<<30)+1) | 1379 | #define DYING_NULLS_VAL ((1<<30)+1) |
1380 | 1380 | ||
1381 | static int nf_conntrack_init_net(struct net *net) | 1381 | static int nf_conntrack_init_net(struct net *net) |
1382 | { | 1382 | { |
1383 | int ret; | 1383 | int ret; |
1384 | 1384 | ||
1385 | atomic_set(&net->ct.count, 0); | 1385 | atomic_set(&net->ct.count, 0); |
1386 | INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); | 1386 | INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); |
1387 | INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); | 1387 | INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); |
1388 | net->ct.stat = alloc_percpu(struct ip_conntrack_stat); | 1388 | net->ct.stat = alloc_percpu(struct ip_conntrack_stat); |
1389 | if (!net->ct.stat) { | 1389 | if (!net->ct.stat) { |
1390 | ret = -ENOMEM; | 1390 | ret = -ENOMEM; |
1391 | goto err_stat; | 1391 | goto err_stat; |
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); | 1394 | net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); |
1395 | if (!net->ct.slabname) { | 1395 | if (!net->ct.slabname) { |
1396 | ret = -ENOMEM; | 1396 | ret = -ENOMEM; |
1397 | goto err_slabname; | 1397 | goto err_slabname; |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, | 1400 | net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, |
1401 | sizeof(struct nf_conn), 0, | 1401 | sizeof(struct nf_conn), 0, |
1402 | SLAB_DESTROY_BY_RCU, NULL); | 1402 | SLAB_DESTROY_BY_RCU, NULL); |
1403 | if (!net->ct.nf_conntrack_cachep) { | 1403 | if (!net->ct.nf_conntrack_cachep) { |
1404 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); | 1404 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); |
1405 | ret = -ENOMEM; | 1405 | ret = -ENOMEM; |
1406 | goto err_cache; | 1406 | goto err_cache; |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | net->ct.htable_size = nf_conntrack_htable_size; | 1409 | net->ct.htable_size = nf_conntrack_htable_size; |
1410 | net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, | 1410 | net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, |
1411 | &net->ct.hash_vmalloc, 1); | 1411 | &net->ct.hash_vmalloc, 1); |
1412 | if (!net->ct.hash) { | 1412 | if (!net->ct.hash) { |
1413 | ret = -ENOMEM; | 1413 | ret = -ENOMEM; |
1414 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); | 1414 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); |
1415 | goto err_hash; | 1415 | goto err_hash; |
1416 | } | 1416 | } |
1417 | ret = nf_conntrack_expect_init(net); | 1417 | ret = nf_conntrack_expect_init(net); |
1418 | if (ret < 0) | 1418 | if (ret < 0) |
1419 | goto err_expect; | 1419 | goto err_expect; |
1420 | ret = nf_conntrack_acct_init(net); | 1420 | ret = nf_conntrack_acct_init(net); |
1421 | if (ret < 0) | 1421 | if (ret < 0) |
1422 | goto err_acct; | 1422 | goto err_acct; |
1423 | ret = nf_conntrack_ecache_init(net); | 1423 | ret = nf_conntrack_ecache_init(net); |
1424 | if (ret < 0) | 1424 | if (ret < 0) |
1425 | goto err_ecache; | 1425 | goto err_ecache; |
1426 | 1426 | ||
1427 | return 0; | 1427 | return 0; |
1428 | 1428 | ||
1429 | err_ecache: | 1429 | err_ecache: |
1430 | nf_conntrack_acct_fini(net); | 1430 | nf_conntrack_acct_fini(net); |
1431 | err_acct: | 1431 | err_acct: |
1432 | nf_conntrack_expect_fini(net); | 1432 | nf_conntrack_expect_fini(net); |
1433 | err_expect: | 1433 | err_expect: |
1434 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, | 1434 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, |
1435 | net->ct.htable_size); | 1435 | net->ct.htable_size); |
1436 | err_hash: | 1436 | err_hash: |
1437 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); | 1437 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); |
1438 | err_cache: | 1438 | err_cache: |
1439 | kfree(net->ct.slabname); | 1439 | kfree(net->ct.slabname); |
1440 | err_slabname: | 1440 | err_slabname: |
1441 | free_percpu(net->ct.stat); | 1441 | free_percpu(net->ct.stat); |
1442 | err_stat: | 1442 | err_stat: |
1443 | return ret; | 1443 | return ret; |
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, | 1446 | s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, |
1447 | enum ip_conntrack_dir dir, | 1447 | enum ip_conntrack_dir dir, |
1448 | u32 seq); | 1448 | u32 seq); |
1449 | EXPORT_SYMBOL_GPL(nf_ct_nat_offset); | 1449 | EXPORT_SYMBOL_GPL(nf_ct_nat_offset); |
1450 | 1450 | ||
1451 | int nf_conntrack_init(struct net *net) | 1451 | int nf_conntrack_init(struct net *net) |
1452 | { | 1452 | { |
1453 | int ret; | 1453 | int ret; |
1454 | 1454 | ||
1455 | if (net_eq(net, &init_net)) { | 1455 | if (net_eq(net, &init_net)) { |
1456 | ret = nf_conntrack_init_init_net(); | 1456 | ret = nf_conntrack_init_init_net(); |
1457 | if (ret < 0) | 1457 | if (ret < 0) |
1458 | goto out_init_net; | 1458 | goto out_init_net; |
1459 | } | 1459 | } |
1460 | ret = nf_conntrack_init_net(net); | 1460 | ret = nf_conntrack_init_net(net); |
1461 | if (ret < 0) | 1461 | if (ret < 0) |
1462 | goto out_net; | 1462 | goto out_net; |
1463 | 1463 | ||
1464 | if (net_eq(net, &init_net)) { | 1464 | if (net_eq(net, &init_net)) { |
1465 | /* For use by REJECT target */ | 1465 | /* For use by REJECT target */ |
1466 | rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); | 1466 | rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); |
1467 | rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); | 1467 | rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); |
1468 | 1468 | ||
1469 | /* Howto get NAT offsets */ | 1469 | /* Howto get NAT offsets */ |
1470 | rcu_assign_pointer(nf_ct_nat_offset, NULL); | 1470 | rcu_assign_pointer(nf_ct_nat_offset, NULL); |
1471 | } | 1471 | } |
1472 | return 0; | 1472 | return 0; |
1473 | 1473 | ||
1474 | out_net: | 1474 | out_net: |
1475 | if (net_eq(net, &init_net)) | 1475 | if (net_eq(net, &init_net)) |
1476 | nf_conntrack_cleanup_init_net(); | 1476 | nf_conntrack_cleanup_init_net(); |
1477 | out_init_net: | 1477 | out_init_net: |
1478 | return ret; | 1478 | return ret; |
1479 | } | 1479 | } |
1480 | 1480 |
net/netfilter/nf_conntrack_ftp.c
1 | /* FTP extension for connection tracking. */ | 1 | /* FTP extension for connection tracking. */ |
2 | 2 | ||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | 3 | /* (C) 1999-2001 Paul `Rusty' Russell |
4 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 4 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | 5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/netfilter.h> | 14 | #include <linux/netfilter.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/ipv6.h> | 17 | #include <linux/ipv6.h> |
18 | #include <linux/ctype.h> | 18 | #include <linux/ctype.h> |
19 | #include <linux/inet.h> | 19 | #include <linux/inet.h> |
20 | #include <net/checksum.h> | 20 | #include <net/checksum.h> |
21 | #include <net/tcp.h> | 21 | #include <net/tcp.h> |
22 | 22 | ||
23 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
24 | #include <net/netfilter/nf_conntrack_expect.h> | 24 | #include <net/netfilter/nf_conntrack_expect.h> |
25 | #include <net/netfilter/nf_conntrack_ecache.h> | 25 | #include <net/netfilter/nf_conntrack_ecache.h> |
26 | #include <net/netfilter/nf_conntrack_helper.h> | 26 | #include <net/netfilter/nf_conntrack_helper.h> |
27 | #include <linux/netfilter/nf_conntrack_ftp.h> | 27 | #include <linux/netfilter/nf_conntrack_ftp.h> |
28 | 28 | ||
29 | MODULE_LICENSE("GPL"); | 29 | MODULE_LICENSE("GPL"); |
30 | MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); | 30 | MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); |
31 | MODULE_DESCRIPTION("ftp connection tracking helper"); | 31 | MODULE_DESCRIPTION("ftp connection tracking helper"); |
32 | MODULE_ALIAS("ip_conntrack_ftp"); | 32 | MODULE_ALIAS("ip_conntrack_ftp"); |
33 | MODULE_ALIAS_NFCT_HELPER("ftp"); | 33 | MODULE_ALIAS_NFCT_HELPER("ftp"); |
34 | 34 | ||
35 | /* This is slow, but it's simple. --RR */ | 35 | /* This is slow, but it's simple. --RR */ |
36 | static char *ftp_buffer; | 36 | static char *ftp_buffer; |
37 | 37 | ||
38 | static DEFINE_SPINLOCK(nf_ftp_lock); | 38 | static DEFINE_SPINLOCK(nf_ftp_lock); |
39 | 39 | ||
40 | #define MAX_PORTS 8 | 40 | #define MAX_PORTS 8 |
41 | static u_int16_t ports[MAX_PORTS]; | 41 | static u_int16_t ports[MAX_PORTS]; |
42 | static unsigned int ports_c; | 42 | static unsigned int ports_c; |
43 | module_param_array(ports, ushort, &ports_c, 0400); | 43 | module_param_array(ports, ushort, &ports_c, 0400); |
44 | 44 | ||
45 | static int loose; | 45 | static int loose; |
46 | module_param(loose, bool, 0600); | 46 | module_param(loose, bool, 0600); |
47 | 47 | ||
48 | unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, | 48 | unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, |
49 | enum ip_conntrack_info ctinfo, | 49 | enum ip_conntrack_info ctinfo, |
50 | enum nf_ct_ftp_type type, | 50 | enum nf_ct_ftp_type type, |
51 | unsigned int matchoff, | 51 | unsigned int matchoff, |
52 | unsigned int matchlen, | 52 | unsigned int matchlen, |
53 | struct nf_conntrack_expect *exp); | 53 | struct nf_conntrack_expect *exp); |
54 | EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); | 54 | EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); |
55 | 55 | ||
56 | static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char); | 56 | static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char); |
57 | static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char); | 57 | static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char); |
58 | static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, | 58 | static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, |
59 | char); | 59 | char); |
60 | 60 | ||
61 | static struct ftp_search { | 61 | static struct ftp_search { |
62 | const char *pattern; | 62 | const char *pattern; |
63 | size_t plen; | 63 | size_t plen; |
64 | char skip; | 64 | char skip; |
65 | char term; | 65 | char term; |
66 | enum nf_ct_ftp_type ftptype; | 66 | enum nf_ct_ftp_type ftptype; |
67 | int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char); | 67 | int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char); |
68 | } search[IP_CT_DIR_MAX][2] = { | 68 | } search[IP_CT_DIR_MAX][2] = { |
69 | [IP_CT_DIR_ORIGINAL] = { | 69 | [IP_CT_DIR_ORIGINAL] = { |
70 | { | 70 | { |
71 | .pattern = "PORT", | 71 | .pattern = "PORT", |
72 | .plen = sizeof("PORT") - 1, | 72 | .plen = sizeof("PORT") - 1, |
73 | .skip = ' ', | 73 | .skip = ' ', |
74 | .term = '\r', | 74 | .term = '\r', |
75 | .ftptype = NF_CT_FTP_PORT, | 75 | .ftptype = NF_CT_FTP_PORT, |
76 | .getnum = try_rfc959, | 76 | .getnum = try_rfc959, |
77 | }, | 77 | }, |
78 | { | 78 | { |
79 | .pattern = "EPRT", | 79 | .pattern = "EPRT", |
80 | .plen = sizeof("EPRT") - 1, | 80 | .plen = sizeof("EPRT") - 1, |
81 | .skip = ' ', | 81 | .skip = ' ', |
82 | .term = '\r', | 82 | .term = '\r', |
83 | .ftptype = NF_CT_FTP_EPRT, | 83 | .ftptype = NF_CT_FTP_EPRT, |
84 | .getnum = try_eprt, | 84 | .getnum = try_eprt, |
85 | }, | 85 | }, |
86 | }, | 86 | }, |
87 | [IP_CT_DIR_REPLY] = { | 87 | [IP_CT_DIR_REPLY] = { |
88 | { | 88 | { |
89 | .pattern = "227 ", | 89 | .pattern = "227 ", |
90 | .plen = sizeof("227 ") - 1, | 90 | .plen = sizeof("227 ") - 1, |
91 | .skip = '(', | 91 | .skip = '(', |
92 | .term = ')', | 92 | .term = ')', |
93 | .ftptype = NF_CT_FTP_PASV, | 93 | .ftptype = NF_CT_FTP_PASV, |
94 | .getnum = try_rfc959, | 94 | .getnum = try_rfc959, |
95 | }, | 95 | }, |
96 | { | 96 | { |
97 | .pattern = "229 ", | 97 | .pattern = "229 ", |
98 | .plen = sizeof("229 ") - 1, | 98 | .plen = sizeof("229 ") - 1, |
99 | .skip = '(', | 99 | .skip = '(', |
100 | .term = ')', | 100 | .term = ')', |
101 | .ftptype = NF_CT_FTP_EPSV, | 101 | .ftptype = NF_CT_FTP_EPSV, |
102 | .getnum = try_epsv_response, | 102 | .getnum = try_epsv_response, |
103 | }, | 103 | }, |
104 | }, | 104 | }, |
105 | }; | 105 | }; |
106 | 106 | ||
107 | static int | 107 | static int |
108 | get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term) | 108 | get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term) |
109 | { | 109 | { |
110 | const char *end; | 110 | const char *end; |
111 | int ret = in6_pton(src, min_t(size_t, dlen, 0xffff), (u8 *)dst, term, &end); | 111 | int ret = in6_pton(src, min_t(size_t, dlen, 0xffff), (u8 *)dst, term, &end); |
112 | if (ret > 0) | 112 | if (ret > 0) |
113 | return (int)(end - src); | 113 | return (int)(end - src); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
117 | static int try_number(const char *data, size_t dlen, u_int32_t array[], | 117 | static int try_number(const char *data, size_t dlen, u_int32_t array[], |
118 | int array_size, char sep, char term) | 118 | int array_size, char sep, char term) |
119 | { | 119 | { |
120 | u_int32_t i, len; | 120 | u_int32_t i, len; |
121 | 121 | ||
122 | memset(array, 0, sizeof(array[0])*array_size); | 122 | memset(array, 0, sizeof(array[0])*array_size); |
123 | 123 | ||
124 | /* Keep data pointing at next char. */ | 124 | /* Keep data pointing at next char. */ |
125 | for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) { | 125 | for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) { |
126 | if (*data >= '0' && *data <= '9') { | 126 | if (*data >= '0' && *data <= '9') { |
127 | array[i] = array[i]*10 + *data - '0'; | 127 | array[i] = array[i]*10 + *data - '0'; |
128 | } | 128 | } |
129 | else if (*data == sep) | 129 | else if (*data == sep) |
130 | i++; | 130 | i++; |
131 | else { | 131 | else { |
132 | /* Unexpected character; true if it's the | 132 | /* Unexpected character; true if it's the |
133 | terminator and we're finished. */ | 133 | terminator and we're finished. */ |
134 | if (*data == term && i == array_size - 1) | 134 | if (*data == term && i == array_size - 1) |
135 | return len; | 135 | return len; |
136 | 136 | ||
137 | pr_debug("Char %u (got %u nums) `%u' unexpected\n", | 137 | pr_debug("Char %u (got %u nums) `%u' unexpected\n", |
138 | len, i, *data); | 138 | len, i, *data); |
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | } | 141 | } |
142 | pr_debug("Failed to fill %u numbers separated by %c\n", | 142 | pr_debug("Failed to fill %u numbers separated by %c\n", |
143 | array_size, sep); | 143 | array_size, sep); |
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
147 | /* Returns 0, or length of numbers: 192,168,1,1,5,6 */ | 147 | /* Returns 0, or length of numbers: 192,168,1,1,5,6 */ |
148 | static int try_rfc959(const char *data, size_t dlen, | 148 | static int try_rfc959(const char *data, size_t dlen, |
149 | struct nf_conntrack_man *cmd, char term) | 149 | struct nf_conntrack_man *cmd, char term) |
150 | { | 150 | { |
151 | int length; | 151 | int length; |
152 | u_int32_t array[6]; | 152 | u_int32_t array[6]; |
153 | 153 | ||
154 | length = try_number(data, dlen, array, 6, ',', term); | 154 | length = try_number(data, dlen, array, 6, ',', term); |
155 | if (length == 0) | 155 | if (length == 0) |
156 | return 0; | 156 | return 0; |
157 | 157 | ||
158 | cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | | 158 | cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) | |
159 | (array[2] << 8) | array[3]); | 159 | (array[2] << 8) | array[3]); |
160 | cmd->u.tcp.port = htons((array[4] << 8) | array[5]); | 160 | cmd->u.tcp.port = htons((array[4] << 8) | array[5]); |
161 | return length; | 161 | return length; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* Grab port: number up to delimiter */ | 164 | /* Grab port: number up to delimiter */ |
165 | static int get_port(const char *data, int start, size_t dlen, char delim, | 165 | static int get_port(const char *data, int start, size_t dlen, char delim, |
166 | __be16 *port) | 166 | __be16 *port) |
167 | { | 167 | { |
168 | u_int16_t tmp_port = 0; | 168 | u_int16_t tmp_port = 0; |
169 | int i; | 169 | int i; |
170 | 170 | ||
171 | for (i = start; i < dlen; i++) { | 171 | for (i = start; i < dlen; i++) { |
172 | /* Finished? */ | 172 | /* Finished? */ |
173 | if (data[i] == delim) { | 173 | if (data[i] == delim) { |
174 | if (tmp_port == 0) | 174 | if (tmp_port == 0) |
175 | break; | 175 | break; |
176 | *port = htons(tmp_port); | 176 | *port = htons(tmp_port); |
177 | pr_debug("get_port: return %d\n", tmp_port); | 177 | pr_debug("get_port: return %d\n", tmp_port); |
178 | return i + 1; | 178 | return i + 1; |
179 | } | 179 | } |
180 | else if (data[i] >= '0' && data[i] <= '9') | 180 | else if (data[i] >= '0' && data[i] <= '9') |
181 | tmp_port = tmp_port*10 + data[i] - '0'; | 181 | tmp_port = tmp_port*10 + data[i] - '0'; |
182 | else { /* Some other crap */ | 182 | else { /* Some other crap */ |
183 | pr_debug("get_port: invalid char.\n"); | 183 | pr_debug("get_port: invalid char.\n"); |
184 | break; | 184 | break; |
185 | } | 185 | } |
186 | } | 186 | } |
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
189 | 189 | ||
/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
/* Parse an RFC 2428 EPRT argument.  Fills *cmd with the address and
 * port and returns the number of bytes consumed, or 0 on any parse or
 * address-family mismatch.
 */
static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
		    char term)
{
	char delim;
	int length;

	/* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
	   then delimiter again. */
	if (dlen <= 3) {
		pr_debug("EPRT: too short\n");
		return 0;
	}
	delim = data[0];
	/* The delimiter must be printable ASCII (33-126), must not be a
	 * digit, and must reappear right after the address-family digit. */
	if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
		pr_debug("try_eprt: invalid delimitter.\n");
		return 0;
	}

	/* EPRT protocol number "1" is IPv4, "2" is IPv6; it has to match
	 * the connection's own layer-3 family. */
	if ((cmd->l3num == PF_INET && data[1] != '1') ||
	    (cmd->l3num == PF_INET6 && data[1] != '2')) {
		pr_debug("EPRT: invalid protocol number.\n");
		return 0;
	}

	pr_debug("EPRT: Got %c%c%c\n", delim, data[1], delim);

	if (data[1] == '1') {
		u_int32_t array[4];

		/* Now we have IP address. */
		length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
		if (length != 0)
			cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16)
					   | (array[2] << 8) | array[3]);
	} else {
		/* Now we have IPv6 address. */
		length = get_ipv6_addr(data + 3, dlen - 3,
				       (struct in6_addr *)cmd->u3.ip6, delim);
	}

	if (length == 0)
		return 0;
	pr_debug("EPRT: Got IP address!\n");
	/* Start offset includes initial "|1|", and trailing delimiter */
	return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
}
237 | 237 | ||
238 | /* Returns 0, or length of numbers: |||6446| */ | 238 | /* Returns 0, or length of numbers: |||6446| */ |
239 | static int try_epsv_response(const char *data, size_t dlen, | 239 | static int try_epsv_response(const char *data, size_t dlen, |
240 | struct nf_conntrack_man *cmd, char term) | 240 | struct nf_conntrack_man *cmd, char term) |
241 | { | 241 | { |
242 | char delim; | 242 | char delim; |
243 | 243 | ||
244 | /* Three delimiters. */ | 244 | /* Three delimiters. */ |
245 | if (dlen <= 3) return 0; | 245 | if (dlen <= 3) return 0; |
246 | delim = data[0]; | 246 | delim = data[0]; |
247 | if (isdigit(delim) || delim < 33 || delim > 126 || | 247 | if (isdigit(delim) || delim < 33 || delim > 126 || |
248 | data[1] != delim || data[2] != delim) | 248 | data[1] != delim || data[2] != delim) |
249 | return 0; | 249 | return 0; |
250 | 250 | ||
251 | return get_port(data, 3, dlen, delim, &cmd->u.tcp.port); | 251 | return get_port(data, 3, dlen, delim, &cmd->u.tcp.port); |
252 | } | 252 | } |
253 | 253 | ||
/* Return 1 for match, 0 for accept, -1 for partial. */
/* Look for 'pattern' at the start of 'data'.  On a full match, skip
 * forward to the 'skip' character and hand the remainder to *getnum,
 * which parses the address/port into *cmd; *numoff/*numlen receive the
 * offset and length of the parsed region.  -1 ("partial") asks the
 * caller to drop the packet and wait for more data.
 */
static int find_pattern(const char *data, size_t dlen,
			const char *pattern, size_t plen,
			char skip, char term,
			unsigned int *numoff,
			unsigned int *numlen,
			struct nf_conntrack_man *cmd,
			int (*getnum)(const char *, size_t,
				      struct nf_conntrack_man *, char))
{
	size_t i;

	pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
	if (dlen == 0)
		return 0;

	if (dlen <= plen) {
		/* Short packet: try for partial? */
		if (strnicmp(data, pattern, dlen) == 0)
			return -1;
		else return 0;
	}

	if (strnicmp(data, pattern, plen) != 0) {
#if 0
		size_t i;

		pr_debug("ftp: string mismatch\n");
		for (i = 0; i < plen; i++) {
			pr_debug("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
				 i, data[i], data[i],
				 pattern[i], pattern[i]);
		}
#endif
		return 0;
	}

	pr_debug("Pattern matches!\n");
	/* Now we've found the constant string, try to skip
	   to the 'skip' character */
	for (i = plen; data[i] != skip; i++)
		if (i == dlen - 1) return -1;	/* ran out of data: partial */

	/* Skip over the last character */
	i++;

	pr_debug("Skipped up to `%c'!\n", skip);

	*numoff = i;
	*numlen = getnum(data + i, dlen - i, cmd, term);
	if (!*numlen)
		return -1;

	pr_debug("Match succeeded!\n");
	return 1;
}
310 | 310 | ||
311 | /* Look up to see if we're just after a \n. */ | 311 | /* Look up to see if we're just after a \n. */ |
312 | static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir) | 312 | static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir) |
313 | { | 313 | { |
314 | unsigned int i; | 314 | unsigned int i; |
315 | 315 | ||
316 | for (i = 0; i < info->seq_aft_nl_num[dir]; i++) | 316 | for (i = 0; i < info->seq_aft_nl_num[dir]; i++) |
317 | if (info->seq_aft_nl[dir][i] == seq) | 317 | if (info->seq_aft_nl[dir][i] == seq) |
318 | return 1; | 318 | return 1; |
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | 321 | ||
322 | /* We don't update if it's older than what we have. */ | 322 | /* We don't update if it's older than what we have. */ |
323 | static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, | 323 | static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, |
324 | struct nf_ct_ftp_master *info, int dir, | 324 | struct nf_ct_ftp_master *info, int dir, |
325 | struct sk_buff *skb) | 325 | struct sk_buff *skb) |
326 | { | 326 | { |
327 | unsigned int i, oldest; | 327 | unsigned int i, oldest; |
328 | 328 | ||
329 | /* Look for oldest: if we find exact match, we're done. */ | 329 | /* Look for oldest: if we find exact match, we're done. */ |
330 | for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { | 330 | for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { |
331 | if (info->seq_aft_nl[dir][i] == nl_seq) | 331 | if (info->seq_aft_nl[dir][i] == nl_seq) |
332 | return; | 332 | return; |
333 | } | 333 | } |
334 | 334 | ||
335 | if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { | 335 | if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { |
336 | info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; | 336 | info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; |
337 | } else { | 337 | } else { |
338 | if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) | 338 | if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) |
339 | oldest = 0; | 339 | oldest = 0; |
340 | else | 340 | else |
341 | oldest = 1; | 341 | oldest = 1; |
342 | 342 | ||
343 | if (after(nl_seq, info->seq_aft_nl[dir][oldest])) | 343 | if (after(nl_seq, info->seq_aft_nl[dir][oldest])) |
344 | info->seq_aft_nl[dir][oldest] = nl_seq; | 344 | info->seq_aft_nl[dir][oldest] = nl_seq; |
345 | } | 345 | } |
346 | } | 346 | } |
347 | 347 | ||
/* Connection-tracking helper for FTP control connections.  Scans the
 * TCP payload for PORT/PASV/EPRT/EPSV commands (via the 'search'
 * table), creates an expectation for the announced data connection,
 * and lets the NAT helper mangle the packet when NAT is in use.
 * Returns an NF_* verdict; NF_DROP is used when a command is split
 * across packets or an expectation cannot be set up.
 */
static int help(struct sk_buff *skb,
		unsigned int protoff,
		struct nf_conn *ct,
		enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff, datalen;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *fb_ptr;
	int ret;
	u32 seq;
	int dir = CTINFO2DIR(ctinfo);
	unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
	struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
	struct nf_conntrack_expect *exp;
	union nf_inet_addr *daddr;
	struct nf_conntrack_man cmd = {};
	unsigned int i;
	int found = 0, ends_in_nl;
	typeof(nf_nat_ftp_hook) nf_nat_ftp;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
		return NF_ACCEPT;
	}

	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	dataoff = protoff + th->doff * 4;
	/* No data? */
	if (dataoff >= skb->len) {
		pr_debug("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
			 skb->len);
		return NF_ACCEPT;
	}
	datalen = skb->len - dataoff;

	/* ftp_buffer is shared; the lock also serializes helper state. */
	spin_lock_bh(&nf_ftp_lock);
	fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
	BUG_ON(fb_ptr == NULL);

	ends_in_nl = (fb_ptr[datalen - 1] == '\n');
	seq = ntohl(th->seq) + datalen;

	/* Look up to see if we're just after a \n. */
	if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
		/* Now if this ends in \n, update ftp info. */
		pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
			 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
			 ct_ftp_info->seq_aft_nl[dir][0],
			 ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
			 ct_ftp_info->seq_aft_nl[dir][1]);
		ret = NF_ACCEPT;
		goto out_update_nl;
	}

	/* Initialize IP/IPv6 addr to expected address (it's not mentioned
	   in EPSV responses) */
	cmd.l3num = nf_ct_l3num(ct);
	memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
	       sizeof(cmd.u3.all));

	/* Try each known command/response pattern for this direction. */
	for (i = 0; i < ARRAY_SIZE(search[dir]); i++) {
		found = find_pattern(fb_ptr, datalen,
				     search[dir][i].pattern,
				     search[dir][i].plen,
				     search[dir][i].skip,
				     search[dir][i].term,
				     &matchoff, &matchlen,
				     &cmd,
				     search[dir][i].getnum);
		if (found) break;
	}
	if (found == -1) {
		/* We don't usually drop packets.  After all, this is
		   connection tracking, not packet filtering.
		   However, it is necessary for accurate tracking in
		   this case. */
		pr_debug("conntrack_ftp: partial %s %u+%u\n",
			 search[dir][i].pattern,  ntohl(th->seq), datalen);
		ret = NF_DROP;
		goto out;
	} else if (found == 0) { /* No match */
		ret = NF_ACCEPT;
		goto out_update_nl;
	}

	pr_debug("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
		 matchlen, fb_ptr + matchoff,
		 matchlen, ntohl(th->seq) + matchoff);

	exp = nf_ct_expect_alloc(ct);
	if (exp == NULL) {
		ret = NF_DROP;
		goto out;
	}

	/* We refer to the reverse direction ("!dir") tuples here,
	 * because we're expecting something in the other direction.
	 * Doesn't matter unless NAT is happening. */
	daddr = &ct->tuplehash[!dir].tuple.dst.u3;

	/* Update the ftp info */
	if ((cmd.l3num == nf_ct_l3num(ct)) &&
	    memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
		   sizeof(cmd.u3.all))) {
		/* Enrico Scholz's passive FTP to partially RNAT'd ftp
		   server: it really wants us to connect to a
		   different IP address.  Simply don't record it for
		   NAT. */
		if (cmd.l3num == PF_INET) {
			pr_debug("conntrack_ftp: NOT RECORDING: %pI4 != %pI4\n",
				 &cmd.u3.ip,
				 &ct->tuplehash[dir].tuple.src.u3.ip);
		} else {
			pr_debug("conntrack_ftp: NOT RECORDING: %pI6 != %pI6\n",
				 cmd.u3.ip6,
				 ct->tuplehash[dir].tuple.src.u3.ip6);
		}

		/* Thanks to Cristiano Lincoln Mattos
		   <lincoln@cesar.org.br> for reporting this potential
		   problem (DMZ machines opening holes to internal
		   networks, or the packet filter itself). */
		if (!loose) {
			ret = NF_ACCEPT;
			goto out_put_expect;
		}
		daddr = &cmd.u3;
	}

	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, cmd.l3num,
			  &ct->tuplehash[!dir].tuple.src.u3, daddr,
			  IPPROTO_TCP, NULL, &cmd.u.tcp.port);

	/* Now, NAT might want to mangle the packet, and register the
	 * (possibly changed) expectation itself. */
	nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
	if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
		ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
				 matchoff, matchlen, exp);
	else {
		/* Can't expect this?  Best to drop packet now. */
		if (nf_ct_expect_related(exp) != 0)
			ret = NF_DROP;
		else
			ret = NF_ACCEPT;
	}

out_put_expect:
	nf_ct_expect_put(exp);

out_update_nl:
	/* Now if this ends in \n, update ftp info.  Seq may have been
	 * adjusted by NAT code. */
	if (ends_in_nl)
		update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
out:
	spin_unlock_bh(&nf_ftp_lock);
	return ret;
}
513 | 513 | ||
514 | static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; | 514 | static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; |
515 | static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly; | 515 | static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly; |
516 | 516 | ||
517 | static const struct nf_conntrack_expect_policy ftp_exp_policy = { | 517 | static const struct nf_conntrack_expect_policy ftp_exp_policy = { |
518 | .max_expected = 1, | 518 | .max_expected = 1, |
519 | .timeout = 5 * 60, | 519 | .timeout = 5 * 60, |
520 | }; | 520 | }; |
521 | 521 | ||
522 | /* don't make this __exit, since it's called from __init ! */ | 522 | /* don't make this __exit, since it's called from __init ! */ |
523 | static void nf_conntrack_ftp_fini(void) | 523 | static void nf_conntrack_ftp_fini(void) |
524 | { | 524 | { |
525 | int i, j; | 525 | int i, j; |
526 | for (i = 0; i < ports_c; i++) { | 526 | for (i = 0; i < ports_c; i++) { |
527 | for (j = 0; j < 2; j++) { | 527 | for (j = 0; j < 2; j++) { |
528 | if (ftp[i][j].me == NULL) | 528 | if (ftp[i][j].me == NULL) |
529 | continue; | 529 | continue; |
530 | 530 | ||
531 | pr_debug("nf_ct_ftp: unregistering helper for pf: %d " | 531 | pr_debug("nf_ct_ftp: unregistering helper for pf: %d " |
532 | "port: %d\n", | 532 | "port: %d\n", |
533 | ftp[i][j].tuple.src.l3num, ports[i]); | 533 | ftp[i][j].tuple.src.l3num, ports[i]); |
534 | nf_conntrack_helper_unregister(&ftp[i][j]); | 534 | nf_conntrack_helper_unregister(&ftp[i][j]); |
535 | } | 535 | } |
536 | } | 536 | } |
537 | 537 | ||
538 | kfree(ftp_buffer); | 538 | kfree(ftp_buffer); |
539 | } | 539 | } |
540 | 540 | ||
/* Module init: allocate the shared reassembly buffer and register an
 * IPv4 and an IPv6 helper for every configured control port (default:
 * FTP_PORT).  On any registration failure all prior work is undone
 * via nf_conntrack_ftp_fini().
 */
static int __init nf_conntrack_ftp_init(void)
{
	int i, j = -1, ret = 0;
	char *tmpname;

	ftp_buffer = kmalloc(65536, GFP_KERNEL);
	if (!ftp_buffer)
		return -ENOMEM;

	if (ports_c == 0)
		ports[ports_c++] = FTP_PORT;

	/* FIXME should be configurable whether IPv4 and IPv6 FTP connections
		 are tracked or not - YK */
	for (i = 0; i < ports_c; i++) {
		ftp[i][0].tuple.src.l3num = PF_INET;
		ftp[i][1].tuple.src.l3num = PF_INET6;
		for (j = 0; j < 2; j++) {
			ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
			ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
			ftp[i][j].expect_policy = &ftp_exp_policy;
			ftp[i][j].me = THIS_MODULE;
			ftp[i][j].help = help;
			tmpname = &ftp_names[i][j][0];
			/* Helper name: "ftp" for the default port,
			 * "ftp-<port>" otherwise. */
			if (ports[i] == FTP_PORT)
				sprintf(tmpname, "ftp");
			else
				sprintf(tmpname, "ftp-%d", ports[i]);
			ftp[i][j].name = tmpname;

			pr_debug("nf_ct_ftp: registering helper for pf: %d "
				 "port: %d\n",
				 ftp[i][j].tuple.src.l3num, ports[i]);
			ret = nf_conntrack_helper_register(&ftp[i][j]);
			if (ret) {
				printk(KERN_ERR "nf_ct_ftp: failed to register"
				       " helper for pf: %d port: %d\n",
				       ftp[i][j].tuple.src.l3num, ports[i]);
				nf_conntrack_ftp_fini();
				return ret;
			}
		}
	}

	return 0;
}
587 | 587 | ||
588 | module_init(nf_conntrack_ftp_init); | 588 | module_init(nf_conntrack_ftp_init); |
589 | module_exit(nf_conntrack_ftp_fini); | 589 | module_exit(nf_conntrack_ftp_fini); |
590 | 590 |
net/netfilter/nf_conntrack_h323_main.c
1 | /* | 1 | /* |
2 | * H.323 connection tracking helper | 2 | * H.323 connection tracking helper |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> | 4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> |
5 | * | 5 | * |
6 | * This source code is licensed under General Public License version 2. | 6 | * This source code is licensed under General Public License version 2. |
7 | * | 7 | * |
8 | * Based on the 'brute force' H.323 connection tracking module by | 8 | * Based on the 'brute force' H.323 connection tracking module by |
9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
10 | * | 10 | * |
11 | * For more information, please see http://nath323.sourceforge.net/ | 11 | * For more information, please see http://nath323.sourceforge.net/ |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/inet.h> | 17 | #include <linux/inet.h> |
18 | #include <linux/in.h> | 18 | #include <linux/in.h> |
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/udp.h> | 21 | #include <linux/udp.h> |
22 | #include <linux/tcp.h> | 22 | #include <linux/tcp.h> |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <net/route.h> | 24 | #include <net/route.h> |
25 | #include <net/ip6_route.h> | 25 | #include <net/ip6_route.h> |
26 | 26 | ||
27 | #include <net/netfilter/nf_conntrack.h> | 27 | #include <net/netfilter/nf_conntrack.h> |
28 | #include <net/netfilter/nf_conntrack_core.h> | 28 | #include <net/netfilter/nf_conntrack_core.h> |
29 | #include <net/netfilter/nf_conntrack_tuple.h> | 29 | #include <net/netfilter/nf_conntrack_tuple.h> |
30 | #include <net/netfilter/nf_conntrack_expect.h> | 30 | #include <net/netfilter/nf_conntrack_expect.h> |
31 | #include <net/netfilter/nf_conntrack_ecache.h> | 31 | #include <net/netfilter/nf_conntrack_ecache.h> |
32 | #include <net/netfilter/nf_conntrack_helper.h> | 32 | #include <net/netfilter/nf_conntrack_helper.h> |
33 | #include <net/netfilter/nf_conntrack_zones.h> | 33 | #include <net/netfilter/nf_conntrack_zones.h> |
34 | #include <linux/netfilter/nf_conntrack_h323.h> | 34 | #include <linux/netfilter/nf_conntrack_h323.h> |
35 | 35 | ||
36 | /* Parameters */ | 36 | /* Parameters */ |
37 | static unsigned int default_rrq_ttl __read_mostly = 300; | 37 | static unsigned int default_rrq_ttl __read_mostly = 300; |
38 | module_param(default_rrq_ttl, uint, 0600); | 38 | module_param(default_rrq_ttl, uint, 0600); |
39 | MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ"); | 39 | MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ"); |
40 | 40 | ||
41 | static int gkrouted_only __read_mostly = 1; | 41 | static int gkrouted_only __read_mostly = 1; |
42 | module_param(gkrouted_only, int, 0600); | 42 | module_param(gkrouted_only, int, 0600); |
43 | MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); | 43 | MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); |
44 | 44 | ||
45 | static int callforward_filter __read_mostly = 1; | 45 | static int callforward_filter __read_mostly = 1; |
46 | module_param(callforward_filter, bool, 0600); | 46 | module_param(callforward_filter, bool, 0600); |
47 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " | 47 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " |
48 | "if both endpoints are on different sides " | 48 | "if both endpoints are on different sides " |
49 | "(determined by routing information)"); | 49 | "(determined by routing information)"); |
50 | 50 | ||
/* Hooks for NAT */
/* These function pointers are NULL in conntrack-only operation and are
 * filled in externally (by the H.323 NAT helper module) — NOTE(review):
 * the assigning module is outside this file; confirm.  Readers in this
 * file fetch them with rcu_dereference() and check for NULL before
 * calling. */
int (*set_h245_addr_hook) (struct sk_buff *skb,
			   unsigned char **data, int dataoff,
			   H245_TransportAddress *taddr,
			   union nf_inet_addr *addr, __be16 port)
			   __read_mostly;
int (*set_h225_addr_hook) (struct sk_buff *skb,
			   unsigned char **data, int dataoff,
			   TransportAddress *taddr,
			   union nf_inet_addr *addr, __be16 port)
			   __read_mostly;
int (*set_sig_addr_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data,
			  TransportAddress *taddr, int count) __read_mostly;
int (*set_ras_addr_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data,
			  TransportAddress *taddr, int count) __read_mostly;
int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  unsigned char **data, int dataoff,
			  H245_TransportAddress *taddr,
			  __be16 port, __be16 rtp_port,
			  struct nf_conntrack_expect *rtp_exp,
			  struct nf_conntrack_expect *rtcp_exp) __read_mostly;
int (*nat_t120_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, int dataoff,
		      H245_TransportAddress *taddr, __be16 port,
		      struct nf_conntrack_expect *exp) __read_mostly;
int (*nat_h245_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, int dataoff,
		      TransportAddress *taddr, __be16 port,
		      struct nf_conntrack_expect *exp) __read_mostly;
int (*nat_callforwarding_hook) (struct sk_buff *skb,
				struct nf_conn *ct,
				enum ip_conntrack_info ctinfo,
				unsigned char **data, int dataoff,
				TransportAddress *taddr, __be16 port,
				struct nf_conntrack_expect *exp) __read_mostly;
int (*nat_q931_hook) (struct sk_buff *skb,
		      struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo,
		      unsigned char **data, TransportAddress *taddr, int idx,
		      __be16 port, struct nf_conntrack_expect *exp)
		      __read_mostly;

/* h323_buffer is a shared scratch buffer handed to skb_header_pointer()
 * in get_tpkt_data(); NOTE(review): presumably nf_h323_lock serializes
 * access to it — the lock/unlock sites are outside this chunk, confirm. */
static DEFINE_SPINLOCK(nf_h323_lock);
static char *h323_buffer;

/* Forward declarations; the helper definitions appear later in the file. */
static struct nf_conntrack_helper nf_conntrack_helper_h245;
static struct nf_conntrack_helper nf_conntrack_helper_q931[];
static struct nf_conntrack_helper nf_conntrack_helper_ras[];
111 | 111 | ||
112 | /****************************************************************************/ | 112 | /****************************************************************************/ |
/* get_tpkt_data - locate the next TPKT-encapsulated PDU in a TCP segment.
 *
 * First call for a segment: *data must be NULL; the TCP payload is then
 * pulled into h323_buffer.  Subsequent calls resume after the previously
 * returned TPKT using *dataoff/*datalen.  Per-direction state in
 * info->tpkt_len[] handles NetMeeting's habit of sending the 4-byte TPKT
 * header and its data in separate segments.
 *
 * Returns 1 with *data/*datalen/*dataoff describing the encapsulated
 * payload, or 0 when there is no (further) complete TPKT.
 */
static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			 unsigned char **data, int *datalen, int *dataoff)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	const struct tcphdr *th;
	struct tcphdr _tcph;
	int tcpdatalen;
	int tcpdataoff;
	unsigned char *tpkt;
	int tpktlen;
	int tpktoff;

	/* Get TCP header */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return 0;

	/* Get TCP data offset */
	tcpdataoff = protoff + th->doff * 4;

	/* Get TCP data length */
	tcpdatalen = skb->len - tcpdataoff;
	if (tcpdatalen <= 0)	/* No TCP data */
		goto clear_out;

	if (*data == NULL) {	/* first TPKT */
		/* Get first TPKT pointer */
		tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
					  h323_buffer);
		/* Cannot fail: the whole payload was verified to exist above */
		BUG_ON(tpkt == NULL);

		/* Validate TPKT identifier (version 3, reserved 0) */
		if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
			/* Netmeeting sends TPKT header and data separately */
			if (info->tpkt_len[dir] > 0) {
				pr_debug("nf_ct_h323: previous packet "
					 "indicated separate TPKT data of %hu "
					 "bytes\n", info->tpkt_len[dir]);
				if (info->tpkt_len[dir] <= tcpdatalen) {
					/* Yes, there was a TPKT header
					 * received */
					*data = tpkt;
					*datalen = info->tpkt_len[dir];
					*dataoff = 0;
					goto out;
				}

				/* Fragmented TPKT */
				pr_debug("nf_ct_h323: fragmented TPKT\n");
				goto clear_out;
			}

			/* It is not even a TPKT */
			return 0;
		}
		tpktoff = 0;
	} else {		/* Next TPKT */
		tpktoff = *dataoff + *datalen;
		tcpdatalen -= tpktoff;
		if (tcpdatalen <= 4)	/* No more TPKT */
			goto clear_out;
		tpkt = *data + *datalen;

		/* Validate TPKT identifier */
		if (tpkt[0] != 0x03 || tpkt[1] != 0)
			goto clear_out;
	}

	/* Validate TPKT length (big-endian 16-bit field, includes header) */
	tpktlen = tpkt[2] * 256 + tpkt[3];
	if (tpktlen < 4)
		goto clear_out;
	if (tpktlen > tcpdatalen) {
		if (tcpdatalen == 4) {	/* Separate TPKT header */
			/* Netmeeting sends TPKT header and data separately */
			pr_debug("nf_ct_h323: separate TPKT header indicates "
				 "there will be TPKT data of %hu bytes\n",
				 tpktlen - 4);
			/* Remember how much data to expect next segment */
			info->tpkt_len[dir] = tpktlen - 4;
			return 0;
		}

		pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n");
		goto clear_out;
	}

	/* This is the encapsulated data */
	*data = tpkt + 4;
	*datalen = tpktlen - 4;
	*dataoff = tpktoff + 4;

      out:
	/* Clear TPKT length */
	info->tpkt_len[dir] = 0;
	return 1;

      clear_out:
	info->tpkt_len[dir] = 0;
	return 0;
}
215 | 215 | ||
216 | /****************************************************************************/ | 216 | /****************************************************************************/ |
217 | static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, | 217 | static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, |
218 | H245_TransportAddress *taddr, | 218 | H245_TransportAddress *taddr, |
219 | union nf_inet_addr *addr, __be16 *port) | 219 | union nf_inet_addr *addr, __be16 *port) |
220 | { | 220 | { |
221 | const unsigned char *p; | 221 | const unsigned char *p; |
222 | int len; | 222 | int len; |
223 | 223 | ||
224 | if (taddr->choice != eH245_TransportAddress_unicastAddress) | 224 | if (taddr->choice != eH245_TransportAddress_unicastAddress) |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | switch (taddr->unicastAddress.choice) { | 227 | switch (taddr->unicastAddress.choice) { |
228 | case eUnicastAddress_iPAddress: | 228 | case eUnicastAddress_iPAddress: |
229 | if (nf_ct_l3num(ct) != AF_INET) | 229 | if (nf_ct_l3num(ct) != AF_INET) |
230 | return 0; | 230 | return 0; |
231 | p = data + taddr->unicastAddress.iPAddress.network; | 231 | p = data + taddr->unicastAddress.iPAddress.network; |
232 | len = 4; | 232 | len = 4; |
233 | break; | 233 | break; |
234 | case eUnicastAddress_iP6Address: | 234 | case eUnicastAddress_iP6Address: |
235 | if (nf_ct_l3num(ct) != AF_INET6) | 235 | if (nf_ct_l3num(ct) != AF_INET6) |
236 | return 0; | 236 | return 0; |
237 | p = data + taddr->unicastAddress.iP6Address.network; | 237 | p = data + taddr->unicastAddress.iP6Address.network; |
238 | len = 16; | 238 | len = 16; |
239 | break; | 239 | break; |
240 | default: | 240 | default: |
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
244 | memcpy(addr, p, len); | 244 | memcpy(addr, p, len); |
245 | memset((void *)addr + len, 0, sizeof(*addr) - len); | 245 | memset((void *)addr + len, 0, sizeof(*addr) - len); |
246 | memcpy(port, p + len, sizeof(__be16)); | 246 | memcpy(port, p + len, sizeof(__be16)); |
247 | 247 | ||
248 | return 1; | 248 | return 1; |
249 | } | 249 | } |
250 | 250 | ||
251 | /****************************************************************************/ | 251 | /****************************************************************************/ |
/* expect_rtp_rtcp - register paired RTP/RTCP expectations for a media
 * channel advertised in an H.245 message.
 *
 * The advertised port is rounded down to even for RTP; RTCP is expected
 * on the next (odd) port.  If the connection is NATed and a NAT hook is
 * registered, expectation setup is delegated to nat_rtp_rtcp_hook.
 *
 * Returns 0 on success (or when the address does not refer back to the
 * sender and is therefore ignored), -1 on failure.
 */
static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned char **data, int dataoff,
			   H245_TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	__be16 rtp_port, rtcp_port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *rtp_exp;
	struct nf_conntrack_expect *rtcp_exp;
	typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;

	/* Read RTP or RTCP address */
	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* RTP port is even */
	port &= htons(~1);
	rtp_port = port;
	rtcp_port = htons(ntohs(port) + 1);

	/* Create expect for RTP */
	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtp_port);

	/* Create expect for RTCP */
	if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
		/* Drop the RTP expectation allocated above */
		nf_ct_expect_put(rtp_exp);
		return -1;
	}
	nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtcp_port);

	/* NAT path only when the endpoints are on different sides (original
	 * source differs from the reply destination) AND a NAT hook is
	 * registered AND the connection is NATed; note the hook pointer is
	 * captured by the short-circuit assignment in this condition. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
				   taddr, port, rtp_port, rtp_exp, rtcp_exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(rtp_exp) == 0) {
			if (nf_ct_expect_related(rtcp_exp) == 0) {
				pr_debug("nf_ct_h323: expect RTP ");
				nf_ct_dump_tuple(&rtp_exp->tuple);
				pr_debug("nf_ct_h323: expect RTCP ");
				nf_ct_dump_tuple(&rtcp_exp->tuple);
			} else {
				/* Roll back RTP if RTCP registration failed */
				nf_ct_unexpect_related(rtp_exp);
				ret = -1;
			}
		} else
			ret = -1;
	}

	/* Release our references; registered expectations keep their own */
	nf_ct_expect_put(rtp_exp);
	nf_ct_expect_put(rtcp_exp);

	return ret;
}
323 | 323 | ||
324 | /****************************************************************************/ | 324 | /****************************************************************************/ |
325 | static int expect_t120(struct sk_buff *skb, | 325 | static int expect_t120(struct sk_buff *skb, |
326 | struct nf_conn *ct, | 326 | struct nf_conn *ct, |
327 | enum ip_conntrack_info ctinfo, | 327 | enum ip_conntrack_info ctinfo, |
328 | unsigned char **data, int dataoff, | 328 | unsigned char **data, int dataoff, |
329 | H245_TransportAddress *taddr) | 329 | H245_TransportAddress *taddr) |
330 | { | 330 | { |
331 | int dir = CTINFO2DIR(ctinfo); | 331 | int dir = CTINFO2DIR(ctinfo); |
332 | int ret = 0; | 332 | int ret = 0; |
333 | __be16 port; | 333 | __be16 port; |
334 | union nf_inet_addr addr; | 334 | union nf_inet_addr addr; |
335 | struct nf_conntrack_expect *exp; | 335 | struct nf_conntrack_expect *exp; |
336 | typeof(nat_t120_hook) nat_t120; | 336 | typeof(nat_t120_hook) nat_t120; |
337 | 337 | ||
338 | /* Read T.120 address */ | 338 | /* Read T.120 address */ |
339 | if (!get_h245_addr(ct, *data, taddr, &addr, &port) || | 339 | if (!get_h245_addr(ct, *data, taddr, &addr, &port) || |
340 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || | 340 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || |
341 | port == 0) | 341 | port == 0) |
342 | return 0; | 342 | return 0; |
343 | 343 | ||
344 | /* Create expect for T.120 connections */ | 344 | /* Create expect for T.120 connections */ |
345 | if ((exp = nf_ct_expect_alloc(ct)) == NULL) | 345 | if ((exp = nf_ct_expect_alloc(ct)) == NULL) |
346 | return -1; | 346 | return -1; |
347 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), | 347 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), |
348 | &ct->tuplehash[!dir].tuple.src.u3, | 348 | &ct->tuplehash[!dir].tuple.src.u3, |
349 | &ct->tuplehash[!dir].tuple.dst.u3, | 349 | &ct->tuplehash[!dir].tuple.dst.u3, |
350 | IPPROTO_TCP, NULL, &port); | 350 | IPPROTO_TCP, NULL, &port); |
351 | exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ | 351 | exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ |
352 | 352 | ||
353 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | 353 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, |
354 | &ct->tuplehash[!dir].tuple.dst.u3, | 354 | &ct->tuplehash[!dir].tuple.dst.u3, |
355 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | 355 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && |
356 | (nat_t120 = rcu_dereference(nat_t120_hook)) && | 356 | (nat_t120 = rcu_dereference(nat_t120_hook)) && |
357 | ct->status & IPS_NAT_MASK) { | 357 | ct->status & IPS_NAT_MASK) { |
358 | /* NAT needed */ | 358 | /* NAT needed */ |
359 | ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr, | 359 | ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr, |
360 | port, exp); | 360 | port, exp); |
361 | } else { /* Conntrack only */ | 361 | } else { /* Conntrack only */ |
362 | if (nf_ct_expect_related(exp) == 0) { | 362 | if (nf_ct_expect_related(exp) == 0) { |
363 | pr_debug("nf_ct_h323: expect T.120 "); | 363 | pr_debug("nf_ct_h323: expect T.120 "); |
364 | nf_ct_dump_tuple(&exp->tuple); | 364 | nf_ct_dump_tuple(&exp->tuple); |
365 | } else | 365 | } else |
366 | ret = -1; | 366 | ret = -1; |
367 | } | 367 | } |
368 | 368 | ||
369 | nf_ct_expect_put(exp); | 369 | nf_ct_expect_put(exp); |
370 | 370 | ||
371 | return ret; | 371 | return ret; |
372 | } | 372 | } |
373 | 373 | ||
374 | /****************************************************************************/ | 374 | /****************************************************************************/ |
375 | static int process_h245_channel(struct sk_buff *skb, | 375 | static int process_h245_channel(struct sk_buff *skb, |
376 | struct nf_conn *ct, | 376 | struct nf_conn *ct, |
377 | enum ip_conntrack_info ctinfo, | 377 | enum ip_conntrack_info ctinfo, |
378 | unsigned char **data, int dataoff, | 378 | unsigned char **data, int dataoff, |
379 | H2250LogicalChannelParameters *channel) | 379 | H2250LogicalChannelParameters *channel) |
380 | { | 380 | { |
381 | int ret; | 381 | int ret; |
382 | 382 | ||
383 | if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { | 383 | if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { |
384 | /* RTP */ | 384 | /* RTP */ |
385 | ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, | 385 | ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, |
386 | &channel->mediaChannel); | 386 | &channel->mediaChannel); |
387 | if (ret < 0) | 387 | if (ret < 0) |
388 | return -1; | 388 | return -1; |
389 | } | 389 | } |
390 | 390 | ||
391 | if (channel-> | 391 | if (channel-> |
392 | options & eH2250LogicalChannelParameters_mediaControlChannel) { | 392 | options & eH2250LogicalChannelParameters_mediaControlChannel) { |
393 | /* RTCP */ | 393 | /* RTCP */ |
394 | ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, | 394 | ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, |
395 | &channel->mediaControlChannel); | 395 | &channel->mediaControlChannel); |
396 | if (ret < 0) | 396 | if (ret < 0) |
397 | return -1; | 397 | return -1; |
398 | } | 398 | } |
399 | 399 | ||
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | /****************************************************************************/ | 403 | /****************************************************************************/ |
/* process_olc - handle an H.245 OpenLogicalChannel message.
 *
 * Sets up RTP/RTCP expectations for the forward and (when present)
 * reverse H.225.0 logical channel parameters, plus a T.120 expectation
 * when a separate LAN stack is advertised for a T.120 data channel.
 * Returns 0 on success, -1 on failure.
 */
static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, int dataoff,
		       OpenLogicalChannel *olc)
{
	int ret;

	pr_debug("nf_ct_h323: OpenLogicalChannel\n");

	/* Forward direction channel parameters */
	if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
	    eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
	{
		ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
					   &olc->
					   forwardLogicalChannelParameters.
					   multiplexParameters.
					   h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* Reverse direction channel parameters (optional field) */
	if ((olc->options &
	     eOpenLogicalChannel_reverseLogicalChannelParameters) &&
	    (olc->reverseLogicalChannelParameters.options &
	     eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters)
	    && (olc->reverseLogicalChannelParameters.multiplexParameters.
		choice ==
		eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
	{
		ret =
		    process_h245_channel(skb, ct, ctinfo, data, dataoff,
					 &olc->
					 reverseLogicalChannelParameters.
					 multiplexParameters.
					 h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* T.120 over a separate LAN stack: the data channel address is
	 * carried in separateStack.networkAddress */
	if ((olc->options & eOpenLogicalChannel_separateStack) &&
	    olc->forwardLogicalChannelParameters.dataType.choice ==
	    eDataType_data &&
	    olc->forwardLogicalChannelParameters.dataType.data.application.
	    choice == eDataApplicationCapability_application_t120 &&
	    olc->forwardLogicalChannelParameters.dataType.data.application.
	    t120.choice == eDataProtocolCapability_separateLANStack &&
	    olc->separateStack.networkAddress.choice ==
	    eNetworkAccessParameters_networkAddress_localAreaAddress) {
		ret = expect_t120(skb, ct, ctinfo, data, dataoff,
				  &olc->separateStack.networkAddress.
				  localAreaAddress);
		if (ret < 0)
			return -1;
	}

	return 0;
}
461 | 461 | ||
462 | /****************************************************************************/ | 462 | /****************************************************************************/ |
/* process_olca - handle an H.245 OpenLogicalChannelAck message.
 *
 * Registers RTP/RTCP expectations for the reverse channel parameters
 * and/or the acknowledged forward multiplex parameters, plus a T.120
 * expectation when a separate stack address is acknowledged.
 * Returns 0 on success, -1 on failure.
 */
static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff,
			OpenLogicalChannelAck *olca)
{
	H2250LogicalChannelAckParameters *ack;
	int ret;

	pr_debug("nf_ct_h323: OpenLogicalChannelAck\n");

	/* Reverse direction channel parameters (optional field) */
	if ((olca->options &
	     eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
	    (olca->reverseLogicalChannelParameters.options &
	     eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters)
	    && (olca->reverseLogicalChannelParameters.multiplexParameters.
		choice ==
		eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
	{
		ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
					   &olca->
					   reverseLogicalChannelParameters.
					   multiplexParameters.
					   h2250LogicalChannelParameters);
		if (ret < 0)
			return -1;
	}

	/* Acknowledged forward multiplex parameters */
	if ((olca->options &
	     eOpenLogicalChannelAck_forwardMultiplexAckParameters) &&
	    (olca->forwardMultiplexAckParameters.choice ==
	     eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters))
	{
		ack = &olca->forwardMultiplexAckParameters.
		    h2250LogicalChannelAckParameters;
		if (ack->options &
		    eH2250LogicalChannelAckParameters_mediaChannel) {
			/* RTP */
			ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
					      &ack->mediaChannel);
			if (ret < 0)
				return -1;
		}

		if (ack->options &
		    eH2250LogicalChannelAckParameters_mediaControlChannel) {
			/* RTCP */
			ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
					      &ack->mediaControlChannel);
			if (ret < 0)
				return -1;
		}
	}

	/* Separate-stack T.120 address in the acknowledgement */
	if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
	    olca->separateStack.networkAddress.choice ==
	    eNetworkAccessParameters_networkAddress_localAreaAddress) {
		ret = expect_t120(skb, ct, ctinfo, data, dataoff,
				  &olca->separateStack.networkAddress.
				  localAreaAddress);
		if (ret < 0)
			return -1;
	}

	return 0;
}
528 | 528 | ||
529 | /****************************************************************************/ | 529 | /****************************************************************************/ |
530 | static int process_h245(struct sk_buff *skb, struct nf_conn *ct, | 530 | static int process_h245(struct sk_buff *skb, struct nf_conn *ct, |
531 | enum ip_conntrack_info ctinfo, | 531 | enum ip_conntrack_info ctinfo, |
532 | unsigned char **data, int dataoff, | 532 | unsigned char **data, int dataoff, |
533 | MultimediaSystemControlMessage *mscm) | 533 | MultimediaSystemControlMessage *mscm) |
534 | { | 534 | { |
535 | switch (mscm->choice) { | 535 | switch (mscm->choice) { |
536 | case eMultimediaSystemControlMessage_request: | 536 | case eMultimediaSystemControlMessage_request: |
537 | if (mscm->request.choice == | 537 | if (mscm->request.choice == |
538 | eRequestMessage_openLogicalChannel) { | 538 | eRequestMessage_openLogicalChannel) { |
539 | return process_olc(skb, ct, ctinfo, data, dataoff, | 539 | return process_olc(skb, ct, ctinfo, data, dataoff, |
540 | &mscm->request.openLogicalChannel); | 540 | &mscm->request.openLogicalChannel); |
541 | } | 541 | } |
542 | pr_debug("nf_ct_h323: H.245 Request %d\n", | 542 | pr_debug("nf_ct_h323: H.245 Request %d\n", |
543 | mscm->request.choice); | 543 | mscm->request.choice); |
544 | break; | 544 | break; |
545 | case eMultimediaSystemControlMessage_response: | 545 | case eMultimediaSystemControlMessage_response: |
546 | if (mscm->response.choice == | 546 | if (mscm->response.choice == |
547 | eResponseMessage_openLogicalChannelAck) { | 547 | eResponseMessage_openLogicalChannelAck) { |
548 | return process_olca(skb, ct, ctinfo, data, dataoff, | 548 | return process_olca(skb, ct, ctinfo, data, dataoff, |
549 | &mscm->response. | 549 | &mscm->response. |
550 | openLogicalChannelAck); | 550 | openLogicalChannelAck); |
551 | } | 551 | } |
552 | pr_debug("nf_ct_h323: H.245 Response %d\n", | 552 | pr_debug("nf_ct_h323: H.245 Response %d\n", |
553 | mscm->response.choice); | 553 | mscm->response.choice); |
554 | break; | 554 | break; |
555 | default: | 555 | default: |
556 | pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice); | 556 | pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice); |
557 | break; | 557 | break; |
558 | } | 558 | } |
559 | 559 | ||
560 | return 0; | 560 | return 0; |
561 | } | 561 | } |
562 | 562 | ||
563 | /****************************************************************************/ | 563 | /****************************************************************************/ |
/* Conntrack helper entry point for H.245 control connections: walk
 * every TPKT in the packet, decode it as an H.245 message and create
 * expectations for the media channels it announces.  Returns
 * NF_ACCEPT normally and NF_DROP only when expectation setup fails. */
static int h245_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* Decode scratch buffer: static to keep it off the stack; all
	 * access is serialized under nf_h323_lock below. */
	static MultimediaSystemControlMessage mscm;
	unsigned char *data = NULL;
	int datalen;
	int dataoff;
	int ret;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		return NF_ACCEPT;
	}
	pr_debug("nf_ct_h245: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Process each TPKT */
	while (get_tpkt_data(skb, protoff, ct, ctinfo,
			     &data, &datalen, &dataoff)) {
		pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
		nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

		/* Decode H.245 signal */
		ret = DecodeMultimediaSystemControlMessage(data, datalen,
							   &mscm);
		if (ret < 0) {
			pr_debug("nf_ct_h245: decoding error: %s\n",
				 ret == H323_ERROR_BOUND ?
				 "out of bound" : "out of range");
			/* We don't drop when decoding error */
			break;
		}

		/* Process H.245 signal */
		if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0)
			goto drop;
	}

	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

	/* Only expectation-setup failure drops; decode errors above
	 * merely stop parsing and still accept the packet. */
      drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_h245: packet dropped\n");
	return NF_DROP;
}
613 | 613 | ||
614 | /****************************************************************************/ | 614 | /****************************************************************************/ |
/* Expectation budget for one H.245 session: up to four RTP/RTCP
 * channel pairs in each direction plus two slots for T.120, with a
 * 240 second expectation timeout. */
static const struct nf_conntrack_expect_policy h245_exp_policy = {
	.max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
	.timeout = 240,
};
619 | 619 | ||
/* Helper attached to expected H.245 control connections (see
 * expect_h245()); AF_UNSPEC so it matches any L3 family.
 * NOTE(review): the tuple says IPPROTO_UDP although expect_h245()
 * registers the expectations with IPPROTO_TCP -- the tuple protonum
 * appears unused for a helper assigned directly via exp->helper, but
 * confirm before relying on it. */
static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
	.name			= "H.245",
	.me			= THIS_MODULE,
	.tuple.src.l3num	= AF_UNSPEC,
	.tuple.dst.protonum	= IPPROTO_UDP,
	.help			= h245_help,
	.expect_policy		= &h245_exp_policy,
};
628 | 628 | ||
629 | /****************************************************************************/ | 629 | /****************************************************************************/ |
630 | int get_h225_addr(struct nf_conn *ct, unsigned char *data, | 630 | int get_h225_addr(struct nf_conn *ct, unsigned char *data, |
631 | TransportAddress *taddr, | 631 | TransportAddress *taddr, |
632 | union nf_inet_addr *addr, __be16 *port) | 632 | union nf_inet_addr *addr, __be16 *port) |
633 | { | 633 | { |
634 | const unsigned char *p; | 634 | const unsigned char *p; |
635 | int len; | 635 | int len; |
636 | 636 | ||
637 | switch (taddr->choice) { | 637 | switch (taddr->choice) { |
638 | case eTransportAddress_ipAddress: | 638 | case eTransportAddress_ipAddress: |
639 | if (nf_ct_l3num(ct) != AF_INET) | 639 | if (nf_ct_l3num(ct) != AF_INET) |
640 | return 0; | 640 | return 0; |
641 | p = data + taddr->ipAddress.ip; | 641 | p = data + taddr->ipAddress.ip; |
642 | len = 4; | 642 | len = 4; |
643 | break; | 643 | break; |
644 | case eTransportAddress_ip6Address: | 644 | case eTransportAddress_ip6Address: |
645 | if (nf_ct_l3num(ct) != AF_INET6) | 645 | if (nf_ct_l3num(ct) != AF_INET6) |
646 | return 0; | 646 | return 0; |
647 | p = data + taddr->ip6Address.ip; | 647 | p = data + taddr->ip6Address.ip; |
648 | len = 16; | 648 | len = 16; |
649 | break; | 649 | break; |
650 | default: | 650 | default: |
651 | return 0; | 651 | return 0; |
652 | } | 652 | } |
653 | 653 | ||
654 | memcpy(addr, p, len); | 654 | memcpy(addr, p, len); |
655 | memset((void *)addr + len, 0, sizeof(*addr) - len); | 655 | memset((void *)addr + len, 0, sizeof(*addr) - len); |
656 | memcpy(port, p + len, sizeof(__be16)); | 656 | memcpy(port, p + len, sizeof(__be16)); |
657 | 657 | ||
658 | return 1; | 658 | return 1; |
659 | } | 659 | } |
660 | 660 | ||
661 | /****************************************************************************/ | 661 | /****************************************************************************/ |
/* Register an expectation for the H.245 control connection announced
 * by an h245Address field, handing off to the NAT helper when the
 * connection is NATed.  Returns 0 on success or when the address is
 * not interesting, -1 on failure. */
static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, int dataoff,
		       TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_h245_hook) nat_h245;

	/* Read h245Address: only addresses owned by the sender of this
	 * packet and carrying a non-zero port are tracked. */
	if (!get_h225_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* Create expect for h245 connection */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = &nf_conntrack_helper_h245;

	/* NATed if the source as seen here differs from the destination
	 * as seen by the peer; then the NAT hook (if loaded) rewrites
	 * the payload address and registers the expectation itself. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_h245 = rcu_dereference(nat_h245_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr,
			       port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_q931: expect H.245 ");
			nf_ct_dump_tuple(&exp->tuple);
		} else
			ret = -1;
	}

	/* Drop our reference; success paths took their own. */
	nf_ct_expect_put(exp);

	return ret;
}
709 | 709 | ||
710 | /* If the calling party is on the same side of the forward-to party, | 710 | /* If the calling party is on the same side of the forward-to party, |
711 | * we don't need to track the second call */ | 711 | * we don't need to track the second call */ |
/* Decide whether src and dst are reachable over the same route.
 * Returns 1 when both addresses route via the same gateway and
 * device (caller then skips tracking the forwarded call), else 0;
 * routing failures conservatively return 0. */
static int callforward_do_filter(const union nf_inet_addr *src,
				 const union nf_inet_addr *dst,
				 u_int8_t family)
{
	const struct nf_afinfo *afinfo;
	struct flowi fl1, fl2;
	int ret = 0;

	/* rcu_read_lock()ed by nf_hook_slow() */
	afinfo = nf_get_afinfo(family);
	if (!afinfo)
		return 0;

	memset(&fl1, 0, sizeof(fl1));
	memset(&fl2, 0, sizeof(fl2));

	switch (family) {
	case AF_INET: {
		struct rtable *rt1, *rt2;

		/* Route both addresses; the inner checks release the
		 * dst references in reverse order of acquisition. */
		fl1.fl4_dst = src->ip;
		fl2.fl4_dst = dst->ip;
		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
				if (rt1->rt_gateway == rt2->rt_gateway &&
				    rt1->u.dst.dev == rt2->u.dst.dev)
					ret = 1;
				dst_release(&rt2->u.dst);
			}
			dst_release(&rt1->u.dst);
		}
		break;
	}
#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
    defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
	case AF_INET6: {
		struct rt6_info *rt1, *rt2;

		/* Same gateway/device comparison for IPv6 routes. */
		memcpy(&fl1.fl6_dst, src, sizeof(fl1.fl6_dst));
		memcpy(&fl2.fl6_dst, dst, sizeof(fl2.fl6_dst));
		if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
			if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
					    sizeof(rt1->rt6i_gateway)) &&
				    rt1->u.dst.dev == rt2->u.dst.dev)
					ret = 1;
				dst_release(&rt2->u.dst);
			}
			dst_release(&rt1->u.dst);
		}
		break;
	}
#endif
	}
	return ret;

}
769 | 769 | ||
770 | /****************************************************************************/ | 770 | /****************************************************************************/ |
/* Set up tracking for the second call leg of a call-forwarding
 * Facility message (alternativeAddress), unless the filter decides
 * both parties are on the same side.  Mirrors expect_h245() but uses
 * the Q.931 helper for the expected connection.  Returns 0 on
 * success or not-interesting, -1 on failure. */
static int expect_callforwarding(struct sk_buff *skb,
				 struct nf_conn *ct,
				 enum ip_conntrack_info ctinfo,
				 unsigned char **data, int dataoff,
				 TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_callforwarding_hook) nat_callforwarding;

	/* Read alternativeAddress */
	if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0)
		return 0;

	/* If the calling party is on the same side of the forward-to party,
	 * we don't need to track the second call */
	if (callforward_filter &&
	    callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
				  nf_ct_l3num(ct))) {
		pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
		return 0;
	}

	/* Create expect for the second call leg */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = nf_conntrack_helper_q931;

	/* NATed connection: hand address rewrite plus expectation
	 * registration to the NAT hook when one is loaded. */
	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
	    (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
	    ct->status & IPS_NAT_MASK) {
		/* Need NAT */
		ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff,
					 taddr, port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_q931: expect Call Forwarding ");
			nf_ct_dump_tuple(&exp->tuple);
		} else
			ret = -1;
	}

	/* Drop our reference; success paths took their own. */
	nf_ct_expect_put(exp);

	return ret;
}
825 | 825 | ||
826 | /****************************************************************************/ | 826 | /****************************************************************************/ |
/* Handle a Q.931 Setup message: expect the announced H.245 channel,
 * rewrite the embedded call-signal addresses when the connection is
 * NATed, and process any fast-start OpenLogicalChannel elements.
 * Returns 0 on success, -1 on failure. */
static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned char **data, int dataoff,
			 Setup_UUIE *setup)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_q931: Setup\n");

	if (setup->options & eSetup_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &setup->h245Address);
		if (ret < 0)
			return -1;
	}

	/* Rewrite destCallSignalAddress to the reply-direction source
	 * when NAT is active and the payload address differs from it.
	 * (%pI6 prints the zero-padded union even for IPv4 tuples.) */
	set_h225_addr = rcu_dereference(set_h225_addr_hook);
	if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->destCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
			 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->destCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.src.u3,
				    ct->tuplehash[!dir].tuple.src.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	/* Likewise rewrite sourceCallSignalAddress to the reply-
	 * direction destination. */
	if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
			 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->sourceCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.dst.u3,
				    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	/* Fast-start elements are embedded OpenLogicalChannels. */
	if (setup->options & eSetup_UUIE_fastStart) {
		for (i = 0; i < setup->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &setup->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
892 | 892 | ||
893 | /****************************************************************************/ | 893 | /****************************************************************************/ |
894 | static int process_callproceeding(struct sk_buff *skb, | 894 | static int process_callproceeding(struct sk_buff *skb, |
895 | struct nf_conn *ct, | 895 | struct nf_conn *ct, |
896 | enum ip_conntrack_info ctinfo, | 896 | enum ip_conntrack_info ctinfo, |
897 | unsigned char **data, int dataoff, | 897 | unsigned char **data, int dataoff, |
898 | CallProceeding_UUIE *callproc) | 898 | CallProceeding_UUIE *callproc) |
899 | { | 899 | { |
900 | int ret; | 900 | int ret; |
901 | int i; | 901 | int i; |
902 | 902 | ||
903 | pr_debug("nf_ct_q931: CallProceeding\n"); | 903 | pr_debug("nf_ct_q931: CallProceeding\n"); |
904 | 904 | ||
905 | if (callproc->options & eCallProceeding_UUIE_h245Address) { | 905 | if (callproc->options & eCallProceeding_UUIE_h245Address) { |
906 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, | 906 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, |
907 | &callproc->h245Address); | 907 | &callproc->h245Address); |
908 | if (ret < 0) | 908 | if (ret < 0) |
909 | return -1; | 909 | return -1; |
910 | } | 910 | } |
911 | 911 | ||
912 | if (callproc->options & eCallProceeding_UUIE_fastStart) { | 912 | if (callproc->options & eCallProceeding_UUIE_fastStart) { |
913 | for (i = 0; i < callproc->fastStart.count; i++) { | 913 | for (i = 0; i < callproc->fastStart.count; i++) { |
914 | ret = process_olc(skb, ct, ctinfo, data, dataoff, | 914 | ret = process_olc(skb, ct, ctinfo, data, dataoff, |
915 | &callproc->fastStart.item[i]); | 915 | &callproc->fastStart.item[i]); |
916 | if (ret < 0) | 916 | if (ret < 0) |
917 | return -1; | 917 | return -1; |
918 | } | 918 | } |
919 | } | 919 | } |
920 | 920 | ||
921 | return 0; | 921 | return 0; |
922 | } | 922 | } |
923 | 923 | ||
924 | /****************************************************************************/ | 924 | /****************************************************************************/ |
925 | static int process_connect(struct sk_buff *skb, struct nf_conn *ct, | 925 | static int process_connect(struct sk_buff *skb, struct nf_conn *ct, |
926 | enum ip_conntrack_info ctinfo, | 926 | enum ip_conntrack_info ctinfo, |
927 | unsigned char **data, int dataoff, | 927 | unsigned char **data, int dataoff, |
928 | Connect_UUIE *connect) | 928 | Connect_UUIE *connect) |
929 | { | 929 | { |
930 | int ret; | 930 | int ret; |
931 | int i; | 931 | int i; |
932 | 932 | ||
933 | pr_debug("nf_ct_q931: Connect\n"); | 933 | pr_debug("nf_ct_q931: Connect\n"); |
934 | 934 | ||
935 | if (connect->options & eConnect_UUIE_h245Address) { | 935 | if (connect->options & eConnect_UUIE_h245Address) { |
936 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, | 936 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, |
937 | &connect->h245Address); | 937 | &connect->h245Address); |
938 | if (ret < 0) | 938 | if (ret < 0) |
939 | return -1; | 939 | return -1; |
940 | } | 940 | } |
941 | 941 | ||
942 | if (connect->options & eConnect_UUIE_fastStart) { | 942 | if (connect->options & eConnect_UUIE_fastStart) { |
943 | for (i = 0; i < connect->fastStart.count; i++) { | 943 | for (i = 0; i < connect->fastStart.count; i++) { |
944 | ret = process_olc(skb, ct, ctinfo, data, dataoff, | 944 | ret = process_olc(skb, ct, ctinfo, data, dataoff, |
945 | &connect->fastStart.item[i]); | 945 | &connect->fastStart.item[i]); |
946 | if (ret < 0) | 946 | if (ret < 0) |
947 | return -1; | 947 | return -1; |
948 | } | 948 | } |
949 | } | 949 | } |
950 | 950 | ||
951 | return 0; | 951 | return 0; |
952 | } | 952 | } |
953 | 953 | ||
954 | /****************************************************************************/ | 954 | /****************************************************************************/ |
955 | static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, | 955 | static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, |
956 | enum ip_conntrack_info ctinfo, | 956 | enum ip_conntrack_info ctinfo, |
957 | unsigned char **data, int dataoff, | 957 | unsigned char **data, int dataoff, |
958 | Alerting_UUIE *alert) | 958 | Alerting_UUIE *alert) |
959 | { | 959 | { |
960 | int ret; | 960 | int ret; |
961 | int i; | 961 | int i; |
962 | 962 | ||
963 | pr_debug("nf_ct_q931: Alerting\n"); | 963 | pr_debug("nf_ct_q931: Alerting\n"); |
964 | 964 | ||
965 | if (alert->options & eAlerting_UUIE_h245Address) { | 965 | if (alert->options & eAlerting_UUIE_h245Address) { |
966 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, | 966 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, |
967 | &alert->h245Address); | 967 | &alert->h245Address); |
968 | if (ret < 0) | 968 | if (ret < 0) |
969 | return -1; | 969 | return -1; |
970 | } | 970 | } |
971 | 971 | ||
972 | if (alert->options & eAlerting_UUIE_fastStart) { | 972 | if (alert->options & eAlerting_UUIE_fastStart) { |
973 | for (i = 0; i < alert->fastStart.count; i++) { | 973 | for (i = 0; i < alert->fastStart.count; i++) { |
974 | ret = process_olc(skb, ct, ctinfo, data, dataoff, | 974 | ret = process_olc(skb, ct, ctinfo, data, dataoff, |
975 | &alert->fastStart.item[i]); | 975 | &alert->fastStart.item[i]); |
976 | if (ret < 0) | 976 | if (ret < 0) |
977 | return -1; | 977 | return -1; |
978 | } | 978 | } |
979 | } | 979 | } |
980 | 980 | ||
981 | return 0; | 981 | return 0; |
982 | } | 982 | } |
983 | 983 | ||
/****************************************************************************/
/* Handle a Facility UUIE.  A callForwarded reason redirects the call to an
 * alternative address (new Q.931 expectation); otherwise the message may
 * advertise an H.245 channel and fastStart channels to be tracked.
 * Returns 0 on success, -1 on failure (caller drops the packet). */
static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Facility_UUIE *facility)
{
	int ret;
	int i;

	pr_debug("nf_ct_q931: Facility\n");

	if (facility->reason.choice == eFacilityReason_callForwarded) {
		/* Call is being forwarded: expect a fresh Q.931 connection
		 * to the alternative address instead of opening channels. */
		if (facility->options & eFacility_UUIE_alternativeAddress)
			return expect_callforwarding(skb, ct, ctinfo, data,
						     dataoff,
						     &facility->
						     alternativeAddress);
		return 0;
	}

	/* Expect the H.245 control channel when one is advertised. */
	if (facility->options & eFacility_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &facility->h245Address);
		if (ret < 0)
			return -1;
	}

	/* Track each proposed fastStart logical channel. */
	if (facility->options & eFacility_UUIE_fastStart) {
		for (i = 0; i < facility->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &facility->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
1022 | 1022 | ||
1023 | /****************************************************************************/ | 1023 | /****************************************************************************/ |
1024 | static int process_progress(struct sk_buff *skb, struct nf_conn *ct, | 1024 | static int process_progress(struct sk_buff *skb, struct nf_conn *ct, |
1025 | enum ip_conntrack_info ctinfo, | 1025 | enum ip_conntrack_info ctinfo, |
1026 | unsigned char **data, int dataoff, | 1026 | unsigned char **data, int dataoff, |
1027 | Progress_UUIE *progress) | 1027 | Progress_UUIE *progress) |
1028 | { | 1028 | { |
1029 | int ret; | 1029 | int ret; |
1030 | int i; | 1030 | int i; |
1031 | 1031 | ||
1032 | pr_debug("nf_ct_q931: Progress\n"); | 1032 | pr_debug("nf_ct_q931: Progress\n"); |
1033 | 1033 | ||
1034 | if (progress->options & eProgress_UUIE_h245Address) { | 1034 | if (progress->options & eProgress_UUIE_h245Address) { |
1035 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, | 1035 | ret = expect_h245(skb, ct, ctinfo, data, dataoff, |
1036 | &progress->h245Address); | 1036 | &progress->h245Address); |
1037 | if (ret < 0) | 1037 | if (ret < 0) |
1038 | return -1; | 1038 | return -1; |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | if (progress->options & eProgress_UUIE_fastStart) { | 1041 | if (progress->options & eProgress_UUIE_fastStart) { |
1042 | for (i = 0; i < progress->fastStart.count; i++) { | 1042 | for (i = 0; i < progress->fastStart.count; i++) { |
1043 | ret = process_olc(skb, ct, ctinfo, data, dataoff, | 1043 | ret = process_olc(skb, ct, ctinfo, data, dataoff, |
1044 | &progress->fastStart.item[i]); | 1044 | &progress->fastStart.item[i]); |
1045 | if (ret < 0) | 1045 | if (ret < 0) |
1046 | return -1; | 1046 | return -1; |
1047 | } | 1047 | } |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | return 0; | 1050 | return 0; |
1051 | } | 1051 | } |
1052 | 1052 | ||
/****************************************************************************/
/* Dispatch one decoded Q.931 message to the handler for its h323 message
 * body type, then process any tunnelled H.245 PDUs (h245Control items).
 * Unknown message types are logged and accepted.
 * Returns 0 on success, -1 on failure (caller drops the packet). */
static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff, Q931 *q931)
{
	H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
	int i;
	int ret = 0;

	switch (pdu->h323_message_body.choice) {
	case eH323_UU_PDU_h323_message_body_setup:
		ret = process_setup(skb, ct, ctinfo, data, dataoff,
				    &pdu->h323_message_body.setup);
		break;
	case eH323_UU_PDU_h323_message_body_callProceeding:
		ret = process_callproceeding(skb, ct, ctinfo, data, dataoff,
					     &pdu->h323_message_body.
					     callProceeding);
		break;
	case eH323_UU_PDU_h323_message_body_connect:
		ret = process_connect(skb, ct, ctinfo, data, dataoff,
				      &pdu->h323_message_body.connect);
		break;
	case eH323_UU_PDU_h323_message_body_alerting:
		ret = process_alerting(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.alerting);
		break;
	case eH323_UU_PDU_h323_message_body_facility:
		ret = process_facility(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.facility);
		break;
	case eH323_UU_PDU_h323_message_body_progress:
		ret = process_progress(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.progress);
		break;
	default:
		/* Other signal types need no channel tracking; accept. */
		pr_debug("nf_ct_q931: Q.931 signal %d\n",
			 pdu->h323_message_body.choice);
		break;
	}

	if (ret < 0)
		return -1;

	/* H.245 messages can be tunnelled inside the Q.931 PDU. */
	if (pdu->options & eH323_UU_PDU_h245Control) {
		for (i = 0; i < pdu->h245Control.count; i++) {
			ret = process_h245(skb, ct, ctinfo, data, dataoff,
					   &pdu->h245Control.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
1108 | 1108 | ||
/****************************************************************************/
/* Conntrack helper entry point for Q.931 call signalling over TCP.
 * Iterates over every TPKT in the segment, decodes it and tracks the
 * channels it announces.  Decode errors are tolerated (NF_ACCEPT); only a
 * processing failure drops the packet.
 * NOTE: the static Q931 decode buffer is shared by all CPUs and is
 * serialized by nf_h323_lock, which also protects h323_buffer used by
 * get_tpkt_data(). */
static int q931_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static Q931 q931;
	unsigned char *data = NULL;
	int datalen;
	int dataoff;
	int ret;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		return NF_ACCEPT;
	}
	pr_debug("nf_ct_q931: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Process each TPKT */
	while (get_tpkt_data(skb, protoff, ct, ctinfo,
			     &data, &datalen, &dataoff)) {
		pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
		nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

		/* Decode Q.931 signal */
		ret = DecodeQ931(data, datalen, &q931);
		if (ret < 0) {
			pr_debug("nf_ct_q931: decoding error: %s\n",
				 ret == H323_ERROR_BOUND ?
				 "out of bound" : "out of range");
			/* We don't drop when decoding error */
			break;
		}

		/* Process Q.931 signal */
		if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0)
			goto drop;
	}

	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

      drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_q931: packet dropped\n");
	return NF_DROP;
}
1158 | 1158 | ||
/****************************************************************************/
/* Expectation policy shared by both Q.931 helpers below: allows up to four
 * T.120 plus four-per-channel H.245 expectations per call, expiring after
 * 240 seconds. */
static const struct nf_conntrack_expect_policy q931_exp_policy = {
	/* T.120 and H.245 */
	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
	.timeout		= 240,
};
1165 | 1165 | ||
/* Q.931 call-signalling helpers, one per address family (IPv4 and IPv6),
 * both attached to TCP port Q931_PORT with the shared expect policy. */
static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
};
1186 | 1186 | ||
1187 | /****************************************************************************/ | 1187 | /****************************************************************************/ |
1188 | static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, | 1188 | static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, |
1189 | int *datalen) | 1189 | int *datalen) |
1190 | { | 1190 | { |
1191 | const struct udphdr *uh; | 1191 | const struct udphdr *uh; |
1192 | struct udphdr _uh; | 1192 | struct udphdr _uh; |
1193 | int dataoff; | 1193 | int dataoff; |
1194 | 1194 | ||
1195 | uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh); | 1195 | uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh); |
1196 | if (uh == NULL) | 1196 | if (uh == NULL) |
1197 | return NULL; | 1197 | return NULL; |
1198 | dataoff = protoff + sizeof(_uh); | 1198 | dataoff = protoff + sizeof(_uh); |
1199 | if (dataoff >= skb->len) | 1199 | if (dataoff >= skb->len) |
1200 | return NULL; | 1200 | return NULL; |
1201 | *datalen = skb->len - dataoff; | 1201 | *datalen = skb->len - dataoff; |
1202 | return skb_header_pointer(skb, dataoff, *datalen, h323_buffer); | 1202 | return skb_header_pointer(skb, dataoff, *datalen, h323_buffer); |
1203 | } | 1203 | } |
1204 | 1204 | ||
/****************************************************************************/
/* Look up a pending TCP expectation towards @addr:@port that belongs to
 * conntrack @ct.  Source address/port are wildcarded (zeroed) to match how
 * expect_q931() initialized the expectation.  Returns the expectation or
 * NULL; caller must hold nf_conntrack_lock (uses __nf_ct_expect_find). */
static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
					       union nf_inet_addr *addr,
					       __be16 port)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;

	memset(&tuple.src.u3, 0, sizeof(tuple.src.u3));
	tuple.src.u.tcp.port = 0;
	memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3));
	tuple.dst.u.tcp.port = port;
	tuple.dst.protonum = IPPROTO_TCP;

	exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
	/* Only hand back expectations created by this very conntrack. */
	if (exp && exp->master == ct)
		return exp;
	return NULL;
}
1225 | 1225 | ||
1226 | /****************************************************************************/ | 1226 | /****************************************************************************/ |
1227 | static int set_expect_timeout(struct nf_conntrack_expect *exp, | 1227 | static int set_expect_timeout(struct nf_conntrack_expect *exp, |
1228 | unsigned timeout) | 1228 | unsigned timeout) |
1229 | { | 1229 | { |
1230 | if (!exp || !del_timer(&exp->timeout)) | 1230 | if (!exp || !del_timer(&exp->timeout)) |
1231 | return 0; | 1231 | return 0; |
1232 | 1232 | ||
1233 | exp->timeout.expires = jiffies + timeout * HZ; | 1233 | exp->timeout.expires = jiffies + timeout * HZ; |
1234 | add_timer(&exp->timeout); | 1234 | add_timer(&exp->timeout); |
1235 | 1235 | ||
1236 | return 1; | 1236 | return 1; |
1237 | } | 1237 | } |
1238 | 1238 | ||
/****************************************************************************/
/* Create a permanent expectation for the Q.931 call-signalling channel
 * announced in the first usable address of @taddr[0..count).  With
 * gkrouted_only set, only connections originating from the gatekeeper are
 * accepted.  When NAT is active the nat_q931 hook registers the expectation
 * (and rewrites the packet); otherwise it is registered directly and the
 * signalling port is remembered for RCF processing.
 * Returns 0 on success or if no address matched, -1 on failure. */
static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data,
		       TransportAddress *taddr, int count)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_q931_hook) nat_q931;

	/* Look for the first related address */
	for (i = 0; i < count; i++) {
		/* Must match the sender's own address and carry a port. */
		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
		    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
			   sizeof(addr)) == 0 && port != 0)
			break;
	}

	if (i >= count)		/* Not found */
		return 0;

	/* Create expect for Q.931 */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  gkrouted_only ? /* only accept calls from GK? */
				&ct->tuplehash[!dir].tuple.src.u3 : NULL,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = nf_conntrack_helper_q931;
	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple calls */

	nat_q931 = rcu_dereference(nat_q931_hook);
	if (nat_q931 && ct->status & IPS_NAT_MASK) {	/* Need NAT */
		ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_ras: expect Q.931 ");
			nf_ct_dump_tuple(&exp->tuple);

			/* Save port for looking up expect in processing RCF */
			info->sig_port[dir] = port;
		} else
			ret = -1;
	}

	/* Drop the allocation reference; the table (if any) holds its own. */
	nf_ct_expect_put(exp);

	return ret;
}
1294 | 1294 | ||
1295 | /****************************************************************************/ | 1295 | /****************************************************************************/ |
1296 | static int process_grq(struct sk_buff *skb, struct nf_conn *ct, | 1296 | static int process_grq(struct sk_buff *skb, struct nf_conn *ct, |
1297 | enum ip_conntrack_info ctinfo, | 1297 | enum ip_conntrack_info ctinfo, |
1298 | unsigned char **data, GatekeeperRequest *grq) | 1298 | unsigned char **data, GatekeeperRequest *grq) |
1299 | { | 1299 | { |
1300 | typeof(set_ras_addr_hook) set_ras_addr; | 1300 | typeof(set_ras_addr_hook) set_ras_addr; |
1301 | 1301 | ||
1302 | pr_debug("nf_ct_ras: GRQ\n"); | 1302 | pr_debug("nf_ct_ras: GRQ\n"); |
1303 | 1303 | ||
1304 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | 1304 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1305 | if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */ | 1305 | if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */ |
1306 | return set_ras_addr(skb, ct, ctinfo, data, | 1306 | return set_ras_addr(skb, ct, ctinfo, data, |
1307 | &grq->rasAddress, 1); | 1307 | &grq->rasAddress, 1); |
1308 | return 0; | 1308 | return 0; |
1309 | } | 1309 | } |
1310 | 1310 | ||
/****************************************************************************/
/* Handle a GatekeeperConfirm: expect the RAS (UDP) connection towards the
 * gatekeeper address it announces, unless registration will reuse the
 * discovery port or this conntrack was itself created by a RAS expectation
 * (which would loop).  Returns 0 on success, -1 on failure. */
static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, GatekeeperConfirm *gcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: GCF\n");

	if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
		return 0;

	/* Registration port is the same as discovery port */
	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == ct->tuplehash[dir].tuple.src.u.udp.port)
		return 0;

	/* Avoid RAS expectation loops. A GCF is never expected. */
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return 0;

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_UDP, NULL, &port);
	exp->helper = nf_conntrack_helper_ras;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect RAS ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	/* Drop the allocation reference; the table (if any) holds its own. */
	nf_ct_expect_put(exp);

	return ret;
}
1354 | 1354 | ||
/****************************************************************************/
/* Handle a RegistrationRequest: expect the endpoint's Q.931 call-signalling
 * channel, let NAT rewrite the RAS addresses if needed, and remember the
 * requested registration TTL (or default_rrq_ttl) for later use by RCF.
 * Returns 0 on success, -1 on failure. */
static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RegistrationRequest *rrq)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int ret;
	typeof(set_ras_addr_hook) set_ras_addr;

	pr_debug("nf_ct_ras: RRQ\n");

	ret = expect_q931(skb, ct, ctinfo, data,
			  rrq->callSignalAddress.item,
			  rrq->callSignalAddress.count);
	if (ret < 0)
		return -1;

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
		ret = set_ras_addr(skb, ct, ctinfo, data,
				   rrq->rasAddress.item,
				   rrq->rasAddress.count);
		if (ret < 0)
			return -1;
	}

	if (rrq->options & eRegistrationRequest_timeToLive) {
		pr_debug("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
		info->timeout = rrq->timeToLive;
	} else
		info->timeout = default_rrq_ttl;

	return 0;
}
1389 | 1389 | ||
1390 | /****************************************************************************/ | 1390 | /****************************************************************************/ |
1391 | static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, | 1391 | static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, |
1392 | enum ip_conntrack_info ctinfo, | 1392 | enum ip_conntrack_info ctinfo, |
1393 | unsigned char **data, RegistrationConfirm *rcf) | 1393 | unsigned char **data, RegistrationConfirm *rcf) |
1394 | { | 1394 | { |
1395 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | 1395 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; |
1396 | int dir = CTINFO2DIR(ctinfo); | 1396 | int dir = CTINFO2DIR(ctinfo); |
1397 | int ret; | 1397 | int ret; |
1398 | struct nf_conntrack_expect *exp; | 1398 | struct nf_conntrack_expect *exp; |
1399 | typeof(set_sig_addr_hook) set_sig_addr; | 1399 | typeof(set_sig_addr_hook) set_sig_addr; |
1400 | 1400 | ||
1401 | pr_debug("nf_ct_ras: RCF\n"); | 1401 | pr_debug("nf_ct_ras: RCF\n"); |
1402 | 1402 | ||
1403 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | 1403 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1404 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | 1404 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { |
1405 | ret = set_sig_addr(skb, ct, ctinfo, data, | 1405 | ret = set_sig_addr(skb, ct, ctinfo, data, |
1406 | rcf->callSignalAddress.item, | 1406 | rcf->callSignalAddress.item, |
1407 | rcf->callSignalAddress.count); | 1407 | rcf->callSignalAddress.count); |
1408 | if (ret < 0) | 1408 | if (ret < 0) |
1409 | return -1; | 1409 | return -1; |
1410 | } | 1410 | } |
1411 | 1411 | ||
1412 | if (rcf->options & eRegistrationConfirm_timeToLive) { | 1412 | if (rcf->options & eRegistrationConfirm_timeToLive) { |
1413 | pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive); | 1413 | pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive); |
1414 | info->timeout = rcf->timeToLive; | 1414 | info->timeout = rcf->timeToLive; |
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | if (info->timeout > 0) { | 1417 | if (info->timeout > 0) { |
1418 | pr_debug("nf_ct_ras: set RAS connection timeout to " | 1418 | pr_debug("nf_ct_ras: set RAS connection timeout to " |
1419 | "%u seconds\n", info->timeout); | 1419 | "%u seconds\n", info->timeout); |
1420 | nf_ct_refresh(ct, skb, info->timeout * HZ); | 1420 | nf_ct_refresh(ct, skb, info->timeout * HZ); |
1421 | 1421 | ||
1422 | /* Set expect timeout */ | 1422 | /* Set expect timeout */ |
1423 | spin_lock_bh(&nf_conntrack_lock); | 1423 | spin_lock_bh(&nf_conntrack_lock); |
1424 | exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, | 1424 | exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, |
1425 | info->sig_port[!dir]); | 1425 | info->sig_port[!dir]); |
1426 | if (exp) { | 1426 | if (exp) { |
1427 | pr_debug("nf_ct_ras: set Q.931 expect " | 1427 | pr_debug("nf_ct_ras: set Q.931 expect " |
1428 | "timeout to %u seconds for", | 1428 | "timeout to %u seconds for", |
1429 | info->timeout); | 1429 | info->timeout); |
1430 | nf_ct_dump_tuple(&exp->tuple); | 1430 | nf_ct_dump_tuple(&exp->tuple); |
1431 | set_expect_timeout(exp, info->timeout); | 1431 | set_expect_timeout(exp, info->timeout); |
1432 | } | 1432 | } |
1433 | spin_unlock_bh(&nf_conntrack_lock); | 1433 | spin_unlock_bh(&nf_conntrack_lock); |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | return 0; | 1436 | return 0; |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | /****************************************************************************/ | 1439 | /****************************************************************************/ |
1440 | static int process_urq(struct sk_buff *skb, struct nf_conn *ct, | 1440 | static int process_urq(struct sk_buff *skb, struct nf_conn *ct, |
1441 | enum ip_conntrack_info ctinfo, | 1441 | enum ip_conntrack_info ctinfo, |
1442 | unsigned char **data, UnregistrationRequest *urq) | 1442 | unsigned char **data, UnregistrationRequest *urq) |
1443 | { | 1443 | { |
1444 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | 1444 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; |
1445 | int dir = CTINFO2DIR(ctinfo); | 1445 | int dir = CTINFO2DIR(ctinfo); |
1446 | int ret; | 1446 | int ret; |
1447 | typeof(set_sig_addr_hook) set_sig_addr; | 1447 | typeof(set_sig_addr_hook) set_sig_addr; |
1448 | 1448 | ||
1449 | pr_debug("nf_ct_ras: URQ\n"); | 1449 | pr_debug("nf_ct_ras: URQ\n"); |
1450 | 1450 | ||
1451 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | 1451 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1452 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | 1452 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { |
1453 | ret = set_sig_addr(skb, ct, ctinfo, data, | 1453 | ret = set_sig_addr(skb, ct, ctinfo, data, |
1454 | urq->callSignalAddress.item, | 1454 | urq->callSignalAddress.item, |
1455 | urq->callSignalAddress.count); | 1455 | urq->callSignalAddress.count); |
1456 | if (ret < 0) | 1456 | if (ret < 0) |
1457 | return -1; | 1457 | return -1; |
1458 | } | 1458 | } |
1459 | 1459 | ||
1460 | /* Clear old expect */ | 1460 | /* Clear old expect */ |
1461 | nf_ct_remove_expectations(ct); | 1461 | nf_ct_remove_expectations(ct); |
1462 | info->sig_port[dir] = 0; | 1462 | info->sig_port[dir] = 0; |
1463 | info->sig_port[!dir] = 0; | 1463 | info->sig_port[!dir] = 0; |
1464 | 1464 | ||
1465 | /* Give it 30 seconds for UCF or URJ */ | 1465 | /* Give it 30 seconds for UCF or URJ */ |
1466 | nf_ct_refresh(ct, skb, 30 * HZ); | 1466 | nf_ct_refresh(ct, skb, 30 * HZ); |
1467 | 1467 | ||
1468 | return 0; | 1468 | return 0; |
1469 | } | 1469 | } |
1470 | 1470 | ||
/****************************************************************************/
/* Handle a RAS Admission Request (ARQ).
 *
 * With NAT active, the call signalling address carried inside the ARQ must
 * be rewritten to match the translated tuple:
 *  - an "answering" ARQ carries a destCallSignalAddress that names our own
 *    source address/port, so it is replaced with the reply direction's
 *    translated address and the recorded signalling port;
 *  - a "calling" ARQ carries a srcCallSignalAddress naming our source
 *    address, rewritten likewise (keeping the parsed port).
 *
 * Returns 0 when nothing needs doing, otherwise the set_h225_addr result.
 */
static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionRequest *arq)
{
	const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_ras: ARQ\n");

	set_h225_addr = rcu_dereference(set_h225_addr_hook);
	if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->destCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == info->sig_port[dir] &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Answering ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->destCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     info->sig_port[!dir]);
	}

	if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Calling ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->srcCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     port);
	}

	return 0;
}
1512 | 1512 | ||
/****************************************************************************/
/* Handle a RAS Admission Confirm (ACF).
 *
 * If the confirmed destCallSignalAddress points back at this connection's
 * destination, this is an answering ACF: with NAT, the embedded address is
 * rewritten and we are done.  Otherwise the call will be signalled to a
 * third party, so a permanent Q.931 expectation is registered for the
 * upcoming TCP call-signalling connection to that address/port.
 *
 * Returns 0 on success, -1 on allocation or expectation failure.
 */
static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionConfirm *acf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: ACF\n");

	if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
			   &addr, &port))
		return 0;

	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
		/* Answering ACF */
		set_sig_addr = rcu_dereference(set_sig_addr_hook);
		if (set_sig_addr && ct->status & IPS_NAT_MASK)
			return set_sig_addr(skb, ct, ctinfo, data,
					    &acf->destCallSignalAddress, 1);
		return 0;
	}

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	/* Keep the expectation until the Q.931 connection really shows up. */
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	return ret;
}
1559 | 1559 | ||
1560 | /****************************************************************************/ | 1560 | /****************************************************************************/ |
1561 | static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, | 1561 | static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, |
1562 | enum ip_conntrack_info ctinfo, | 1562 | enum ip_conntrack_info ctinfo, |
1563 | unsigned char **data, LocationRequest *lrq) | 1563 | unsigned char **data, LocationRequest *lrq) |
1564 | { | 1564 | { |
1565 | typeof(set_ras_addr_hook) set_ras_addr; | 1565 | typeof(set_ras_addr_hook) set_ras_addr; |
1566 | 1566 | ||
1567 | pr_debug("nf_ct_ras: LRQ\n"); | 1567 | pr_debug("nf_ct_ras: LRQ\n"); |
1568 | 1568 | ||
1569 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | 1569 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1570 | if (set_ras_addr && ct->status & IPS_NAT_MASK) | 1570 | if (set_ras_addr && ct->status & IPS_NAT_MASK) |
1571 | return set_ras_addr(skb, ct, ctinfo, data, | 1571 | return set_ras_addr(skb, ct, ctinfo, data, |
1572 | &lrq->replyAddress, 1); | 1572 | &lrq->replyAddress, 1); |
1573 | return 0; | 1573 | return 0; |
1574 | } | 1574 | } |
1575 | 1575 | ||
/****************************************************************************/
/* Handle a RAS Location Confirm (LCF).
 *
 * The gatekeeper returns the call signalling address of the located
 * endpoint; register a permanent Q.931 expectation so the subsequent TCP
 * call-signalling connection to that address/port is tracked.  The
 * rasAddress field is deliberately ignored.
 *
 * Returns 0 on success, -1 on allocation or expectation failure.
 */
static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, LocationConfirm *lcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: LCF\n");

	if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
			   &addr, &port))
		return 0;

	/* Need new expect for call signal */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	/* Keep the expectation until the Q.931 connection really shows up. */
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	/* Ignore rasAddress */

	return ret;
}
1614 | 1614 | ||
1615 | /****************************************************************************/ | 1615 | /****************************************************************************/ |
1616 | static int process_irr(struct sk_buff *skb, struct nf_conn *ct, | 1616 | static int process_irr(struct sk_buff *skb, struct nf_conn *ct, |
1617 | enum ip_conntrack_info ctinfo, | 1617 | enum ip_conntrack_info ctinfo, |
1618 | unsigned char **data, InfoRequestResponse *irr) | 1618 | unsigned char **data, InfoRequestResponse *irr) |
1619 | { | 1619 | { |
1620 | int ret; | 1620 | int ret; |
1621 | typeof(set_ras_addr_hook) set_ras_addr; | 1621 | typeof(set_ras_addr_hook) set_ras_addr; |
1622 | typeof(set_sig_addr_hook) set_sig_addr; | 1622 | typeof(set_sig_addr_hook) set_sig_addr; |
1623 | 1623 | ||
1624 | pr_debug("nf_ct_ras: IRR\n"); | 1624 | pr_debug("nf_ct_ras: IRR\n"); |
1625 | 1625 | ||
1626 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | 1626 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1627 | if (set_ras_addr && ct->status & IPS_NAT_MASK) { | 1627 | if (set_ras_addr && ct->status & IPS_NAT_MASK) { |
1628 | ret = set_ras_addr(skb, ct, ctinfo, data, | 1628 | ret = set_ras_addr(skb, ct, ctinfo, data, |
1629 | &irr->rasAddress, 1); | 1629 | &irr->rasAddress, 1); |
1630 | if (ret < 0) | 1630 | if (ret < 0) |
1631 | return -1; | 1631 | return -1; |
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | 1634 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1635 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | 1635 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { |
1636 | ret = set_sig_addr(skb, ct, ctinfo, data, | 1636 | ret = set_sig_addr(skb, ct, ctinfo, data, |
1637 | irr->callSignalAddress.item, | 1637 | irr->callSignalAddress.item, |
1638 | irr->callSignalAddress.count); | 1638 | irr->callSignalAddress.count); |
1639 | if (ret < 0) | 1639 | if (ret < 0) |
1640 | return -1; | 1640 | return -1; |
1641 | } | 1641 | } |
1642 | 1642 | ||
1643 | return 0; | 1643 | return 0; |
1644 | } | 1644 | } |
1645 | 1645 | ||
/****************************************************************************/
/* Dispatch a decoded RAS message to its per-type handler.
 *
 * Only the message types that carry addresses/ports needing NAT mangling
 * or expectation setup are handled; everything else is accepted untouched.
 * Returns the handler's result (0 accept, negative drop), or 0 for
 * unhandled message types.
 */
static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RasMessage *ras)
{
	switch (ras->choice) {
	case eRasMessage_gatekeeperRequest:
		return process_grq(skb, ct, ctinfo, data,
				   &ras->gatekeeperRequest);
	case eRasMessage_gatekeeperConfirm:
		return process_gcf(skb, ct, ctinfo, data,
				   &ras->gatekeeperConfirm);
	case eRasMessage_registrationRequest:
		return process_rrq(skb, ct, ctinfo, data,
				   &ras->registrationRequest);
	case eRasMessage_registrationConfirm:
		return process_rcf(skb, ct, ctinfo, data,
				   &ras->registrationConfirm);
	case eRasMessage_unregistrationRequest:
		return process_urq(skb, ct, ctinfo, data,
				   &ras->unregistrationRequest);
	case eRasMessage_admissionRequest:
		return process_arq(skb, ct, ctinfo, data,
				   &ras->admissionRequest);
	case eRasMessage_admissionConfirm:
		return process_acf(skb, ct, ctinfo, data,
				   &ras->admissionConfirm);
	case eRasMessage_locationRequest:
		return process_lrq(skb, ct, ctinfo, data,
				   &ras->locationRequest);
	case eRasMessage_locationConfirm:
		return process_lcf(skb, ct, ctinfo, data,
				   &ras->locationConfirm);
	case eRasMessage_infoRequestResponse:
		return process_irr(skb, ct, ctinfo, data,
				   &ras->infoRequestResponse);
	default:
		/* Other RAS message types need no tracking help. */
		pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
		break;
	}

	return 0;
}
1689 | 1689 | ||
/****************************************************************************/
/* Conntrack helper entry point for RAS (UDP) packets.
 *
 * Extracts the UDP payload, ASN.1-decodes it into a RasMessage and runs
 * the per-type processing.  Decoding failures are tolerated (the packet
 * is accepted untouched); processing failures drop the packet.
 *
 * Returns NF_ACCEPT or NF_DROP.
 */
static int ras_help(struct sk_buff *skb, unsigned int protoff,
		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* Large decode buffer; safe as static because nf_h323_lock
	 * serializes all users of it. */
	static RasMessage ras;
	unsigned char *data;
	int datalen = 0;
	int ret;

	pr_debug("nf_ct_ras: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Get UDP data */
	data = get_udp_data(skb, protoff, &datalen);
	if (data == NULL)
		goto accept;
	pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
	nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

	/* Decode RAS message */
	ret = DecodeRasMessage(data, datalen, &ras);
	if (ret < 0) {
		pr_debug("nf_ct_ras: decoding error: %s\n",
			 ret == H323_ERROR_BOUND ?
			 "out of bound" : "out of range");
		goto accept;
	}

	/* Process RAS message */
	if (process_ras(skb, ct, ctinfo, &data, &ras) < 0)
		goto drop;

accept:
	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_ras: packet dropped\n");
	return NF_DROP;
}
1733 | 1733 | ||
/****************************************************************************/
/* Expectation policy shared by both RAS helper instances: at most 32
 * outstanding expectations per master connection, 240 second timeout. */
static const struct nf_conntrack_expect_policy ras_exp_policy = {
	.max_expected		= 32,
	.timeout		= 240,
};

/* One RAS helper per address family (IPv4 and IPv6), both bound to the
 * well-known RAS UDP port. */
static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
};
1760 | 1760 | ||
/****************************************************************************/
/* Module exit: unregister all helpers in reverse registration order and
 * release the shared decode buffer. */
static void __exit nf_conntrack_h323_fini(void)
{
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
	kfree(h323_buffer);
	pr_debug("nf_ct_h323: fini\n");
}
1772 | 1772 | ||
/****************************************************************************/
/* Module init: allocate the shared decode buffer and register the H.245,
 * Q.931 (v4/v6) and RAS (v4/v6) helpers.  On any failure, previously
 * registered helpers are torn down via the goto-cleanup ladder.
 *
 * Returns 0 on success or a negative errno. */
static int __init nf_conntrack_h323_init(void)
{
	int ret;

	/* 64 KiB scratch buffer for TPKT/ASN.1 decoding, shared under
	 * nf_h323_lock. */
	h323_buffer = kmalloc(65536, GFP_KERNEL);
	if (!h323_buffer)
		return -ENOMEM;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
	if (ret < 0)
		goto err1;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]);
	if (ret < 0)
		goto err2;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]);
	if (ret < 0)
		goto err3;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]);
	if (ret < 0)
		goto err4;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
	if (ret < 0)
		goto err5;
	pr_debug("nf_ct_h323: init success\n");
	return 0;

err5:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
err4:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
err3:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
err2:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
err1:
	kfree(h323_buffer);
	return ret;
}
1811 | 1811 | ||
/****************************************************************************/
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);

/* Symbols consumed by the companion NAT helper module (nf_nat_h323). */
EXPORT_SYMBOL_GPL(get_h225_addr);
EXPORT_SYMBOL_GPL(set_h245_addr_hook);
EXPORT_SYMBOL_GPL(set_h225_addr_hook);
EXPORT_SYMBOL_GPL(set_sig_addr_hook);
EXPORT_SYMBOL_GPL(set_ras_addr_hook);
EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
EXPORT_SYMBOL_GPL(nat_t120_hook);
EXPORT_SYMBOL_GPL(nat_h245_hook);
EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
EXPORT_SYMBOL_GPL(nat_q931_hook);

MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
MODULE_ALIAS_NFCT_HELPER("h323");
1832 | 1832 |
net/netfilter/nf_conntrack_irc.c
1 | /* IRC extension for IP connection tracking, Version 1.21 | 1 | /* IRC extension for IP connection tracking, Version 1.21 |
2 | * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> | 2 | * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> |
3 | * based on RR's ip_conntrack_ftp.c | 3 | * based on RR's ip_conntrack_ftp.c |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License | 6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version | 7 | * as published by the Free Software Foundation; either version |
8 | * 2 of the License, or (at your option) any later version. | 8 | * 2 of the License, or (at your option) any later version. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 12 | #include <linux/moduleparam.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/in.h> | 14 | #include <linux/in.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
16 | #include <linux/tcp.h> | 16 | #include <linux/tcp.h> |
17 | #include <linux/netfilter.h> | 17 | #include <linux/netfilter.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | 19 | ||
20 | #include <net/netfilter/nf_conntrack.h> | 20 | #include <net/netfilter/nf_conntrack.h> |
21 | #include <net/netfilter/nf_conntrack_expect.h> | 21 | #include <net/netfilter/nf_conntrack_expect.h> |
22 | #include <net/netfilter/nf_conntrack_helper.h> | 22 | #include <net/netfilter/nf_conntrack_helper.h> |
23 | #include <linux/netfilter/nf_conntrack_irc.h> | 23 | #include <linux/netfilter/nf_conntrack_irc.h> |
24 | 24 | ||
/* Up to 8 server ports may be supplied via the "ports" module parameter. */
#define MAX_PORTS 8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
/* Limit on concurrent expected DCC channels per IRC session. */
static unsigned int max_dcc_channels = 8;
/* Seconds to wait for an expected DCC channel to be established. */
static unsigned int dcc_timeout __read_mostly = 300;
/* This is slow, but it's simple. --RR */
static char *irc_buffer;
static DEFINE_SPINLOCK(irc_buffer_lock);

/* NAT mangling hook, installed by nf_nat_irc when that module loads. */
unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
				enum ip_conntrack_info ctinfo,
				unsigned int matchoff,
				unsigned int matchlen,
				struct nf_conntrack_expect *exp) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_irc_hook);

MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_irc");
MODULE_ALIAS_NFCT_HELPER("irc");

module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
module_param(max_dcc_channels, uint, 0400);
MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
				   "IRC session");
module_param(dcc_timeout, uint, 0400);
MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");

/* DCC subcommands whose payload carries an address/port to track. */
static const char *const dccprotos[] = {
	"SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
};

/* Shortest dccprotos entry; minimum bytes needed before matching. */
#define MINMATCHLEN 5
60 | 60 | ||
61 | /* tries to get the ip_addr and port out of a dcc command | 61 | /* tries to get the ip_addr and port out of a dcc command |
62 | * return value: -1 on failure, 0 on success | 62 | * return value: -1 on failure, 0 on success |
63 | * data pointer to first byte of DCC command data | 63 | * data pointer to first byte of DCC command data |
64 | * data_end pointer to last byte of dcc command data | 64 | * data_end pointer to last byte of dcc command data |
65 | * ip returns parsed ip of dcc command | 65 | * ip returns parsed ip of dcc command |
66 | * port returns parsed port of dcc command | 66 | * port returns parsed port of dcc command |
67 | * ad_beg_p returns pointer to first byte of addr data | 67 | * ad_beg_p returns pointer to first byte of addr data |
68 | * ad_end_p returns pointer to last byte of addr data | 68 | * ad_end_p returns pointer to last byte of addr data |
69 | */ | 69 | */ |
/* Parse the "<ip> <port>" payload of a DCC command.
 * Return value: -1 on failure, 0 on success.
 *	data		pointer to first byte of DCC command data
 *	data_end	pointer to last byte of dcc command data
 *	ip		returns parsed ip of dcc command (big-endian)
 *	port		returns parsed port of dcc command (host order)
 *	ad_beg_p	returns pointer to first byte of addr data
 *	ad_end_p	returns pointer to last byte of addr data
 */
static int parse_dcc(char *data, const char *data_end, __be32 *ip,
		     u_int16_t *port, char **ad_beg_p, char **ad_end_p)
{
	char *tmp;

	/* Skip the sub-command token up to and including its trailing
	 * space.  At least 12 bytes ("AAAAAAAA P\1\n" -- address, port,
	 * CTCP terminator, newline) must remain after the space. */
	while (*data++ != ' ')
		if (data > data_end - 12)
			return -1;

	/* Make sure we have a newline character within the packet boundaries
	 * because simple_strtoul parses until the first invalid character. */
	for (tmp = data; tmp <= data_end; tmp++)
		if (*tmp == '\n')
			break;
	if (tmp > data_end || *tmp != '\n')
		return -1;

	*ad_beg_p = data;
	/* The address is sent as a decimal 32-bit integer in host order;
	 * convert to network byte order for comparison with tuple IPs. */
	*ip = cpu_to_be32(simple_strtoul(data, &data, 10));

	/* skip blanks between ip and port */
	while (*data == ' ') {
		if (data >= data_end)
			return -1;
		data++;
	}

	*port = simple_strtoul(data, &data, 10);
	*ad_end_p = data;	/* first byte after the parsed "<ip> <port>" span */

	return 0;
}
103 | 103 | ||
/* Conntrack helper callback: scan client-to-server IRC traffic for CTCP
 * "\1DCC ..." requests and register an expectation so the resulting
 * server-to-client data connection is tracked (and rewritten, when the
 * NAT helper hook is installed).  Returns an NF_* verdict. */
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *data_limit;
	char *data, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	__be32 dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	/* irc_buffer is a single shared 64KB linearization buffer, hence
	 * the lock is held for the whole scan below. */
	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc(data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}

			pr_debug("DCC bound ip/port: %pI4:%u\n",
				 &dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP;
			 * anything else is a spoofed/forged command. */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != dcc_ip &&
			    tuple->dst.u3.ip != dcc_ip) {
				if (net_ratelimit())
					printk(KERN_WARNING
						"Forged DCC command from %pI4: %pI4:%u\n",
						&tuple->src.u3.ip,
						&dcc_ip, dcc_port);
				continue;
			}

			exp = nf_ct_expect_alloc(ct);
			if (exp == NULL) {
				ret = NF_DROP;
				goto out;
			}
			/* Expect the data connection towards the client
			 * (reply direction of the control connection), with
			 * any source address, on the advertised port. */
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
					  tuple->src.l3num,
					  NULL, &tuple->dst.u3,
					  IPPROTO_TCP, NULL, &port);

			/* If NATed, let the NAT helper mangle the address in
			 * the payload and register the expectation itself. */
			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				ret = nf_nat_irc(skb, ctinfo,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_ct_expect_related(exp) != 0)
				ret = NF_DROP;
			nf_ct_expect_put(exp);
			goto out;	/* only one DCC request per packet is handled */
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
225 | 225 | ||
226 | static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; | 226 | static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; |
227 | static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly; | 227 | static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly; |
228 | static struct nf_conntrack_expect_policy irc_exp_policy; | 228 | static struct nf_conntrack_expect_policy irc_exp_policy; |
229 | 229 | ||
230 | static void nf_conntrack_irc_fini(void); | 230 | static void nf_conntrack_irc_fini(void); |
231 | 231 | ||
232 | static int __init nf_conntrack_irc_init(void) | 232 | static int __init nf_conntrack_irc_init(void) |
233 | { | 233 | { |
234 | int i, ret; | 234 | int i, ret; |
235 | char *tmpname; | 235 | char *tmpname; |
236 | 236 | ||
237 | if (max_dcc_channels < 1) { | 237 | if (max_dcc_channels < 1) { |
238 | printk("nf_ct_irc: max_dcc_channels must not be zero\n"); | 238 | printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n"); |
239 | return -EINVAL; | 239 | return -EINVAL; |
240 | } | 240 | } |
241 | 241 | ||
242 | irc_exp_policy.max_expected = max_dcc_channels; | 242 | irc_exp_policy.max_expected = max_dcc_channels; |
243 | irc_exp_policy.timeout = dcc_timeout; | 243 | irc_exp_policy.timeout = dcc_timeout; |
244 | 244 | ||
245 | irc_buffer = kmalloc(65536, GFP_KERNEL); | 245 | irc_buffer = kmalloc(65536, GFP_KERNEL); |
246 | if (!irc_buffer) | 246 | if (!irc_buffer) |
247 | return -ENOMEM; | 247 | return -ENOMEM; |
248 | 248 | ||
249 | /* If no port given, default to standard irc port */ | 249 | /* If no port given, default to standard irc port */ |
250 | if (ports_c == 0) | 250 | if (ports_c == 0) |
251 | ports[ports_c++] = IRC_PORT; | 251 | ports[ports_c++] = IRC_PORT; |
252 | 252 | ||
253 | for (i = 0; i < ports_c; i++) { | 253 | for (i = 0; i < ports_c; i++) { |
254 | irc[i].tuple.src.l3num = AF_INET; | 254 | irc[i].tuple.src.l3num = AF_INET; |
255 | irc[i].tuple.src.u.tcp.port = htons(ports[i]); | 255 | irc[i].tuple.src.u.tcp.port = htons(ports[i]); |
256 | irc[i].tuple.dst.protonum = IPPROTO_TCP; | 256 | irc[i].tuple.dst.protonum = IPPROTO_TCP; |
257 | irc[i].expect_policy = &irc_exp_policy; | 257 | irc[i].expect_policy = &irc_exp_policy; |
258 | irc[i].me = THIS_MODULE; | 258 | irc[i].me = THIS_MODULE; |
259 | irc[i].help = help; | 259 | irc[i].help = help; |
260 | 260 | ||
261 | tmpname = &irc_names[i][0]; | 261 | tmpname = &irc_names[i][0]; |
262 | if (ports[i] == IRC_PORT) | 262 | if (ports[i] == IRC_PORT) |
263 | sprintf(tmpname, "irc"); | 263 | sprintf(tmpname, "irc"); |
264 | else | 264 | else |
265 | sprintf(tmpname, "irc-%u", i); | 265 | sprintf(tmpname, "irc-%u", i); |
266 | irc[i].name = tmpname; | 266 | irc[i].name = tmpname; |
267 | 267 | ||
268 | ret = nf_conntrack_helper_register(&irc[i]); | 268 | ret = nf_conntrack_helper_register(&irc[i]); |
269 | if (ret) { | 269 | if (ret) { |
270 | printk("nf_ct_irc: failed to register helper " | 270 | printk(KERN_ERR "nf_ct_irc: failed to register helper " |
271 | "for pf: %u port: %u\n", | 271 | "for pf: %u port: %u\n", |
272 | irc[i].tuple.src.l3num, ports[i]); | 272 | irc[i].tuple.src.l3num, ports[i]); |
273 | nf_conntrack_irc_fini(); | 273 | nf_conntrack_irc_fini(); |
274 | return ret; | 274 | return ret; |
275 | } | 275 | } |
276 | } | 276 | } |
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | /* This function is intentionally _NOT_ defined as __exit, because | 280 | /* This function is intentionally _NOT_ defined as __exit, because |
281 | * it is needed by the init function */ | 281 | * it is needed by the init function */ |
282 | static void nf_conntrack_irc_fini(void) | 282 | static void nf_conntrack_irc_fini(void) |
283 | { | 283 | { |
284 | int i; | 284 | int i; |
285 | 285 | ||
286 | for (i = 0; i < ports_c; i++) | 286 | for (i = 0; i < ports_c; i++) |
287 | nf_conntrack_helper_unregister(&irc[i]); | 287 | nf_conntrack_helper_unregister(&irc[i]); |
288 | kfree(irc_buffer); | 288 | kfree(irc_buffer); |
289 | } | 289 | } |
290 | 290 | ||
291 | module_init(nf_conntrack_irc_init); | 291 | module_init(nf_conntrack_irc_init); |
292 | module_exit(nf_conntrack_irc_fini); | 292 | module_exit(nf_conntrack_irc_fini); |
293 | 293 |
net/netfilter/nf_conntrack_netlink.c
1 | /* Connection tracking via netlink socket. Allows for user space | 1 | /* Connection tracking via netlink socket. Allows for user space |
2 | * protocol helpers and general trouble making from userspace. | 2 | * protocol helpers and general trouble making from userspace. |
3 | * | 3 | * |
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> |
5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> | 7 | * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> |
8 | * | 8 | * |
9 | * Initial connection tracking via netlink development funded and | 9 | * Initial connection tracking via netlink development funded and |
10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
11 | * | 11 | * |
12 | * Further development of this code funded by Astaro AG (http://www.astaro.com) | 12 | * Further development of this code funded by Astaro AG (http://www.astaro.com) |
13 | * | 13 | * |
14 | * This software may be used and distributed according to the terms | 14 | * This software may be used and distributed according to the terms |
15 | * of the GNU General Public License, incorporated herein by reference. | 15 | * of the GNU General Public License, incorporated herein by reference. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/rculist.h> | 21 | #include <linux/rculist.h> |
22 | #include <linux/rculist_nulls.h> | 22 | #include <linux/rculist_nulls.h> |
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/timer.h> | 24 | #include <linux/timer.h> |
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/netlink.h> | 27 | #include <linux/netlink.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <linux/netfilter.h> | 32 | #include <linux/netfilter.h> |
33 | #include <net/netlink.h> | 33 | #include <net/netlink.h> |
34 | #include <net/sock.h> | 34 | #include <net/sock.h> |
35 | #include <net/netfilter/nf_conntrack.h> | 35 | #include <net/netfilter/nf_conntrack.h> |
36 | #include <net/netfilter/nf_conntrack_core.h> | 36 | #include <net/netfilter/nf_conntrack_core.h> |
37 | #include <net/netfilter/nf_conntrack_expect.h> | 37 | #include <net/netfilter/nf_conntrack_expect.h> |
38 | #include <net/netfilter/nf_conntrack_helper.h> | 38 | #include <net/netfilter/nf_conntrack_helper.h> |
39 | #include <net/netfilter/nf_conntrack_l3proto.h> | 39 | #include <net/netfilter/nf_conntrack_l3proto.h> |
40 | #include <net/netfilter/nf_conntrack_l4proto.h> | 40 | #include <net/netfilter/nf_conntrack_l4proto.h> |
41 | #include <net/netfilter/nf_conntrack_tuple.h> | 41 | #include <net/netfilter/nf_conntrack_tuple.h> |
42 | #include <net/netfilter/nf_conntrack_acct.h> | 42 | #include <net/netfilter/nf_conntrack_acct.h> |
43 | #include <net/netfilter/nf_conntrack_zones.h> | 43 | #include <net/netfilter/nf_conntrack_zones.h> |
44 | #ifdef CONFIG_NF_NAT_NEEDED | 44 | #ifdef CONFIG_NF_NAT_NEEDED |
45 | #include <net/netfilter/nf_nat_core.h> | 45 | #include <net/netfilter/nf_nat_core.h> |
46 | #include <net/netfilter/nf_nat_protocol.h> | 46 | #include <net/netfilter/nf_nat_protocol.h> |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #include <linux/netfilter/nfnetlink.h> | 49 | #include <linux/netfilter/nfnetlink.h> |
50 | #include <linux/netfilter/nfnetlink_conntrack.h> | 50 | #include <linux/netfilter/nfnetlink_conntrack.h> |
51 | 51 | ||
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
53 | 53 | ||
54 | static char __initdata version[] = "0.93"; | 54 | static char __initdata version[] = "0.93"; |
55 | 55 | ||
/* Emit the nested CTA_TUPLE_PROTO attribute for @tuple: the L4 protocol
 * number plus any per-protocol attributes the l4proto provides.
 * Returns 0/negative like the nla_* helpers; -1 on message overflow. */
static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
			    const struct nf_conntrack_tuple *tuple,
			    struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);

	/* Delegate protocol-specific attributes (ports, icmp id, ...)
	 * when the l4 tracker implements the callback. */
	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:	/* also reached via the goto hidden in NLA_PUT_U8 */
	return -1;
}
79 | 79 | ||
/* Emit the nested CTA_TUPLE_IP attribute for @tuple, delegating the
 * address attributes to the L3 protocol's tuple_to_nlattr callback.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_tuples_ip(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 struct nf_conntrack_l3proto *l3proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	/* l3proto fills in the family-specific source/destination
	 * address attributes. */
	if (likely(l3proto->tuple_to_nlattr))
		ret = l3proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}
102 | 102 | ||
103 | static int | 103 | static int |
104 | ctnetlink_dump_tuples(struct sk_buff *skb, | 104 | ctnetlink_dump_tuples(struct sk_buff *skb, |
105 | const struct nf_conntrack_tuple *tuple) | 105 | const struct nf_conntrack_tuple *tuple) |
106 | { | 106 | { |
107 | int ret; | 107 | int ret; |
108 | struct nf_conntrack_l3proto *l3proto; | 108 | struct nf_conntrack_l3proto *l3proto; |
109 | struct nf_conntrack_l4proto *l4proto; | 109 | struct nf_conntrack_l4proto *l4proto; |
110 | 110 | ||
111 | l3proto = __nf_ct_l3proto_find(tuple->src.l3num); | 111 | l3proto = __nf_ct_l3proto_find(tuple->src.l3num); |
112 | ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); | 112 | ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); |
113 | 113 | ||
114 | if (unlikely(ret < 0)) | 114 | if (unlikely(ret < 0)) |
115 | return ret; | 115 | return ret; |
116 | 116 | ||
117 | l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); | 117 | l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); |
118 | ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); | 118 | ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); |
119 | 119 | ||
120 | return ret; | 120 | return ret; |
121 | } | 121 | } |
122 | 122 | ||
/* Emit CTA_STATUS: the conntrack's IPS_* status bitmask, in network
 * byte order.  Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
	return 0;

nla_put_failure:	/* reached via the goto hidden in NLA_PUT_BE32 */
	return -1;
}
132 | 132 | ||
/* Emit CTA_TIMEOUT: remaining entry lifetime in seconds, clamped to 0
 * when the timer already expired but the entry was not yet reaped.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	long timeout = (ct->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
	return 0;

nla_put_failure:
	return -1;
}
147 | 147 | ||
/* Emit the nested CTA_PROTOINFO attribute with the L4 tracker's private
 * state (e.g. TCP state), if the tracker exposes any.
 * Returns 0 on success (or nothing to dump), -1 on message overflow. */
static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;	/* protocol has no private state to report */

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}
172 | 172 | ||
/* Emit the nested CTA_HELP attribute: the name of the helper attached
 * to this conntrack, plus any helper-private attributes.  Quietly emits
 * nothing when no helper extension or helper is attached.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	/* help->helper may be cleared concurrently; read it once under
	 * RCU semantics. */
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);

	/* Helper-specific attributes, if the helper provides any.
	 * NOTE(review): its return value is ignored here. */
	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}
202 | 202 | ||
/* Emit the per-direction accounting counters (packets/bytes) as a
 * nested CTA_COUNTERS_ORIG or CTA_COUNTERS_REPLY attribute, depending
 * on @dir.  Emits nothing when accounting is not enabled for this
 * conntrack.  Returns 0 on success, -1 on message overflow. */
static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;
	const struct nf_conn_counter *acct;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;	/* accounting extension not attached */

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
		     cpu_to_be64(acct[dir].packets));
	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
		     cpu_to_be64(acct[dir].bytes));

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}
231 | 231 | ||
#ifdef CONFIG_NF_CONNTRACK_MARK
/* Emit CTA_MARK: the conntrack mark, in network byte order.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
	return 0;

nla_put_failure:
	return -1;
}
#else
/* Mark support compiled out: the stub always "succeeds" with nothing dumped. */
#define ctnetlink_dump_mark(a, b) (0)
#endif
245 | 245 | ||
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Emit CTA_SECMARK: the security mark, in network byte order.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
	return 0;

nla_put_failure:
	return -1;
}
#else
/* Secmark support compiled out: stub always "succeeds" with nothing dumped. */
#define ctnetlink_dump_secmark(a, b) (0)
#endif
259 | 259 | ||
260 | #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) | 260 | #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) |
261 | 261 | ||
/* For an expected connection, emit the nested CTA_TUPLE_MASTER
 * attribute carrying the master conntrack's original tuple.  Emits
 * nothing for connections that were not expected.
 * Returns 0 on success, -1 on message overflow. */
static inline int
ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;	/* no master to report */

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
282 | 282 | ||
#ifdef CONFIG_NF_NAT_NEEDED
/* Dump one direction's TCP sequence number adjustment state as a nested
 * attribute of @type (CTA_NAT_SEQ_ADJ_ORIG or CTA_NAT_SEQ_ADJ_REPLY),
 * containing correction position and the offsets before/after it.
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static int
dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
		     htonl(natseq->correction_pos));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
		     htonl(natseq->offset_before));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
		     htonl(natseq->offset_after));

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:	/* reached via goto from inside NLA_PUT_BE32() */
	return -1;
}
307 | 307 | ||
/* Dump the NAT sequence adjustments for both directions.  Nothing is
 * emitted (and 0 returned) when no adjustment is pending or the
 * connection carries no NAT extension.
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static inline int
ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_nat_seq *natseq;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
		return 0;

	natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
		return -1;

	natseq = &nat->seq[IP_CT_DIR_REPLY];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
		return -1;

	return 0;
}
#else
/* no NAT support: nothing to dump, trivially succeeds */
#define ctnetlink_dump_nat_seq_adj(a, b) (0)
#endif
330 | 330 | ||
/* Emit the conntrack id (CTA_ID): the kernel address of the entry,
 * truncated to 32 bits, serves as the identifier towards userspace.
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
	return 0;

nla_put_failure:	/* reached via goto from inside NLA_PUT_BE32() */
	return -1;
}
340 | 340 | ||
/* Emit the current reference count of the conntrack entry (CTA_USE).
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
	return 0;

nla_put_failure:	/* reached via goto from inside NLA_PUT_BE32() */
	return -1;
}
350 | 350 | ||
/* Build one complete conntrack message for @ct into @skb: both tuples,
 * optional zone, status, timeout, per-direction counters, protocol and
 * helper info, mark, secmark, id, use count, master tuple and NAT
 * sequence adjustments.
 *
 * Returns skb->len on success or -1 if the message did not fit, in
 * which case the partially built message is cancelled again.
 */
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		    int event, struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	/* replies to dump requests (pid != 0) are multipart messages */
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	/* the zone attribute is only emitted for non-default zones */
	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	/* each helper returns < 0 on skb overrun; any failure aborts */
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secmark(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
409 | 409 | ||
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Upper bound on the attribute space needed for the L3- and L4-protocol
 * specific parts of a message about @ct, taken from the registered
 * protocol trackers' nla_size fields.
 */
static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	size_t len = 0;

	/* protocol tracker lookup tables are RCU-protected */
	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	len += l3proto->nla_size;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	len += l4proto->nla_size;
	rcu_read_unlock();

	return len;
}
428 | 428 | ||
/* Attribute space needed for the per-direction packet/byte counters of
 * @ct; 0 if the connection has no accounting extension.
 */
static inline size_t
ctnetlink_counters_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}
439 | 439 | ||
/* Worst-case message size for an event about @ct, used to size the
 * event skb up front so the fill routines cannot overrun it.
 */
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_counters_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
#endif
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}
468 | 468 | ||
/* Conntrack event notifier callback: translate the event bits in
 * @events into a ctnetlink message about item->ct and multicast it to
 * the matching nfnetlink group (NEW, UPDATE or DESTROY).
 *
 * Returns 0 normally; returns -ENOBUFS to ask the event framework to
 * retry later when the message could not be delivered (no memory, or
 * a congested receiver with reliable delivery requested).
 */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (ct == &nf_conntrack_untracked)
		return 0;

	/* map event bits to message type and multicast group */
	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	/* don't build a message nobody will receive */
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		/* destroy events carry the final counters only */
		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
			goto nla_put_failure;
	} else {
		/* NEW/UPDATE: only dump what changed (or is present) */
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secmark(skb, ct) < 0)
			goto nla_put_failure;
#endif

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->pid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	/* report the overrun to listeners; retry if one wants that */
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
599 | 599 | ||
600 | static int ctnetlink_done(struct netlink_callback *cb) | 600 | static int ctnetlink_done(struct netlink_callback *cb) |
601 | { | 601 | { |
602 | if (cb->args[1]) | 602 | if (cb->args[1]) |
603 | nf_ct_put((struct nf_conn *)cb->args[1]); | 603 | nf_ct_put((struct nf_conn *)cb->args[1]); |
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
606 | 606 | ||
/* Netlink dump callback: walk the conntrack hash table and emit one
 * message per original-direction entry, optionally filtered by L3
 * protocol family and optionally zeroing counters (CTRZERO).
 *
 * Resume state: cb->args[0] is the current hash bucket, cb->args[1] a
 * referenced pointer to the entry that did not fit into the previous
 * skb, so the dump continues exactly where it stopped.
 */
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
					       hnnode) {
			/* each connection is hashed twice; report once */
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* entry may be dying; only dump if still alive */
			if (!atomic_inc_not_zero(&ct->ct_general.use))
				continue;
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				goto releasect;
			if (cb->args[1]) {
				/* resuming: skip until the saved entry */
				if (ct != last)
					goto releasect;
				cb->args[1] = 0;
			}
			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
						cb->nlh->nlmsg_seq,
						IPCTNL_MSG_CT_NEW, ct) < 0) {
				/* skb full: remember where to resume */
				cb->args[1] = (unsigned long)ct;
				goto out;
			}

			if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
						IPCTNL_MSG_CT_GET_CTRZERO) {
				struct nf_conn_counter *acct;

				acct = nf_conn_acct_find(ct);
				if (acct)
					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
			}
releasect:
		nf_ct_put(ct);
		}
		if (cb->args[1]) {
			/* saved entry vanished; rescan this bucket */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_put(last);

	return skb->len;
}
668 | 668 | ||
669 | static inline int | 669 | static inline int |
670 | ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple) | 670 | ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple) |
671 | { | 671 | { |
672 | struct nlattr *tb[CTA_IP_MAX+1]; | 672 | struct nlattr *tb[CTA_IP_MAX+1]; |
673 | struct nf_conntrack_l3proto *l3proto; | 673 | struct nf_conntrack_l3proto *l3proto; |
674 | int ret = 0; | 674 | int ret = 0; |
675 | 675 | ||
676 | nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); | 676 | nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); |
677 | 677 | ||
678 | rcu_read_lock(); | 678 | rcu_read_lock(); |
679 | l3proto = __nf_ct_l3proto_find(tuple->src.l3num); | 679 | l3proto = __nf_ct_l3proto_find(tuple->src.l3num); |
680 | 680 | ||
681 | if (likely(l3proto->nlattr_to_tuple)) { | 681 | if (likely(l3proto->nlattr_to_tuple)) { |
682 | ret = nla_validate_nested(attr, CTA_IP_MAX, | 682 | ret = nla_validate_nested(attr, CTA_IP_MAX, |
683 | l3proto->nla_policy); | 683 | l3proto->nla_policy); |
684 | if (ret == 0) | 684 | if (ret == 0) |
685 | ret = l3proto->nlattr_to_tuple(tb, tuple); | 685 | ret = l3proto->nlattr_to_tuple(tb, tuple); |
686 | } | 686 | } |
687 | 687 | ||
688 | rcu_read_unlock(); | 688 | rcu_read_unlock(); |
689 | 689 | ||
690 | return ret; | 690 | return ret; |
691 | } | 691 | } |
692 | 692 | ||
/* validation policy for nested CTA_TUPLE_PROTO attributes */
static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};
696 | 696 | ||
/* Parse a nested CTA_TUPLE_PROTO attribute into @tuple.
 *
 * Requires CTA_PROTO_NUM; L4-specific parts (ports, icmp id, ...) are
 * validated and extracted by the matching L4 protocol tracker, if it
 * provides a ->nlattr_to_tuple callback.
 *
 * Returns 0 on success or a negative errno on malformed attributes.
 */
static inline int
ctnetlink_parse_tuple_proto(struct nlattr *attr,
			    struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_PROTO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
	if (ret < 0)
		return ret;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;
	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	/* protocol tracker lookup is RCU-protected */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_PROTO_MAX,
					  l4proto->nla_policy);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}
727 | 727 | ||
/* validation policy for nested tuple attributes (orig/reply/master) */
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
};
732 | 732 | ||
733 | static int | 733 | static int |
734 | ctnetlink_parse_tuple(const struct nlattr * const cda[], | 734 | ctnetlink_parse_tuple(const struct nlattr * const cda[], |
735 | struct nf_conntrack_tuple *tuple, | 735 | struct nf_conntrack_tuple *tuple, |
736 | enum ctattr_tuple type, u_int8_t l3num) | 736 | enum ctattr_tuple type, u_int8_t l3num) |
737 | { | 737 | { |
738 | struct nlattr *tb[CTA_TUPLE_MAX+1]; | 738 | struct nlattr *tb[CTA_TUPLE_MAX+1]; |
739 | int err; | 739 | int err; |
740 | 740 | ||
741 | memset(tuple, 0, sizeof(*tuple)); | 741 | memset(tuple, 0, sizeof(*tuple)); |
742 | 742 | ||
743 | nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy); | 743 | nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy); |
744 | 744 | ||
745 | if (!tb[CTA_TUPLE_IP]) | 745 | if (!tb[CTA_TUPLE_IP]) |
746 | return -EINVAL; | 746 | return -EINVAL; |
747 | 747 | ||
748 | tuple->src.l3num = l3num; | 748 | tuple->src.l3num = l3num; |
749 | 749 | ||
750 | err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); | 750 | err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); |
751 | if (err < 0) | 751 | if (err < 0) |
752 | return err; | 752 | return err; |
753 | 753 | ||
754 | if (!tb[CTA_TUPLE_PROTO]) | 754 | if (!tb[CTA_TUPLE_PROTO]) |
755 | return -EINVAL; | 755 | return -EINVAL; |
756 | 756 | ||
757 | err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple); | 757 | err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple); |
758 | if (err < 0) | 758 | if (err < 0) |
759 | return err; | 759 | return err; |
760 | 760 | ||
761 | /* orig and expect tuples get DIR_ORIGINAL */ | 761 | /* orig and expect tuples get DIR_ORIGINAL */ |
762 | if (type == CTA_TUPLE_REPLY) | 762 | if (type == CTA_TUPLE_REPLY) |
763 | tuple->dst.dir = IP_CT_DIR_REPLY; | 763 | tuple->dst.dir = IP_CT_DIR_REPLY; |
764 | else | 764 | else |
765 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; | 765 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; |
766 | 766 | ||
767 | return 0; | 767 | return 0; |
768 | } | 768 | } |
769 | 769 | ||
770 | static int | 770 | static int |
771 | ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone) | 771 | ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone) |
772 | { | 772 | { |
773 | if (attr) | 773 | if (attr) |
774 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 774 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
775 | *zone = ntohs(nla_get_be16(attr)); | 775 | *zone = ntohs(nla_get_be16(attr)); |
776 | #else | 776 | #else |
777 | return -EOPNOTSUPP; | 777 | return -EOPNOTSUPP; |
778 | #endif | 778 | #endif |
779 | else | 779 | else |
780 | *zone = 0; | 780 | *zone = 0; |
781 | 781 | ||
782 | return 0; | 782 | return 0; |
783 | } | 783 | } |
784 | 784 | ||
/* Policy for attributes nested inside CTA_HELP: the helper name must
 * be a NUL-terminated string.
 */
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING },
};
788 | 788 | ||
789 | static inline int | 789 | static inline int |
790 | ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) | 790 | ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) |
791 | { | 791 | { |
792 | struct nlattr *tb[CTA_HELP_MAX+1]; | 792 | struct nlattr *tb[CTA_HELP_MAX+1]; |
793 | 793 | ||
794 | nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy); | 794 | nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy); |
795 | 795 | ||
796 | if (!tb[CTA_HELP_NAME]) | 796 | if (!tb[CTA_HELP_NAME]) |
797 | return -EINVAL; | 797 | return -EINVAL; |
798 | 798 | ||
799 | *helper_name = nla_data(tb[CTA_HELP_NAME]); | 799 | *helper_name = nla_data(tb[CTA_HELP_NAME]); |
800 | 800 | ||
801 | return 0; | 801 | return 0; |
802 | } | 802 | } |
803 | 803 | ||
/* Policy for the top-level CTA_* conntrack attributes.  Nested
 * attributes (tuples, protoinfo, NAT setup, ...) are validated by
 * their own policies when the corresponding parser runs.
 */
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS] 		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
};
818 | 818 | ||
/* IPCTNL_MSG_CT_DELETE handler: delete one conntrack entry, or flush
 * the whole table when no tuple attribute is given.
 *
 * The entry is looked up by its original or reply tuple (plus optional
 * zone).  If CTA_ID is present it must match the entry's id, which is
 * derived from the conntrack's kernel address (see ctnetlink_dump_id).
 *
 * Returns 0 on success (including the "event not deliverable, deferred
 * to the dying list" case), -ENOENT if no matching entry exists, or a
 * negative errno from attribute parsing.
 */
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	/* Takes a reference on the entry; dropped on every exit path below. */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		/* id is the conntrack pointer truncated to 32 bits */
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      NETLINK_CB(skb).pid,
				      nlmsg_report(nlh)) < 0) {
		nf_ct_delete_from_lists(ct);
		/* we failed to report the event, try later */
		nf_ct_insert_dying_list(ct);
		nf_ct_put(ct);
		return 0;
	}

	/* death_by_timeout would report the event again */
	set_bit(IPS_DYING_BIT, &ct->status);

	nf_ct_kill(ct);
	nf_ct_put(ct);

	return 0;
}
884 | 884 | ||
885 | static int | 885 | static int |
886 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | 886 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, |
887 | const struct nlmsghdr *nlh, | 887 | const struct nlmsghdr *nlh, |
888 | const struct nlattr * const cda[]) | 888 | const struct nlattr * const cda[]) |
889 | { | 889 | { |
890 | struct net *net = sock_net(ctnl); | 890 | struct net *net = sock_net(ctnl); |
891 | struct nf_conntrack_tuple_hash *h; | 891 | struct nf_conntrack_tuple_hash *h; |
892 | struct nf_conntrack_tuple tuple; | 892 | struct nf_conntrack_tuple tuple; |
893 | struct nf_conn *ct; | 893 | struct nf_conn *ct; |
894 | struct sk_buff *skb2 = NULL; | 894 | struct sk_buff *skb2 = NULL; |
895 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 895 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
896 | u_int8_t u3 = nfmsg->nfgen_family; | 896 | u_int8_t u3 = nfmsg->nfgen_family; |
897 | u16 zone; | 897 | u16 zone; |
898 | int err; | 898 | int err; |
899 | 899 | ||
900 | if (nlh->nlmsg_flags & NLM_F_DUMP) | 900 | if (nlh->nlmsg_flags & NLM_F_DUMP) |
901 | return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, | 901 | return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, |
902 | ctnetlink_done); | 902 | ctnetlink_done); |
903 | 903 | ||
904 | err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); | 904 | err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); |
905 | if (err < 0) | 905 | if (err < 0) |
906 | return err; | 906 | return err; |
907 | 907 | ||
908 | if (cda[CTA_TUPLE_ORIG]) | 908 | if (cda[CTA_TUPLE_ORIG]) |
909 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); | 909 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); |
910 | else if (cda[CTA_TUPLE_REPLY]) | 910 | else if (cda[CTA_TUPLE_REPLY]) |
911 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); | 911 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); |
912 | else | 912 | else |
913 | return -EINVAL; | 913 | return -EINVAL; |
914 | 914 | ||
915 | if (err < 0) | 915 | if (err < 0) |
916 | return err; | 916 | return err; |
917 | 917 | ||
918 | h = nf_conntrack_find_get(net, zone, &tuple); | 918 | h = nf_conntrack_find_get(net, zone, &tuple); |
919 | if (!h) | 919 | if (!h) |
920 | return -ENOENT; | 920 | return -ENOENT; |
921 | 921 | ||
922 | ct = nf_ct_tuplehash_to_ctrack(h); | 922 | ct = nf_ct_tuplehash_to_ctrack(h); |
923 | 923 | ||
924 | err = -ENOMEM; | 924 | err = -ENOMEM; |
925 | skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 925 | skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
926 | if (skb2 == NULL) { | 926 | if (skb2 == NULL) { |
927 | nf_ct_put(ct); | 927 | nf_ct_put(ct); |
928 | return -ENOMEM; | 928 | return -ENOMEM; |
929 | } | 929 | } |
930 | 930 | ||
931 | rcu_read_lock(); | 931 | rcu_read_lock(); |
932 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, | 932 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, |
933 | IPCTNL_MSG_CT_NEW, ct); | 933 | IPCTNL_MSG_CT_NEW, ct); |
934 | rcu_read_unlock(); | 934 | rcu_read_unlock(); |
935 | nf_ct_put(ct); | 935 | nf_ct_put(ct); |
936 | if (err <= 0) | 936 | if (err <= 0) |
937 | goto free; | 937 | goto free; |
938 | 938 | ||
939 | err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); | 939 | err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); |
940 | if (err < 0) | 940 | if (err < 0) |
941 | goto out; | 941 | goto out; |
942 | 942 | ||
943 | return 0; | 943 | return 0; |
944 | 944 | ||
945 | free: | 945 | free: |
946 | kfree_skb(skb2); | 946 | kfree_skb(skb2); |
947 | out: | 947 | out: |
948 | return err; | 948 | return err; |
949 | } | 949 | } |
950 | 950 | ||
#ifdef CONFIG_NF_NAT_NEEDED
/* Dispatch a CTA_NAT_SRC/CTA_NAT_DST attribute to the NAT module's
 * parser, auto-loading the NAT module on first use.
 *
 * The caller holds the nfnl mutex, nf_conntrack_lock and the RCU read
 * lock (all three are dropped below before the sleeping
 * request_module() call, and re-taken in reverse order afterwards).
 *
 * Returns the parser's result, -EAGAIN if the module was just loaded
 * (the caller must restart the operation so the hook is picked up
 * under a fresh rcu_dereference), or -EOPNOTSUPP if NAT support is
 * unavailable.
 */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		/* Drop all locks (innermost first) so request_module()
		 * may sleep without deadlocking against module init. */
		rcu_read_unlock();
		spin_unlock_bh(&nf_conntrack_lock);
		nfnl_unlock();
		if (request_module("nf-nat-ipv4") < 0) {
			nfnl_lock();
			spin_lock_bh(&nf_conntrack_lock);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		spin_lock_bh(&nf_conntrack_lock);
		rcu_read_lock();
		/* Module registered the hook while the locks were
		 * dropped: ask the caller to retry from scratch. */
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	return parse_nat_setup(ct, manip, attr);
}
#endif
983 | 983 | ||
984 | static int | 984 | static int |
985 | ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) | 985 | ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) |
986 | { | 986 | { |
987 | unsigned long d; | 987 | unsigned long d; |
988 | unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); | 988 | unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); |
989 | d = ct->status ^ status; | 989 | d = ct->status ^ status; |
990 | 990 | ||
991 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) | 991 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) |
992 | /* unchangeable */ | 992 | /* unchangeable */ |
993 | return -EBUSY; | 993 | return -EBUSY; |
994 | 994 | ||
995 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) | 995 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) |
996 | /* SEEN_REPLY bit can only be set */ | 996 | /* SEEN_REPLY bit can only be set */ |
997 | return -EBUSY; | 997 | return -EBUSY; |
998 | 998 | ||
999 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) | 999 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) |
1000 | /* ASSURED bit can only be set */ | 1000 | /* ASSURED bit can only be set */ |
1001 | return -EBUSY; | 1001 | return -EBUSY; |
1002 | 1002 | ||
1003 | /* Be careful here, modifying NAT bits can screw up things, | 1003 | /* Be careful here, modifying NAT bits can screw up things, |
1004 | * so don't let users modify them directly if they don't pass | 1004 | * so don't let users modify them directly if they don't pass |
1005 | * nf_nat_range. */ | 1005 | * nf_nat_range. */ |
1006 | ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK); | 1006 | ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK); |
1007 | return 0; | 1007 | return 0; |
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | static int | 1010 | static int |
1011 | ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) | 1011 | ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) |
1012 | { | 1012 | { |
1013 | #ifdef CONFIG_NF_NAT_NEEDED | 1013 | #ifdef CONFIG_NF_NAT_NEEDED |
1014 | int ret; | 1014 | int ret; |
1015 | 1015 | ||
1016 | if (cda[CTA_NAT_DST]) { | 1016 | if (cda[CTA_NAT_DST]) { |
1017 | ret = ctnetlink_parse_nat_setup(ct, | 1017 | ret = ctnetlink_parse_nat_setup(ct, |
1018 | IP_NAT_MANIP_DST, | 1018 | IP_NAT_MANIP_DST, |
1019 | cda[CTA_NAT_DST]); | 1019 | cda[CTA_NAT_DST]); |
1020 | if (ret < 0) | 1020 | if (ret < 0) |
1021 | return ret; | 1021 | return ret; |
1022 | } | 1022 | } |
1023 | if (cda[CTA_NAT_SRC]) { | 1023 | if (cda[CTA_NAT_SRC]) { |
1024 | ret = ctnetlink_parse_nat_setup(ct, | 1024 | ret = ctnetlink_parse_nat_setup(ct, |
1025 | IP_NAT_MANIP_SRC, | 1025 | IP_NAT_MANIP_SRC, |
1026 | cda[CTA_NAT_SRC]); | 1026 | cda[CTA_NAT_SRC]); |
1027 | if (ret < 0) | 1027 | if (ret < 0) |
1028 | return ret; | 1028 | return ret; |
1029 | } | 1029 | } |
1030 | return 0; | 1030 | return 0; |
1031 | #else | 1031 | #else |
1032 | return -EOPNOTSUPP; | 1032 | return -EOPNOTSUPP; |
1033 | #endif | 1033 | #endif |
1034 | } | 1034 | } |
1035 | 1035 | ||
/* Change (or remove) the helper assigned to an existing conntrack.
 *
 * An empty CTA_HELP_NAME string detaches the current helper; a
 * non-empty name looks the helper up, auto-loading its module if
 * necessary.  The caller holds nf_conntrack_lock (it is dropped
 * around the sleeping request_module() call below).
 *
 * Returns 0 on success, -EBUSY if the conntrack already has a (different)
 * helper or is a sibling connection, -EAGAIN if a module was just loaded
 * and the caller should retry, or -EOPNOTSUPP if no such helper exists
 * or a helper cannot be attached to this conntrack.
 */
static inline int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			rcu_assign_pointer(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		/* Drop the lock: request_module() may sleep. */
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		/* Module load made the helper appear: tell the caller
		 * to retry the whole operation. */
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper)
			return 0;
		if (help->helper)
			return -EBUSY;
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		/* we cannot set a helper for an existing conntrack */
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}
1098 | 1098 | ||
1099 | static inline int | 1099 | static inline int |
1100 | ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) | 1100 | ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) |
1101 | { | 1101 | { |
1102 | u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); | 1102 | u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); |
1103 | 1103 | ||
1104 | if (!del_timer(&ct->timeout)) | 1104 | if (!del_timer(&ct->timeout)) |
1105 | return -ETIME; | 1105 | return -ETIME; |
1106 | 1106 | ||
1107 | ct->timeout.expires = jiffies + timeout * HZ; | 1107 | ct->timeout.expires = jiffies + timeout * HZ; |
1108 | add_timer(&ct->timeout); | 1108 | add_timer(&ct->timeout); |
1109 | 1109 | ||
1110 | return 0; | 1110 | return 0; |
1111 | } | 1111 | } |
1112 | 1112 | ||
/* Policy for attributes nested inside CTA_PROTOINFO; one nested
 * container per layer-4 protocol that exposes private state.
 */
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};
1118 | 1118 | ||
1119 | static inline int | 1119 | static inline int |
1120 | ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) | 1120 | ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) |
1121 | { | 1121 | { |
1122 | const struct nlattr *attr = cda[CTA_PROTOINFO]; | 1122 | const struct nlattr *attr = cda[CTA_PROTOINFO]; |
1123 | struct nlattr *tb[CTA_PROTOINFO_MAX+1]; | 1123 | struct nlattr *tb[CTA_PROTOINFO_MAX+1]; |
1124 | struct nf_conntrack_l4proto *l4proto; | 1124 | struct nf_conntrack_l4proto *l4proto; |
1125 | int err = 0; | 1125 | int err = 0; |
1126 | 1126 | ||
1127 | nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy); | 1127 | nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy); |
1128 | 1128 | ||
1129 | rcu_read_lock(); | 1129 | rcu_read_lock(); |
1130 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); | 1130 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); |
1131 | if (l4proto->from_nlattr) | 1131 | if (l4proto->from_nlattr) |
1132 | err = l4proto->from_nlattr(tb, ct); | 1132 | err = l4proto->from_nlattr(tb, ct); |
1133 | rcu_read_unlock(); | 1133 | rcu_read_unlock(); |
1134 | 1134 | ||
1135 | return err; | 1135 | return err; |
1136 | } | 1136 | } |
1137 | 1137 | ||
#ifdef CONFIG_NF_NAT_NEEDED
/* Policy for attributes nested inside CTA_NAT_SEQ_ADJ_ORIG/_REPLY:
 * the three fields of a TCP sequence-number adjustment record.
 */
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};
1144 | 1144 | ||
1145 | static inline int | 1145 | static inline int |
1146 | change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) | 1146 | change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) |
1147 | { | 1147 | { |
1148 | struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; | 1148 | struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; |
1149 | 1149 | ||
1150 | nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy); | 1150 | nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy); |
1151 | 1151 | ||
1152 | if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) | 1152 | if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) |
1153 | return -EINVAL; | 1153 | return -EINVAL; |
1154 | 1154 | ||
1155 | natseq->correction_pos = | 1155 | natseq->correction_pos = |
1156 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS])); | 1156 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS])); |
1157 | 1157 | ||
1158 | if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE]) | 1158 | if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE]) |
1159 | return -EINVAL; | 1159 | return -EINVAL; |
1160 | 1160 | ||
1161 | natseq->offset_before = | 1161 | natseq->offset_before = |
1162 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE])); | 1162 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE])); |
1163 | 1163 | ||
1164 | if (!cda[CTA_NAT_SEQ_OFFSET_AFTER]) | 1164 | if (!cda[CTA_NAT_SEQ_OFFSET_AFTER]) |
1165 | return -EINVAL; | 1165 | return -EINVAL; |
1166 | 1166 | ||
1167 | natseq->offset_after = | 1167 | natseq->offset_after = |
1168 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER])); | 1168 | ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER])); |
1169 | 1169 | ||
1170 | return 0; | 1170 | return 0; |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | static int | 1173 | static int |
1174 | ctnetlink_change_nat_seq_adj(struct nf_conn *ct, | 1174 | ctnetlink_change_nat_seq_adj(struct nf_conn *ct, |
1175 | const struct nlattr * const cda[]) | 1175 | const struct nlattr * const cda[]) |
1176 | { | 1176 | { |
1177 | int ret = 0; | 1177 | int ret = 0; |
1178 | struct nf_conn_nat *nat = nfct_nat(ct); | 1178 | struct nf_conn_nat *nat = nfct_nat(ct); |
1179 | 1179 | ||
1180 | if (!nat) | 1180 | if (!nat) |
1181 | return 0; | 1181 | return 0; |
1182 | 1182 | ||
1183 | if (cda[CTA_NAT_SEQ_ADJ_ORIG]) { | 1183 | if (cda[CTA_NAT_SEQ_ADJ_ORIG]) { |
1184 | ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL], | 1184 | ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL], |
1185 | cda[CTA_NAT_SEQ_ADJ_ORIG]); | 1185 | cda[CTA_NAT_SEQ_ADJ_ORIG]); |
1186 | if (ret < 0) | 1186 | if (ret < 0) |
1187 | return ret; | 1187 | return ret; |
1188 | 1188 | ||
1189 | ct->status |= IPS_SEQ_ADJUST; | 1189 | ct->status |= IPS_SEQ_ADJUST; |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | if (cda[CTA_NAT_SEQ_ADJ_REPLY]) { | 1192 | if (cda[CTA_NAT_SEQ_ADJ_REPLY]) { |
1193 | ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY], | 1193 | ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY], |
1194 | cda[CTA_NAT_SEQ_ADJ_REPLY]); | 1194 | cda[CTA_NAT_SEQ_ADJ_REPLY]); |
1195 | if (ret < 0) | 1195 | if (ret < 0) |
1196 | return ret; | 1196 | return ret; |
1197 | 1197 | ||
1198 | ct->status |= IPS_SEQ_ADJUST; | 1198 | ct->status |= IPS_SEQ_ADJUST; |
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | return 0; | 1201 | return 0; |
1202 | } | 1202 | } |
1203 | #endif | 1203 | #endif |
1204 | 1204 | ||
/* Apply a CT_NEW update message to an existing conntrack.
 *
 * NAT setup and master assignment are only valid when creating a new
 * conntrack, so their presence here is rejected.  The remaining
 * attributes are applied in a fixed order (helper, timeout, status,
 * protoinfo, mark, NAT seq adjustments); the first failure aborts the
 * update, leaving earlier changes in place.
 *
 * Returns 0 on success or the first negative error encountered.
 */
static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}
#endif

	return 0;
}
1254 | 1254 | ||
1255 | static struct nf_conn * | 1255 | static struct nf_conn * |
1256 | ctnetlink_create_conntrack(struct net *net, u16 zone, | 1256 | ctnetlink_create_conntrack(struct net *net, u16 zone, |
1257 | const struct nlattr * const cda[], | 1257 | const struct nlattr * const cda[], |
1258 | struct nf_conntrack_tuple *otuple, | 1258 | struct nf_conntrack_tuple *otuple, |
1259 | struct nf_conntrack_tuple *rtuple, | 1259 | struct nf_conntrack_tuple *rtuple, |
1260 | u8 u3) | 1260 | u8 u3) |
1261 | { | 1261 | { |
1262 | struct nf_conn *ct; | 1262 | struct nf_conn *ct; |
1263 | int err = -EINVAL; | 1263 | int err = -EINVAL; |
1264 | struct nf_conntrack_helper *helper; | 1264 | struct nf_conntrack_helper *helper; |
1265 | 1265 | ||
1266 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); | 1266 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); |
1267 | if (IS_ERR(ct)) | 1267 | if (IS_ERR(ct)) |
1268 | return ERR_PTR(-ENOMEM); | 1268 | return ERR_PTR(-ENOMEM); |
1269 | 1269 | ||
1270 | if (!cda[CTA_TIMEOUT]) | 1270 | if (!cda[CTA_TIMEOUT]) |
1271 | goto err1; | 1271 | goto err1; |
1272 | ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); | 1272 | ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); |
1273 | 1273 | ||
1274 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; | 1274 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; |
1275 | 1275 | ||
1276 | rcu_read_lock(); | 1276 | rcu_read_lock(); |
1277 | if (cda[CTA_HELP]) { | 1277 | if (cda[CTA_HELP]) { |
1278 | char *helpname = NULL; | 1278 | char *helpname = NULL; |
1279 | 1279 | ||
1280 | err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); | 1280 | err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); |
1281 | if (err < 0) | 1281 | if (err < 0) |
1282 | goto err2; | 1282 | goto err2; |
1283 | 1283 | ||
1284 | helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), | 1284 | helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), |
1285 | nf_ct_protonum(ct)); | 1285 | nf_ct_protonum(ct)); |
1286 | if (helper == NULL) { | 1286 | if (helper == NULL) { |
1287 | rcu_read_unlock(); | 1287 | rcu_read_unlock(); |
1288 | #ifdef CONFIG_MODULES | 1288 | #ifdef CONFIG_MODULES |
1289 | if (request_module("nfct-helper-%s", helpname) < 0) { | 1289 | if (request_module("nfct-helper-%s", helpname) < 0) { |
1290 | err = -EOPNOTSUPP; | 1290 | err = -EOPNOTSUPP; |
1291 | goto err1; | 1291 | goto err1; |
1292 | } | 1292 | } |
1293 | 1293 | ||
1294 | rcu_read_lock(); | 1294 | rcu_read_lock(); |
1295 | helper = __nf_conntrack_helper_find(helpname, | 1295 | helper = __nf_conntrack_helper_find(helpname, |
1296 | nf_ct_l3num(ct), | 1296 | nf_ct_l3num(ct), |
1297 | nf_ct_protonum(ct)); | 1297 | nf_ct_protonum(ct)); |
1298 | if (helper) { | 1298 | if (helper) { |
1299 | err = -EAGAIN; | 1299 | err = -EAGAIN; |
1300 | goto err2; | 1300 | goto err2; |
1301 | } | 1301 | } |
1302 | rcu_read_unlock(); | 1302 | rcu_read_unlock(); |
1303 | #endif | 1303 | #endif |
1304 | err = -EOPNOTSUPP; | 1304 | err = -EOPNOTSUPP; |
1305 | goto err1; | 1305 | goto err1; |
1306 | } else { | 1306 | } else { |
1307 | struct nf_conn_help *help; | 1307 | struct nf_conn_help *help; |
1308 | 1308 | ||
1309 | help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); | 1309 | help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); |
1310 | if (help == NULL) { | 1310 | if (help == NULL) { |
1311 | err = -ENOMEM; | 1311 | err = -ENOMEM; |
1312 | goto err2; | 1312 | goto err2; |
1313 | } | 1313 | } |
1314 | 1314 | ||
1315 | /* not in hash table yet so not strictly necessary */ | 1315 | /* not in hash table yet so not strictly necessary */ |
1316 | rcu_assign_pointer(help->helper, helper); | 1316 | rcu_assign_pointer(help->helper, helper); |
1317 | } | 1317 | } |
1318 | } else { | 1318 | } else { |
1319 | /* try an implicit helper assignation */ | 1319 | /* try an implicit helper assignation */ |
1320 | err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); | 1320 | err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); |
1321 | if (err < 0) | 1321 | if (err < 0) |
1322 | goto err2; | 1322 | goto err2; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { | 1325 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { |
1326 | err = ctnetlink_change_nat(ct, cda); | 1326 | err = ctnetlink_change_nat(ct, cda); |
1327 | if (err < 0) | 1327 | if (err < 0) |
1328 | goto err2; | 1328 | goto err2; |
1329 | } | 1329 | } |
1330 | 1330 | ||
1331 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); | 1331 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); |
1332 | nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); | 1332 | nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); |
1333 | /* we must add conntrack extensions before confirmation. */ | 1333 | /* we must add conntrack extensions before confirmation. */ |
1334 | ct->status |= IPS_CONFIRMED; | 1334 | ct->status |= IPS_CONFIRMED; |
1335 | 1335 | ||
1336 | if (cda[CTA_STATUS]) { | 1336 | if (cda[CTA_STATUS]) { |
1337 | err = ctnetlink_change_status(ct, cda); | 1337 | err = ctnetlink_change_status(ct, cda); |
1338 | if (err < 0) | 1338 | if (err < 0) |
1339 | goto err2; | 1339 | goto err2; |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | #ifdef CONFIG_NF_NAT_NEEDED | 1342 | #ifdef CONFIG_NF_NAT_NEEDED |
1343 | if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { | 1343 | if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { |
1344 | err = ctnetlink_change_nat_seq_adj(ct, cda); | 1344 | err = ctnetlink_change_nat_seq_adj(ct, cda); |
1345 | if (err < 0) | 1345 | if (err < 0) |
1346 | goto err2; | 1346 | goto err2; |
1347 | } | 1347 | } |
1348 | #endif | 1348 | #endif |
1349 | 1349 | ||
1350 | if (cda[CTA_PROTOINFO]) { | 1350 | if (cda[CTA_PROTOINFO]) { |
1351 | err = ctnetlink_change_protoinfo(ct, cda); | 1351 | err = ctnetlink_change_protoinfo(ct, cda); |
1352 | if (err < 0) | 1352 | if (err < 0) |
1353 | goto err2; | 1353 | goto err2; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | #if defined(CONFIG_NF_CONNTRACK_MARK) | 1356 | #if defined(CONFIG_NF_CONNTRACK_MARK) |
1357 | if (cda[CTA_MARK]) | 1357 | if (cda[CTA_MARK]) |
1358 | ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); | 1358 | ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); |
1359 | #endif | 1359 | #endif |
1360 | 1360 | ||
1361 | /* setup master conntrack: this is a confirmed expectation */ | 1361 | /* setup master conntrack: this is a confirmed expectation */ |
1362 | if (cda[CTA_TUPLE_MASTER]) { | 1362 | if (cda[CTA_TUPLE_MASTER]) { |
1363 | struct nf_conntrack_tuple master; | 1363 | struct nf_conntrack_tuple master; |
1364 | struct nf_conntrack_tuple_hash *master_h; | 1364 | struct nf_conntrack_tuple_hash *master_h; |
1365 | struct nf_conn *master_ct; | 1365 | struct nf_conn *master_ct; |
1366 | 1366 | ||
1367 | err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3); | 1367 | err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3); |
1368 | if (err < 0) | 1368 | if (err < 0) |
1369 | goto err2; | 1369 | goto err2; |
1370 | 1370 | ||
1371 | master_h = nf_conntrack_find_get(net, zone, &master); | 1371 | master_h = nf_conntrack_find_get(net, zone, &master); |
1372 | if (master_h == NULL) { | 1372 | if (master_h == NULL) { |
1373 | err = -ENOENT; | 1373 | err = -ENOENT; |
1374 | goto err2; | 1374 | goto err2; |
1375 | } | 1375 | } |
1376 | master_ct = nf_ct_tuplehash_to_ctrack(master_h); | 1376 | master_ct = nf_ct_tuplehash_to_ctrack(master_h); |
1377 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | 1377 | __set_bit(IPS_EXPECTED_BIT, &ct->status); |
1378 | ct->master = master_ct; | 1378 | ct->master = master_ct; |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | add_timer(&ct->timeout); | 1381 | add_timer(&ct->timeout); |
1382 | nf_conntrack_hash_insert(ct); | 1382 | nf_conntrack_hash_insert(ct); |
1383 | rcu_read_unlock(); | 1383 | rcu_read_unlock(); |
1384 | 1384 | ||
1385 | return ct; | 1385 | return ct; |
1386 | 1386 | ||
1387 | err2: | 1387 | err2: |
1388 | rcu_read_unlock(); | 1388 | rcu_read_unlock(); |
1389 | err1: | 1389 | err1: |
1390 | nf_conntrack_free(ct); | 1390 | nf_conntrack_free(ct); |
1391 | return ERR_PTR(err); | 1391 | return ERR_PTR(err); |
1392 | } | 1392 | } |
1393 | 1393 | ||
/* IPCTNL_MSG_CT_NEW handler: create a new conntrack entry, or update an
 * existing one.
 *
 * The entry is looked up by the original and/or reply tuple under
 * nf_conntrack_lock.  If no entry exists and NLM_F_CREATE is set, a new
 * one is created via ctnetlink_create_conntrack(); if an entry exists
 * and NLM_F_EXCL is not set, it is updated via
 * ctnetlink_change_conntrack().
 *
 * Returns 0 on success or a negative errno (-ENOENT when the entry is
 * missing and NLM_F_CREATE is absent, -EEXIST when it exists and
 * NLM_F_EXCL is set, or an error from parsing/creation/update).
 */
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	/* Either tuple (or both) may be given; each is parsed only if
	 * its attribute is present. */
	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	/* Lookup and any modification happen under the global conntrack
	 * lock; the lock is dropped before reporting events below. */
	spin_lock_bh(&nf_conntrack_lock);
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			struct nf_conn *ct;
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct)) {
				err = PTR_ERR(ct);
				goto out_unlock;
			}
			err = 0;
			/* Hold a reference across the unlocked event
			 * report; released with nf_ct_put() below. */
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}
	/* implicit 'else' */

	/* We manipulate the conntrack inside the global conntrack table lock,
	 * so there's no need to increase the refcount */
	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		err = ctnetlink_change_conntrack(ct, cda);
		if (err == 0) {
			/* As above: pin the entry, drop the lock, then
			 * report the update to event listeners. */
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}

out_unlock:
	spin_unlock_bh(&nf_conntrack_lock);
	return err;
}
1494 | 1494 | ||
/***********************************************************************
 * EXPECT
 ***********************************************************************/

1499 | static inline int | 1499 | static inline int |
1500 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, | 1500 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, |
1501 | const struct nf_conntrack_tuple *tuple, | 1501 | const struct nf_conntrack_tuple *tuple, |
1502 | enum ctattr_expect type) | 1502 | enum ctattr_expect type) |
1503 | { | 1503 | { |
1504 | struct nlattr *nest_parms; | 1504 | struct nlattr *nest_parms; |
1505 | 1505 | ||
1506 | nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); | 1506 | nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); |
1507 | if (!nest_parms) | 1507 | if (!nest_parms) |
1508 | goto nla_put_failure; | 1508 | goto nla_put_failure; |
1509 | if (ctnetlink_dump_tuples(skb, tuple) < 0) | 1509 | if (ctnetlink_dump_tuples(skb, tuple) < 0) |
1510 | goto nla_put_failure; | 1510 | goto nla_put_failure; |
1511 | nla_nest_end(skb, nest_parms); | 1511 | nla_nest_end(skb, nest_parms); |
1512 | 1512 | ||
1513 | return 0; | 1513 | return 0; |
1514 | 1514 | ||
1515 | nla_put_failure: | 1515 | nla_put_failure: |
1516 | return -1; | 1516 | return -1; |
1517 | } | 1517 | } |
1518 | 1518 | ||
/* Emit the expectation's tuple mask as a nested CTA_EXPECT_MASK
 * attribute.
 *
 * A full tuple @m is synthesized from the (partial) expectation mask:
 * every byte starts out as 0xFF ("match everything"), then the source
 * address/port mask is copied in, and dst.protonum is taken from the
 * real @tuple so the right l4proto formatter is used below.
 *
 * Returns 0 on success, -1 on lack of skb space (caller cancels the
 * message).
 */
static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	/* Format the L3 part (addresses) with the protocol helper
	 * matching the tuple's l3num. */
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);

	if (unlikely(ret < 0))
		goto nla_put_failure;

	/* Then the L4 part (ports etc.). */
	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
1557 | 1557 | ||
/* Dump one expectation into @skb: its tuple, mask, master tuple,
 * remaining timeout (seconds, clamped at 0), an id (the kernel pointer
 * value) and, if the master has a helper attached, the helper name.
 *
 * The NLA_PUT_* macros jump to nla_put_failure when the skb is full.
 * Returns 0 on success, -1 on failure (caller cancels the message).
 * The helper pointer is read with rcu_dereference(); callers run under
 * rcu_read_lock().
 */
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	struct nf_conntrack_helper *helper;
	long timeout = (exp->timeout.expires - jiffies) / HZ;

	/* Already-expired timers would yield a negative value. */
	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
	/* The expectation's address doubles as its userspace-visible id;
	 * see the CTA_EXPECT_ID checks in the get/del handlers. */
	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
	helper = rcu_dereference(nfct_help(master)->helper);
	if (helper)
		NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);

	return 0;

nla_put_failure:
	return -1;
}
1589 | 1589 | ||
/* Build one complete expectation netlink message in @skb: nlmsghdr +
 * nfgenmsg header followed by the expectation payload.
 *
 * NLM_F_MULTI is set when @pid is non-zero, i.e. when the message is
 * part of a dump to a specific socket.  Returns the resulting skb
 * length on success, -1 on failure (the partial message is cancelled).
 */
static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			int event, const struct nf_conntrack_expect *exp)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	/* Encode the subsystem id in the high byte of the message type. */
	event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
1619 | 1619 | ||
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Expectation event notifier: broadcast newly created expectations
 * (IPEXP_NEW) to the NFNLGRP_CONNTRACK_EXP_NEW multicast group.
 *
 * Other event bits are ignored.  If nobody listens and the event was
 * not explicitly requested (item->report), nothing is built.  On
 * allocation or fill failure the error is propagated to listeners via
 * nfnetlink_set_err(-ENOBUFS).  Always returns 0.
 */
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *skb;
	unsigned int type;
	int flags = 0;

	if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
	} else
		return 0;

	if (!item->report &&
	    !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version	= NFNETLINK_V0;
	nfmsg->res_id	= 0;

	/* ctnetlink_exp_dump_expect() dereferences the master's helper
	 * via rcu_dereference(), hence the read-side lock here. */
	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
		       item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
#endif
1676 | static int ctnetlink_exp_done(struct netlink_callback *cb) | 1676 | static int ctnetlink_exp_done(struct netlink_callback *cb) |
1677 | { | 1677 | { |
1678 | if (cb->args[1]) | 1678 | if (cb->args[1]) |
1679 | nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); | 1679 | nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); |
1680 | return 0; | 1680 | return 0; |
1681 | } | 1681 | } |
1682 | 1682 | ||
/* Netlink dump callback: walk the per-net expectation hash table and
 * emit one message per expectation, optionally filtered by the
 * request's L3 protocol family.
 *
 * Resume state lives in the callback args:
 *   cb->args[0] - current hash bucket;
 *   cb->args[1] - expectation the previous pass stopped at (a reference
 *		 is held on it so the pointer stays valid between
 *		 passes; released here or in ctnetlink_exp_done()).
 *
 * Returns skb->len so netlink keeps calling us until a pass fits
 * nothing more.
 */
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
				 hnode) {
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			if (cb->args[1]) {
				/* Resuming: skip entries until we find
				 * the one we stopped at last time. */
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						 NETLINK_CB(cb->skb).pid,
						 cb->nlh->nlmsg_seq,
						 IPCTNL_MSG_EXP_NEW,
						 exp) < 0) {
				/* skb full: remember this entry, but only
				 * if it is not already being freed. */
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		if (cb->args[1]) {
			/* The remembered entry vanished from this bucket;
			 * rescan the bucket from the start. */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
1728 | 1728 | ||
/* Attribute validation policy for CTA_EXPECT_* attributes carried in
 * expectation (NFNL_SUBSYS_CTNETLINK_EXP) requests. */
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING },
};
1737 | 1737 | ||
/* IPCTNL_MSG_EXP_GET handler.
 *
 * With NLM_F_DUMP, start a table dump via ctnetlink_exp_dump_table().
 * Otherwise look up a single expectation by its master tuple
 * (CTA_EXPECT_MASTER is mandatory), optionally verify CTA_EXPECT_ID
 * (the expectation's kernel address, as emitted by the dump code), and
 * unicast one message back to the requester.
 *
 * Returns 0/positive on success or a negative errno.
 */
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		return netlink_dump_start(ctnl, skb, nlh,
					  ctnetlink_exp_dump_table,
					  ctnetlink_exp_done);
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	/* Takes a reference on the expectation; dropped on every exit
	 * path below. */
	exp = nf_ct_expect_find_get(net, zone, &tuple);
	if (!exp)
		return -ENOENT;

	if (cda[CTA_EXPECT_ID]) {
		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
		if (ntohl(id) != (u32)(unsigned long)exp) {
			nf_ct_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL)
		goto out;

	/* rcu lock for the helper dereference inside the dump path. */
	rcu_read_lock();
	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
	rcu_read_unlock();
	if (err <= 0)
		goto free;

	nf_ct_expect_put(exp);

	return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);

free:
	kfree_skb(skb2);
out:
	nf_ct_expect_put(exp);
	return err;
}
1804 | 1804 | ||
1805 | static int | 1805 | static int |
1806 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | 1806 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, |
1807 | const struct nlmsghdr *nlh, | 1807 | const struct nlmsghdr *nlh, |
1808 | const struct nlattr * const cda[]) | 1808 | const struct nlattr * const cda[]) |
1809 | { | 1809 | { |
1810 | struct net *net = sock_net(ctnl); | 1810 | struct net *net = sock_net(ctnl); |
1811 | struct nf_conntrack_expect *exp; | 1811 | struct nf_conntrack_expect *exp; |
1812 | struct nf_conntrack_tuple tuple; | 1812 | struct nf_conntrack_tuple tuple; |
1813 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1813 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
1814 | struct hlist_node *n, *next; | 1814 | struct hlist_node *n, *next; |
1815 | u_int8_t u3 = nfmsg->nfgen_family; | 1815 | u_int8_t u3 = nfmsg->nfgen_family; |
1816 | unsigned int i; | 1816 | unsigned int i; |
1817 | u16 zone; | 1817 | u16 zone; |
1818 | int err; | 1818 | int err; |
1819 | 1819 | ||
1820 | if (cda[CTA_EXPECT_TUPLE]) { | 1820 | if (cda[CTA_EXPECT_TUPLE]) { |
1821 | /* delete a single expect by tuple */ | 1821 | /* delete a single expect by tuple */ |
1822 | err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); | 1822 | err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); |
1823 | if (err < 0) | 1823 | if (err < 0) |
1824 | return err; | 1824 | return err; |
1825 | 1825 | ||
1826 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); | 1826 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); |
1827 | if (err < 0) | 1827 | if (err < 0) |
1828 | return err; | 1828 | return err; |
1829 | 1829 | ||
1830 | /* bump usage count to 2 */ | 1830 | /* bump usage count to 2 */ |
1831 | exp = nf_ct_expect_find_get(net, zone, &tuple); | 1831 | exp = nf_ct_expect_find_get(net, zone, &tuple); |
1832 | if (!exp) | 1832 | if (!exp) |
1833 | return -ENOENT; | 1833 | return -ENOENT; |
1834 | 1834 | ||
1835 | if (cda[CTA_EXPECT_ID]) { | 1835 | if (cda[CTA_EXPECT_ID]) { |
1836 | __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); | 1836 | __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); |
1837 | if (ntohl(id) != (u32)(unsigned long)exp) { | 1837 | if (ntohl(id) != (u32)(unsigned long)exp) { |
1838 | nf_ct_expect_put(exp); | 1838 | nf_ct_expect_put(exp); |
1839 | return -ENOENT; | 1839 | return -ENOENT; |
1840 | } | 1840 | } |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | /* after list removal, usage count == 1 */ | 1843 | /* after list removal, usage count == 1 */ |
1844 | nf_ct_unexpect_related(exp); | 1844 | nf_ct_unexpect_related(exp); |
1845 | /* have to put what we 'get' above. | 1845 | /* have to put what we 'get' above. |
1846 | * after this line usage count == 0 */ | 1846 | * after this line usage count == 0 */ |
1847 | nf_ct_expect_put(exp); | 1847 | nf_ct_expect_put(exp); |
1848 | } else if (cda[CTA_EXPECT_HELP_NAME]) { | 1848 | } else if (cda[CTA_EXPECT_HELP_NAME]) { |
1849 | char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); | 1849 | char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); |
1850 | struct nf_conn_help *m_help; | 1850 | struct nf_conn_help *m_help; |
1851 | 1851 | ||
1852 | /* delete all expectations for this helper */ | 1852 | /* delete all expectations for this helper */ |
1853 | spin_lock_bh(&nf_conntrack_lock); | 1853 | spin_lock_bh(&nf_conntrack_lock); |
1854 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 1854 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
1855 | hlist_for_each_entry_safe(exp, n, next, | 1855 | hlist_for_each_entry_safe(exp, n, next, |
1856 | &net->ct.expect_hash[i], | 1856 | &net->ct.expect_hash[i], |
1857 | hnode) { | 1857 | hnode) { |
1858 | m_help = nfct_help(exp->master); | 1858 | m_help = nfct_help(exp->master); |
1859 | if (!strcmp(m_help->helper->name, name) && | 1859 | if (!strcmp(m_help->helper->name, name) && |
1860 | del_timer(&exp->timeout)) { | 1860 | del_timer(&exp->timeout)) { |
1861 | nf_ct_unlink_expect(exp); | 1861 | nf_ct_unlink_expect(exp); |
1862 | nf_ct_expect_put(exp); | 1862 | nf_ct_expect_put(exp); |
1863 | } | 1863 | } |
1864 | } | 1864 | } |
1865 | } | 1865 | } |
1866 | spin_unlock_bh(&nf_conntrack_lock); | 1866 | spin_unlock_bh(&nf_conntrack_lock); |
1867 | } else { | 1867 | } else { |
1868 | /* This basically means we have to flush everything*/ | 1868 | /* This basically means we have to flush everything*/ |
1869 | spin_lock_bh(&nf_conntrack_lock); | 1869 | spin_lock_bh(&nf_conntrack_lock); |
1870 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 1870 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
1871 | hlist_for_each_entry_safe(exp, n, next, | 1871 | hlist_for_each_entry_safe(exp, n, next, |
1872 | &net->ct.expect_hash[i], | 1872 | &net->ct.expect_hash[i], |
1873 | hnode) { | 1873 | hnode) { |
1874 | if (del_timer(&exp->timeout)) { | 1874 | if (del_timer(&exp->timeout)) { |
1875 | nf_ct_unlink_expect(exp); | 1875 | nf_ct_unlink_expect(exp); |
1876 | nf_ct_expect_put(exp); | 1876 | nf_ct_expect_put(exp); |
1877 | } | 1877 | } |
1878 | } | 1878 | } |
1879 | } | 1879 | } |
1880 | spin_unlock_bh(&nf_conntrack_lock); | 1880 | spin_unlock_bh(&nf_conntrack_lock); |
1881 | } | 1881 | } |
1882 | 1882 | ||
1883 | return 0; | 1883 | return 0; |
1884 | } | 1884 | } |
1885 | static int | 1885 | static int |
1886 | ctnetlink_change_expect(struct nf_conntrack_expect *x, | 1886 | ctnetlink_change_expect(struct nf_conntrack_expect *x, |
1887 | const struct nlattr * const cda[]) | 1887 | const struct nlattr * const cda[]) |
1888 | { | 1888 | { |
1889 | return -EOPNOTSUPP; | 1889 | return -EOPNOTSUPP; |
1890 | } | 1890 | } |
1891 | 1891 | ||
1892 | static int | 1892 | static int |
1893 | ctnetlink_create_expect(struct net *net, u16 zone, | 1893 | ctnetlink_create_expect(struct net *net, u16 zone, |
1894 | const struct nlattr * const cda[], | 1894 | const struct nlattr * const cda[], |
1895 | u_int8_t u3, | 1895 | u_int8_t u3, |
1896 | u32 pid, int report) | 1896 | u32 pid, int report) |
1897 | { | 1897 | { |
1898 | struct nf_conntrack_tuple tuple, mask, master_tuple; | 1898 | struct nf_conntrack_tuple tuple, mask, master_tuple; |
1899 | struct nf_conntrack_tuple_hash *h = NULL; | 1899 | struct nf_conntrack_tuple_hash *h = NULL; |
1900 | struct nf_conntrack_expect *exp; | 1900 | struct nf_conntrack_expect *exp; |
1901 | struct nf_conn *ct; | 1901 | struct nf_conn *ct; |
1902 | struct nf_conn_help *help; | 1902 | struct nf_conn_help *help; |
1903 | int err = 0; | 1903 | int err = 0; |
1904 | 1904 | ||
1905 | /* caller guarantees that those three CTA_EXPECT_* exist */ | 1905 | /* caller guarantees that those three CTA_EXPECT_* exist */ |
1906 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); | 1906 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); |
1907 | if (err < 0) | 1907 | if (err < 0) |
1908 | return err; | 1908 | return err; |
1909 | err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3); | 1909 | err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3); |
1910 | if (err < 0) | 1910 | if (err < 0) |
1911 | return err; | 1911 | return err; |
1912 | err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3); | 1912 | err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3); |
1913 | if (err < 0) | 1913 | if (err < 0) |
1914 | return err; | 1914 | return err; |
1915 | 1915 | ||
1916 | /* Look for master conntrack of this expectation */ | 1916 | /* Look for master conntrack of this expectation */ |
1917 | h = nf_conntrack_find_get(net, zone, &master_tuple); | 1917 | h = nf_conntrack_find_get(net, zone, &master_tuple); |
1918 | if (!h) | 1918 | if (!h) |
1919 | return -ENOENT; | 1919 | return -ENOENT; |
1920 | ct = nf_ct_tuplehash_to_ctrack(h); | 1920 | ct = nf_ct_tuplehash_to_ctrack(h); |
1921 | help = nfct_help(ct); | 1921 | help = nfct_help(ct); |
1922 | 1922 | ||
1923 | if (!help || !help->helper) { | 1923 | if (!help || !help->helper) { |
1924 | /* such conntrack hasn't got any helper, abort */ | 1924 | /* such conntrack hasn't got any helper, abort */ |
1925 | err = -EOPNOTSUPP; | 1925 | err = -EOPNOTSUPP; |
1926 | goto out; | 1926 | goto out; |
1927 | } | 1927 | } |
1928 | 1928 | ||
1929 | exp = nf_ct_expect_alloc(ct); | 1929 | exp = nf_ct_expect_alloc(ct); |
1930 | if (!exp) { | 1930 | if (!exp) { |
1931 | err = -ENOMEM; | 1931 | err = -ENOMEM; |
1932 | goto out; | 1932 | goto out; |
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | exp->class = 0; | 1935 | exp->class = 0; |
1936 | exp->expectfn = NULL; | 1936 | exp->expectfn = NULL; |
1937 | exp->flags = 0; | 1937 | exp->flags = 0; |
1938 | exp->master = ct; | 1938 | exp->master = ct; |
1939 | exp->helper = NULL; | 1939 | exp->helper = NULL; |
1940 | memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); | 1940 | memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); |
1941 | memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); | 1941 | memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); |
1942 | exp->mask.src.u.all = mask.src.u.all; | 1942 | exp->mask.src.u.all = mask.src.u.all; |
1943 | 1943 | ||
1944 | err = nf_ct_expect_related_report(exp, pid, report); | 1944 | err = nf_ct_expect_related_report(exp, pid, report); |
1945 | nf_ct_expect_put(exp); | 1945 | nf_ct_expect_put(exp); |
1946 | 1946 | ||
1947 | out: | 1947 | out: |
1948 | nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); | 1948 | nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); |
1949 | return err; | 1949 | return err; |
1950 | } | 1950 | } |
1951 | 1951 | ||
1952 | static int | 1952 | static int |
1953 | ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, | 1953 | ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, |
1954 | const struct nlmsghdr *nlh, | 1954 | const struct nlmsghdr *nlh, |
1955 | const struct nlattr * const cda[]) | 1955 | const struct nlattr * const cda[]) |
1956 | { | 1956 | { |
1957 | struct net *net = sock_net(ctnl); | 1957 | struct net *net = sock_net(ctnl); |
1958 | struct nf_conntrack_tuple tuple; | 1958 | struct nf_conntrack_tuple tuple; |
1959 | struct nf_conntrack_expect *exp; | 1959 | struct nf_conntrack_expect *exp; |
1960 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1960 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
1961 | u_int8_t u3 = nfmsg->nfgen_family; | 1961 | u_int8_t u3 = nfmsg->nfgen_family; |
1962 | u16 zone; | 1962 | u16 zone; |
1963 | int err; | 1963 | int err; |
1964 | 1964 | ||
1965 | if (!cda[CTA_EXPECT_TUPLE] | 1965 | if (!cda[CTA_EXPECT_TUPLE] |
1966 | || !cda[CTA_EXPECT_MASK] | 1966 | || !cda[CTA_EXPECT_MASK] |
1967 | || !cda[CTA_EXPECT_MASTER]) | 1967 | || !cda[CTA_EXPECT_MASTER]) |
1968 | return -EINVAL; | 1968 | return -EINVAL; |
1969 | 1969 | ||
1970 | err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); | 1970 | err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); |
1971 | if (err < 0) | 1971 | if (err < 0) |
1972 | return err; | 1972 | return err; |
1973 | 1973 | ||
1974 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); | 1974 | err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); |
1975 | if (err < 0) | 1975 | if (err < 0) |
1976 | return err; | 1976 | return err; |
1977 | 1977 | ||
1978 | spin_lock_bh(&nf_conntrack_lock); | 1978 | spin_lock_bh(&nf_conntrack_lock); |
1979 | exp = __nf_ct_expect_find(net, zone, &tuple); | 1979 | exp = __nf_ct_expect_find(net, zone, &tuple); |
1980 | 1980 | ||
1981 | if (!exp) { | 1981 | if (!exp) { |
1982 | spin_unlock_bh(&nf_conntrack_lock); | 1982 | spin_unlock_bh(&nf_conntrack_lock); |
1983 | err = -ENOENT; | 1983 | err = -ENOENT; |
1984 | if (nlh->nlmsg_flags & NLM_F_CREATE) { | 1984 | if (nlh->nlmsg_flags & NLM_F_CREATE) { |
1985 | err = ctnetlink_create_expect(net, zone, cda, | 1985 | err = ctnetlink_create_expect(net, zone, cda, |
1986 | u3, | 1986 | u3, |
1987 | NETLINK_CB(skb).pid, | 1987 | NETLINK_CB(skb).pid, |
1988 | nlmsg_report(nlh)); | 1988 | nlmsg_report(nlh)); |
1989 | } | 1989 | } |
1990 | return err; | 1990 | return err; |
1991 | } | 1991 | } |
1992 | 1992 | ||
1993 | err = -EEXIST; | 1993 | err = -EEXIST; |
1994 | if (!(nlh->nlmsg_flags & NLM_F_EXCL)) | 1994 | if (!(nlh->nlmsg_flags & NLM_F_EXCL)) |
1995 | err = ctnetlink_change_expect(exp, cda); | 1995 | err = ctnetlink_change_expect(exp, cda); |
1996 | spin_unlock_bh(&nf_conntrack_lock); | 1996 | spin_unlock_bh(&nf_conntrack_lock); |
1997 | 1997 | ||
1998 | return err; | 1998 | return err; |
1999 | } | 1999 | } |
2000 | 2000 | ||
2001 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2001 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2002 | static struct nf_ct_event_notifier ctnl_notifier = { | 2002 | static struct nf_ct_event_notifier ctnl_notifier = { |
2003 | .fcn = ctnetlink_conntrack_event, | 2003 | .fcn = ctnetlink_conntrack_event, |
2004 | }; | 2004 | }; |
2005 | 2005 | ||
2006 | static struct nf_exp_event_notifier ctnl_notifier_exp = { | 2006 | static struct nf_exp_event_notifier ctnl_notifier_exp = { |
2007 | .fcn = ctnetlink_expect_event, | 2007 | .fcn = ctnetlink_expect_event, |
2008 | }; | 2008 | }; |
2009 | #endif | 2009 | #endif |
2010 | 2010 | ||
2011 | static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { | 2011 | static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { |
2012 | [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, | 2012 | [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, |
2013 | .attr_count = CTA_MAX, | 2013 | .attr_count = CTA_MAX, |
2014 | .policy = ct_nla_policy }, | 2014 | .policy = ct_nla_policy }, |
2015 | [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, | 2015 | [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, |
2016 | .attr_count = CTA_MAX, | 2016 | .attr_count = CTA_MAX, |
2017 | .policy = ct_nla_policy }, | 2017 | .policy = ct_nla_policy }, |
2018 | [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, | 2018 | [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, |
2019 | .attr_count = CTA_MAX, | 2019 | .attr_count = CTA_MAX, |
2020 | .policy = ct_nla_policy }, | 2020 | .policy = ct_nla_policy }, |
2021 | [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, | 2021 | [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, |
2022 | .attr_count = CTA_MAX, | 2022 | .attr_count = CTA_MAX, |
2023 | .policy = ct_nla_policy }, | 2023 | .policy = ct_nla_policy }, |
2024 | }; | 2024 | }; |
2025 | 2025 | ||
2026 | static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { | 2026 | static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { |
2027 | [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, | 2027 | [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, |
2028 | .attr_count = CTA_EXPECT_MAX, | 2028 | .attr_count = CTA_EXPECT_MAX, |
2029 | .policy = exp_nla_policy }, | 2029 | .policy = exp_nla_policy }, |
2030 | [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, | 2030 | [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, |
2031 | .attr_count = CTA_EXPECT_MAX, | 2031 | .attr_count = CTA_EXPECT_MAX, |
2032 | .policy = exp_nla_policy }, | 2032 | .policy = exp_nla_policy }, |
2033 | [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, | 2033 | [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, |
2034 | .attr_count = CTA_EXPECT_MAX, | 2034 | .attr_count = CTA_EXPECT_MAX, |
2035 | .policy = exp_nla_policy }, | 2035 | .policy = exp_nla_policy }, |
2036 | }; | 2036 | }; |
2037 | 2037 | ||
2038 | static const struct nfnetlink_subsystem ctnl_subsys = { | 2038 | static const struct nfnetlink_subsystem ctnl_subsys = { |
2039 | .name = "conntrack", | 2039 | .name = "conntrack", |
2040 | .subsys_id = NFNL_SUBSYS_CTNETLINK, | 2040 | .subsys_id = NFNL_SUBSYS_CTNETLINK, |
2041 | .cb_count = IPCTNL_MSG_MAX, | 2041 | .cb_count = IPCTNL_MSG_MAX, |
2042 | .cb = ctnl_cb, | 2042 | .cb = ctnl_cb, |
2043 | }; | 2043 | }; |
2044 | 2044 | ||
2045 | static const struct nfnetlink_subsystem ctnl_exp_subsys = { | 2045 | static const struct nfnetlink_subsystem ctnl_exp_subsys = { |
2046 | .name = "conntrack_expect", | 2046 | .name = "conntrack_expect", |
2047 | .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, | 2047 | .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, |
2048 | .cb_count = IPCTNL_MSG_EXP_MAX, | 2048 | .cb_count = IPCTNL_MSG_EXP_MAX, |
2049 | .cb = ctnl_exp_cb, | 2049 | .cb = ctnl_exp_cb, |
2050 | }; | 2050 | }; |
2051 | 2051 | ||
2052 | MODULE_ALIAS("ip_conntrack_netlink"); | 2052 | MODULE_ALIAS("ip_conntrack_netlink"); |
2053 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 2053 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
2054 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | 2054 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); |
2055 | 2055 | ||
2056 | static int __init ctnetlink_init(void) | 2056 | static int __init ctnetlink_init(void) |
2057 | { | 2057 | { |
2058 | int ret; | 2058 | int ret; |
2059 | 2059 | ||
2060 | printk("ctnetlink v%s: registering with nfnetlink.\n", version); | 2060 | pr_info("ctnetlink v%s: registering with nfnetlink.\n", version); |
2061 | ret = nfnetlink_subsys_register(&ctnl_subsys); | 2061 | ret = nfnetlink_subsys_register(&ctnl_subsys); |
2062 | if (ret < 0) { | 2062 | if (ret < 0) { |
2063 | printk("ctnetlink_init: cannot register with nfnetlink.\n"); | 2063 | pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); |
2064 | goto err_out; | 2064 | goto err_out; |
2065 | } | 2065 | } |
2066 | 2066 | ||
2067 | ret = nfnetlink_subsys_register(&ctnl_exp_subsys); | 2067 | ret = nfnetlink_subsys_register(&ctnl_exp_subsys); |
2068 | if (ret < 0) { | 2068 | if (ret < 0) { |
2069 | printk("ctnetlink_init: cannot register exp with nfnetlink.\n"); | 2069 | pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); |
2070 | goto err_unreg_subsys; | 2070 | goto err_unreg_subsys; |
2071 | } | 2071 | } |
2072 | 2072 | ||
2073 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2073 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2074 | ret = nf_conntrack_register_notifier(&ctnl_notifier); | 2074 | ret = nf_conntrack_register_notifier(&ctnl_notifier); |
2075 | if (ret < 0) { | 2075 | if (ret < 0) { |
2076 | printk("ctnetlink_init: cannot register notifier.\n"); | 2076 | pr_err("ctnetlink_init: cannot register notifier.\n"); |
2077 | goto err_unreg_exp_subsys; | 2077 | goto err_unreg_exp_subsys; |
2078 | } | 2078 | } |
2079 | 2079 | ||
2080 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); | 2080 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); |
2081 | if (ret < 0) { | 2081 | if (ret < 0) { |
2082 | printk("ctnetlink_init: cannot expect register notifier.\n"); | 2082 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); |
2083 | goto err_unreg_notifier; | 2083 | goto err_unreg_notifier; |
2084 | } | 2084 | } |
2085 | #endif | 2085 | #endif |
2086 | 2086 | ||
2087 | return 0; | 2087 | return 0; |
2088 | 2088 | ||
2089 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2089 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2090 | err_unreg_notifier: | 2090 | err_unreg_notifier: |
2091 | nf_conntrack_unregister_notifier(&ctnl_notifier); | 2091 | nf_conntrack_unregister_notifier(&ctnl_notifier); |
2092 | err_unreg_exp_subsys: | 2092 | err_unreg_exp_subsys: |
2093 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2093 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2094 | #endif | 2094 | #endif |
2095 | err_unreg_subsys: | 2095 | err_unreg_subsys: |
2096 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2096 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2097 | err_out: | 2097 | err_out: |
2098 | return ret; | 2098 | return ret; |
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | static void __exit ctnetlink_exit(void) | 2101 | static void __exit ctnetlink_exit(void) |
2102 | { | 2102 | { |
2103 | printk("ctnetlink: unregistering from nfnetlink.\n"); | 2103 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); |
2104 | 2104 | ||
2105 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2105 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2106 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); | 2106 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); |
2107 | nf_conntrack_unregister_notifier(&ctnl_notifier); | 2107 | nf_conntrack_unregister_notifier(&ctnl_notifier); |
2108 | #endif | 2108 | #endif |
2109 | 2109 | ||
2110 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2110 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2111 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2111 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2112 | return; | 2112 | return; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | module_init(ctnetlink_init); | 2115 | module_init(ctnetlink_init); |
2116 | module_exit(ctnetlink_exit); | 2116 | module_exit(ctnetlink_exit); |
2117 | 2117 |
net/netfilter/nf_conntrack_proto_sctp.c
1 | /* | 1 | /* |
2 | * Connection tracking protocol helper module for SCTP. | 2 | * Connection tracking protocol helper module for SCTP. |
3 | * | 3 | * |
4 | * SCTP is defined in RFC 2960. References to various sections in this code | 4 | * SCTP is defined in RFC 2960. References to various sections in this code |
5 | * are to this RFC. | 5 | * are to this RFC. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/timer.h> | 13 | #include <linux/timer.h> |
14 | #include <linux/netfilter.h> | 14 | #include <linux/netfilter.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/in.h> | 16 | #include <linux/in.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/sctp.h> | 18 | #include <linux/sctp.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | 23 | ||
24 | #include <net/netfilter/nf_conntrack.h> | 24 | #include <net/netfilter/nf_conntrack.h> |
25 | #include <net/netfilter/nf_conntrack_l4proto.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
26 | #include <net/netfilter/nf_conntrack_ecache.h> | 26 | #include <net/netfilter/nf_conntrack_ecache.h> |
27 | 27 | ||
28 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more | 28 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more |
29 | closely. They're more complex. --RR | 29 | closely. They're more complex. --RR |
30 | 30 | ||
31 | And so for me for SCTP :D -Kiran */ | 31 | And so for me for SCTP :D -Kiran */ |
32 | 32 | ||
33 | static const char *const sctp_conntrack_names[] = { | 33 | static const char *const sctp_conntrack_names[] = { |
34 | "NONE", | 34 | "NONE", |
35 | "CLOSED", | 35 | "CLOSED", |
36 | "COOKIE_WAIT", | 36 | "COOKIE_WAIT", |
37 | "COOKIE_ECHOED", | 37 | "COOKIE_ECHOED", |
38 | "ESTABLISHED", | 38 | "ESTABLISHED", |
39 | "SHUTDOWN_SENT", | 39 | "SHUTDOWN_SENT", |
40 | "SHUTDOWN_RECD", | 40 | "SHUTDOWN_RECD", |
41 | "SHUTDOWN_ACK_SENT", | 41 | "SHUTDOWN_ACK_SENT", |
42 | }; | 42 | }; |
43 | 43 | ||
44 | #define SECS * HZ | 44 | #define SECS * HZ |
45 | #define MINS * 60 SECS | 45 | #define MINS * 60 SECS |
46 | #define HOURS * 60 MINS | 46 | #define HOURS * 60 MINS |
47 | #define DAYS * 24 HOURS | 47 | #define DAYS * 24 HOURS |
48 | 48 | ||
49 | static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = { | 49 | static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = { |
50 | [SCTP_CONNTRACK_CLOSED] = 10 SECS, | 50 | [SCTP_CONNTRACK_CLOSED] = 10 SECS, |
51 | [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, | 51 | [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, |
52 | [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, | 52 | [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, |
53 | [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, | 53 | [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, |
54 | [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, | 54 | [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, |
55 | [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, | 55 | [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, |
56 | [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, | 56 | [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define sNO SCTP_CONNTRACK_NONE | 59 | #define sNO SCTP_CONNTRACK_NONE |
60 | #define sCL SCTP_CONNTRACK_CLOSED | 60 | #define sCL SCTP_CONNTRACK_CLOSED |
61 | #define sCW SCTP_CONNTRACK_COOKIE_WAIT | 61 | #define sCW SCTP_CONNTRACK_COOKIE_WAIT |
62 | #define sCE SCTP_CONNTRACK_COOKIE_ECHOED | 62 | #define sCE SCTP_CONNTRACK_COOKIE_ECHOED |
63 | #define sES SCTP_CONNTRACK_ESTABLISHED | 63 | #define sES SCTP_CONNTRACK_ESTABLISHED |
64 | #define sSS SCTP_CONNTRACK_SHUTDOWN_SENT | 64 | #define sSS SCTP_CONNTRACK_SHUTDOWN_SENT |
65 | #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD | 65 | #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD |
66 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT | 66 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT |
67 | #define sIV SCTP_CONNTRACK_MAX | 67 | #define sIV SCTP_CONNTRACK_MAX |
68 | 68 | ||
69 | /* | 69 | /* |
70 | These are the descriptions of the states: | 70 | These are the descriptions of the states: |
71 | 71 | ||
72 | NOTE: These state names are tantalizingly similar to the states of an | 72 | NOTE: These state names are tantalizingly similar to the states of an |
73 | SCTP endpoint. But the interpretation of the states is a little different, | 73 | SCTP endpoint. But the interpretation of the states is a little different, |
74 | considering that these are the states of the connection and not of an end | 74 | considering that these are the states of the connection and not of an end |
75 | point. Please note the subtleties. -Kiran | 75 | point. Please note the subtleties. -Kiran |
76 | 76 | ||
77 | NONE - Nothing so far. | 77 | NONE - Nothing so far. |
78 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also | 78 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also |
79 | an INIT_ACK chunk in the reply direction. | 79 | an INIT_ACK chunk in the reply direction. |
80 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. | 80 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. |
81 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. | 81 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. |
82 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. | 82 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. |
83 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin. | 83 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin. |
84 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite | 84 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite |
85 | to that of the SHUTDOWN chunk. | 85 | to that of the SHUTDOWN chunk. |
86 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of | 86 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of |
87 | the SHUTDOWN chunk. Connection is closed. | 87 | the SHUTDOWN chunk. Connection is closed. |
88 | */ | 88 | */ |
89 | 89 | ||
90 | /* TODO | 90 | /* TODO |
91 | - I have assumed that the first INIT is in the original direction. | 91 | - I have assumed that the first INIT is in the original direction. |
92 | This messes things when an INIT comes in the reply direction in CLOSED | 92 | This messes things when an INIT comes in the reply direction in CLOSED |
93 | state. | 93 | state. |
94 | - Check the error type in the reply dir before transitioning from | 94 | - Check the error type in the reply dir before transitioning from |
95 | cookie echoed to closed. | 95 | cookie echoed to closed. |
96 | - Sec 5.2.4 of RFC 2960 | 96 | - Sec 5.2.4 of RFC 2960 |
97 | - Multi Homing support. | 97 | - Multi Homing support. |
98 | */ | 98 | */ |
99 | 99 | ||
100 | /* SCTP conntrack state transitions */ | 100 | /* SCTP conntrack state transitions */ |
101 | static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { | 101 | static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { |
102 | { | 102 | { |
103 | /* ORIGINAL */ | 103 | /* ORIGINAL */ |
104 | /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ | 104 | /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ |
105 | /* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA}, | 105 | /* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA}, |
106 | /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA}, | 106 | /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA}, |
107 | /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, | 107 | /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, |
108 | /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, | 108 | /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, |
109 | /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, | 109 | /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, |
110 | /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/ | 110 | /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/ |
111 | /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ | 111 | /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ |
112 | /* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */ | 112 | /* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */ |
113 | /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} | 113 | /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} |
114 | }, | 114 | }, |
115 | { | 115 | { |
116 | /* REPLY */ | 116 | /* REPLY */ |
117 | /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ | 117 | /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ |
118 | /* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */ | 118 | /* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */ |
119 | /* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA}, | 119 | /* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA}, |
120 | /* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, | 120 | /* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, |
121 | /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, | 121 | /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, |
122 | /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, | 122 | /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, |
123 | /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, | 123 | /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, |
124 | /* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */ | 124 | /* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */ |
125 | /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, | 125 | /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, |
126 | /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} | 126 | /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} |
127 | } | 127 | } |
128 | }; | 128 | }; |
129 | 129 | ||
130 | static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, | 130 | static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, |
131 | struct nf_conntrack_tuple *tuple) | 131 | struct nf_conntrack_tuple *tuple) |
132 | { | 132 | { |
133 | const struct sctphdr *hp; | 133 | const struct sctphdr *hp; |
134 | struct sctphdr _hdr; | 134 | struct sctphdr _hdr; |
135 | 135 | ||
136 | /* Actually only need first 8 bytes. */ | 136 | /* Actually only need first 8 bytes. */ |
137 | hp = skb_header_pointer(skb, dataoff, 8, &_hdr); | 137 | hp = skb_header_pointer(skb, dataoff, 8, &_hdr); |
138 | if (hp == NULL) | 138 | if (hp == NULL) |
139 | return false; | 139 | return false; |
140 | 140 | ||
141 | tuple->src.u.sctp.port = hp->source; | 141 | tuple->src.u.sctp.port = hp->source; |
142 | tuple->dst.u.sctp.port = hp->dest; | 142 | tuple->dst.u.sctp.port = hp->dest; |
143 | return true; | 143 | return true; |
144 | } | 144 | } |
145 | 145 | ||
146 | static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple, | 146 | static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple, |
147 | const struct nf_conntrack_tuple *orig) | 147 | const struct nf_conntrack_tuple *orig) |
148 | { | 148 | { |
149 | tuple->src.u.sctp.port = orig->dst.u.sctp.port; | 149 | tuple->src.u.sctp.port = orig->dst.u.sctp.port; |
150 | tuple->dst.u.sctp.port = orig->src.u.sctp.port; | 150 | tuple->dst.u.sctp.port = orig->src.u.sctp.port; |
151 | return true; | 151 | return true; |
152 | } | 152 | } |
153 | 153 | ||
154 | /* Print out the per-protocol part of the tuple. */ | 154 | /* Print out the per-protocol part of the tuple. */ |
155 | static int sctp_print_tuple(struct seq_file *s, | 155 | static int sctp_print_tuple(struct seq_file *s, |
156 | const struct nf_conntrack_tuple *tuple) | 156 | const struct nf_conntrack_tuple *tuple) |
157 | { | 157 | { |
158 | return seq_printf(s, "sport=%hu dport=%hu ", | 158 | return seq_printf(s, "sport=%hu dport=%hu ", |
159 | ntohs(tuple->src.u.sctp.port), | 159 | ntohs(tuple->src.u.sctp.port), |
160 | ntohs(tuple->dst.u.sctp.port)); | 160 | ntohs(tuple->dst.u.sctp.port)); |
161 | } | 161 | } |
162 | 162 | ||
163 | /* Print out the private part of the conntrack. */ | 163 | /* Print out the private part of the conntrack. */ |
164 | static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | 164 | static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) |
165 | { | 165 | { |
166 | enum sctp_conntrack state; | 166 | enum sctp_conntrack state; |
167 | 167 | ||
168 | spin_lock_bh(&ct->lock); | 168 | spin_lock_bh(&ct->lock); |
169 | state = ct->proto.sctp.state; | 169 | state = ct->proto.sctp.state; |
170 | spin_unlock_bh(&ct->lock); | 170 | spin_unlock_bh(&ct->lock); |
171 | 171 | ||
172 | return seq_printf(s, "%s ", sctp_conntrack_names[state]); | 172 | return seq_printf(s, "%s ", sctp_conntrack_names[state]); |
173 | } | 173 | } |
174 | 174 | ||
/* Walk every chunk in an SCTP packet.  @offset starts just past the
 * common header and advances by each chunk's length rounded up to a
 * 4-byte boundary (RFC 4960 Sec 3.2 padding rule); @count tracks how
 * many chunks were seen.  Iteration stops when the next chunk header
 * cannot be fetched from the skb (end of packet or truncation).
 * NOTE: @sch and @count are evaluated multiple times — callers pass
 * plain lvalues. */
#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
for ((offset) = (dataoff) + sizeof(sctp_sctphdr_t), (count) = 0;	\
	(offset) < (skb)->len &&					\
	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));	\
	(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
180 | 180 | ||
/* Some validity checks to make sure the chunks are fine.
 *
 * Enforces RFC 4960 packing rules: INIT, INIT ACK and SHUTDOWN
 * COMPLETE must be the only chunk in their packet, and COOKIE ECHO /
 * COOKIE ACK must be the first chunk.  Zero-length chunks are rejected
 * (they would also stall the chunk walk).  When @map is non-NULL a bit
 * is set for every chunk type seen, for the caller's vtag checks.
 * Returns 0 if the packet passes, non-zero otherwise. */
static int do_basic_checks(struct nf_conn *ct,
			   const struct sk_buff *skb,
			   unsigned int dataoff,
			   unsigned long *map)
{
	u_int32_t offset, count;
	sctp_chunkhdr_t _sch, *sch;
	int flag;

	flag = 0;

	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
		pr_debug("Chunk Num: %d Type: %d\n", count, sch->type);

		/* These chunk types must be the only chunk in a packet;
		 * remember that one was seen. */
		if (sch->type == SCTP_CID_INIT ||
		    sch->type == SCTP_CID_INIT_ACK ||
		    sch->type == SCTP_CID_SHUTDOWN_COMPLETE)
			flag = 1;

		/*
		 * Cookie Ack/Echo chunks not the first OR
		 * Init / Init Ack / Shutdown compl chunks not the only chunks
		 * OR zero-length.
		 */
		if (((sch->type == SCTP_CID_COOKIE_ACK ||
		      sch->type == SCTP_CID_COOKIE_ECHO ||
		      flag) &&
		     count != 0) || !sch->length) {
			pr_debug("Basic checks failed\n");
			return 1;
		}

		if (map)
			set_bit(sch->type, map);
	}

	pr_debug("Basic checks passed\n");
	/* A packet containing no parsable chunk at all is also invalid. */
	return count == 0;
}
221 | 221 | ||
/* Compute the next conntrack state for a chunk of @chunk_type arriving
 * in direction @dir while the connection is in @cur_state.  The switch
 * maps the chunk type to a row index of the sctp_conntracks transition
 * table; chunk types that do not drive the state machine (DATA, SACK,
 * HEARTBEAT and its ACK, ...) leave the state unchanged.  The table may
 * yield SCTP_CONNTRACK_MAX, which callers treat as an invalid
 * transition. */
static int sctp_new_state(enum ip_conntrack_dir dir,
			  enum sctp_conntrack cur_state,
			  int chunk_type)
{
	int i;

	pr_debug("Chunk type: %d\n", chunk_type);

	switch (chunk_type) {
	case SCTP_CID_INIT:
		pr_debug("SCTP_CID_INIT\n");
		i = 0;
		break;
	case SCTP_CID_INIT_ACK:
		pr_debug("SCTP_CID_INIT_ACK\n");
		i = 1;
		break;
	case SCTP_CID_ABORT:
		pr_debug("SCTP_CID_ABORT\n");
		i = 2;
		break;
	case SCTP_CID_SHUTDOWN:
		pr_debug("SCTP_CID_SHUTDOWN\n");
		i = 3;
		break;
	case SCTP_CID_SHUTDOWN_ACK:
		pr_debug("SCTP_CID_SHUTDOWN_ACK\n");
		i = 4;
		break;
	case SCTP_CID_ERROR:
		pr_debug("SCTP_CID_ERROR\n");
		i = 5;
		break;
	case SCTP_CID_COOKIE_ECHO:
		pr_debug("SCTP_CID_COOKIE_ECHO\n");
		i = 6;
		break;
	case SCTP_CID_COOKIE_ACK:
		pr_debug("SCTP_CID_COOKIE_ACK\n");
		i = 7;
		break;
	case SCTP_CID_SHUTDOWN_COMPLETE:
		pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
		i = 8;
		break;
	default:
		/* Other chunks like DATA, SACK, HEARTBEAT and
		its ACK do not cause a change in state */
		pr_debug("Unknown chunk type, Will stay in %s\n",
			 sctp_conntrack_names[cur_state]);
		return cur_state;
	}

	pr_debug("dir: %d   cur_state: %s  chunk_type: %d  new_state: %s\n",
		 dir, sctp_conntrack_names[cur_state], chunk_type,
		 sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);

	return sctp_conntracks[dir][i][cur_state];
}
281 | 281 | ||
/* Returns verdict for packet, or -NF_ACCEPT for invalid.
 *
 * Validates the chunks and the verification tag (RFC 4960 Sec 8.5,
 * with the special cases of Sec 8.5.1), then advances the conntrack
 * state machine once per chunk under ct->lock, recording new vtags
 * from INIT/INIT ACK chunks and refreshing the timeout for the final
 * state. */
static int sctp_packet(struct nf_conn *ct,
		       const struct sk_buff *skb,
		       unsigned int dataoff,
		       enum ip_conntrack_info ctinfo,
		       u_int8_t pf,
		       unsigned int hooknum)
{
	enum sctp_conntrack new_state, old_state;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	const struct sctphdr *sh;
	struct sctphdr _sctph;
	const struct sctp_chunkhdr *sch;
	struct sctp_chunkhdr _sch;
	u_int32_t offset, count;
	/* One bit per chunk type, filled by do_basic_checks().
	 * NOTE(review): sizing looks oversized — 256 bits would need
	 * 256/BITS_PER_LONG longs, not 256/sizeof(long); harmless but
	 * worth confirming. */
	unsigned long map[256 / sizeof(unsigned long)] = { 0 };

	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
	if (sh == NULL)
		goto out;

	if (do_basic_checks(ct, skb, dataoff, map) != 0)
		goto out;

	/* Check the verification tag (Sec 8.5) */
	if (!test_bit(SCTP_CID_INIT, map) &&
	    !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
	    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
	    !test_bit(SCTP_CID_ABORT, map) &&
	    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
	    sh->vtag != ct->proto.sctp.vtag[dir]) {
		pr_debug("Verification tag check failed\n");
		goto out;
	}

	old_state = new_state = SCTP_CONNTRACK_NONE;
	spin_lock_bh(&ct->lock);
	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
		/* Special cases of Verification tag check (Sec 8.5.1) */
		if (sch->type == SCTP_CID_INIT) {
			/* Sec 8.5.1 (A): INIT must carry a zero vtag */
			if (sh->vtag != 0)
				goto out_unlock;
		} else if (sch->type == SCTP_CID_ABORT) {
			/* Sec 8.5.1 (B): vtag of either direction is valid */
			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
			    sh->vtag != ct->proto.sctp.vtag[!dir])
				goto out_unlock;
		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
			/* Sec 8.5.1 (C) */
			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
			    sh->vtag != ct->proto.sctp.vtag[!dir] &&
			    sch->flags & SCTP_CHUNK_FLAG_T)
				goto out_unlock;
		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
			/* Sec 8.5.1 (D) */
			if (sh->vtag != ct->proto.sctp.vtag[dir])
				goto out_unlock;
		}

		old_state = ct->proto.sctp.state;
		new_state = sctp_new_state(dir, old_state, sch->type);

		/* Invalid */
		if (new_state == SCTP_CONNTRACK_MAX) {
			pr_debug("nf_conntrack_sctp: Invalid dir=%i ctype=%u "
				 "conntrack=%u\n",
				 dir, sch->type, old_state);
			goto out_unlock;
		}

		/* If it is an INIT or an INIT ACK note down the vtag */
		if (sch->type == SCTP_CID_INIT ||
		    sch->type == SCTP_CID_INIT_ACK) {
			sctp_inithdr_t _inithdr, *ih;

			ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
						sizeof(_inithdr), &_inithdr);
			if (ih == NULL)
				goto out_unlock;
			pr_debug("Setting vtag %x for dir %d\n",
				 ih->init_tag, !dir);
			/* The peer's init_tag becomes the expected vtag for
			 * packets arriving in the opposite direction. */
			ct->proto.sctp.vtag[!dir] = ih->init_tag;
		}

		ct->proto.sctp.state = new_state;
		if (old_state != new_state)
			nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
	}
	spin_unlock_bh(&ct->lock);

	nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]);

	/* A reply moving COOKIE ECHOED -> ESTABLISHED completes the
	 * four-way handshake: mark the connection assured. */
	if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
	    dir == IP_CT_DIR_REPLY &&
	    new_state == SCTP_CONNTRACK_ESTABLISHED) {
		pr_debug("Setting assured bit\n");
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	}

	return NF_ACCEPT;

out_unlock:
	spin_unlock_bh(&ct->lock);
out:
	return -NF_ACCEPT;
}
390 | 390 | ||
/* Called when a new connection for this protocol found.
 *
 * Performs the basic chunk checks, rejects chunks that may not start a
 * conntrack entry for an out-of-the-blue packet (Sec 8.4), seeds the
 * initial state and the reply-direction vtag, and returns false to
 * drop the candidate conntrack on any failure.  No locking needed:
 * the conntrack is not yet visible to other CPUs. */
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
		     unsigned int dataoff)
{
	enum sctp_conntrack new_state;
	const struct sctphdr *sh;
	struct sctphdr _sctph;
	const struct sctp_chunkhdr *sch;
	struct sctp_chunkhdr _sch;
	u_int32_t offset, count;
	/* One bit per chunk type, filled by do_basic_checks() */
	unsigned long map[256 / sizeof(unsigned long)] = { 0 };

	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
	if (sh == NULL)
		return false;

	if (do_basic_checks(ct, skb, dataoff, map) != 0)
		return false;

	/* If an OOTB packet has any of these chunks discard (Sec 8.4) */
	if (test_bit(SCTP_CID_ABORT, map) ||
	    test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
	    test_bit(SCTP_CID_COOKIE_ACK, map))
		return false;

	new_state = SCTP_CONNTRACK_MAX;
	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
		/* Don't need lock here: this conntrack not in circulation yet */
		new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
					   SCTP_CONNTRACK_NONE, sch->type);

		/* Invalid: delete conntrack */
		if (new_state == SCTP_CONNTRACK_NONE ||
		    new_state == SCTP_CONNTRACK_MAX) {
			pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
			return false;
		}

		/* Copy the vtag into the state info */
		if (sch->type == SCTP_CID_INIT) {
			if (sh->vtag == 0) {
				sctp_inithdr_t _inithdr, *ih;

				ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
							sizeof(_inithdr), &_inithdr);
				if (ih == NULL)
					return false;

				pr_debug("Setting vtag %x for new conn\n",
					 ih->init_tag);

				ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
					ih->init_tag;
			} else {
				/* Sec 8.5.1 (A): INIT with non-zero vtag is invalid */
				return false;
			}
		}
		/* If it is a shutdown ack OOTB packet, we expect a return
		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
		else {
			pr_debug("Setting vtag %x for new conn OOTB\n",
				 sh->vtag);
			ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
		}

		ct->proto.sctp.state = new_state;
	}

	return true;
}
462 | 462 | ||
463 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 463 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
464 | 464 | ||
465 | #include <linux/netfilter/nfnetlink.h> | 465 | #include <linux/netfilter/nfnetlink.h> |
466 | #include <linux/netfilter/nfnetlink_conntrack.h> | 466 | #include <linux/netfilter/nfnetlink_conntrack.h> |
467 | 467 | ||
/* Dump the SCTP private conntrack state (state plus both verification
 * tags) into a nested CTA_PROTOINFO_SCTP attribute on @skb.
 * Returns 0 on success, -1 when the skb ran out of tail room.
 * ct->lock is held across the NLA_PUT calls so the three fields form
 * a consistent snapshot; the failure label must therefore unlock. */
static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
			  struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	spin_lock_bh(&ct->lock);
	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);

	NLA_PUT_BE32(skb,
		     CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
		     ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);

	NLA_PUT_BE32(skb,
		     CTA_PROTOINFO_SCTP_VTAG_REPLY,
		     ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);

	spin_unlock_bh(&ct->lock);

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	/* NLA_PUT_* jump here on overflow — drop the lock before failing */
	spin_unlock_bh(&ct->lock);
	return -1;
}
498 | 498 | ||
/* Attribute policy for parsing nested CTA_PROTOINFO_SCTP attributes
 * in nlattr_to_sctp(). */
static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
	[CTA_PROTOINFO_SCTP_STATE]	    = { .type = NLA_U8 },
	[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]  = { .type = NLA_U32 },
	[CTA_PROTOINFO_SCTP_VTAG_REPLY]     = { .type = NLA_U32 },
};
504 | 504 | ||
505 | static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) | 505 | static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) |
506 | { | 506 | { |
507 | struct nlattr *attr = cda[CTA_PROTOINFO_SCTP]; | 507 | struct nlattr *attr = cda[CTA_PROTOINFO_SCTP]; |
508 | struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1]; | 508 | struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1]; |
509 | int err; | 509 | int err; |
510 | 510 | ||
511 | /* updates may not contain the internal protocol info, skip parsing */ | 511 | /* updates may not contain the internal protocol info, skip parsing */ |
512 | if (!attr) | 512 | if (!attr) |
513 | return 0; | 513 | return 0; |
514 | 514 | ||
515 | err = nla_parse_nested(tb, | 515 | err = nla_parse_nested(tb, |
516 | CTA_PROTOINFO_SCTP_MAX, | 516 | CTA_PROTOINFO_SCTP_MAX, |
517 | attr, | 517 | attr, |
518 | sctp_nla_policy); | 518 | sctp_nla_policy); |
519 | if (err < 0) | 519 | if (err < 0) |
520 | return err; | 520 | return err; |
521 | 521 | ||
522 | if (!tb[CTA_PROTOINFO_SCTP_STATE] || | 522 | if (!tb[CTA_PROTOINFO_SCTP_STATE] || |
523 | !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] || | 523 | !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] || |
524 | !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) | 524 | !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) |
525 | return -EINVAL; | 525 | return -EINVAL; |
526 | 526 | ||
527 | spin_lock_bh(&ct->lock); | 527 | spin_lock_bh(&ct->lock); |
528 | ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); | 528 | ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); |
529 | ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = | 529 | ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = |
530 | nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]); | 530 | nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]); |
531 | ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = | 531 | ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = |
532 | nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]); | 532 | nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]); |
533 | spin_unlock_bh(&ct->lock); | 533 | spin_unlock_bh(&ct->lock); |
534 | 534 | ||
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
538 | static int sctp_nlattr_size(void) | 538 | static int sctp_nlattr_size(void) |
539 | { | 539 | { |
540 | return nla_total_size(0) /* CTA_PROTOINFO_SCTP */ | 540 | return nla_total_size(0) /* CTA_PROTOINFO_SCTP */ |
541 | + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1); | 541 | + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1); |
542 | } | 542 | } |
543 | #endif | 543 | #endif |
544 | 544 | ||
#ifdef CONFIG_SYSCTL
/* Reference count for shared registration of the table below. */
static unsigned int sctp_sysctl_table_users;
static struct ctl_table_header *sctp_sysctl_header;
/* Per-state timeout knobs (jiffies-converted), one per SCTP conntrack
 * state, exposed as net.netfilter.nf_conntrack_sctp_timeout_*. */
static struct ctl_table sctp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_sctp_timeout_closed",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_cookie_wait",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_cookie_echoed",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_established",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_shutdown_sent",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_shutdown_recd",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_sctp_timeout_shutdown_ack_sent",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
/* Legacy ip_conntrack_* names for the same timeout values, kept for
 * backward compatibility with pre-nf_conntrack userspace. */
static struct ctl_table sctp_compat_sysctl_table[] = {
	{
		.procname	= "ip_conntrack_sctp_timeout_closed",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_cookie_wait",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_cookie_echoed",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_established",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_shutdown_sent",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_shutdown_recd",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_sctp_timeout_shutdown_ack_sent",
		.data		= &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif
656 | 656 | ||
/* L4 protocol tracker for SCTP over IPv4. Shares all parsing/state
 * callbacks with the IPv6 variant below; only .l3proto differs. */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
	.l3proto		= PF_INET,
	.l4proto 		= IPPROTO_SCTP,
	.name 			= "sctp",
	.pkt_to_tuple 		= sctp_pkt_to_tuple,
	.invert_tuple 		= sctp_invert_tuple,
	.print_tuple 		= sctp_print_tuple,
	.print_conntrack	= sctp_print_conntrack,
	.packet 		= sctp_packet,
	.new 			= sctp_new,
	.me 			= THIS_MODULE,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	/* ctnetlink import/export of SCTP-specific conntrack state. */
	.to_nlattr		= sctp_to_nlattr,
	.nlattr_size		= sctp_nlattr_size,
	.from_nlattr		= nlattr_to_sctp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
	/* Timeout knobs; table users/header are shared with the IPv6
	 * tracker so the sysctls are registered only once. */
	.ctl_table_users	= &sctp_sysctl_table_users,
	.ctl_table_header	= &sctp_sysctl_header,
	.ctl_table		= sctp_sysctl_table,
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
	/* Legacy ip_conntrack_* sysctl names (IPv4 only). */
	.ctl_compat_table	= sctp_compat_sysctl_table,
#endif
#endif
};
686 | 686 | ||
/* L4 protocol tracker for SCTP over IPv6. Identical to the IPv4
 * variant except for .l3proto and the absence of the legacy
 * ip_conntrack compat sysctl table. */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
	.l3proto		= PF_INET6,
	.l4proto 		= IPPROTO_SCTP,
	.name 			= "sctp",
	.pkt_to_tuple 		= sctp_pkt_to_tuple,
	.invert_tuple 		= sctp_invert_tuple,
	.print_tuple 		= sctp_print_tuple,
	.print_conntrack	= sctp_print_conntrack,
	.packet 		= sctp_packet,
	.new 			= sctp_new,
	.me 			= THIS_MODULE,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.to_nlattr		= sctp_to_nlattr,
	.nlattr_size		= sctp_nlattr_size,
	.from_nlattr		= nlattr_to_sctp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
	/* Same shared sysctl table as the IPv4 tracker. */
	.ctl_table_users	= &sctp_sysctl_table_users,
	.ctl_table_header	= &sctp_sysctl_header,
	.ctl_table		= sctp_sysctl_table,
#endif
};
713 | 713 | ||
714 | static int __init nf_conntrack_proto_sctp_init(void) | 714 | static int __init nf_conntrack_proto_sctp_init(void) |
715 | { | 715 | { |
716 | int ret; | 716 | int ret; |
717 | 717 | ||
718 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); | 718 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); |
719 | if (ret) { | 719 | if (ret) { |
720 | printk("nf_conntrack_l4proto_sctp4: protocol register failed\n"); | 720 | pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n"); |
721 | goto out; | 721 | goto out; |
722 | } | 722 | } |
723 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); | 723 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); |
724 | if (ret) { | 724 | if (ret) { |
725 | printk("nf_conntrack_l4proto_sctp6: protocol register failed\n"); | 725 | pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n"); |
726 | goto cleanup_sctp4; | 726 | goto cleanup_sctp4; |
727 | } | 727 | } |
728 | 728 | ||
729 | return ret; | 729 | return ret; |
730 | 730 | ||
731 | cleanup_sctp4: | 731 | cleanup_sctp4: |
732 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); | 732 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
733 | out: | 733 | out: |
734 | return ret; | 734 | return ret; |
735 | } | 735 | } |
736 | 736 | ||
/* Module exit: unregister both trackers in reverse registration order. */
static void __exit nf_conntrack_proto_sctp_fini(void)
{
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
}
742 | 742 | ||
module_init(nf_conntrack_proto_sctp_init);
module_exit(nf_conntrack_proto_sctp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kiran Kumar Immidi");
MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP");
/* Keep the old ip_conntrack module name working for userspace tooling. */
MODULE_ALIAS("ip_conntrack_proto_sctp");
750 | 750 |
net/netfilter/nf_conntrack_sip.c
1 | /* SIP extension for IP connection tracking. | 1 | /* SIP extension for IP connection tracking. |
2 | * | 2 | * |
3 | * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> | 3 | * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> |
4 | * based on RR's ip_conntrack_ftp.c and other modules. | 4 | * based on RR's ip_conntrack_ftp.c and other modules. |
5 | * (C) 2007 United Security Providers | 5 | * (C) 2007 United Security Providers |
6 | * (C) 2007, 2008 Patrick McHardy <kaber@trash.net> | 6 | * (C) 2007, 2008 Patrick McHardy <kaber@trash.net> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/ctype.h> | 14 | #include <linux/ctype.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/inet.h> | 16 | #include <linux/inet.h> |
17 | #include <linux/in.h> | 17 | #include <linux/in.h> |
18 | #include <linux/udp.h> | 18 | #include <linux/udp.h> |
19 | #include <linux/tcp.h> | 19 | #include <linux/tcp.h> |
20 | #include <linux/netfilter.h> | 20 | #include <linux/netfilter.h> |
21 | 21 | ||
22 | #include <net/netfilter/nf_conntrack.h> | 22 | #include <net/netfilter/nf_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack_core.h> | 23 | #include <net/netfilter/nf_conntrack_core.h> |
24 | #include <net/netfilter/nf_conntrack_expect.h> | 24 | #include <net/netfilter/nf_conntrack_expect.h> |
25 | #include <net/netfilter/nf_conntrack_helper.h> | 25 | #include <net/netfilter/nf_conntrack_helper.h> |
26 | #include <net/netfilter/nf_conntrack_zones.h> | 26 | #include <net/netfilter/nf_conntrack_zones.h> |
27 | #include <linux/netfilter/nf_conntrack_sip.h> | 27 | #include <linux/netfilter/nf_conntrack_sip.h> |
28 | 28 | ||
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
MODULE_DESCRIPTION("SIP connection tracking helper");
MODULE_ALIAS("ip_conntrack_sip");
MODULE_ALIAS_NFCT_HELPER("sip");

/* Up to MAX_PORTS SIP server ports may be passed as module parameters;
 * module_param_array() stores the count supplied in ports_c. */
#define MAX_PORTS	8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "port numbers of SIP servers");

/* Timeout (jiffies-scale value, default SIP_TIMEOUT) applied to the
 * master SIP signalling connection. */
static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT;
module_param(sip_timeout, uint, 0600);
MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");

static int sip_direct_signalling __read_mostly = 1;
module_param(sip_direct_signalling, int, 0600);
MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
					"only (default 1)");

static int sip_direct_media __read_mostly = 1;
module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
				   "endpoints only (default 1)");
54 | 54 | ||
/* NAT mangling hooks. These pointers are defined NULL here and exported;
 * presumably the SIP NAT module assigns them when loaded (not visible in
 * this file -- confirm in nf_nat_sip). The conntrack helper calls through
 * them so payload rewriting only happens when NAT is in use. */
unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
				const char **dptr,
				unsigned int *datalen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_hook);

void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);

unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
				       unsigned int dataoff,
				       const char **dptr,
				       unsigned int *datalen,
				       struct nf_conntrack_expect *exp,
				       unsigned int matchoff,
				       unsigned int matchlen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);

unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr,
				     unsigned int *datalen,
				     unsigned int sdpoff,
				     enum sdp_header_types type,
				     enum sdp_header_types term,
				     const union nf_inet_addr *addr)
				     __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);

unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr,
				     unsigned int *datalen,
				     unsigned int matchoff,
				     unsigned int matchlen,
				     u_int16_t port) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);

unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
					unsigned int dataoff,
					const char **dptr,
					unsigned int *datalen,
					unsigned int sdpoff,
					const union nf_inet_addr *addr)
					__read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);

unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
				      const char **dptr,
				      unsigned int *datalen,
				      struct nf_conntrack_expect *rtp_exp,
				      struct nf_conntrack_expect *rtcp_exp,
				      unsigned int mediaoff,
				      unsigned int medialen,
				      union nf_inet_addr *rtp_addr)
				      __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
109 | 109 | ||
/* Length of the run of alphabetic characters starting at dptr, bounded
 * by limit. ct and shift are unused; the signature matches the common
 * SIP header-field length-parser shape. */
static int string_len(const struct nf_conn *ct, const char *dptr,
		      const char *limit, int *shift)
{
	const char *p = dptr;

	while (p < limit && isalpha(*p))
		p++;

	return p - dptr;
}
121 | 121 | ||
/* Length of the run of decimal digits starting at dptr, bounded by
 * limit. ct and shift are unused; signature matches the common
 * SIP header-field length-parser shape. */
static int digits_len(const struct nf_conn *ct, const char *dptr,
		      const char *limit, int *shift)
{
	const char *p = dptr;

	while (p < limit && isdigit(*p))
		p++;

	return p - dptr;
}
132 | 132 | ||
/* Length of "media-type SP port" at dptr: an alphabetic media type,
 * exactly one space, then the digits of the port. Returns 0 if the
 * separating space is missing. */
static int media_len(const struct nf_conn *ct, const char *dptr,
		     const char *limit, int *shift)
{
	int typelen = string_len(ct, dptr, limit, shift);
	const char *p = dptr + typelen;

	/* A single space must separate the type from the port number. */
	if (p >= limit || *p != ' ')
		return 0;
	p++;

	return typelen + 1 + digits_len(ct, p, limit, shift);
}
147 | 147 | ||
/* Parse an IP address at cp, bounded by limit. The address family is
 * taken from the conntrack entry's L3 protocol (anything other than
 * IPv4/IPv6 is a kernel bug). On success *addr holds the address,
 * *endp (if non-NULL) points just past the parsed text, and 1 is
 * returned; 0 means nothing parseable was found. */
static int parse_addr(const struct nf_conn *ct, const char *cp,
		      const char **endp, union nf_inet_addr *addr,
		      const char *limit)
{
	const char *end;
	int ret = 0;

	memset(addr, 0, sizeof(*addr));
	switch (nf_ct_l3num(ct)) {
	case AF_INET:
		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
		break;
	case AF_INET6:
		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
		break;
	default:
		BUG();
	}

	/* Reject empty matches as well as outright parse failures. */
	if (ret == 0 || end == cp)
		return 0;
	if (endp)
		*endp = end;
	return 1;
}
173 | 173 | ||
174 | /* skip ip address. returns its length. */ | 174 | /* skip ip address. returns its length. */ |
175 | static int epaddr_len(const struct nf_conn *ct, const char *dptr, | 175 | static int epaddr_len(const struct nf_conn *ct, const char *dptr, |
176 | const char *limit, int *shift) | 176 | const char *limit, int *shift) |
177 | { | 177 | { |
178 | union nf_inet_addr addr; | 178 | union nf_inet_addr addr; |
179 | const char *aux = dptr; | 179 | const char *aux = dptr; |
180 | 180 | ||
181 | if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { | 181 | if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { |
182 | pr_debug("ip: %s parse failed.!\n", dptr); | 182 | pr_debug("ip: %s parse failed.!\n", dptr); |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Port number */ | 186 | /* Port number */ |
187 | if (*dptr == ':') { | 187 | if (*dptr == ':') { |
188 | dptr++; | 188 | dptr++; |
189 | dptr += digits_len(ct, dptr, limit, shift); | 189 | dptr += digits_len(ct, dptr, limit, shift); |
190 | } | 190 | } |
191 | return dptr - aux; | 191 | return dptr - aux; |
192 | } | 192 | } |
193 | 193 | ||
/* Get the length of the endpoint address, skipping over any "user@"
 * info that precedes it in the URI. *shift is advanced by the number
 * of characters skipped so the caller can locate the address. */
static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr,
			  const char *limit, int *shift)
{
	const char *start = dptr;
	int s = *shift;		/* saved so we can roll back below */

	/* Search for @, but stop at the end of the line.
	 * We are inside a sip: URI, so we don't need to worry about
	 * continuation lines. */
	while (dptr < limit &&
	       *dptr != '@' && *dptr != '\r' && *dptr != '\n') {
		(*shift)++;
		dptr++;
	}

	if (dptr < limit && *dptr == '@') {
		/* User info present: step past the '@' separator. */
		dptr++;
		(*shift)++;
	} else {
		/* No user info: rewind position and shift and parse the
		 * address from the original start. */
		dptr = start;
		*shift = s;
	}

	return epaddr_len(ct, dptr, limit, shift);
}
220 | 220 | ||
/* Parse a SIP request line of the form:
 *
 * Request-Line = Method SP Request-URI SP SIP-Version CRLF
 *
 * and return the offset and length of the address contained in the Request-URI.
 *
 * Returns 1 on success, filling *matchoff/*matchlen with the position
 * of the address text and *addr/*port with the parsed values; 0 when
 * no usable address is present; -1 on a malformed line.
 */
int ct_sip_parse_request(const struct nf_conn *ct,
			 const char *dptr, unsigned int datalen,
			 unsigned int *matchoff, unsigned int *matchlen,
			 union nf_inet_addr *addr, __be16 *port)
{
	const char *start = dptr, *limit = dptr + datalen, *end;
	unsigned int mlen;
	unsigned int p;
	int shift = 0;

	/* Skip method and following whitespace */
	mlen = string_len(ct, dptr, limit, NULL);
	if (!mlen)
		return 0;
	dptr += mlen;
	if (++dptr >= limit)
		return 0;

	/* Find SIP URI */
	for (; dptr < limit - strlen("sip:"); dptr++) {
		if (*dptr == '\r' || *dptr == '\n')
			return -1;
		if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
			dptr += strlen("sip:");
			break;
		}
	}
	/* Skip any "user@" info preceding the host part of the URI. */
	if (!skp_epaddr_len(ct, dptr, limit, &shift))
		return 0;
	dptr += shift;

	if (!parse_addr(ct, dptr, &end, addr, limit))
		return -1;
	if (end < limit && *end == ':') {
		/* Explicit port: only 1024..65535 is accepted. */
		end++;
		p = simple_strtoul(end, (char **)&end, 10);
		if (p < 1024 || p > 65535)
			return -1;
		*port = htons(p);
	} else
		*port = htons(SIP_PORT);

	if (end == dptr)
		return 0;
	*matchoff = dptr - start;
	*matchlen = end - dptr;
	return 1;
}
EXPORT_SYMBOL_GPL(ct_sip_parse_request);
276 | 276 | ||
/* SIP header parsing: SIP headers are located at the beginning of a line, but
 * may span several lines, in which case the continuation lines begin with a
 * whitespace character. RFC 2543 allows lines to be terminated with CR, LF or
 * CRLF, RFC 3261 allows only CRLF, we support both.
 *
 * Headers are followed by (optionally) whitespace, a colon, again (optionally)
 * whitespace and the values. Whitespace in this context means any amount of
 * tabs, spaces and continuation lines, which are treated as a single whitespace
 * character.
 *
 * Some headers may appear multiple times. A comma separated list of values is
 * equivalent to multiple headers.
 */
/* Known headers, indexed by the SIP_HDR_* constants. Each entry carries
 * the full name, an optional compact (single-letter) name, an optional
 * search prefix and the value-length parser to use. */
static const struct sip_header ct_sip_hdrs[] = {
	[SIP_HDR_CSEQ]			= SIP_HDR("CSeq", NULL, NULL, digits_len),
	[SIP_HDR_FROM]			= SIP_HDR("From", "f", "sip:", skp_epaddr_len),
	[SIP_HDR_TO]			= SIP_HDR("To", "t", "sip:", skp_epaddr_len),
	[SIP_HDR_CONTACT]		= SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
	[SIP_HDR_VIA_UDP]		= SIP_HDR("Via", "v", "UDP ", epaddr_len),
	[SIP_HDR_VIA_TCP]		= SIP_HDR("Via", "v", "TCP ", epaddr_len),
	[SIP_HDR_EXPIRES]		= SIP_HDR("Expires", NULL, NULL, digits_len),
	[SIP_HDR_CONTENT_LENGTH]	= SIP_HDR("Content-Length", "l", NULL, digits_len),
};
300 | 300 | ||
/* Step over a line break (CR, LF or CRLF) and, if the following line is
 * a continuation line (begins with SP or HT), skip its leading
 * whitespace. Returns a pointer to the first character after the
 * skipped whitespace (possibly == limit), or NULL when the buffer ends
 * or the next line is not a continuation. */
static const char *sip_follow_continuation(const char *dptr, const char *limit)
{
	/* Walk past newline */
	if (++dptr >= limit)
		return NULL;

	/* Skip '\n' in CR LF */
	if (*(dptr - 1) == '\r' && *dptr == '\n') {
		if (++dptr >= limit)
			return NULL;
	}

	/* Continuation line? */
	if (*dptr != ' ' && *dptr != '\t')
		return NULL;

	/* skip leading whitespace */
	for (; dptr < limit; dptr++) {
		if (*dptr != ' ' && *dptr != '\t')
			break;
	}
	return dptr;
}
324 | 324 | ||
/* Skip spaces and line breaks that introduce continuation lines;
 * returns NULL when a line break is not followed by a continuation
 * line.
 * NOTE(review): after sip_follow_continuation() returns, the for
 * loop's dptr++ also runs, advancing past the first non-blank
 * character of the continuation line -- looks like it could skip one
 * character; verify against callers before changing. */
static const char *sip_skip_whitespace(const char *dptr, const char *limit)
{
	for (; dptr < limit; dptr++) {
		if (*dptr == ' ')
			continue;
		if (*dptr != '\r' && *dptr != '\n')
			break;
		dptr = sip_follow_continuation(dptr, limit);
		if (dptr == NULL)
			return NULL;
	}
	return dptr;
}
338 | 338 | ||
/* Search within a SIP header value, dealing with continuation lines */
/* Case-insensitive search for needle (len bytes) between dptr and
 * limit, following continuation lines across line breaks. Returns a
 * pointer to the match or NULL. */
static const char *ct_sip_header_search(const char *dptr, const char *limit,
					const char *needle, unsigned int len)
{
	/* Pull limit in so a full-length match always fits. */
	for (limit -= len; dptr < limit; dptr++) {
		if (*dptr == '\r' || *dptr == '\n') {
			dptr = sip_follow_continuation(dptr, limit);
			if (dptr == NULL)
				break;
			continue;
		}

		if (strnicmp(dptr, needle, len) == 0)
			return dptr;
	}
	return NULL;
}
356 | 356 | ||
/* Locate a SIP header of the given type and return the offset and length
 * of its value relative to the start of the message.  Both the full header
 * name and the compact single-letter form are recognized.  Returns 1 and
 * sets *matchoff/*matchlen on success, 0 when no header of this type is
 * present, and -1 when a header was found but its value could not be
 * parsed.
 */
int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
		      unsigned int dataoff, unsigned int datalen,
		      enum sip_header_types type,
		      unsigned int *matchoff, unsigned int *matchlen)
{
	const struct sip_header *hdr = &ct_sip_hdrs[type];
	const char *start = dptr, *limit = dptr + datalen;
	int shift = 0;

	for (dptr += dataoff; dptr < limit; dptr++) {
		/* Find beginning of line */
		if (*dptr != '\r' && *dptr != '\n')
			continue;
		if (++dptr >= limit)
			break;
		/* Treat a CRLF pair as a single line terminator */
		if (*(dptr - 1) == '\r' && *dptr == '\n') {
			if (++dptr >= limit)
				break;
		}

		/* Skip continuation lines */
		if (*dptr == ' ' || *dptr == '\t')
			continue;

		/* Find header. Compact headers must be followed by a
		 * non-alphabetic character to avoid mismatches. */
		if (limit - dptr >= hdr->len &&
		    strnicmp(dptr, hdr->name, hdr->len) == 0)
			dptr += hdr->len;
		else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
			 strnicmp(dptr, hdr->cname, hdr->clen) == 0 &&
			 !isalpha(*(dptr + hdr->clen)))
			dptr += hdr->clen;
		else
			continue;

		/* Find and skip colon */
		dptr = sip_skip_whitespace(dptr, limit);
		if (dptr == NULL)
			break;
		if (*dptr != ':' || ++dptr >= limit)
			break;

		/* Skip whitespace after colon */
		dptr = sip_skip_whitespace(dptr, limit);
		if (dptr == NULL)
			break;

		*matchoff = dptr - start;
		if (hdr->search) {
			/* A required substring must additionally appear in
			 * the header value; fail hard if it is missing. */
			dptr = ct_sip_header_search(dptr, limit, hdr->search,
						    hdr->slen);
			if (!dptr)
				return -1;
			dptr += hdr->slen;
		}

		/* match_len may adjust the match start through 'shift' */
		*matchlen = hdr->match_len(ct, dptr, limit, &shift);
		if (!*matchlen)
			return -1;
		*matchoff = dptr - start + shift;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ct_sip_get_header);
423 | 423 | ||
/* Get next header field in a list of comma separated values.  Returns 1
 * and sets *matchoff/*matchlen when another value follows, 0 when the
 * list is exhausted, and -1 when a following value exists but cannot be
 * parsed.
 */
static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
			      unsigned int dataoff, unsigned int datalen,
			      enum sip_header_types type,
			      unsigned int *matchoff, unsigned int *matchlen)
{
	const struct sip_header *hdr = &ct_sip_hdrs[type];
	const char *start = dptr, *limit = dptr + datalen;
	int shift = 0;

	dptr += dataoff;

	/* The next value starts after the separating comma */
	dptr = ct_sip_header_search(dptr, limit, ",", strlen(","));
	if (!dptr)
		return 0;

	dptr = ct_sip_header_search(dptr, limit, hdr->search, hdr->slen);
	if (!dptr)
		return 0;
	dptr += hdr->slen;

	*matchoff = dptr - start;
	*matchlen = hdr->match_len(ct, dptr, limit, &shift);
	if (!*matchlen)
		return -1;
	*matchoff += shift;
	return 1;
}
452 | 452 | ||
/* Walk through headers until a parsable one is found or no header of the
 * given type is left.  When *in_header is set, first continue scanning
 * the comma-separated values of the current header.  Returns 1 on
 * success, 0 when no further header is found.
 */
static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
			       unsigned int dataoff, unsigned int datalen,
			       enum sip_header_types type, int *in_header,
			       unsigned int *matchoff, unsigned int *matchlen)
{
	int ret;

	if (in_header && *in_header) {
		/* Resume within the current header's value list, skipping
		 * entries that fail to parse (ret < 0). */
		while (1) {
			ret = ct_sip_next_header(ct, dptr, dataoff, datalen,
						 type, matchoff, matchlen);
			if (ret > 0)
				return ret;
			if (ret == 0)
				break;
			dataoff += *matchoff;
		}
		*in_header = 0;
	}

	/* Scan for the next header of this type, again skipping
	 * unparsable entries. */
	while (1) {
		ret = ct_sip_get_header(ct, dptr, dataoff, datalen,
					type, matchoff, matchlen);
		if (ret > 0)
			break;
		if (ret == 0)
			return ret;
		dataoff += *matchoff;
	}

	/* Remember that we are inside a header's value list now */
	if (in_header)
		*in_header = 1;
	return 1;
}
489 | 489 | ||
490 | /* Locate a SIP header, parse the URI and return the offset and length of | 490 | /* Locate a SIP header, parse the URI and return the offset and length of |
491 | * the address as well as the address and port themselves. A stream of | 491 | * the address as well as the address and port themselves. A stream of |
492 | * headers can be parsed by handing in a non-NULL datalen and in_header | 492 | * headers can be parsed by handing in a non-NULL datalen and in_header |
493 | * pointer. | 493 | * pointer. |
494 | */ | 494 | */ |
495 | int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, | 495 | int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, |
496 | unsigned int *dataoff, unsigned int datalen, | 496 | unsigned int *dataoff, unsigned int datalen, |
497 | enum sip_header_types type, int *in_header, | 497 | enum sip_header_types type, int *in_header, |
498 | unsigned int *matchoff, unsigned int *matchlen, | 498 | unsigned int *matchoff, unsigned int *matchlen, |
499 | union nf_inet_addr *addr, __be16 *port) | 499 | union nf_inet_addr *addr, __be16 *port) |
500 | { | 500 | { |
501 | const char *c, *limit = dptr + datalen; | 501 | const char *c, *limit = dptr + datalen; |
502 | unsigned int p; | 502 | unsigned int p; |
503 | int ret; | 503 | int ret; |
504 | 504 | ||
505 | ret = ct_sip_walk_headers(ct, dptr, dataoff ? *dataoff : 0, datalen, | 505 | ret = ct_sip_walk_headers(ct, dptr, dataoff ? *dataoff : 0, datalen, |
506 | type, in_header, matchoff, matchlen); | 506 | type, in_header, matchoff, matchlen); |
507 | WARN_ON(ret < 0); | 507 | WARN_ON(ret < 0); |
508 | if (ret == 0) | 508 | if (ret == 0) |
509 | return ret; | 509 | return ret; |
510 | 510 | ||
511 | if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) | 511 | if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) |
512 | return -1; | 512 | return -1; |
513 | if (*c == ':') { | 513 | if (*c == ':') { |
514 | c++; | 514 | c++; |
515 | p = simple_strtoul(c, (char **)&c, 10); | 515 | p = simple_strtoul(c, (char **)&c, 10); |
516 | if (p < 1024 || p > 65535) | 516 | if (p < 1024 || p > 65535) |
517 | return -1; | 517 | return -1; |
518 | *port = htons(p); | 518 | *port = htons(p); |
519 | } else | 519 | } else |
520 | *port = htons(SIP_PORT); | 520 | *port = htons(SIP_PORT); |
521 | 521 | ||
522 | if (dataoff) | 522 | if (dataoff) |
523 | *dataoff = c - dptr; | 523 | *dataoff = c - dptr; |
524 | return 1; | 524 | return 1; |
525 | } | 525 | } |
526 | EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); | 526 | EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); |
527 | 527 | ||
528 | static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr, | 528 | static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr, |
529 | unsigned int dataoff, unsigned int datalen, | 529 | unsigned int dataoff, unsigned int datalen, |
530 | const char *name, | 530 | const char *name, |
531 | unsigned int *matchoff, unsigned int *matchlen) | 531 | unsigned int *matchoff, unsigned int *matchlen) |
532 | { | 532 | { |
533 | const char *limit = dptr + datalen; | 533 | const char *limit = dptr + datalen; |
534 | const char *start; | 534 | const char *start; |
535 | const char *end; | 535 | const char *end; |
536 | 536 | ||
537 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); | 537 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); |
538 | if (!limit) | 538 | if (!limit) |
539 | limit = dptr + datalen; | 539 | limit = dptr + datalen; |
540 | 540 | ||
541 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); | 541 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); |
542 | if (!start) | 542 | if (!start) |
543 | return 0; | 543 | return 0; |
544 | start += strlen(name); | 544 | start += strlen(name); |
545 | 545 | ||
546 | end = ct_sip_header_search(start, limit, ";", strlen(";")); | 546 | end = ct_sip_header_search(start, limit, ";", strlen(";")); |
547 | if (!end) | 547 | if (!end) |
548 | end = limit; | 548 | end = limit; |
549 | 549 | ||
550 | *matchoff = start - dptr; | 550 | *matchoff = start - dptr; |
551 | *matchlen = end - start; | 551 | *matchlen = end - start; |
552 | return 1; | 552 | return 1; |
553 | } | 553 | } |
554 | 554 | ||
555 | /* Parse address from header parameter and return address, offset and length */ | 555 | /* Parse address from header parameter and return address, offset and length */ |
556 | int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, | 556 | int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, |
557 | unsigned int dataoff, unsigned int datalen, | 557 | unsigned int dataoff, unsigned int datalen, |
558 | const char *name, | 558 | const char *name, |
559 | unsigned int *matchoff, unsigned int *matchlen, | 559 | unsigned int *matchoff, unsigned int *matchlen, |
560 | union nf_inet_addr *addr) | 560 | union nf_inet_addr *addr) |
561 | { | 561 | { |
562 | const char *limit = dptr + datalen; | 562 | const char *limit = dptr + datalen; |
563 | const char *start, *end; | 563 | const char *start, *end; |
564 | 564 | ||
565 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); | 565 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); |
566 | if (!limit) | 566 | if (!limit) |
567 | limit = dptr + datalen; | 567 | limit = dptr + datalen; |
568 | 568 | ||
569 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); | 569 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); |
570 | if (!start) | 570 | if (!start) |
571 | return 0; | 571 | return 0; |
572 | 572 | ||
573 | start += strlen(name); | 573 | start += strlen(name); |
574 | if (!parse_addr(ct, start, &end, addr, limit)) | 574 | if (!parse_addr(ct, start, &end, addr, limit)) |
575 | return 0; | 575 | return 0; |
576 | *matchoff = start - dptr; | 576 | *matchoff = start - dptr; |
577 | *matchlen = end - start; | 577 | *matchlen = end - start; |
578 | return 1; | 578 | return 1; |
579 | } | 579 | } |
580 | EXPORT_SYMBOL_GPL(ct_sip_parse_address_param); | 580 | EXPORT_SYMBOL_GPL(ct_sip_parse_address_param); |
581 | 581 | ||
582 | /* Parse numerical header parameter and return value, offset and length */ | 582 | /* Parse numerical header parameter and return value, offset and length */ |
583 | int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, | 583 | int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, |
584 | unsigned int dataoff, unsigned int datalen, | 584 | unsigned int dataoff, unsigned int datalen, |
585 | const char *name, | 585 | const char *name, |
586 | unsigned int *matchoff, unsigned int *matchlen, | 586 | unsigned int *matchoff, unsigned int *matchlen, |
587 | unsigned int *val) | 587 | unsigned int *val) |
588 | { | 588 | { |
589 | const char *limit = dptr + datalen; | 589 | const char *limit = dptr + datalen; |
590 | const char *start; | 590 | const char *start; |
591 | char *end; | 591 | char *end; |
592 | 592 | ||
593 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); | 593 | limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); |
594 | if (!limit) | 594 | if (!limit) |
595 | limit = dptr + datalen; | 595 | limit = dptr + datalen; |
596 | 596 | ||
597 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); | 597 | start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); |
598 | if (!start) | 598 | if (!start) |
599 | return 0; | 599 | return 0; |
600 | 600 | ||
601 | start += strlen(name); | 601 | start += strlen(name); |
602 | *val = simple_strtoul(start, &end, 0); | 602 | *val = simple_strtoul(start, &end, 0); |
603 | if (start == end) | 603 | if (start == end) |
604 | return 0; | 604 | return 0; |
605 | if (matchoff && matchlen) { | 605 | if (matchoff && matchlen) { |
606 | *matchoff = start - dptr; | 606 | *matchoff = start - dptr; |
607 | *matchlen = end - start; | 607 | *matchlen = end - start; |
608 | } | 608 | } |
609 | return 1; | 609 | return 1; |
610 | } | 610 | } |
611 | EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); | 611 | EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); |
612 | 612 | ||
613 | static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, | 613 | static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, |
614 | unsigned int dataoff, unsigned int datalen, | 614 | unsigned int dataoff, unsigned int datalen, |
615 | u8 *proto) | 615 | u8 *proto) |
616 | { | 616 | { |
617 | unsigned int matchoff, matchlen; | 617 | unsigned int matchoff, matchlen; |
618 | 618 | ||
619 | if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=", | 619 | if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=", |
620 | &matchoff, &matchlen)) { | 620 | &matchoff, &matchlen)) { |
621 | if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP"))) | 621 | if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP"))) |
622 | *proto = IPPROTO_TCP; | 622 | *proto = IPPROTO_TCP; |
623 | else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP"))) | 623 | else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP"))) |
624 | *proto = IPPROTO_UDP; | 624 | *proto = IPPROTO_UDP; |
625 | else | 625 | else |
626 | return 0; | 626 | return 0; |
627 | 627 | ||
628 | if (*proto != nf_ct_protonum(ct)) | 628 | if (*proto != nf_ct_protonum(ct)) |
629 | return 0; | 629 | return 0; |
630 | } else | 630 | } else |
631 | *proto = nf_ct_protonum(ct); | 631 | *proto = nf_ct_protonum(ct); |
632 | 632 | ||
633 | return 1; | 633 | return 1; |
634 | } | 634 | } |
635 | 635 | ||
/* SDP header parsing: a SDP session description contains an ordered set of
 * headers, starting with a section containing general session parameters,
 * optionally followed by multiple media descriptions.
 *
 * SDP headers always start at the beginning of a line. According to RFC 2327:
 * "The sequence CRLF (0x0d0a) is used to end a record, although parsers should
 * be tolerant and also accept records terminated with a single newline
 * character". We handle both cases.
 */
/* Recognized SDP headers; the second argument is a substring that must
 * additionally appear within the header value, the third the length
 * callback for the matched data. */
static const struct sip_header ct_sdp_hdrs[] = {
	[SDP_HDR_VERSION]		= SDP_HDR("v=", NULL, digits_len),
	[SDP_HDR_OWNER_IP4]		= SDP_HDR("o=", "IN IP4 ", epaddr_len),
	[SDP_HDR_CONNECTION_IP4]	= SDP_HDR("c=", "IN IP4 ", epaddr_len),
	[SDP_HDR_OWNER_IP6]		= SDP_HDR("o=", "IN IP6 ", epaddr_len),
	[SDP_HDR_CONNECTION_IP6]	= SDP_HDR("c=", "IN IP6 ", epaddr_len),
	[SDP_HDR_MEDIA]			= SDP_HDR("m=", NULL, media_len),
};
653 | 653 | ||
/* Case-sensitive linear search for a needle within a single SDP header
 * value.  The search stops at the end of the line; SDP values have no
 * continuation lines.  Returns a pointer to the match or NULL.
 * NOTE(review): a needle ending exactly at limit is never matched
 * (dptr < limit after limit -= len) — same as the SIP variant; confirm
 * this is intended.
 */
static const char *ct_sdp_header_search(const char *dptr, const char *limit,
					const char *needle, unsigned int len)
{
	const char *end = limit - len;
	const char *p;

	for (p = dptr; p < end; p++) {
		if (*p == '\r' || *p == '\n')
			return NULL;
		if (strncmp(p, needle, len) == 0)
			return p;
	}
	return NULL;
}
666 | 666 | ||
/* Locate a SDP header (optionally a substring within the header value),
 * optionally stopping at the first occurence of the term header, parse
 * it and return the offset and length of the data we're interested in.
 * Returns 1 on a match, 0 when no such header occurs before the
 * terminator, and -1 when a header was found but could not be parsed.
 */
int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
			  unsigned int dataoff, unsigned int datalen,
			  enum sdp_header_types type,
			  enum sdp_header_types term,
			  unsigned int *matchoff, unsigned int *matchlen)
{
	const struct sip_header *hdr = &ct_sdp_hdrs[type];
	const struct sip_header *thdr = &ct_sdp_hdrs[term];
	const char *start = dptr, *limit = dptr + datalen;
	int shift = 0;

	for (dptr += dataoff; dptr < limit; dptr++) {
		/* Find beginning of line */
		if (*dptr != '\r' && *dptr != '\n')
			continue;
		if (++dptr >= limit)
			break;
		/* Accept both CRLF and a bare newline as terminator */
		if (*(dptr - 1) == '\r' && *dptr == '\n') {
			if (++dptr >= limit)
				break;
		}

		/* Stop scanning once the terminating header type starts */
		if (term != SDP_HDR_UNSPEC &&
		    limit - dptr >= thdr->len &&
		    strnicmp(dptr, thdr->name, thdr->len) == 0)
			break;
		else if (limit - dptr >= hdr->len &&
			 strnicmp(dptr, hdr->name, hdr->len) == 0)
			dptr += hdr->len;
		else
			continue;

		*matchoff = dptr - start;
		if (hdr->search) {
			/* A required substring (e.g. "IN IP4 ") must follow
			 * within the header value. */
			dptr = ct_sdp_header_search(dptr, limit, hdr->search,
						    hdr->slen);
			if (!dptr)
				return -1;
			dptr += hdr->slen;
		}

		/* match_len may adjust the match start through 'shift' */
		*matchlen = hdr->match_len(ct, dptr, limit, &shift);
		if (!*matchlen)
			return -1;
		*matchoff = dptr - start + shift;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ct_sip_get_sdp_header);
721 | 721 | ||
722 | static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, | 722 | static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, |
723 | unsigned int dataoff, unsigned int datalen, | 723 | unsigned int dataoff, unsigned int datalen, |
724 | enum sdp_header_types type, | 724 | enum sdp_header_types type, |
725 | enum sdp_header_types term, | 725 | enum sdp_header_types term, |
726 | unsigned int *matchoff, unsigned int *matchlen, | 726 | unsigned int *matchoff, unsigned int *matchlen, |
727 | union nf_inet_addr *addr) | 727 | union nf_inet_addr *addr) |
728 | { | 728 | { |
729 | int ret; | 729 | int ret; |
730 | 730 | ||
731 | ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term, | 731 | ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term, |
732 | matchoff, matchlen); | 732 | matchoff, matchlen); |
733 | if (ret <= 0) | 733 | if (ret <= 0) |
734 | return ret; | 734 | return ret; |
735 | 735 | ||
736 | if (!parse_addr(ct, dptr + *matchoff, NULL, addr, | 736 | if (!parse_addr(ct, dptr + *matchoff, NULL, addr, |
737 | dptr + *matchoff + *matchlen)) | 737 | dptr + *matchoff + *matchlen)) |
738 | return -1; | 738 | return -1; |
739 | return 1; | 739 | return 1; |
740 | } | 740 | } |
741 | 741 | ||
/* Rearm the timeout of the SIP signalling expectation matching the given
 * address, protocol and port.  Returns 1 if a matching expectation was
 * found and refreshed, 0 otherwise.
 */
static int refresh_signalling_expectation(struct nf_conn *ct,
					  union nf_inet_addr *addr,
					  u8 proto, __be16 port,
					  unsigned int expires)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;
	int found = 0;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (exp->class != SIP_EXPECT_SIGNALLING ||
		    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
		    exp->tuple.dst.protonum != proto ||
		    exp->tuple.dst.u.udp.port != port)
			continue;
		/* Skip expectations whose timeout timer could not be
		 * deactivated. */
		if (!del_timer(&exp->timeout))
			continue;
		/* Reactivate and push the timeout 'expires' seconds out */
		exp->flags &= ~NF_CT_EXPECT_INACTIVE;
		exp->timeout.expires = jiffies + expires * HZ;
		add_timer(&exp->timeout);
		found = 1;
		break;
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return found;
}
770 | 770 | ||
/* Remove expectations registered on this connection: all media
 * expectations when media is true, or the signalling expectation when
 * media is false.
 */
static void flush_expectations(struct nf_conn *ct, bool media)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		/* The XOR selects the class to flush: signalling entries
		 * are skipped when flushing media and vice versa. */
		if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
			continue;
		/* Skip expectations whose timeout timer could not be
		 * deactivated. */
		if (!del_timer(&exp->timeout))
			continue;
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
		/* Only the first signalling expectation is removed */
		if (!media)
			break;
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
790 | 790 | ||
791 | static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff, | 791 | static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff, |
792 | const char **dptr, unsigned int *datalen, | 792 | const char **dptr, unsigned int *datalen, |
793 | union nf_inet_addr *daddr, __be16 port, | 793 | union nf_inet_addr *daddr, __be16 port, |
794 | enum sip_expectation_classes class, | 794 | enum sip_expectation_classes class, |
795 | unsigned int mediaoff, unsigned int medialen) | 795 | unsigned int mediaoff, unsigned int medialen) |
796 | { | 796 | { |
797 | struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp; | 797 | struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp; |
798 | enum ip_conntrack_info ctinfo; | 798 | enum ip_conntrack_info ctinfo; |
799 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 799 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
800 | struct net *net = nf_ct_net(ct); | 800 | struct net *net = nf_ct_net(ct); |
801 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 801 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
802 | union nf_inet_addr *saddr; | 802 | union nf_inet_addr *saddr; |
803 | struct nf_conntrack_tuple tuple; | 803 | struct nf_conntrack_tuple tuple; |
804 | int direct_rtp = 0, skip_expect = 0, ret = NF_DROP; | 804 | int direct_rtp = 0, skip_expect = 0, ret = NF_DROP; |
805 | u_int16_t base_port; | 805 | u_int16_t base_port; |
806 | __be16 rtp_port, rtcp_port; | 806 | __be16 rtp_port, rtcp_port; |
807 | typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port; | 807 | typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port; |
808 | typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media; | 808 | typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media; |
809 | 809 | ||
810 | saddr = NULL; | 810 | saddr = NULL; |
811 | if (sip_direct_media) { | 811 | if (sip_direct_media) { |
812 | if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3)) | 812 | if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3)) |
813 | return NF_ACCEPT; | 813 | return NF_ACCEPT; |
814 | saddr = &ct->tuplehash[!dir].tuple.src.u3; | 814 | saddr = &ct->tuplehash[!dir].tuple.src.u3; |
815 | } | 815 | } |
816 | 816 | ||
817 | /* We need to check whether the registration exists before attempting | 817 | /* We need to check whether the registration exists before attempting |
818 | * to register it since we can see the same media description multiple | 818 | * to register it since we can see the same media description multiple |
819 | * times on different connections in case multiple endpoints receive | 819 | * times on different connections in case multiple endpoints receive |
820 | * the same call. | 820 | * the same call. |
821 | * | 821 | * |
822 | * RTP optimization: if we find a matching media channel expectation | 822 | * RTP optimization: if we find a matching media channel expectation |
823 | * and both the expectation and this connection are SNATed, we assume | 823 | * and both the expectation and this connection are SNATed, we assume |
824 | * both sides can reach each other directly and use the final | 824 | * both sides can reach each other directly and use the final |
825 | * destination address from the expectation. We still need to keep | 825 | * destination address from the expectation. We still need to keep |
826 | * the NATed expectations for media that might arrive from the | 826 | * the NATed expectations for media that might arrive from the |
827 | * outside, and additionally need to expect the direct RTP stream | 827 | * outside, and additionally need to expect the direct RTP stream |
828 | * in case it passes through us even without NAT. | 828 | * in case it passes through us even without NAT. |
829 | */ | 829 | */ |
830 | memset(&tuple, 0, sizeof(tuple)); | 830 | memset(&tuple, 0, sizeof(tuple)); |
831 | if (saddr) | 831 | if (saddr) |
832 | tuple.src.u3 = *saddr; | 832 | tuple.src.u3 = *saddr; |
833 | tuple.src.l3num = nf_ct_l3num(ct); | 833 | tuple.src.l3num = nf_ct_l3num(ct); |
834 | tuple.dst.protonum = IPPROTO_UDP; | 834 | tuple.dst.protonum = IPPROTO_UDP; |
835 | tuple.dst.u3 = *daddr; | 835 | tuple.dst.u3 = *daddr; |
836 | tuple.dst.u.udp.port = port; | 836 | tuple.dst.u.udp.port = port; |
837 | 837 | ||
838 | rcu_read_lock(); | 838 | rcu_read_lock(); |
839 | do { | 839 | do { |
840 | exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); | 840 | exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); |
841 | 841 | ||
842 | if (!exp || exp->master == ct || | 842 | if (!exp || exp->master == ct || |
843 | nfct_help(exp->master)->helper != nfct_help(ct)->helper || | 843 | nfct_help(exp->master)->helper != nfct_help(ct)->helper || |
844 | exp->class != class) | 844 | exp->class != class) |
845 | break; | 845 | break; |
846 | #ifdef CONFIG_NF_NAT_NEEDED | 846 | #ifdef CONFIG_NF_NAT_NEEDED |
847 | if (exp->tuple.src.l3num == AF_INET && !direct_rtp && | 847 | if (exp->tuple.src.l3num == AF_INET && !direct_rtp && |
848 | (exp->saved_ip != exp->tuple.dst.u3.ip || | 848 | (exp->saved_ip != exp->tuple.dst.u3.ip || |
849 | exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) && | 849 | exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) && |
850 | ct->status & IPS_NAT_MASK) { | 850 | ct->status & IPS_NAT_MASK) { |
851 | daddr->ip = exp->saved_ip; | 851 | daddr->ip = exp->saved_ip; |
852 | tuple.dst.u3.ip = exp->saved_ip; | 852 | tuple.dst.u3.ip = exp->saved_ip; |
853 | tuple.dst.u.udp.port = exp->saved_proto.udp.port; | 853 | tuple.dst.u.udp.port = exp->saved_proto.udp.port; |
854 | direct_rtp = 1; | 854 | direct_rtp = 1; |
855 | } else | 855 | } else |
856 | #endif | 856 | #endif |
857 | skip_expect = 1; | 857 | skip_expect = 1; |
858 | } while (!skip_expect); | 858 | } while (!skip_expect); |
859 | rcu_read_unlock(); | 859 | rcu_read_unlock(); |
860 | 860 | ||
861 | base_port = ntohs(tuple.dst.u.udp.port) & ~1; | 861 | base_port = ntohs(tuple.dst.u.udp.port) & ~1; |
862 | rtp_port = htons(base_port); | 862 | rtp_port = htons(base_port); |
863 | rtcp_port = htons(base_port + 1); | 863 | rtcp_port = htons(base_port + 1); |
864 | 864 | ||
865 | if (direct_rtp) { | 865 | if (direct_rtp) { |
866 | nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); | 866 | nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); |
867 | if (nf_nat_sdp_port && | 867 | if (nf_nat_sdp_port && |
868 | !nf_nat_sdp_port(skb, dataoff, dptr, datalen, | 868 | !nf_nat_sdp_port(skb, dataoff, dptr, datalen, |
869 | mediaoff, medialen, ntohs(rtp_port))) | 869 | mediaoff, medialen, ntohs(rtp_port))) |
870 | goto err1; | 870 | goto err1; |
871 | } | 871 | } |
872 | 872 | ||
873 | if (skip_expect) | 873 | if (skip_expect) |
874 | return NF_ACCEPT; | 874 | return NF_ACCEPT; |
875 | 875 | ||
876 | rtp_exp = nf_ct_expect_alloc(ct); | 876 | rtp_exp = nf_ct_expect_alloc(ct); |
877 | if (rtp_exp == NULL) | 877 | if (rtp_exp == NULL) |
878 | goto err1; | 878 | goto err1; |
879 | nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr, | 879 | nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr, |
880 | IPPROTO_UDP, NULL, &rtp_port); | 880 | IPPROTO_UDP, NULL, &rtp_port); |
881 | 881 | ||
882 | rtcp_exp = nf_ct_expect_alloc(ct); | 882 | rtcp_exp = nf_ct_expect_alloc(ct); |
883 | if (rtcp_exp == NULL) | 883 | if (rtcp_exp == NULL) |
884 | goto err2; | 884 | goto err2; |
885 | nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr, | 885 | nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr, |
886 | IPPROTO_UDP, NULL, &rtcp_port); | 886 | IPPROTO_UDP, NULL, &rtcp_port); |
887 | 887 | ||
888 | nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); | 888 | nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); |
889 | if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) | 889 | if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) |
890 | ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen, | 890 | ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen, |
891 | rtp_exp, rtcp_exp, | 891 | rtp_exp, rtcp_exp, |
892 | mediaoff, medialen, daddr); | 892 | mediaoff, medialen, daddr); |
893 | else { | 893 | else { |
894 | if (nf_ct_expect_related(rtp_exp) == 0) { | 894 | if (nf_ct_expect_related(rtp_exp) == 0) { |
895 | if (nf_ct_expect_related(rtcp_exp) != 0) | 895 | if (nf_ct_expect_related(rtcp_exp) != 0) |
896 | nf_ct_unexpect_related(rtp_exp); | 896 | nf_ct_unexpect_related(rtp_exp); |
897 | else | 897 | else |
898 | ret = NF_ACCEPT; | 898 | ret = NF_ACCEPT; |
899 | } | 899 | } |
900 | } | 900 | } |
901 | nf_ct_expect_put(rtcp_exp); | 901 | nf_ct_expect_put(rtcp_exp); |
902 | err2: | 902 | err2: |
903 | nf_ct_expect_put(rtp_exp); | 903 | nf_ct_expect_put(rtp_exp); |
904 | err1: | 904 | err1: |
905 | return ret; | 905 | return ret; |
906 | } | 906 | } |
907 | 907 | ||
/* SDP media types we track: each entry maps a media description type to
 * the expectation class used for its RTP/RTCP streams.  The trailing
 * space in every name lets sdp_media_type() match the type as a whole
 * token with a single strncmp(). */
static const struct sdp_media_type sdp_media_types[] = {
	SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
	SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
	SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
};
913 | 913 | ||
914 | static const struct sdp_media_type *sdp_media_type(const char *dptr, | 914 | static const struct sdp_media_type *sdp_media_type(const char *dptr, |
915 | unsigned int matchoff, | 915 | unsigned int matchoff, |
916 | unsigned int matchlen) | 916 | unsigned int matchlen) |
917 | { | 917 | { |
918 | const struct sdp_media_type *t; | 918 | const struct sdp_media_type *t; |
919 | unsigned int i; | 919 | unsigned int i; |
920 | 920 | ||
921 | for (i = 0; i < ARRAY_SIZE(sdp_media_types); i++) { | 921 | for (i = 0; i < ARRAY_SIZE(sdp_media_types); i++) { |
922 | t = &sdp_media_types[i]; | 922 | t = &sdp_media_types[i]; |
923 | if (matchlen < t->len || | 923 | if (matchlen < t->len || |
924 | strncmp(dptr + matchoff, t->name, t->len)) | 924 | strncmp(dptr + matchoff, t->name, t->len)) |
925 | continue; | 925 | continue; |
926 | return t; | 926 | return t; |
927 | } | 927 | } |
928 | return NULL; | 928 | return NULL; |
929 | } | 929 | } |
930 | 930 | ||
/* Parse the SDP body of a SIP message and set up conntrack expectations
 * for the announced media streams.  The session-level connection address
 * may be overridden per media description; each valid media entry yields
 * RTP/RTCP expectations, and the NAT hooks (when registered and the
 * connection is NATed) rewrite the media and session addresses. */
static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
		       const char **dptr, unsigned int *datalen,
		       unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	unsigned int matchoff, matchlen;
	unsigned int mediaoff, medialen;
	unsigned int sdpoff;
	unsigned int caddr_len, maddr_len;
	unsigned int i;
	union nf_inet_addr caddr, maddr, rtp_addr;
	unsigned int port;
	enum sdp_header_types c_hdr;
	const struct sdp_media_type *t;
	int ret = NF_ACCEPT;
	typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
	typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;

	nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
	/* Pick the connection header variant matching the conntrack's
	 * L3 protocol family. */
	c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
					     SDP_HDR_CONNECTION_IP6;

	/* Find beginning of session description */
	if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
				  SDP_HDR_VERSION, SDP_HDR_UNSPEC,
				  &matchoff, &matchlen) <= 0)
		return NF_ACCEPT;
	sdpoff = matchoff;

	/* The connection information is contained in the session description
	 * and/or once per media description. The first media description marks
	 * the end of the session description. */
	caddr_len = 0;
	if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
				  c_hdr, SDP_HDR_MEDIA,
				  &matchoff, &matchlen, &caddr) > 0)
		caddr_len = matchlen;

	mediaoff = sdpoff;
	for (i = 0; i < ARRAY_SIZE(sdp_media_types); ) {
		if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen,
					  SDP_HDR_MEDIA, SDP_HDR_UNSPEC,
					  &mediaoff, &medialen) <= 0)
			break;

		/* Get media type and port number. A media port value of zero
		 * indicates an inactive stream. */
		t = sdp_media_type(*dptr, mediaoff, medialen);
		if (!t) {
			/* Unknown media type: skip past it. */
			mediaoff += medialen;
			continue;
		}
		mediaoff += t->len;
		medialen -= t->len;

		port = simple_strtoul(*dptr + mediaoff, NULL, 10);
		if (port == 0)
			continue;
		/* Drop descriptions with ports outside the usable range. */
		if (port < 1024 || port > 65535)
			return NF_DROP;

		/* The media description overrides the session description. */
		maddr_len = 0;
		if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
					  c_hdr, SDP_HDR_MEDIA,
					  &matchoff, &matchlen, &maddr) > 0) {
			maddr_len = matchlen;
			memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
		} else if (caddr_len)
			memcpy(&rtp_addr, &caddr, sizeof(rtp_addr));
		else
			/* No connection address at all: malformed SDP. */
			return NF_DROP;

		ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
					    &rtp_addr, htons(port), t->class,
					    mediaoff, medialen);
		if (ret != NF_ACCEPT)
			return ret;

		/* Update media connection address if present */
		if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
			ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
					      mediaoff, c_hdr, SDP_HDR_MEDIA,
					      &rtp_addr);
			if (ret != NF_ACCEPT)
				return ret;
		}
		i++;
	}

	/* Update session connection and owner addresses */
	nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
	if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
		ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
					 &rtp_addr);

	return ret;
}
1030 | static int process_invite_response(struct sk_buff *skb, unsigned int dataoff, | 1030 | static int process_invite_response(struct sk_buff *skb, unsigned int dataoff, |
1031 | const char **dptr, unsigned int *datalen, | 1031 | const char **dptr, unsigned int *datalen, |
1032 | unsigned int cseq, unsigned int code) | 1032 | unsigned int cseq, unsigned int code) |
1033 | { | 1033 | { |
1034 | enum ip_conntrack_info ctinfo; | 1034 | enum ip_conntrack_info ctinfo; |
1035 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1035 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1036 | struct nf_conn_help *help = nfct_help(ct); | 1036 | struct nf_conn_help *help = nfct_help(ct); |
1037 | 1037 | ||
1038 | if ((code >= 100 && code <= 199) || | 1038 | if ((code >= 100 && code <= 199) || |
1039 | (code >= 200 && code <= 299)) | 1039 | (code >= 200 && code <= 299)) |
1040 | return process_sdp(skb, dataoff, dptr, datalen, cseq); | 1040 | return process_sdp(skb, dataoff, dptr, datalen, cseq); |
1041 | else if (help->help.ct_sip_info.invite_cseq == cseq) | 1041 | else if (help->help.ct_sip_info.invite_cseq == cseq) |
1042 | flush_expectations(ct, true); | 1042 | flush_expectations(ct, true); |
1043 | return NF_ACCEPT; | 1043 | return NF_ACCEPT; |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | static int process_update_response(struct sk_buff *skb, unsigned int dataoff, | 1046 | static int process_update_response(struct sk_buff *skb, unsigned int dataoff, |
1047 | const char **dptr, unsigned int *datalen, | 1047 | const char **dptr, unsigned int *datalen, |
1048 | unsigned int cseq, unsigned int code) | 1048 | unsigned int cseq, unsigned int code) |
1049 | { | 1049 | { |
1050 | enum ip_conntrack_info ctinfo; | 1050 | enum ip_conntrack_info ctinfo; |
1051 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1051 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1052 | struct nf_conn_help *help = nfct_help(ct); | 1052 | struct nf_conn_help *help = nfct_help(ct); |
1053 | 1053 | ||
1054 | if ((code >= 100 && code <= 199) || | 1054 | if ((code >= 100 && code <= 199) || |
1055 | (code >= 200 && code <= 299)) | 1055 | (code >= 200 && code <= 299)) |
1056 | return process_sdp(skb, dataoff, dptr, datalen, cseq); | 1056 | return process_sdp(skb, dataoff, dptr, datalen, cseq); |
1057 | else if (help->help.ct_sip_info.invite_cseq == cseq) | 1057 | else if (help->help.ct_sip_info.invite_cseq == cseq) |
1058 | flush_expectations(ct, true); | 1058 | flush_expectations(ct, true); |
1059 | return NF_ACCEPT; | 1059 | return NF_ACCEPT; |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | static int process_prack_response(struct sk_buff *skb, unsigned int dataoff, | 1062 | static int process_prack_response(struct sk_buff *skb, unsigned int dataoff, |
1063 | const char **dptr, unsigned int *datalen, | 1063 | const char **dptr, unsigned int *datalen, |
1064 | unsigned int cseq, unsigned int code) | 1064 | unsigned int cseq, unsigned int code) |
1065 | { | 1065 | { |
1066 | enum ip_conntrack_info ctinfo; | 1066 | enum ip_conntrack_info ctinfo; |
1067 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1067 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1068 | struct nf_conn_help *help = nfct_help(ct); | 1068 | struct nf_conn_help *help = nfct_help(ct); |
1069 | 1069 | ||
1070 | if ((code >= 100 && code <= 199) || | 1070 | if ((code >= 100 && code <= 199) || |
1071 | (code >= 200 && code <= 299)) | 1071 | (code >= 200 && code <= 299)) |
1072 | return process_sdp(skb, dataoff, dptr, datalen, cseq); | 1072 | return process_sdp(skb, dataoff, dptr, datalen, cseq); |
1073 | else if (help->help.ct_sip_info.invite_cseq == cseq) | 1073 | else if (help->help.ct_sip_info.invite_cseq == cseq) |
1074 | flush_expectations(ct, true); | 1074 | flush_expectations(ct, true); |
1075 | return NF_ACCEPT; | 1075 | return NF_ACCEPT; |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | static int process_invite_request(struct sk_buff *skb, unsigned int dataoff, | 1078 | static int process_invite_request(struct sk_buff *skb, unsigned int dataoff, |
1079 | const char **dptr, unsigned int *datalen, | 1079 | const char **dptr, unsigned int *datalen, |
1080 | unsigned int cseq) | 1080 | unsigned int cseq) |
1081 | { | 1081 | { |
1082 | enum ip_conntrack_info ctinfo; | 1082 | enum ip_conntrack_info ctinfo; |
1083 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1083 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1084 | struct nf_conn_help *help = nfct_help(ct); | 1084 | struct nf_conn_help *help = nfct_help(ct); |
1085 | unsigned int ret; | 1085 | unsigned int ret; |
1086 | 1086 | ||
1087 | flush_expectations(ct, true); | 1087 | flush_expectations(ct, true); |
1088 | ret = process_sdp(skb, dataoff, dptr, datalen, cseq); | 1088 | ret = process_sdp(skb, dataoff, dptr, datalen, cseq); |
1089 | if (ret == NF_ACCEPT) | 1089 | if (ret == NF_ACCEPT) |
1090 | help->help.ct_sip_info.invite_cseq = cseq; | 1090 | help->help.ct_sip_info.invite_cseq = cseq; |
1091 | return ret; | 1091 | return ret; |
1092 | } | 1092 | } |
1093 | 1093 | ||
1094 | static int process_bye_request(struct sk_buff *skb, unsigned int dataoff, | 1094 | static int process_bye_request(struct sk_buff *skb, unsigned int dataoff, |
1095 | const char **dptr, unsigned int *datalen, | 1095 | const char **dptr, unsigned int *datalen, |
1096 | unsigned int cseq) | 1096 | unsigned int cseq) |
1097 | { | 1097 | { |
1098 | enum ip_conntrack_info ctinfo; | 1098 | enum ip_conntrack_info ctinfo; |
1099 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1099 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1100 | 1100 | ||
1101 | flush_expectations(ct, true); | 1101 | flush_expectations(ct, true); |
1102 | return NF_ACCEPT; | 1102 | return NF_ACCEPT; |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | /* Parse a REGISTER request and create a permanent expectation for incoming | 1105 | /* Parse a REGISTER request and create a permanent expectation for incoming |
1106 | * signalling connections. The expectation is marked inactive and is activated | 1106 | * signalling connections. The expectation is marked inactive and is activated |
1107 | * when receiving a response indicating success from the registrar. | 1107 | * when receiving a response indicating success from the registrar. |
1108 | */ | 1108 | */ |
static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
				    const char **dptr, unsigned int *datalen,
				    unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_help *help = nfct_help(ct);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int matchoff, matchlen;
	struct nf_conntrack_expect *exp;
	union nf_inet_addr *saddr, daddr;
	__be16 port;
	u8 proto;
	unsigned int expires = 0;
	int ret;
	typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;

	/* Expected connections can not register again. */
	if (ct->status & IPS_EXPECTED)
		return NF_ACCEPT;

	/* We must check the expiration time: a value of zero signals the
	 * registrar to release the binding. We'll remove our expectation
	 * when receiving the new bindings in the response, but we don't
	 * want to create new ones.
	 *
	 * The expiration time may be contained in Expires: header, the
	 * Contact: header parameters or the URI parameters.
	 */
	if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
			      &matchoff, &matchlen) > 0)
		expires = simple_strtoul(*dptr + matchoff, NULL, 10);

	/* Extract the Contact address/port: ret < 0 is a parse error
	 * (drop the packet), ret == 0 means nothing usable was found. */
	ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
				      SIP_HDR_CONTACT, NULL,
				      &matchoff, &matchlen, &daddr, &port);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	/* We don't support third-party registrations */
	if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
		return NF_ACCEPT;

	if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
				   &proto) == 0)
		return NF_ACCEPT;

	/* A Contact "expires=" parameter may override the Expires: value
	 * collected above. */
	if (ct_sip_parse_numerical_param(ct, *dptr,
					 matchoff + matchlen, *datalen,
					 "expires=", NULL, NULL, &expires) < 0)
		return NF_DROP;

	if (expires == 0) {
		/* Unregistration: no new expectation, just record the CSeq
		 * so the response can be matched. */
		ret = NF_ACCEPT;
		goto store_cseq;
	}

	exp = nf_ct_expect_alloc(ct);
	if (!exp)
		return NF_DROP;

	saddr = NULL;
	if (sip_direct_signalling)
		saddr = &ct->tuplehash[!dir].tuple.src.u3;

	/* Permanent but inactive expectation: it is activated only when
	 * the registrar confirms the registration in its response. */
	nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
			  saddr, &daddr, proto, NULL, &port);
	exp->timeout.expires = sip_timeout * HZ;
	exp->helper = nfct_help(ct)->helper;
	exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;

	nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
	if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
		ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
					matchoff, matchlen);
	else {
		if (nf_ct_expect_related(exp) != 0)
			ret = NF_DROP;
		else
			ret = NF_ACCEPT;
	}
	nf_ct_expect_put(exp);

store_cseq:
	if (ret == NF_ACCEPT)
		help->help.ct_sip_info.register_cseq = cseq;
	return ret;
}
1199 | 1199 | ||
/* Handle a response to a REGISTER: on success, walk the Contact headers
 * to find the binding for this connection and refresh the signalling
 * expectation with the granted expiration time; on final failure, flush
 * the pending (inactive) expectation. */
static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr, unsigned int *datalen,
				     unsigned int cseq, unsigned int code)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_help *help = nfct_help(ct);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	union nf_inet_addr addr;
	__be16 port;
	u8 proto;
	unsigned int matchoff, matchlen, coff = 0;
	unsigned int expires = 0;
	int in_contact = 0, ret;

	/* According to RFC 3261, "UAs MUST NOT send a new registration until
	 * they have received a final response from the registrar for the
	 * previous one or the previous REGISTER request has timed out".
	 *
	 * However, some servers fail to detect retransmissions and send late
	 * responses, so we store the sequence number of the last valid
	 * request and compare it here.
	 */
	if (help->help.ct_sip_info.register_cseq != cseq)
		return NF_ACCEPT;

	/* 1xx: still provisional, leave the expectation as it is.
	 * Anything other than 2xx success flushes it. */
	if (code >= 100 && code <= 199)
		return NF_ACCEPT;
	if (code < 200 || code > 299)
		goto flush;

	if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES,
			      &matchoff, &matchlen) > 0)
		expires = simple_strtoul(*dptr + matchoff, NULL, 10);

	/* Iterate over all Contact headers in the response. */
	while (1) {
		unsigned int c_expires = expires;

		ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
					      SIP_HDR_CONTACT, &in_contact,
					      &matchoff, &matchlen,
					      &addr, &port);
		if (ret < 0)
			return NF_DROP;
		else if (ret == 0)
			break;

		/* We don't support third-party registrations */
		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
			continue;

		if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
					   *datalen, &proto) == 0)
			continue;

		/* Per-Contact "expires=" parameter may override the
		 * Expires: header value. */
		ret = ct_sip_parse_numerical_param(ct, *dptr,
						   matchoff + matchlen,
						   *datalen, "expires=",
						   NULL, NULL, &c_expires);
		if (ret < 0)
			return NF_DROP;
		if (c_expires == 0)
			break;
		if (refresh_signalling_expectation(ct, &addr, proto, port,
						   c_expires))
			return NF_ACCEPT;
	}

flush:
	flush_expectations(ct, false);
	return NF_ACCEPT;
}
1272 | 1272 | ||
/* Dispatch table mapping SIP methods to their request/response handlers.
 * A NULL handler means that direction needs no special processing. */
static const struct sip_handler sip_handlers[] = {
	SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
	SIP_HANDLER("UPDATE", process_sdp, process_update_response),
	SIP_HANDLER("ACK", process_sdp, NULL),
	SIP_HANDLER("PRACK", process_sdp, process_prack_response),
	SIP_HANDLER("BYE", process_bye_request, NULL),
	SIP_HANDLER("REGISTER", process_register_request, process_register_response),
};
1281 | 1281 | ||
/* Process a SIP response: extract the numeric status code from the status
 * line and the CSeq header, then dispatch to the response handler whose
 * method matches the method name following the CSeq number.
 * Returns an NF_* verdict (NF_DROP on malformed responses). */
static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
				const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	unsigned int matchoff, matchlen, matchend;
	unsigned int code, cseq, i;

	/* Too short to hold "SIP/2.0 <code>": pass through untouched */
	if (*datalen < strlen("SIP/2.0 200"))
		return NF_ACCEPT;
	code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10);
	if (!code)
		return NF_DROP;

	if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
			      &matchoff, &matchlen) <= 0)
		return NF_DROP;
	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
	if (!cseq)
		return NF_DROP;
	/* the method name follows the CSeq number, separated by one space */
	matchend = matchoff + matchlen + 1;

	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
		const struct sip_handler *handler;

		handler = &sip_handlers[i];
		if (handler->response == NULL)
			continue;
		if (*datalen < matchend + handler->len ||
		    strnicmp(*dptr + matchend, handler->method, handler->len))
			continue;
		return handler->response(skb, dataoff, dptr, datalen,
					 cseq, code);
	}
	/* method we do not track: accept unchanged */
	return NF_ACCEPT;
}
1318 | 1318 | ||
/* Process a SIP request: find the handler whose method name matches the
 * start of the request line, extract the CSeq number and dispatch.
 * Returns an NF_* verdict (NF_DROP when the CSeq header is missing or
 * unparseable). */
static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	unsigned int matchoff, matchlen;
	unsigned int cseq, i;

	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
		const struct sip_handler *handler;

		handler = &sip_handlers[i];
		if (handler->request == NULL)
			continue;
		if (*datalen < handler->len ||
		    strnicmp(*dptr, handler->method, handler->len))
			continue;

		if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
				      &matchoff, &matchlen) <= 0)
			return NF_DROP;
		cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
		if (!cseq)
			return NF_DROP;

		return handler->request(skb, dataoff, dptr, datalen, cseq);
	}
	/* method we do not track: accept unchanged */
	return NF_ACCEPT;
}
1348 | 1348 | ||
/* Process one SIP message (request or response) and, when the connection
 * is NATed, invoke the NAT SIP hook so it can mangle embedded addresses.
 * The hook pointer is read under RCU; a hook failure turns an NF_ACCEPT
 * verdict into NF_DROP. */
static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
			   unsigned int dataoff, const char **dptr,
			   unsigned int *datalen)
{
	typeof(nf_nat_sip_hook) nf_nat_sip;
	int ret;

	/* responses start with the "SIP/2.0 " version token */
	if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
		ret = process_sip_request(skb, dataoff, dptr, datalen);
	else
		ret = process_sip_response(skb, dataoff, dptr, datalen);

	if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
		nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
		if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
			ret = NF_DROP;
	}

	return ret;
}
1369 | 1369 | ||
1370 | static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | 1370 | static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, |
1371 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | 1371 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) |
1372 | { | 1372 | { |
1373 | struct tcphdr *th, _tcph; | 1373 | struct tcphdr *th, _tcph; |
1374 | unsigned int dataoff, datalen; | 1374 | unsigned int dataoff, datalen; |
1375 | unsigned int matchoff, matchlen, clen; | 1375 | unsigned int matchoff, matchlen, clen; |
1376 | unsigned int msglen, origlen; | 1376 | unsigned int msglen, origlen; |
1377 | const char *dptr, *end; | 1377 | const char *dptr, *end; |
1378 | s16 diff, tdiff = 0; | 1378 | s16 diff, tdiff = 0; |
1379 | int ret; | 1379 | int ret; |
1380 | typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; | 1380 | typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; |
1381 | 1381 | ||
1382 | if (ctinfo != IP_CT_ESTABLISHED && | 1382 | if (ctinfo != IP_CT_ESTABLISHED && |
1383 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) | 1383 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) |
1384 | return NF_ACCEPT; | 1384 | return NF_ACCEPT; |
1385 | 1385 | ||
1386 | /* No Data ? */ | 1386 | /* No Data ? */ |
1387 | th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); | 1387 | th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); |
1388 | if (th == NULL) | 1388 | if (th == NULL) |
1389 | return NF_ACCEPT; | 1389 | return NF_ACCEPT; |
1390 | dataoff = protoff + th->doff * 4; | 1390 | dataoff = protoff + th->doff * 4; |
1391 | if (dataoff >= skb->len) | 1391 | if (dataoff >= skb->len) |
1392 | return NF_ACCEPT; | 1392 | return NF_ACCEPT; |
1393 | 1393 | ||
1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1395 | 1395 | ||
1396 | if (skb_is_nonlinear(skb)) { | 1396 | if (skb_is_nonlinear(skb)) { |
1397 | pr_debug("Copy of skbuff not supported yet.\n"); | 1397 | pr_debug("Copy of skbuff not supported yet.\n"); |
1398 | return NF_ACCEPT; | 1398 | return NF_ACCEPT; |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | dptr = skb->data + dataoff; | 1401 | dptr = skb->data + dataoff; |
1402 | datalen = skb->len - dataoff; | 1402 | datalen = skb->len - dataoff; |
1403 | if (datalen < strlen("SIP/2.0 200")) | 1403 | if (datalen < strlen("SIP/2.0 200")) |
1404 | return NF_ACCEPT; | 1404 | return NF_ACCEPT; |
1405 | 1405 | ||
1406 | while (1) { | 1406 | while (1) { |
1407 | if (ct_sip_get_header(ct, dptr, 0, datalen, | 1407 | if (ct_sip_get_header(ct, dptr, 0, datalen, |
1408 | SIP_HDR_CONTENT_LENGTH, | 1408 | SIP_HDR_CONTENT_LENGTH, |
1409 | &matchoff, &matchlen) <= 0) | 1409 | &matchoff, &matchlen) <= 0) |
1410 | break; | 1410 | break; |
1411 | 1411 | ||
1412 | clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); | 1412 | clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); |
1413 | if (dptr + matchoff == end) | 1413 | if (dptr + matchoff == end) |
1414 | break; | 1414 | break; |
1415 | 1415 | ||
1416 | if (end + strlen("\r\n\r\n") > dptr + datalen) | 1416 | if (end + strlen("\r\n\r\n") > dptr + datalen) |
1417 | break; | 1417 | break; |
1418 | if (end[0] != '\r' || end[1] != '\n' || | 1418 | if (end[0] != '\r' || end[1] != '\n' || |
1419 | end[2] != '\r' || end[3] != '\n') | 1419 | end[2] != '\r' || end[3] != '\n') |
1420 | break; | 1420 | break; |
1421 | end += strlen("\r\n\r\n") + clen; | 1421 | end += strlen("\r\n\r\n") + clen; |
1422 | 1422 | ||
1423 | msglen = origlen = end - dptr; | 1423 | msglen = origlen = end - dptr; |
1424 | 1424 | ||
1425 | ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); | 1425 | ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); |
1426 | if (ret != NF_ACCEPT) | 1426 | if (ret != NF_ACCEPT) |
1427 | break; | 1427 | break; |
1428 | diff = msglen - origlen; | 1428 | diff = msglen - origlen; |
1429 | tdiff += diff; | 1429 | tdiff += diff; |
1430 | 1430 | ||
1431 | dataoff += msglen; | 1431 | dataoff += msglen; |
1432 | dptr += msglen; | 1432 | dptr += msglen; |
1433 | datalen = datalen + diff - msglen; | 1433 | datalen = datalen + diff - msglen; |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { | 1436 | if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { |
1437 | nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook); | 1437 | nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook); |
1438 | if (nf_nat_sip_seq_adjust) | 1438 | if (nf_nat_sip_seq_adjust) |
1439 | nf_nat_sip_seq_adjust(skb, tdiff); | 1439 | nf_nat_sip_seq_adjust(skb, tdiff); |
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | return ret; | 1442 | return ret; |
1443 | } | 1443 | } |
1444 | 1444 | ||
1445 | static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, | 1445 | static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, |
1446 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | 1446 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) |
1447 | { | 1447 | { |
1448 | unsigned int dataoff, datalen; | 1448 | unsigned int dataoff, datalen; |
1449 | const char *dptr; | 1449 | const char *dptr; |
1450 | 1450 | ||
1451 | /* No Data ? */ | 1451 | /* No Data ? */ |
1452 | dataoff = protoff + sizeof(struct udphdr); | 1452 | dataoff = protoff + sizeof(struct udphdr); |
1453 | if (dataoff >= skb->len) | 1453 | if (dataoff >= skb->len) |
1454 | return NF_ACCEPT; | 1454 | return NF_ACCEPT; |
1455 | 1455 | ||
1456 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1456 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1457 | 1457 | ||
1458 | if (skb_is_nonlinear(skb)) { | 1458 | if (skb_is_nonlinear(skb)) { |
1459 | pr_debug("Copy of skbuff not supported yet.\n"); | 1459 | pr_debug("Copy of skbuff not supported yet.\n"); |
1460 | return NF_ACCEPT; | 1460 | return NF_ACCEPT; |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | dptr = skb->data + dataoff; | 1463 | dptr = skb->data + dataoff; |
1464 | datalen = skb->len - dataoff; | 1464 | datalen = skb->len - dataoff; |
1465 | if (datalen < strlen("SIP/2.0 200")) | 1465 | if (datalen < strlen("SIP/2.0 200")) |
1466 | return NF_ACCEPT; | 1466 | return NF_ACCEPT; |
1467 | 1467 | ||
1468 | return process_sip_msg(skb, ct, dataoff, &dptr, &datalen); | 1468 | return process_sip_msg(skb, ct, dataoff, &dptr, &datalen); |
1469 | } | 1469 | } |
1470 | 1470 | ||
/* Helper instances: for each configured port, one helper per
 * (IPv4/IPv6) x (UDP/TCP) combination, plus storage for their names. */
static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
1473 | 1473 | ||
/* Per-class expectation limits and timeouts (in seconds) for the
 * expectations created by the SIP helper. */
static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
	[SIP_EXPECT_SIGNALLING] = {
		.name		= "signalling",
		.max_expected	= 1,
		.timeout	= 3 * 60,
	},
	[SIP_EXPECT_AUDIO] = {
		.name		= "audio",
		.max_expected	= 2 * IP_CT_DIR_MAX,
		.timeout	= 3 * 60,
	},
	[SIP_EXPECT_VIDEO] = {
		.name		= "video",
		.max_expected	= 2 * IP_CT_DIR_MAX,
		.timeout	= 3 * 60,
	},
	[SIP_EXPECT_IMAGE] = {
		.name		= "image",
		.max_expected	= IP_CT_DIR_MAX,
		.timeout	= 3 * 60,
	},
};
1496 | 1496 | ||
1497 | static void nf_conntrack_sip_fini(void) | 1497 | static void nf_conntrack_sip_fini(void) |
1498 | { | 1498 | { |
1499 | int i, j; | 1499 | int i, j; |
1500 | 1500 | ||
1501 | for (i = 0; i < ports_c; i++) { | 1501 | for (i = 0; i < ports_c; i++) { |
1502 | for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { | 1502 | for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { |
1503 | if (sip[i][j].me == NULL) | 1503 | if (sip[i][j].me == NULL) |
1504 | continue; | 1504 | continue; |
1505 | nf_conntrack_helper_unregister(&sip[i][j]); | 1505 | nf_conntrack_helper_unregister(&sip[i][j]); |
1506 | } | 1506 | } |
1507 | } | 1507 | } |
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | static int __init nf_conntrack_sip_init(void) | 1510 | static int __init nf_conntrack_sip_init(void) |
1511 | { | 1511 | { |
1512 | int i, j, ret; | 1512 | int i, j, ret; |
1513 | char *tmpname; | 1513 | char *tmpname; |
1514 | 1514 | ||
1515 | if (ports_c == 0) | 1515 | if (ports_c == 0) |
1516 | ports[ports_c++] = SIP_PORT; | 1516 | ports[ports_c++] = SIP_PORT; |
1517 | 1517 | ||
1518 | for (i = 0; i < ports_c; i++) { | 1518 | for (i = 0; i < ports_c; i++) { |
1519 | memset(&sip[i], 0, sizeof(sip[i])); | 1519 | memset(&sip[i], 0, sizeof(sip[i])); |
1520 | 1520 | ||
1521 | sip[i][0].tuple.src.l3num = AF_INET; | 1521 | sip[i][0].tuple.src.l3num = AF_INET; |
1522 | sip[i][0].tuple.dst.protonum = IPPROTO_UDP; | 1522 | sip[i][0].tuple.dst.protonum = IPPROTO_UDP; |
1523 | sip[i][0].help = sip_help_udp; | 1523 | sip[i][0].help = sip_help_udp; |
1524 | sip[i][1].tuple.src.l3num = AF_INET; | 1524 | sip[i][1].tuple.src.l3num = AF_INET; |
1525 | sip[i][1].tuple.dst.protonum = IPPROTO_TCP; | 1525 | sip[i][1].tuple.dst.protonum = IPPROTO_TCP; |
1526 | sip[i][1].help = sip_help_tcp; | 1526 | sip[i][1].help = sip_help_tcp; |
1527 | 1527 | ||
1528 | sip[i][2].tuple.src.l3num = AF_INET6; | 1528 | sip[i][2].tuple.src.l3num = AF_INET6; |
1529 | sip[i][2].tuple.dst.protonum = IPPROTO_UDP; | 1529 | sip[i][2].tuple.dst.protonum = IPPROTO_UDP; |
1530 | sip[i][2].help = sip_help_udp; | 1530 | sip[i][2].help = sip_help_udp; |
1531 | sip[i][3].tuple.src.l3num = AF_INET6; | 1531 | sip[i][3].tuple.src.l3num = AF_INET6; |
1532 | sip[i][3].tuple.dst.protonum = IPPROTO_TCP; | 1532 | sip[i][3].tuple.dst.protonum = IPPROTO_TCP; |
1533 | sip[i][3].help = sip_help_tcp; | 1533 | sip[i][3].help = sip_help_tcp; |
1534 | 1534 | ||
1535 | for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { | 1535 | for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { |
1536 | sip[i][j].tuple.src.u.udp.port = htons(ports[i]); | 1536 | sip[i][j].tuple.src.u.udp.port = htons(ports[i]); |
1537 | sip[i][j].expect_policy = sip_exp_policy; | 1537 | sip[i][j].expect_policy = sip_exp_policy; |
1538 | sip[i][j].expect_class_max = SIP_EXPECT_MAX; | 1538 | sip[i][j].expect_class_max = SIP_EXPECT_MAX; |
1539 | sip[i][j].me = THIS_MODULE; | 1539 | sip[i][j].me = THIS_MODULE; |
1540 | 1540 | ||
1541 | tmpname = &sip_names[i][j][0]; | 1541 | tmpname = &sip_names[i][j][0]; |
1542 | if (ports[i] == SIP_PORT) | 1542 | if (ports[i] == SIP_PORT) |
1543 | sprintf(tmpname, "sip"); | 1543 | sprintf(tmpname, "sip"); |
1544 | else | 1544 | else |
1545 | sprintf(tmpname, "sip-%u", i); | 1545 | sprintf(tmpname, "sip-%u", i); |
1546 | sip[i][j].name = tmpname; | 1546 | sip[i][j].name = tmpname; |
1547 | 1547 | ||
1548 | pr_debug("port #%u: %u\n", i, ports[i]); | 1548 | pr_debug("port #%u: %u\n", i, ports[i]); |
1549 | 1549 | ||
1550 | ret = nf_conntrack_helper_register(&sip[i][j]); | 1550 | ret = nf_conntrack_helper_register(&sip[i][j]); |
1551 | if (ret) { | 1551 | if (ret) { |
1552 | printk("nf_ct_sip: failed to register helper " | 1552 | printk(KERN_ERR "nf_ct_sip: failed to register" |
1553 | "for pf: %u port: %u\n", | 1553 | " helper for pf: %u port: %u\n", |
1554 | sip[i][j].tuple.src.l3num, ports[i]); | 1554 | sip[i][j].tuple.src.l3num, ports[i]); |
1555 | nf_conntrack_sip_fini(); | 1555 | nf_conntrack_sip_fini(); |
1556 | return ret; | 1556 | return ret; |
1557 | } | 1557 | } |
1558 | } | 1558 | } |
1559 | } | 1559 | } |
1560 | return 0; | 1560 | return 0; |
1561 | } | 1561 | } |
1562 | 1562 | ||
/* module entry/exit points */
module_init(nf_conntrack_sip_init);
module_exit(nf_conntrack_sip_fini);
1565 | 1565 |
net/netfilter/nf_conntrack_standalone.c
1 | /* (C) 1999-2001 Paul `Rusty' Russell | 1 | /* (C) 1999-2001 Paul `Rusty' Russell |
2 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 2 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/netfilter.h> | 10 | #include <linux/netfilter.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <net/net_namespace.h> | 18 | #include <net/net_namespace.h> |
19 | #ifdef CONFIG_SYSCTL | 19 | #ifdef CONFIG_SYSCTL |
20 | #include <linux/sysctl.h> | 20 | #include <linux/sysctl.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
24 | #include <net/netfilter/nf_conntrack_core.h> | 24 | #include <net/netfilter/nf_conntrack_core.h> |
25 | #include <net/netfilter/nf_conntrack_l3proto.h> | 25 | #include <net/netfilter/nf_conntrack_l3proto.h> |
26 | #include <net/netfilter/nf_conntrack_l4proto.h> | 26 | #include <net/netfilter/nf_conntrack_l4proto.h> |
27 | #include <net/netfilter/nf_conntrack_expect.h> | 27 | #include <net/netfilter/nf_conntrack_expect.h> |
28 | #include <net/netfilter/nf_conntrack_helper.h> | 28 | #include <net/netfilter/nf_conntrack_helper.h> |
29 | #include <net/netfilter/nf_conntrack_acct.h> | 29 | #include <net/netfilter/nf_conntrack_acct.h> |
30 | #include <net/netfilter/nf_conntrack_zones.h> | 30 | #include <net/netfilter/nf_conntrack_zones.h> |
31 | 31 | ||
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | 33 | ||
34 | #ifdef CONFIG_PROC_FS | 34 | #ifdef CONFIG_PROC_FS |
35 | int | 35 | int |
36 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, | 36 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, |
37 | const struct nf_conntrack_l3proto *l3proto, | 37 | const struct nf_conntrack_l3proto *l3proto, |
38 | const struct nf_conntrack_l4proto *l4proto) | 38 | const struct nf_conntrack_l4proto *l4proto) |
39 | { | 39 | { |
40 | return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); | 40 | return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); |
41 | } | 41 | } |
42 | EXPORT_SYMBOL_GPL(print_tuple); | 42 | EXPORT_SYMBOL_GPL(print_tuple); |
43 | 43 | ||
/* Iterator state for the /proc conntrack table dump: the per-namespace
 * seq_file private data (must be the first member) plus the hash bucket
 * currently being walked. */
struct ct_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
48 | 48 | ||
/* Find the first non-empty bucket of the per-netns conntrack hash and
 * return its first node, or NULL if the table is empty.  Caller must
 * hold rcu_read_lock (taken in ct_seq_start). */
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;
	struct hlist_nulls_node *n;

	for (st->bucket = 0;
	     st->bucket < net->ct.htable_size;
	     st->bucket++) {
		n = rcu_dereference(net->ct.hash[st->bucket].first);
		if (!is_a_nulls(n))
			return n;
	}
	return NULL;
}
64 | 64 | ||
/* Advance to the next node, crossing into following buckets as needed.
 * On a nulls end marker, the bucket is only advanced when the marker's
 * value matches the bucket we think we are in; otherwise the entry was
 * moved under us and the current bucket is rescanned from its head. */
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
				      struct hlist_nulls_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (is_a_nulls(head)) {
		if (likely(get_nulls_value(head) == st->bucket)) {
			if (++st->bucket >= net->ct.htable_size)
				return NULL;
		}
		head = rcu_dereference(net->ct.hash[st->bucket].first);
	}
	return head;
}
81 | 81 | ||
82 | static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) | 82 | static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) |
83 | { | 83 | { |
84 | struct hlist_nulls_node *head = ct_get_first(seq); | 84 | struct hlist_nulls_node *head = ct_get_first(seq); |
85 | 85 | ||
86 | if (head) | 86 | if (head) |
87 | while (pos && (head = ct_get_next(seq, head))) | 87 | while (pos && (head = ct_get_next(seq, head))) |
88 | pos--; | 88 | pos--; |
89 | return pos ? NULL : head; | 89 | return pos ? NULL : head; |
90 | } | 90 | } |
91 | 91 | ||
/* seq_file start: take the RCU read lock (released in ct_seq_stop) and
 * position the iterator at entry *pos. */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}
98 | 98 | ||
99 | static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) | 99 | static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) |
100 | { | 100 | { |
101 | (*pos)++; | 101 | (*pos)++; |
102 | return ct_get_next(s, v); | 102 | return ct_get_next(s, v); |
103 | } | 103 | } |
104 | 104 | ||
/* seq_file stop: drop the RCU read lock taken in ct_seq_start. */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
110 | 110 | ||
/* return 0 on success, 1 in case of error */
/* Emit one conntrack entry in /proc/net/nf_conntrack format.  Only the
 * ORIGINAL direction tuple hash is printed; the REPLY entry of the same
 * conntrack is skipped.  A reference is taken on the conntrack for the
 * duration of the printing.
 * NOTE(review): despite the comment above, ret is computed but the
 * function always returns 0 -- seq_printf failures are detected by the
 * seq_file core via the overflowed buffer, not via this return value. */
static int ct_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_tuple_hash *hash = v;
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	NF_CT_ASSERT(ct);
	/* skip entries whose refcount already dropped to zero (dying) */
	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
		return 0;

	/* we only want to print DIR_ORIGINAL */
	if (NF_CT_DIRECTION(hash))
		goto release;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	NF_CT_ASSERT(l3proto);
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	NF_CT_ASSERT(l4proto);

	ret = -ENOSPC;
	/* l3 name/number, l4 name/number, seconds until timeout */
	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
		       l3proto->name, nf_ct_l3num(ct),
		       l4proto->name, nf_ct_protonum(ct),
		       timer_pending(&ct->timeout)
		       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
		goto release;

	if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
		goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
		goto release;

	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
		if (seq_printf(s, "[UNREPLIED] "))
			goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
		goto release;

	if (test_bit(IPS_ASSURED_BIT, &ct->status))
		if (seq_printf(s, "[ASSURED] "))
			goto release;

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (seq_printf(s, "mark=%u ", ct->mark))
		goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	if (seq_printf(s, "secmark=%u ", ct->secmark))
		goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
		goto release;
#endif

	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
		goto release;

	ret = 0;
release:
	nf_ct_put(ct);
	return 0;
}
189 | 189 | ||
190 | static const struct seq_operations ct_seq_ops = { | 190 | static const struct seq_operations ct_seq_ops = { |
191 | .start = ct_seq_start, | 191 | .start = ct_seq_start, |
192 | .next = ct_seq_next, | 192 | .next = ct_seq_next, |
193 | .stop = ct_seq_stop, | 193 | .stop = ct_seq_stop, |
194 | .show = ct_seq_show | 194 | .show = ct_seq_show |
195 | }; | 195 | }; |
196 | 196 | ||
/* open() handler for /proc/net/nf_conntrack: set up a per-net-namespace
 * seq_file with ct_iter_state as private iterator state. */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			sizeof(struct ct_iter_state));
}
202 | 202 | ||
203 | static const struct file_operations ct_file_ops = { | 203 | static const struct file_operations ct_file_ops = { |
204 | .owner = THIS_MODULE, | 204 | .owner = THIS_MODULE, |
205 | .open = ct_open, | 205 | .open = ct_open, |
206 | .read = seq_read, | 206 | .read = seq_read, |
207 | .llseek = seq_lseek, | 207 | .llseek = seq_lseek, |
208 | .release = seq_release_net, | 208 | .release = seq_release_net, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | 211 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) |
212 | { | 212 | { |
213 | struct net *net = seq_file_net(seq); | 213 | struct net *net = seq_file_net(seq); |
214 | int cpu; | 214 | int cpu; |
215 | 215 | ||
216 | if (*pos == 0) | 216 | if (*pos == 0) |
217 | return SEQ_START_TOKEN; | 217 | return SEQ_START_TOKEN; |
218 | 218 | ||
219 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { | 219 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { |
220 | if (!cpu_possible(cpu)) | 220 | if (!cpu_possible(cpu)) |
221 | continue; | 221 | continue; |
222 | *pos = cpu + 1; | 222 | *pos = cpu + 1; |
223 | return per_cpu_ptr(net->ct.stat, cpu); | 223 | return per_cpu_ptr(net->ct.stat, cpu); |
224 | } | 224 | } |
225 | 225 | ||
226 | return NULL; | 226 | return NULL; |
227 | } | 227 | } |
228 | 228 | ||
229 | static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 229 | static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
230 | { | 230 | { |
231 | struct net *net = seq_file_net(seq); | 231 | struct net *net = seq_file_net(seq); |
232 | int cpu; | 232 | int cpu; |
233 | 233 | ||
234 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { | 234 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
235 | if (!cpu_possible(cpu)) | 235 | if (!cpu_possible(cpu)) |
236 | continue; | 236 | continue; |
237 | *pos = cpu + 1; | 237 | *pos = cpu + 1; |
238 | return per_cpu_ptr(net->ct.stat, cpu); | 238 | return per_cpu_ptr(net->ct.stat, cpu); |
239 | } | 239 | } |
240 | 240 | ||
241 | return NULL; | 241 | return NULL; |
242 | } | 242 | } |
243 | 243 | ||
244 | static void ct_cpu_seq_stop(struct seq_file *seq, void *v) | 244 | static void ct_cpu_seq_stop(struct seq_file *seq, void *v) |
245 | { | 245 | { |
246 | } | 246 | } |
247 | 247 | ||
248 | static int ct_cpu_seq_show(struct seq_file *seq, void *v) | 248 | static int ct_cpu_seq_show(struct seq_file *seq, void *v) |
249 | { | 249 | { |
250 | struct net *net = seq_file_net(seq); | 250 | struct net *net = seq_file_net(seq); |
251 | unsigned int nr_conntracks = atomic_read(&net->ct.count); | 251 | unsigned int nr_conntracks = atomic_read(&net->ct.count); |
252 | const struct ip_conntrack_stat *st = v; | 252 | const struct ip_conntrack_stat *st = v; |
253 | 253 | ||
254 | if (v == SEQ_START_TOKEN) { | 254 | if (v == SEQ_START_TOKEN) { |
255 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); | 255 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); |
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " | 259 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " |
260 | "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 260 | "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
261 | nr_conntracks, | 261 | nr_conntracks, |
262 | st->searched, | 262 | st->searched, |
263 | st->found, | 263 | st->found, |
264 | st->new, | 264 | st->new, |
265 | st->invalid, | 265 | st->invalid, |
266 | st->ignore, | 266 | st->ignore, |
267 | st->delete, | 267 | st->delete, |
268 | st->delete_list, | 268 | st->delete_list, |
269 | st->insert, | 269 | st->insert, |
270 | st->insert_failed, | 270 | st->insert_failed, |
271 | st->drop, | 271 | st->drop, |
272 | st->early_drop, | 272 | st->early_drop, |
273 | st->error, | 273 | st->error, |
274 | 274 | ||
275 | st->expect_new, | 275 | st->expect_new, |
276 | st->expect_create, | 276 | st->expect_create, |
277 | st->expect_delete, | 277 | st->expect_delete, |
278 | st->search_restart | 278 | st->search_restart |
279 | ); | 279 | ); |
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
282 | 282 | ||
283 | static const struct seq_operations ct_cpu_seq_ops = { | 283 | static const struct seq_operations ct_cpu_seq_ops = { |
284 | .start = ct_cpu_seq_start, | 284 | .start = ct_cpu_seq_start, |
285 | .next = ct_cpu_seq_next, | 285 | .next = ct_cpu_seq_next, |
286 | .stop = ct_cpu_seq_stop, | 286 | .stop = ct_cpu_seq_stop, |
287 | .show = ct_cpu_seq_show, | 287 | .show = ct_cpu_seq_show, |
288 | }; | 288 | }; |
289 | 289 | ||
290 | static int ct_cpu_seq_open(struct inode *inode, struct file *file) | 290 | static int ct_cpu_seq_open(struct inode *inode, struct file *file) |
291 | { | 291 | { |
292 | return seq_open_net(inode, file, &ct_cpu_seq_ops, | 292 | return seq_open_net(inode, file, &ct_cpu_seq_ops, |
293 | sizeof(struct seq_net_private)); | 293 | sizeof(struct seq_net_private)); |
294 | } | 294 | } |
295 | 295 | ||
296 | static const struct file_operations ct_cpu_seq_fops = { | 296 | static const struct file_operations ct_cpu_seq_fops = { |
297 | .owner = THIS_MODULE, | 297 | .owner = THIS_MODULE, |
298 | .open = ct_cpu_seq_open, | 298 | .open = ct_cpu_seq_open, |
299 | .read = seq_read, | 299 | .read = seq_read, |
300 | .llseek = seq_lseek, | 300 | .llseek = seq_lseek, |
301 | .release = seq_release_net, | 301 | .release = seq_release_net, |
302 | }; | 302 | }; |
303 | 303 | ||
304 | static int nf_conntrack_standalone_init_proc(struct net *net) | 304 | static int nf_conntrack_standalone_init_proc(struct net *net) |
305 | { | 305 | { |
306 | struct proc_dir_entry *pde; | 306 | struct proc_dir_entry *pde; |
307 | 307 | ||
308 | pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops); | 308 | pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops); |
309 | if (!pde) | 309 | if (!pde) |
310 | goto out_nf_conntrack; | 310 | goto out_nf_conntrack; |
311 | 311 | ||
312 | pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat, | 312 | pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat, |
313 | &ct_cpu_seq_fops); | 313 | &ct_cpu_seq_fops); |
314 | if (!pde) | 314 | if (!pde) |
315 | goto out_stat_nf_conntrack; | 315 | goto out_stat_nf_conntrack; |
316 | return 0; | 316 | return 0; |
317 | 317 | ||
318 | out_stat_nf_conntrack: | 318 | out_stat_nf_conntrack: |
319 | proc_net_remove(net, "nf_conntrack"); | 319 | proc_net_remove(net, "nf_conntrack"); |
320 | out_nf_conntrack: | 320 | out_nf_conntrack: |
321 | return -ENOMEM; | 321 | return -ENOMEM; |
322 | } | 322 | } |
323 | 323 | ||
324 | static void nf_conntrack_standalone_fini_proc(struct net *net) | 324 | static void nf_conntrack_standalone_fini_proc(struct net *net) |
325 | { | 325 | { |
326 | remove_proc_entry("nf_conntrack", net->proc_net_stat); | 326 | remove_proc_entry("nf_conntrack", net->proc_net_stat); |
327 | proc_net_remove(net, "nf_conntrack"); | 327 | proc_net_remove(net, "nf_conntrack"); |
328 | } | 328 | } |
329 | #else | 329 | #else |
330 | static int nf_conntrack_standalone_init_proc(struct net *net) | 330 | static int nf_conntrack_standalone_init_proc(struct net *net) |
331 | { | 331 | { |
332 | return 0; | 332 | return 0; |
333 | } | 333 | } |
334 | 334 | ||
335 | static void nf_conntrack_standalone_fini_proc(struct net *net) | 335 | static void nf_conntrack_standalone_fini_proc(struct net *net) |
336 | { | 336 | { |
337 | } | 337 | } |
338 | #endif /* CONFIG_PROC_FS */ | 338 | #endif /* CONFIG_PROC_FS */ |
339 | 339 | ||
340 | /* Sysctl support */ | 340 | /* Sysctl support */ |
341 | 341 | ||
342 | #ifdef CONFIG_SYSCTL | 342 | #ifdef CONFIG_SYSCTL |
343 | /* Log invalid packets of a given protocol */ | 343 | /* Log invalid packets of a given protocol */ |
344 | static int log_invalid_proto_min = 0; | 344 | static int log_invalid_proto_min = 0; |
345 | static int log_invalid_proto_max = 255; | 345 | static int log_invalid_proto_max = 255; |
346 | 346 | ||
347 | static struct ctl_table_header *nf_ct_netfilter_header; | 347 | static struct ctl_table_header *nf_ct_netfilter_header; |
348 | 348 | ||
349 | static ctl_table nf_ct_sysctl_table[] = { | 349 | static ctl_table nf_ct_sysctl_table[] = { |
350 | { | 350 | { |
351 | .procname = "nf_conntrack_max", | 351 | .procname = "nf_conntrack_max", |
352 | .data = &nf_conntrack_max, | 352 | .data = &nf_conntrack_max, |
353 | .maxlen = sizeof(int), | 353 | .maxlen = sizeof(int), |
354 | .mode = 0644, | 354 | .mode = 0644, |
355 | .proc_handler = proc_dointvec, | 355 | .proc_handler = proc_dointvec, |
356 | }, | 356 | }, |
357 | { | 357 | { |
358 | .procname = "nf_conntrack_count", | 358 | .procname = "nf_conntrack_count", |
359 | .data = &init_net.ct.count, | 359 | .data = &init_net.ct.count, |
360 | .maxlen = sizeof(int), | 360 | .maxlen = sizeof(int), |
361 | .mode = 0444, | 361 | .mode = 0444, |
362 | .proc_handler = proc_dointvec, | 362 | .proc_handler = proc_dointvec, |
363 | }, | 363 | }, |
364 | { | 364 | { |
365 | .procname = "nf_conntrack_buckets", | 365 | .procname = "nf_conntrack_buckets", |
366 | .data = &init_net.ct.htable_size, | 366 | .data = &init_net.ct.htable_size, |
367 | .maxlen = sizeof(unsigned int), | 367 | .maxlen = sizeof(unsigned int), |
368 | .mode = 0444, | 368 | .mode = 0444, |
369 | .proc_handler = proc_dointvec, | 369 | .proc_handler = proc_dointvec, |
370 | }, | 370 | }, |
371 | { | 371 | { |
372 | .procname = "nf_conntrack_checksum", | 372 | .procname = "nf_conntrack_checksum", |
373 | .data = &init_net.ct.sysctl_checksum, | 373 | .data = &init_net.ct.sysctl_checksum, |
374 | .maxlen = sizeof(unsigned int), | 374 | .maxlen = sizeof(unsigned int), |
375 | .mode = 0644, | 375 | .mode = 0644, |
376 | .proc_handler = proc_dointvec, | 376 | .proc_handler = proc_dointvec, |
377 | }, | 377 | }, |
378 | { | 378 | { |
379 | .procname = "nf_conntrack_log_invalid", | 379 | .procname = "nf_conntrack_log_invalid", |
380 | .data = &init_net.ct.sysctl_log_invalid, | 380 | .data = &init_net.ct.sysctl_log_invalid, |
381 | .maxlen = sizeof(unsigned int), | 381 | .maxlen = sizeof(unsigned int), |
382 | .mode = 0644, | 382 | .mode = 0644, |
383 | .proc_handler = proc_dointvec_minmax, | 383 | .proc_handler = proc_dointvec_minmax, |
384 | .extra1 = &log_invalid_proto_min, | 384 | .extra1 = &log_invalid_proto_min, |
385 | .extra2 = &log_invalid_proto_max, | 385 | .extra2 = &log_invalid_proto_max, |
386 | }, | 386 | }, |
387 | { | 387 | { |
388 | .procname = "nf_conntrack_expect_max", | 388 | .procname = "nf_conntrack_expect_max", |
389 | .data = &nf_ct_expect_max, | 389 | .data = &nf_ct_expect_max, |
390 | .maxlen = sizeof(int), | 390 | .maxlen = sizeof(int), |
391 | .mode = 0644, | 391 | .mode = 0644, |
392 | .proc_handler = proc_dointvec, | 392 | .proc_handler = proc_dointvec, |
393 | }, | 393 | }, |
394 | { } | 394 | { } |
395 | }; | 395 | }; |
396 | 396 | ||
397 | #define NET_NF_CONNTRACK_MAX 2089 | 397 | #define NET_NF_CONNTRACK_MAX 2089 |
398 | 398 | ||
399 | static ctl_table nf_ct_netfilter_table[] = { | 399 | static ctl_table nf_ct_netfilter_table[] = { |
400 | { | 400 | { |
401 | .procname = "nf_conntrack_max", | 401 | .procname = "nf_conntrack_max", |
402 | .data = &nf_conntrack_max, | 402 | .data = &nf_conntrack_max, |
403 | .maxlen = sizeof(int), | 403 | .maxlen = sizeof(int), |
404 | .mode = 0644, | 404 | .mode = 0644, |
405 | .proc_handler = proc_dointvec, | 405 | .proc_handler = proc_dointvec, |
406 | }, | 406 | }, |
407 | { } | 407 | { } |
408 | }; | 408 | }; |
409 | 409 | ||
410 | static struct ctl_path nf_ct_path[] = { | 410 | static struct ctl_path nf_ct_path[] = { |
411 | { .procname = "net", }, | 411 | { .procname = "net", }, |
412 | { } | 412 | { } |
413 | }; | 413 | }; |
414 | 414 | ||
415 | static int nf_conntrack_standalone_init_sysctl(struct net *net) | 415 | static int nf_conntrack_standalone_init_sysctl(struct net *net) |
416 | { | 416 | { |
417 | struct ctl_table *table; | 417 | struct ctl_table *table; |
418 | 418 | ||
419 | if (net_eq(net, &init_net)) { | 419 | if (net_eq(net, &init_net)) { |
420 | nf_ct_netfilter_header = | 420 | nf_ct_netfilter_header = |
421 | register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); | 421 | register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); |
422 | if (!nf_ct_netfilter_header) | 422 | if (!nf_ct_netfilter_header) |
423 | goto out; | 423 | goto out; |
424 | } | 424 | } |
425 | 425 | ||
426 | table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), | 426 | table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), |
427 | GFP_KERNEL); | 427 | GFP_KERNEL); |
428 | if (!table) | 428 | if (!table) |
429 | goto out_kmemdup; | 429 | goto out_kmemdup; |
430 | 430 | ||
431 | table[1].data = &net->ct.count; | 431 | table[1].data = &net->ct.count; |
432 | table[2].data = &net->ct.htable_size; | 432 | table[2].data = &net->ct.htable_size; |
433 | table[3].data = &net->ct.sysctl_checksum; | 433 | table[3].data = &net->ct.sysctl_checksum; |
434 | table[4].data = &net->ct.sysctl_log_invalid; | 434 | table[4].data = &net->ct.sysctl_log_invalid; |
435 | 435 | ||
436 | net->ct.sysctl_header = register_net_sysctl_table(net, | 436 | net->ct.sysctl_header = register_net_sysctl_table(net, |
437 | nf_net_netfilter_sysctl_path, table); | 437 | nf_net_netfilter_sysctl_path, table); |
438 | if (!net->ct.sysctl_header) | 438 | if (!net->ct.sysctl_header) |
439 | goto out_unregister_netfilter; | 439 | goto out_unregister_netfilter; |
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | 442 | ||
443 | out_unregister_netfilter: | 443 | out_unregister_netfilter: |
444 | kfree(table); | 444 | kfree(table); |
445 | out_kmemdup: | 445 | out_kmemdup: |
446 | if (net_eq(net, &init_net)) | 446 | if (net_eq(net, &init_net)) |
447 | unregister_sysctl_table(nf_ct_netfilter_header); | 447 | unregister_sysctl_table(nf_ct_netfilter_header); |
448 | out: | 448 | out: |
449 | printk("nf_conntrack: can't register to sysctl.\n"); | 449 | printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); |
450 | return -ENOMEM; | 450 | return -ENOMEM; |
451 | } | 451 | } |
452 | 452 | ||
453 | static void nf_conntrack_standalone_fini_sysctl(struct net *net) | 453 | static void nf_conntrack_standalone_fini_sysctl(struct net *net) |
454 | { | 454 | { |
455 | struct ctl_table *table; | 455 | struct ctl_table *table; |
456 | 456 | ||
457 | if (net_eq(net, &init_net)) | 457 | if (net_eq(net, &init_net)) |
458 | unregister_sysctl_table(nf_ct_netfilter_header); | 458 | unregister_sysctl_table(nf_ct_netfilter_header); |
459 | table = net->ct.sysctl_header->ctl_table_arg; | 459 | table = net->ct.sysctl_header->ctl_table_arg; |
460 | unregister_net_sysctl_table(net->ct.sysctl_header); | 460 | unregister_net_sysctl_table(net->ct.sysctl_header); |
461 | kfree(table); | 461 | kfree(table); |
462 | } | 462 | } |
463 | #else | 463 | #else |
464 | static int nf_conntrack_standalone_init_sysctl(struct net *net) | 464 | static int nf_conntrack_standalone_init_sysctl(struct net *net) |
465 | { | 465 | { |
466 | return 0; | 466 | return 0; |
467 | } | 467 | } |
468 | 468 | ||
469 | static void nf_conntrack_standalone_fini_sysctl(struct net *net) | 469 | static void nf_conntrack_standalone_fini_sysctl(struct net *net) |
470 | { | 470 | { |
471 | } | 471 | } |
472 | #endif /* CONFIG_SYSCTL */ | 472 | #endif /* CONFIG_SYSCTL */ |
473 | 473 | ||
474 | static int nf_conntrack_net_init(struct net *net) | 474 | static int nf_conntrack_net_init(struct net *net) |
475 | { | 475 | { |
476 | int ret; | 476 | int ret; |
477 | 477 | ||
478 | ret = nf_conntrack_init(net); | 478 | ret = nf_conntrack_init(net); |
479 | if (ret < 0) | 479 | if (ret < 0) |
480 | goto out_init; | 480 | goto out_init; |
481 | ret = nf_conntrack_standalone_init_proc(net); | 481 | ret = nf_conntrack_standalone_init_proc(net); |
482 | if (ret < 0) | 482 | if (ret < 0) |
483 | goto out_proc; | 483 | goto out_proc; |
484 | net->ct.sysctl_checksum = 1; | 484 | net->ct.sysctl_checksum = 1; |
485 | net->ct.sysctl_log_invalid = 0; | 485 | net->ct.sysctl_log_invalid = 0; |
486 | ret = nf_conntrack_standalone_init_sysctl(net); | 486 | ret = nf_conntrack_standalone_init_sysctl(net); |
487 | if (ret < 0) | 487 | if (ret < 0) |
488 | goto out_sysctl; | 488 | goto out_sysctl; |
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | out_sysctl: | 491 | out_sysctl: |
492 | nf_conntrack_standalone_fini_proc(net); | 492 | nf_conntrack_standalone_fini_proc(net); |
493 | out_proc: | 493 | out_proc: |
494 | nf_conntrack_cleanup(net); | 494 | nf_conntrack_cleanup(net); |
495 | out_init: | 495 | out_init: |
496 | return ret; | 496 | return ret; |
497 | } | 497 | } |
498 | 498 | ||
499 | static void nf_conntrack_net_exit(struct net *net) | 499 | static void nf_conntrack_net_exit(struct net *net) |
500 | { | 500 | { |
501 | nf_conntrack_standalone_fini_sysctl(net); | 501 | nf_conntrack_standalone_fini_sysctl(net); |
502 | nf_conntrack_standalone_fini_proc(net); | 502 | nf_conntrack_standalone_fini_proc(net); |
503 | nf_conntrack_cleanup(net); | 503 | nf_conntrack_cleanup(net); |
504 | } | 504 | } |
505 | 505 | ||
506 | static struct pernet_operations nf_conntrack_net_ops = { | 506 | static struct pernet_operations nf_conntrack_net_ops = { |
507 | .init = nf_conntrack_net_init, | 507 | .init = nf_conntrack_net_init, |
508 | .exit = nf_conntrack_net_exit, | 508 | .exit = nf_conntrack_net_exit, |
509 | }; | 509 | }; |
510 | 510 | ||
511 | static int __init nf_conntrack_standalone_init(void) | 511 | static int __init nf_conntrack_standalone_init(void) |
512 | { | 512 | { |
513 | return register_pernet_subsys(&nf_conntrack_net_ops); | 513 | return register_pernet_subsys(&nf_conntrack_net_ops); |
514 | } | 514 | } |
515 | 515 | ||
516 | static void __exit nf_conntrack_standalone_fini(void) | 516 | static void __exit nf_conntrack_standalone_fini(void) |
517 | { | 517 | { |
518 | unregister_pernet_subsys(&nf_conntrack_net_ops); | 518 | unregister_pernet_subsys(&nf_conntrack_net_ops); |
519 | } | 519 | } |
520 | 520 | ||
521 | module_init(nf_conntrack_standalone_init); | 521 | module_init(nf_conntrack_standalone_init); |
522 | module_exit(nf_conntrack_standalone_fini); | 522 | module_exit(nf_conntrack_standalone_fini); |
523 | 523 | ||
524 | /* Some modules need us, but don't depend directly on any symbol. | 524 | /* Some modules need us, but don't depend directly on any symbol. |
525 | They should call this. */ | 525 | They should call this. */ |
526 | void need_conntrack(void) | 526 | void need_conntrack(void) |
527 | { | 527 | { |
528 | } | 528 | } |
529 | EXPORT_SYMBOL_GPL(need_conntrack); | 529 | EXPORT_SYMBOL_GPL(need_conntrack); |
530 | 530 |
net/netfilter/nf_conntrack_tftp.c
1 | /* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> | 1 | /* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 as | 4 | * it under the terms of the GNU General Public License version 2 as |
5 | * published by the Free Software Foundation. | 5 | * published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/moduleparam.h> | 9 | #include <linux/moduleparam.h> |
10 | #include <linux/in.h> | 10 | #include <linux/in.h> |
11 | #include <linux/udp.h> | 11 | #include <linux/udp.h> |
12 | #include <linux/netfilter.h> | 12 | #include <linux/netfilter.h> |
13 | 13 | ||
14 | #include <net/netfilter/nf_conntrack.h> | 14 | #include <net/netfilter/nf_conntrack.h> |
15 | #include <net/netfilter/nf_conntrack_tuple.h> | 15 | #include <net/netfilter/nf_conntrack_tuple.h> |
16 | #include <net/netfilter/nf_conntrack_expect.h> | 16 | #include <net/netfilter/nf_conntrack_expect.h> |
17 | #include <net/netfilter/nf_conntrack_ecache.h> | 17 | #include <net/netfilter/nf_conntrack_ecache.h> |
18 | #include <net/netfilter/nf_conntrack_helper.h> | 18 | #include <net/netfilter/nf_conntrack_helper.h> |
19 | #include <linux/netfilter/nf_conntrack_tftp.h> | 19 | #include <linux/netfilter/nf_conntrack_tftp.h> |
20 | 20 | ||
21 | MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); | 21 | MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); |
22 | MODULE_DESCRIPTION("TFTP connection tracking helper"); | 22 | MODULE_DESCRIPTION("TFTP connection tracking helper"); |
23 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
24 | MODULE_ALIAS("ip_conntrack_tftp"); | 24 | MODULE_ALIAS("ip_conntrack_tftp"); |
25 | MODULE_ALIAS_NFCT_HELPER("tftp"); | 25 | MODULE_ALIAS_NFCT_HELPER("tftp"); |
26 | 26 | ||
27 | #define MAX_PORTS 8 | 27 | #define MAX_PORTS 8 |
28 | static unsigned short ports[MAX_PORTS]; | 28 | static unsigned short ports[MAX_PORTS]; |
29 | static unsigned int ports_c; | 29 | static unsigned int ports_c; |
30 | module_param_array(ports, ushort, &ports_c, 0400); | 30 | module_param_array(ports, ushort, &ports_c, 0400); |
31 | MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); | 31 | MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); |
32 | 32 | ||
33 | unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb, | 33 | unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb, |
34 | enum ip_conntrack_info ctinfo, | 34 | enum ip_conntrack_info ctinfo, |
35 | struct nf_conntrack_expect *exp) __read_mostly; | 35 | struct nf_conntrack_expect *exp) __read_mostly; |
36 | EXPORT_SYMBOL_GPL(nf_nat_tftp_hook); | 36 | EXPORT_SYMBOL_GPL(nf_nat_tftp_hook); |
37 | 37 | ||
38 | static int tftp_help(struct sk_buff *skb, | 38 | static int tftp_help(struct sk_buff *skb, |
39 | unsigned int protoff, | 39 | unsigned int protoff, |
40 | struct nf_conn *ct, | 40 | struct nf_conn *ct, |
41 | enum ip_conntrack_info ctinfo) | 41 | enum ip_conntrack_info ctinfo) |
42 | { | 42 | { |
43 | const struct tftphdr *tfh; | 43 | const struct tftphdr *tfh; |
44 | struct tftphdr _tftph; | 44 | struct tftphdr _tftph; |
45 | struct nf_conntrack_expect *exp; | 45 | struct nf_conntrack_expect *exp; |
46 | struct nf_conntrack_tuple *tuple; | 46 | struct nf_conntrack_tuple *tuple; |
47 | unsigned int ret = NF_ACCEPT; | 47 | unsigned int ret = NF_ACCEPT; |
48 | typeof(nf_nat_tftp_hook) nf_nat_tftp; | 48 | typeof(nf_nat_tftp_hook) nf_nat_tftp; |
49 | 49 | ||
50 | tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr), | 50 | tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr), |
51 | sizeof(_tftph), &_tftph); | 51 | sizeof(_tftph), &_tftph); |
52 | if (tfh == NULL) | 52 | if (tfh == NULL) |
53 | return NF_ACCEPT; | 53 | return NF_ACCEPT; |
54 | 54 | ||
55 | switch (ntohs(tfh->opcode)) { | 55 | switch (ntohs(tfh->opcode)) { |
56 | case TFTP_OPCODE_READ: | 56 | case TFTP_OPCODE_READ: |
57 | case TFTP_OPCODE_WRITE: | 57 | case TFTP_OPCODE_WRITE: |
58 | /* RRQ and WRQ works the same way */ | 58 | /* RRQ and WRQ works the same way */ |
59 | nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 59 | nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
60 | nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); | 60 | nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
61 | 61 | ||
62 | exp = nf_ct_expect_alloc(ct); | 62 | exp = nf_ct_expect_alloc(ct); |
63 | if (exp == NULL) | 63 | if (exp == NULL) |
64 | return NF_DROP; | 64 | return NF_DROP; |
65 | tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 65 | tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
66 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, | 66 | nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, |
67 | nf_ct_l3num(ct), | 67 | nf_ct_l3num(ct), |
68 | &tuple->src.u3, &tuple->dst.u3, | 68 | &tuple->src.u3, &tuple->dst.u3, |
69 | IPPROTO_UDP, NULL, &tuple->dst.u.udp.port); | 69 | IPPROTO_UDP, NULL, &tuple->dst.u.udp.port); |
70 | 70 | ||
71 | pr_debug("expect: "); | 71 | pr_debug("expect: "); |
72 | nf_ct_dump_tuple(&exp->tuple); | 72 | nf_ct_dump_tuple(&exp->tuple); |
73 | 73 | ||
74 | nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); | 74 | nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); |
75 | if (nf_nat_tftp && ct->status & IPS_NAT_MASK) | 75 | if (nf_nat_tftp && ct->status & IPS_NAT_MASK) |
76 | ret = nf_nat_tftp(skb, ctinfo, exp); | 76 | ret = nf_nat_tftp(skb, ctinfo, exp); |
77 | else if (nf_ct_expect_related(exp) != 0) | 77 | else if (nf_ct_expect_related(exp) != 0) |
78 | ret = NF_DROP; | 78 | ret = NF_DROP; |
79 | nf_ct_expect_put(exp); | 79 | nf_ct_expect_put(exp); |
80 | break; | 80 | break; |
81 | case TFTP_OPCODE_DATA: | 81 | case TFTP_OPCODE_DATA: |
82 | case TFTP_OPCODE_ACK: | 82 | case TFTP_OPCODE_ACK: |
83 | pr_debug("Data/ACK opcode\n"); | 83 | pr_debug("Data/ACK opcode\n"); |
84 | break; | 84 | break; |
85 | case TFTP_OPCODE_ERROR: | 85 | case TFTP_OPCODE_ERROR: |
86 | pr_debug("Error opcode\n"); | 86 | pr_debug("Error opcode\n"); |
87 | break; | 87 | break; |
88 | default: | 88 | default: |
89 | pr_debug("Unknown opcode\n"); | 89 | pr_debug("Unknown opcode\n"); |
90 | } | 90 | } |
91 | return ret; | 91 | return ret; |
92 | } | 92 | } |
93 | 93 | ||
94 | static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; | 94 | static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; |
95 | static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly; | 95 | static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly; |
96 | 96 | ||
97 | static const struct nf_conntrack_expect_policy tftp_exp_policy = { | 97 | static const struct nf_conntrack_expect_policy tftp_exp_policy = { |
98 | .max_expected = 1, | 98 | .max_expected = 1, |
99 | .timeout = 5 * 60, | 99 | .timeout = 5 * 60, |
100 | }; | 100 | }; |
101 | 101 | ||
102 | static void nf_conntrack_tftp_fini(void) | 102 | static void nf_conntrack_tftp_fini(void) |
103 | { | 103 | { |
104 | int i, j; | 104 | int i, j; |
105 | 105 | ||
106 | for (i = 0; i < ports_c; i++) { | 106 | for (i = 0; i < ports_c; i++) { |
107 | for (j = 0; j < 2; j++) | 107 | for (j = 0; j < 2; j++) |
108 | nf_conntrack_helper_unregister(&tftp[i][j]); | 108 | nf_conntrack_helper_unregister(&tftp[i][j]); |
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
112 | static int __init nf_conntrack_tftp_init(void) | 112 | static int __init nf_conntrack_tftp_init(void) |
113 | { | 113 | { |
114 | int i, j, ret; | 114 | int i, j, ret; |
115 | char *tmpname; | 115 | char *tmpname; |
116 | 116 | ||
117 | if (ports_c == 0) | 117 | if (ports_c == 0) |
118 | ports[ports_c++] = TFTP_PORT; | 118 | ports[ports_c++] = TFTP_PORT; |
119 | 119 | ||
120 | for (i = 0; i < ports_c; i++) { | 120 | for (i = 0; i < ports_c; i++) { |
121 | memset(&tftp[i], 0, sizeof(tftp[i])); | 121 | memset(&tftp[i], 0, sizeof(tftp[i])); |
122 | 122 | ||
123 | tftp[i][0].tuple.src.l3num = AF_INET; | 123 | tftp[i][0].tuple.src.l3num = AF_INET; |
124 | tftp[i][1].tuple.src.l3num = AF_INET6; | 124 | tftp[i][1].tuple.src.l3num = AF_INET6; |
125 | for (j = 0; j < 2; j++) { | 125 | for (j = 0; j < 2; j++) { |
126 | tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; | 126 | tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; |
127 | tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); | 127 | tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); |
128 | tftp[i][j].expect_policy = &tftp_exp_policy; | 128 | tftp[i][j].expect_policy = &tftp_exp_policy; |
129 | tftp[i][j].me = THIS_MODULE; | 129 | tftp[i][j].me = THIS_MODULE; |
130 | tftp[i][j].help = tftp_help; | 130 | tftp[i][j].help = tftp_help; |
131 | 131 | ||
132 | tmpname = &tftp_names[i][j][0]; | 132 | tmpname = &tftp_names[i][j][0]; |
133 | if (ports[i] == TFTP_PORT) | 133 | if (ports[i] == TFTP_PORT) |
134 | sprintf(tmpname, "tftp"); | 134 | sprintf(tmpname, "tftp"); |
135 | else | 135 | else |
136 | sprintf(tmpname, "tftp-%u", i); | 136 | sprintf(tmpname, "tftp-%u", i); |
137 | tftp[i][j].name = tmpname; | 137 | tftp[i][j].name = tmpname; |
138 | 138 | ||
139 | ret = nf_conntrack_helper_register(&tftp[i][j]); | 139 | ret = nf_conntrack_helper_register(&tftp[i][j]); |
140 | if (ret) { | 140 | if (ret) { |
141 | printk("nf_ct_tftp: failed to register helper " | 141 | printk(KERN_ERR "nf_ct_tftp: failed to register" |
142 | "for pf: %u port: %u\n", | 142 | " helper for pf: %u port: %u\n", |
143 | tftp[i][j].tuple.src.l3num, ports[i]); | 143 | tftp[i][j].tuple.src.l3num, ports[i]); |
144 | nf_conntrack_tftp_fini(); | 144 | nf_conntrack_tftp_fini(); |
145 | return ret; | 145 | return ret; |
146 | } | 146 | } |
147 | } | 147 | } |
148 | } | 148 | } |
149 | return 0; | 149 | return 0; |
150 | } | 150 | } |
151 | 151 | ||
152 | module_init(nf_conntrack_tftp_init); | 152 | module_init(nf_conntrack_tftp_init); |
153 | module_exit(nf_conntrack_tftp_fini); | 153 | module_exit(nf_conntrack_tftp_fini); |
154 | 154 |
net/netfilter/nf_internals.h
1 | #ifndef _NF_INTERNALS_H | 1 | #ifndef _NF_INTERNALS_H |
2 | #define _NF_INTERNALS_H | 2 | #define _NF_INTERNALS_H |
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/skbuff.h> | 5 | #include <linux/skbuff.h> |
6 | #include <linux/netdevice.h> | 6 | #include <linux/netdevice.h> |
7 | 7 | ||
8 | #ifdef CONFIG_NETFILTER_DEBUG | 8 | #ifdef CONFIG_NETFILTER_DEBUG |
9 | #define NFDEBUG(format, args...) printk(format , ## args) | 9 | #define NFDEBUG(format, args...) printk(KERN_DEBUG format , ## args) |
10 | #else | 10 | #else |
11 | #define NFDEBUG(format, args...) | 11 | #define NFDEBUG(format, args...) |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | 14 | ||
15 | /* core.c */ | 15 | /* core.c */ |
16 | extern unsigned int nf_iterate(struct list_head *head, | 16 | extern unsigned int nf_iterate(struct list_head *head, |
17 | struct sk_buff *skb, | 17 | struct sk_buff *skb, |
18 | unsigned int hook, | 18 | unsigned int hook, |
19 | const struct net_device *indev, | 19 | const struct net_device *indev, |
20 | const struct net_device *outdev, | 20 | const struct net_device *outdev, |
21 | struct list_head **i, | 21 | struct list_head **i, |
22 | int (*okfn)(struct sk_buff *), | 22 | int (*okfn)(struct sk_buff *), |
23 | int hook_thresh); | 23 | int hook_thresh); |
24 | 24 | ||
25 | /* nf_queue.c */ | 25 | /* nf_queue.c */ |
26 | extern int nf_queue(struct sk_buff *skb, | 26 | extern int nf_queue(struct sk_buff *skb, |
27 | struct list_head *elem, | 27 | struct list_head *elem, |
28 | u_int8_t pf, unsigned int hook, | 28 | u_int8_t pf, unsigned int hook, |
29 | struct net_device *indev, | 29 | struct net_device *indev, |
30 | struct net_device *outdev, | 30 | struct net_device *outdev, |
31 | int (*okfn)(struct sk_buff *), | 31 | int (*okfn)(struct sk_buff *), |
32 | unsigned int queuenum); | 32 | unsigned int queuenum); |
33 | extern int __init netfilter_queue_init(void); | 33 | extern int __init netfilter_queue_init(void); |
34 | 34 | ||
35 | /* nf_log.c */ | 35 | /* nf_log.c */ |
36 | extern int __init netfilter_log_init(void); | 36 | extern int __init netfilter_log_init(void); |
37 | 37 | ||
38 | #endif | 38 | #endif |
39 | 39 |
net/netfilter/nfnetlink.c
1 | /* Netfilter messages via netlink socket. Allows for user space | 1 | /* Netfilter messages via netlink socket. Allows for user space |
2 | * protocol helpers and general trouble making from userspace. | 2 | * protocol helpers and general trouble making from userspace. |
3 | * | 3 | * |
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org>, | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org>, |
5 | * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org> | 6 | * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org> |
7 | * | 7 | * |
8 | * Initial netfilter messages via netlink development funded and | 8 | * Initial netfilter messages via netlink development funded and |
9 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 9 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
10 | * | 10 | * |
11 | * Further development of this code funded by Astaro AG (http://www.astaro.com) | 11 | * Further development of this code funded by Astaro AG (http://www.astaro.com) |
12 | * | 12 | * |
13 | * This software may be used and distributed according to the terms | 13 | * This software may be used and distributed according to the terms |
14 | * of the GNU General Public License, incorporated herein by reference. | 14 | * of the GNU General Public License, incorporated herein by reference. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/socket.h> | 19 | #include <linux/socket.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/sockios.h> | 22 | #include <linux/sockios.h> |
23 | #include <linux/net.h> | 23 | #include <linux/net.h> |
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/system.h> | 26 | #include <asm/system.h> |
27 | #include <net/sock.h> | 27 | #include <net/sock.h> |
28 | #include <net/netlink.h> | 28 | #include <net/netlink.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | 30 | ||
31 | #include <linux/netlink.h> | 31 | #include <linux/netlink.h> |
32 | #include <linux/netfilter/nfnetlink.h> | 32 | #include <linux/netfilter/nfnetlink.h> |
33 | 33 | ||
34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 35 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
36 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); | 36 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); |
37 | 37 | ||
38 | static char __initdata nfversion[] = "0.30"; | 38 | static char __initdata nfversion[] = "0.30"; |
39 | 39 | ||
40 | static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; | 40 | static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; |
41 | static DEFINE_MUTEX(nfnl_mutex); | 41 | static DEFINE_MUTEX(nfnl_mutex); |
42 | 42 | ||
43 | void nfnl_lock(void) | 43 | void nfnl_lock(void) |
44 | { | 44 | { |
45 | mutex_lock(&nfnl_mutex); | 45 | mutex_lock(&nfnl_mutex); |
46 | } | 46 | } |
47 | EXPORT_SYMBOL_GPL(nfnl_lock); | 47 | EXPORT_SYMBOL_GPL(nfnl_lock); |
48 | 48 | ||
49 | void nfnl_unlock(void) | 49 | void nfnl_unlock(void) |
50 | { | 50 | { |
51 | mutex_unlock(&nfnl_mutex); | 51 | mutex_unlock(&nfnl_mutex); |
52 | } | 52 | } |
53 | EXPORT_SYMBOL_GPL(nfnl_unlock); | 53 | EXPORT_SYMBOL_GPL(nfnl_unlock); |
54 | 54 | ||
55 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) | 55 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) |
56 | { | 56 | { |
57 | nfnl_lock(); | 57 | nfnl_lock(); |
58 | if (subsys_table[n->subsys_id]) { | 58 | if (subsys_table[n->subsys_id]) { |
59 | nfnl_unlock(); | 59 | nfnl_unlock(); |
60 | return -EBUSY; | 60 | return -EBUSY; |
61 | } | 61 | } |
62 | subsys_table[n->subsys_id] = n; | 62 | subsys_table[n->subsys_id] = n; |
63 | nfnl_unlock(); | 63 | nfnl_unlock(); |
64 | 64 | ||
65 | return 0; | 65 | return 0; |
66 | } | 66 | } |
67 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); | 67 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); |
68 | 68 | ||
69 | int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) | 69 | int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) |
70 | { | 70 | { |
71 | nfnl_lock(); | 71 | nfnl_lock(); |
72 | subsys_table[n->subsys_id] = NULL; | 72 | subsys_table[n->subsys_id] = NULL; |
73 | nfnl_unlock(); | 73 | nfnl_unlock(); |
74 | 74 | ||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); | 77 | EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); |
78 | 78 | ||
79 | static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) | 79 | static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) |
80 | { | 80 | { |
81 | u_int8_t subsys_id = NFNL_SUBSYS_ID(type); | 81 | u_int8_t subsys_id = NFNL_SUBSYS_ID(type); |
82 | 82 | ||
83 | if (subsys_id >= NFNL_SUBSYS_COUNT) | 83 | if (subsys_id >= NFNL_SUBSYS_COUNT) |
84 | return NULL; | 84 | return NULL; |
85 | 85 | ||
86 | return subsys_table[subsys_id]; | 86 | return subsys_table[subsys_id]; |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline const struct nfnl_callback * | 89 | static inline const struct nfnl_callback * |
90 | nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) | 90 | nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) |
91 | { | 91 | { |
92 | u_int8_t cb_id = NFNL_MSG_TYPE(type); | 92 | u_int8_t cb_id = NFNL_MSG_TYPE(type); |
93 | 93 | ||
94 | if (cb_id >= ss->cb_count) | 94 | if (cb_id >= ss->cb_count) |
95 | return NULL; | 95 | return NULL; |
96 | 96 | ||
97 | return &ss->cb[cb_id]; | 97 | return &ss->cb[cb_id]; |
98 | } | 98 | } |
99 | 99 | ||
100 | int nfnetlink_has_listeners(struct net *net, unsigned int group) | 100 | int nfnetlink_has_listeners(struct net *net, unsigned int group) |
101 | { | 101 | { |
102 | return netlink_has_listeners(net->nfnl, group); | 102 | return netlink_has_listeners(net->nfnl, group); |
103 | } | 103 | } |
104 | EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); | 104 | EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); |
105 | 105 | ||
106 | int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, | 106 | int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, |
107 | unsigned group, int echo, gfp_t flags) | 107 | unsigned group, int echo, gfp_t flags) |
108 | { | 108 | { |
109 | return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); | 109 | return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); |
110 | } | 110 | } |
111 | EXPORT_SYMBOL_GPL(nfnetlink_send); | 111 | EXPORT_SYMBOL_GPL(nfnetlink_send); |
112 | 112 | ||
113 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) | 113 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) |
114 | { | 114 | { |
115 | return netlink_set_err(net->nfnl, pid, group, error); | 115 | return netlink_set_err(net->nfnl, pid, group, error); |
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); | 117 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); |
118 | 118 | ||
119 | int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) | 119 | int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) |
120 | { | 120 | { |
121 | return netlink_unicast(net->nfnl, skb, pid, flags); | 121 | return netlink_unicast(net->nfnl, skb, pid, flags); |
122 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(nfnetlink_unicast); | 123 | EXPORT_SYMBOL_GPL(nfnetlink_unicast); |
124 | 124 | ||
125 | /* Process one complete nfnetlink message. */ | 125 | /* Process one complete nfnetlink message. */ |
126 | static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 126 | static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
127 | { | 127 | { |
128 | struct net *net = sock_net(skb->sk); | 128 | struct net *net = sock_net(skb->sk); |
129 | const struct nfnl_callback *nc; | 129 | const struct nfnl_callback *nc; |
130 | const struct nfnetlink_subsystem *ss; | 130 | const struct nfnetlink_subsystem *ss; |
131 | int type, err; | 131 | int type, err; |
132 | 132 | ||
133 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) | 133 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) |
134 | return -EPERM; | 134 | return -EPERM; |
135 | 135 | ||
136 | /* All the messages must at least contain nfgenmsg */ | 136 | /* All the messages must at least contain nfgenmsg */ |
137 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg))) | 137 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg))) |
138 | return 0; | 138 | return 0; |
139 | 139 | ||
140 | type = nlh->nlmsg_type; | 140 | type = nlh->nlmsg_type; |
141 | replay: | 141 | replay: |
142 | ss = nfnetlink_get_subsys(type); | 142 | ss = nfnetlink_get_subsys(type); |
143 | if (!ss) { | 143 | if (!ss) { |
144 | #ifdef CONFIG_MODULES | 144 | #ifdef CONFIG_MODULES |
145 | nfnl_unlock(); | 145 | nfnl_unlock(); |
146 | request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); | 146 | request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); |
147 | nfnl_lock(); | 147 | nfnl_lock(); |
148 | ss = nfnetlink_get_subsys(type); | 148 | ss = nfnetlink_get_subsys(type); |
149 | if (!ss) | 149 | if (!ss) |
150 | #endif | 150 | #endif |
151 | return -EINVAL; | 151 | return -EINVAL; |
152 | } | 152 | } |
153 | 153 | ||
154 | nc = nfnetlink_find_client(type, ss); | 154 | nc = nfnetlink_find_client(type, ss); |
155 | if (!nc) | 155 | if (!nc) |
156 | return -EINVAL; | 156 | return -EINVAL; |
157 | 157 | ||
158 | { | 158 | { |
159 | int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); | 159 | int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); |
160 | u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); | 160 | u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); |
161 | struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; | 161 | struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; |
162 | struct nlattr *attr = (void *)nlh + min_len; | 162 | struct nlattr *attr = (void *)nlh + min_len; |
163 | int attrlen = nlh->nlmsg_len - min_len; | 163 | int attrlen = nlh->nlmsg_len - min_len; |
164 | 164 | ||
165 | err = nla_parse(cda, ss->cb[cb_id].attr_count, | 165 | err = nla_parse(cda, ss->cb[cb_id].attr_count, |
166 | attr, attrlen, ss->cb[cb_id].policy); | 166 | attr, attrlen, ss->cb[cb_id].policy); |
167 | if (err < 0) | 167 | if (err < 0) |
168 | return err; | 168 | return err; |
169 | 169 | ||
170 | err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); | 170 | err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); |
171 | if (err == -EAGAIN) | 171 | if (err == -EAGAIN) |
172 | goto replay; | 172 | goto replay; |
173 | return err; | 173 | return err; |
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | static void nfnetlink_rcv(struct sk_buff *skb) | 177 | static void nfnetlink_rcv(struct sk_buff *skb) |
178 | { | 178 | { |
179 | nfnl_lock(); | 179 | nfnl_lock(); |
180 | netlink_rcv_skb(skb, &nfnetlink_rcv_msg); | 180 | netlink_rcv_skb(skb, &nfnetlink_rcv_msg); |
181 | nfnl_unlock(); | 181 | nfnl_unlock(); |
182 | } | 182 | } |
183 | 183 | ||
184 | static int __net_init nfnetlink_net_init(struct net *net) | 184 | static int __net_init nfnetlink_net_init(struct net *net) |
185 | { | 185 | { |
186 | struct sock *nfnl; | 186 | struct sock *nfnl; |
187 | 187 | ||
188 | nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, | 188 | nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, |
189 | nfnetlink_rcv, NULL, THIS_MODULE); | 189 | nfnetlink_rcv, NULL, THIS_MODULE); |
190 | if (!nfnl) | 190 | if (!nfnl) |
191 | return -ENOMEM; | 191 | return -ENOMEM; |
192 | net->nfnl_stash = nfnl; | 192 | net->nfnl_stash = nfnl; |
193 | rcu_assign_pointer(net->nfnl, nfnl); | 193 | rcu_assign_pointer(net->nfnl, nfnl); |
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) | 197 | static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) |
198 | { | 198 | { |
199 | struct net *net; | 199 | struct net *net; |
200 | 200 | ||
201 | list_for_each_entry(net, net_exit_list, exit_list) | 201 | list_for_each_entry(net, net_exit_list, exit_list) |
202 | rcu_assign_pointer(net->nfnl, NULL); | 202 | rcu_assign_pointer(net->nfnl, NULL); |
203 | synchronize_net(); | 203 | synchronize_net(); |
204 | list_for_each_entry(net, net_exit_list, exit_list) | 204 | list_for_each_entry(net, net_exit_list, exit_list) |
205 | netlink_kernel_release(net->nfnl_stash); | 205 | netlink_kernel_release(net->nfnl_stash); |
206 | } | 206 | } |
207 | 207 | ||
208 | static struct pernet_operations nfnetlink_net_ops = { | 208 | static struct pernet_operations nfnetlink_net_ops = { |
209 | .init = nfnetlink_net_init, | 209 | .init = nfnetlink_net_init, |
210 | .exit_batch = nfnetlink_net_exit_batch, | 210 | .exit_batch = nfnetlink_net_exit_batch, |
211 | }; | 211 | }; |
212 | 212 | ||
213 | static int __init nfnetlink_init(void) | 213 | static int __init nfnetlink_init(void) |
214 | { | 214 | { |
215 | printk("Netfilter messages via NETLINK v%s.\n", nfversion); | 215 | pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); |
216 | return register_pernet_subsys(&nfnetlink_net_ops); | 216 | return register_pernet_subsys(&nfnetlink_net_ops); |
217 | } | 217 | } |
218 | 218 | ||
219 | static void __exit nfnetlink_exit(void) | 219 | static void __exit nfnetlink_exit(void) |
220 | { | 220 | { |
221 | printk("Removing netfilter NETLINK layer.\n"); | 221 | pr_info("Removing netfilter NETLINK layer.\n"); |
222 | unregister_pernet_subsys(&nfnetlink_net_ops); | 222 | unregister_pernet_subsys(&nfnetlink_net_ops); |
223 | } | 223 | } |
224 | module_init(nfnetlink_init); | 224 | module_init(nfnetlink_init); |
225 | module_exit(nfnetlink_exit); | 225 | module_exit(nfnetlink_exit); |
226 | 226 |
net/netfilter/nfnetlink_log.c
1 | /* | 1 | /* |
2 | * This is a module which is used for logging packets to userspace via | 2 | * This is a module which is used for logging packets to userspace via |
3 | * nfetlink. | 3 | * nfetlink. |
4 | * | 4 | * |
5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
6 | * | 6 | * |
7 | * Based on the old ipv4-only ipt_ULOG.c: | 7 | * Based on the old ipv4-only ipt_ULOG.c: |
8 | * (C) 2000-2004 by Harald Welte <laforge@netfilter.org> | 8 | * (C) 2000-2004 by Harald Welte <laforge@netfilter.org> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/ipv6.h> | 18 | #include <linux/ipv6.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/netfilter.h> | 20 | #include <linux/netfilter.h> |
21 | #include <linux/netlink.h> | 21 | #include <linux/netlink.h> |
22 | #include <linux/netfilter/nfnetlink.h> | 22 | #include <linux/netfilter/nfnetlink.h> |
23 | #include <linux/netfilter/nfnetlink_log.h> | 23 | #include <linux/netfilter/nfnetlink_log.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
26 | #include <linux/proc_fs.h> | 26 | #include <linux/proc_fs.h> |
27 | #include <linux/security.h> | 27 | #include <linux/security.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/jhash.h> | 29 | #include <linux/jhash.h> |
30 | #include <linux/random.h> | 30 | #include <linux/random.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <net/sock.h> | 32 | #include <net/sock.h> |
33 | #include <net/netfilter/nf_log.h> | 33 | #include <net/netfilter/nf_log.h> |
34 | #include <net/netfilter/nfnetlink_log.h> | 34 | #include <net/netfilter/nfnetlink_log.h> |
35 | 35 | ||
36 | #include <asm/atomic.h> | 36 | #include <asm/atomic.h> |
37 | 37 | ||
38 | #ifdef CONFIG_BRIDGE_NETFILTER | 38 | #ifdef CONFIG_BRIDGE_NETFILTER |
39 | #include "../bridge/br_private.h" | 39 | #include "../bridge/br_private.h" |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE | 42 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
43 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ | 43 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
44 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ | 44 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
45 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ | 45 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
46 | 46 | ||
47 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ | 47 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ |
48 | printk(x, ## args); } while (0); | 48 | printk(x, ## args); } while (0); |
49 | 49 | ||
50 | struct nfulnl_instance { | 50 | struct nfulnl_instance { |
51 | struct hlist_node hlist; /* global list of instances */ | 51 | struct hlist_node hlist; /* global list of instances */ |
52 | spinlock_t lock; | 52 | spinlock_t lock; |
53 | atomic_t use; /* use count */ | 53 | atomic_t use; /* use count */ |
54 | 54 | ||
55 | unsigned int qlen; /* number of nlmsgs in skb */ | 55 | unsigned int qlen; /* number of nlmsgs in skb */ |
56 | struct sk_buff *skb; /* pre-allocatd skb */ | 56 | struct sk_buff *skb; /* pre-allocatd skb */ |
57 | struct timer_list timer; | 57 | struct timer_list timer; |
58 | int peer_pid; /* PID of the peer process */ | 58 | int peer_pid; /* PID of the peer process */ |
59 | 59 | ||
60 | /* configurable parameters */ | 60 | /* configurable parameters */ |
61 | unsigned int flushtimeout; /* timeout until queue flush */ | 61 | unsigned int flushtimeout; /* timeout until queue flush */ |
62 | unsigned int nlbufsiz; /* netlink buffer allocation size */ | 62 | unsigned int nlbufsiz; /* netlink buffer allocation size */ |
63 | unsigned int qthreshold; /* threshold of the queue */ | 63 | unsigned int qthreshold; /* threshold of the queue */ |
64 | u_int32_t copy_range; | 64 | u_int32_t copy_range; |
65 | u_int32_t seq; /* instance-local sequential counter */ | 65 | u_int32_t seq; /* instance-local sequential counter */ |
66 | u_int16_t group_num; /* number of this queue */ | 66 | u_int16_t group_num; /* number of this queue */ |
67 | u_int16_t flags; | 67 | u_int16_t flags; |
68 | u_int8_t copy_mode; | 68 | u_int8_t copy_mode; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static DEFINE_RWLOCK(instances_lock); | 71 | static DEFINE_RWLOCK(instances_lock); |
72 | static atomic_t global_seq; | 72 | static atomic_t global_seq; |
73 | 73 | ||
74 | #define INSTANCE_BUCKETS 16 | 74 | #define INSTANCE_BUCKETS 16 |
75 | static struct hlist_head instance_table[INSTANCE_BUCKETS]; | 75 | static struct hlist_head instance_table[INSTANCE_BUCKETS]; |
76 | static unsigned int hash_init; | 76 | static unsigned int hash_init; |
77 | 77 | ||
78 | static inline u_int8_t instance_hashfn(u_int16_t group_num) | 78 | static inline u_int8_t instance_hashfn(u_int16_t group_num) |
79 | { | 79 | { |
80 | return ((group_num & 0xff) % INSTANCE_BUCKETS); | 80 | return ((group_num & 0xff) % INSTANCE_BUCKETS); |
81 | } | 81 | } |
82 | 82 | ||
83 | static struct nfulnl_instance * | 83 | static struct nfulnl_instance * |
84 | __instance_lookup(u_int16_t group_num) | 84 | __instance_lookup(u_int16_t group_num) |
85 | { | 85 | { |
86 | struct hlist_head *head; | 86 | struct hlist_head *head; |
87 | struct hlist_node *pos; | 87 | struct hlist_node *pos; |
88 | struct nfulnl_instance *inst; | 88 | struct nfulnl_instance *inst; |
89 | 89 | ||
90 | head = &instance_table[instance_hashfn(group_num)]; | 90 | head = &instance_table[instance_hashfn(group_num)]; |
91 | hlist_for_each_entry(inst, pos, head, hlist) { | 91 | hlist_for_each_entry(inst, pos, head, hlist) { |
92 | if (inst->group_num == group_num) | 92 | if (inst->group_num == group_num) |
93 | return inst; | 93 | return inst; |
94 | } | 94 | } |
95 | return NULL; | 95 | return NULL; |
96 | } | 96 | } |
97 | 97 | ||
98 | static inline void | 98 | static inline void |
99 | instance_get(struct nfulnl_instance *inst) | 99 | instance_get(struct nfulnl_instance *inst) |
100 | { | 100 | { |
101 | atomic_inc(&inst->use); | 101 | atomic_inc(&inst->use); |
102 | } | 102 | } |
103 | 103 | ||
104 | static struct nfulnl_instance * | 104 | static struct nfulnl_instance * |
105 | instance_lookup_get(u_int16_t group_num) | 105 | instance_lookup_get(u_int16_t group_num) |
106 | { | 106 | { |
107 | struct nfulnl_instance *inst; | 107 | struct nfulnl_instance *inst; |
108 | 108 | ||
109 | read_lock_bh(&instances_lock); | 109 | read_lock_bh(&instances_lock); |
110 | inst = __instance_lookup(group_num); | 110 | inst = __instance_lookup(group_num); |
111 | if (inst) | 111 | if (inst) |
112 | instance_get(inst); | 112 | instance_get(inst); |
113 | read_unlock_bh(&instances_lock); | 113 | read_unlock_bh(&instances_lock); |
114 | 114 | ||
115 | return inst; | 115 | return inst; |
116 | } | 116 | } |
117 | 117 | ||
118 | static void | 118 | static void |
119 | instance_put(struct nfulnl_instance *inst) | 119 | instance_put(struct nfulnl_instance *inst) |
120 | { | 120 | { |
121 | if (inst && atomic_dec_and_test(&inst->use)) { | 121 | if (inst && atomic_dec_and_test(&inst->use)) { |
122 | kfree(inst); | 122 | kfree(inst); |
123 | module_put(THIS_MODULE); | 123 | module_put(THIS_MODULE); |
124 | } | 124 | } |
125 | } | 125 | } |
126 | 126 | ||
127 | static void nfulnl_timer(unsigned long data); | 127 | static void nfulnl_timer(unsigned long data); |
128 | 128 | ||
129 | static struct nfulnl_instance * | 129 | static struct nfulnl_instance * |
130 | instance_create(u_int16_t group_num, int pid) | 130 | instance_create(u_int16_t group_num, int pid) |
131 | { | 131 | { |
132 | struct nfulnl_instance *inst; | 132 | struct nfulnl_instance *inst; |
133 | int err; | 133 | int err; |
134 | 134 | ||
135 | write_lock_bh(&instances_lock); | 135 | write_lock_bh(&instances_lock); |
136 | if (__instance_lookup(group_num)) { | 136 | if (__instance_lookup(group_num)) { |
137 | err = -EEXIST; | 137 | err = -EEXIST; |
138 | goto out_unlock; | 138 | goto out_unlock; |
139 | } | 139 | } |
140 | 140 | ||
141 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); | 141 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); |
142 | if (!inst) { | 142 | if (!inst) { |
143 | err = -ENOMEM; | 143 | err = -ENOMEM; |
144 | goto out_unlock; | 144 | goto out_unlock; |
145 | } | 145 | } |
146 | 146 | ||
147 | if (!try_module_get(THIS_MODULE)) { | 147 | if (!try_module_get(THIS_MODULE)) { |
148 | kfree(inst); | 148 | kfree(inst); |
149 | err = -EAGAIN; | 149 | err = -EAGAIN; |
150 | goto out_unlock; | 150 | goto out_unlock; |
151 | } | 151 | } |
152 | 152 | ||
153 | INIT_HLIST_NODE(&inst->hlist); | 153 | INIT_HLIST_NODE(&inst->hlist); |
154 | spin_lock_init(&inst->lock); | 154 | spin_lock_init(&inst->lock); |
155 | /* needs to be two, since we _put() after creation */ | 155 | /* needs to be two, since we _put() after creation */ |
156 | atomic_set(&inst->use, 2); | 156 | atomic_set(&inst->use, 2); |
157 | 157 | ||
158 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); | 158 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); |
159 | 159 | ||
160 | inst->peer_pid = pid; | 160 | inst->peer_pid = pid; |
161 | inst->group_num = group_num; | 161 | inst->group_num = group_num; |
162 | 162 | ||
163 | inst->qthreshold = NFULNL_QTHRESH_DEFAULT; | 163 | inst->qthreshold = NFULNL_QTHRESH_DEFAULT; |
164 | inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT; | 164 | inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT; |
165 | inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT; | 165 | inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT; |
166 | inst->copy_mode = NFULNL_COPY_PACKET; | 166 | inst->copy_mode = NFULNL_COPY_PACKET; |
167 | inst->copy_range = NFULNL_COPY_RANGE_MAX; | 167 | inst->copy_range = NFULNL_COPY_RANGE_MAX; |
168 | 168 | ||
169 | hlist_add_head(&inst->hlist, | 169 | hlist_add_head(&inst->hlist, |
170 | &instance_table[instance_hashfn(group_num)]); | 170 | &instance_table[instance_hashfn(group_num)]); |
171 | 171 | ||
172 | write_unlock_bh(&instances_lock); | 172 | write_unlock_bh(&instances_lock); |
173 | 173 | ||
174 | return inst; | 174 | return inst; |
175 | 175 | ||
176 | out_unlock: | 176 | out_unlock: |
177 | write_unlock_bh(&instances_lock); | 177 | write_unlock_bh(&instances_lock); |
178 | return ERR_PTR(err); | 178 | return ERR_PTR(err); |
179 | } | 179 | } |
180 | 180 | ||
181 | static void __nfulnl_flush(struct nfulnl_instance *inst); | 181 | static void __nfulnl_flush(struct nfulnl_instance *inst); |
182 | 182 | ||
183 | static void | 183 | static void |
184 | __instance_destroy(struct nfulnl_instance *inst) | 184 | __instance_destroy(struct nfulnl_instance *inst) |
185 | { | 185 | { |
186 | /* first pull it out of the global list */ | 186 | /* first pull it out of the global list */ |
187 | hlist_del(&inst->hlist); | 187 | hlist_del(&inst->hlist); |
188 | 188 | ||
189 | /* then flush all pending packets from skb */ | 189 | /* then flush all pending packets from skb */ |
190 | 190 | ||
191 | spin_lock_bh(&inst->lock); | 191 | spin_lock_bh(&inst->lock); |
192 | if (inst->skb) | 192 | if (inst->skb) |
193 | __nfulnl_flush(inst); | 193 | __nfulnl_flush(inst); |
194 | spin_unlock_bh(&inst->lock); | 194 | spin_unlock_bh(&inst->lock); |
195 | 195 | ||
196 | /* and finally put the refcount */ | 196 | /* and finally put the refcount */ |
197 | instance_put(inst); | 197 | instance_put(inst); |
198 | } | 198 | } |
199 | 199 | ||
200 | static inline void | 200 | static inline void |
201 | instance_destroy(struct nfulnl_instance *inst) | 201 | instance_destroy(struct nfulnl_instance *inst) |
202 | { | 202 | { |
203 | write_lock_bh(&instances_lock); | 203 | write_lock_bh(&instances_lock); |
204 | __instance_destroy(inst); | 204 | __instance_destroy(inst); |
205 | write_unlock_bh(&instances_lock); | 205 | write_unlock_bh(&instances_lock); |
206 | } | 206 | } |
207 | 207 | ||
208 | static int | 208 | static int |
209 | nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, | 209 | nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, |
210 | unsigned int range) | 210 | unsigned int range) |
211 | { | 211 | { |
212 | int status = 0; | 212 | int status = 0; |
213 | 213 | ||
214 | spin_lock_bh(&inst->lock); | 214 | spin_lock_bh(&inst->lock); |
215 | 215 | ||
216 | switch (mode) { | 216 | switch (mode) { |
217 | case NFULNL_COPY_NONE: | 217 | case NFULNL_COPY_NONE: |
218 | case NFULNL_COPY_META: | 218 | case NFULNL_COPY_META: |
219 | inst->copy_mode = mode; | 219 | inst->copy_mode = mode; |
220 | inst->copy_range = 0; | 220 | inst->copy_range = 0; |
221 | break; | 221 | break; |
222 | 222 | ||
223 | case NFULNL_COPY_PACKET: | 223 | case NFULNL_COPY_PACKET: |
224 | inst->copy_mode = mode; | 224 | inst->copy_mode = mode; |
225 | inst->copy_range = min_t(unsigned int, | 225 | inst->copy_range = min_t(unsigned int, |
226 | range, NFULNL_COPY_RANGE_MAX); | 226 | range, NFULNL_COPY_RANGE_MAX); |
227 | break; | 227 | break; |
228 | 228 | ||
229 | default: | 229 | default: |
230 | status = -EINVAL; | 230 | status = -EINVAL; |
231 | break; | 231 | break; |
232 | } | 232 | } |
233 | 233 | ||
234 | spin_unlock_bh(&inst->lock); | 234 | spin_unlock_bh(&inst->lock); |
235 | 235 | ||
236 | return status; | 236 | return status; |
237 | } | 237 | } |
238 | 238 | ||
239 | static int | 239 | static int |
240 | nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz) | 240 | nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz) |
241 | { | 241 | { |
242 | int status; | 242 | int status; |
243 | 243 | ||
244 | spin_lock_bh(&inst->lock); | 244 | spin_lock_bh(&inst->lock); |
245 | if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT) | 245 | if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT) |
246 | status = -ERANGE; | 246 | status = -ERANGE; |
247 | else if (nlbufsiz > 131072) | 247 | else if (nlbufsiz > 131072) |
248 | status = -ERANGE; | 248 | status = -ERANGE; |
249 | else { | 249 | else { |
250 | inst->nlbufsiz = nlbufsiz; | 250 | inst->nlbufsiz = nlbufsiz; |
251 | status = 0; | 251 | status = 0; |
252 | } | 252 | } |
253 | spin_unlock_bh(&inst->lock); | 253 | spin_unlock_bh(&inst->lock); |
254 | 254 | ||
255 | return status; | 255 | return status; |
256 | } | 256 | } |
257 | 257 | ||
258 | static int | 258 | static int |
259 | nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout) | 259 | nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout) |
260 | { | 260 | { |
261 | spin_lock_bh(&inst->lock); | 261 | spin_lock_bh(&inst->lock); |
262 | inst->flushtimeout = timeout; | 262 | inst->flushtimeout = timeout; |
263 | spin_unlock_bh(&inst->lock); | 263 | spin_unlock_bh(&inst->lock); |
264 | 264 | ||
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static int | 268 | static int |
269 | nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh) | 269 | nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh) |
270 | { | 270 | { |
271 | spin_lock_bh(&inst->lock); | 271 | spin_lock_bh(&inst->lock); |
272 | inst->qthreshold = qthresh; | 272 | inst->qthreshold = qthresh; |
273 | spin_unlock_bh(&inst->lock); | 273 | spin_unlock_bh(&inst->lock); |
274 | 274 | ||
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
277 | 277 | ||
278 | static int | 278 | static int |
279 | nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) | 279 | nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) |
280 | { | 280 | { |
281 | spin_lock_bh(&inst->lock); | 281 | spin_lock_bh(&inst->lock); |
282 | inst->flags = flags; | 282 | inst->flags = flags; |
283 | spin_unlock_bh(&inst->lock); | 283 | spin_unlock_bh(&inst->lock); |
284 | 284 | ||
285 | return 0; | 285 | return 0; |
286 | } | 286 | } |
287 | 287 | ||
/* Allocate the skb used to batch log messages for one instance.
 *
 * @inst_size: the instance's configured netlink buffer size (nlbufsiz)
 * @pkt_size:  the space needed for the packet currently being logged
 *
 * First tries the larger of the two sizes so a whole multipart batch
 * fits; on failure falls back to an skb just big enough for the current
 * packet.  Returns NULL only if even the fallback allocation fails.
 * Runs in atomic context (called under inst->lock), hence GFP_ATOMIC. */
static struct sk_buff *
nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
{
	struct sk_buff *skb;
	unsigned int n;

	/* alloc skb which should be big enough for a whole multipart
	 * message.  WARNING: has to be <= 128k due to slab restrictions */

	n = max(inst_size, pkt_size);
	skb = alloc_skb(n, GFP_ATOMIC);
	if (!skb) {
		pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
			inst_size);

		if (n > pkt_size) {
			/* try to allocate only as much as we need for current
			 * packet */

			skb = alloc_skb(pkt_size, GFP_ATOMIC);
			if (!skb)
				pr_err("nfnetlink_log: can't even alloc %u "
				       "bytes\n", pkt_size);
		}
	}

	return skb;
}
316 | 316 | ||
/* Transmit the instance's queued batch skb to its userspace peer.
 *
 * When more than one message was batched, a trailing NLMSG_DONE header
 * is appended so userspace can recognise the end of the multipart
 * stream.  NLMSG_PUT is a macro that jumps to the nlmsg_failure label
 * if the skb has no room.  Must be called with inst->lock held.
 *
 * NOTE(review): on the nlmsg_failure path the qlen/skb reset is
 * skipped, so the skb stays queued for a later flush attempt —
 * presumably intentional, but worth confirming. */
static int
__nfulnl_send(struct nfulnl_instance *inst)
{
	int status = -1;

	if (inst->qlen > 1)
		NLMSG_PUT(inst->skb, 0, 0,
			  NLMSG_DONE,
			  sizeof(struct nfgenmsg));

	status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
				   MSG_DONTWAIT);

	/* ownership of the skb passed to nfnetlink_unicast(); detach it
	 * from the instance whatever the result */
	inst->qlen = 0;
	inst->skb = NULL;

nlmsg_failure:
	return status;
}
336 | 336 | ||
/* Cancel the pending flush timer (dropping the reference the timer
 * held on the instance) and push any queued skb to userspace now.
 * Must be called with inst->lock held. */
static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
	/* timer holds a reference */
	if (del_timer(&inst->timer))
		instance_put(inst);
	if (inst->skb)
		__nfulnl_send(inst);
}
346 | 346 | ||
/* Flush-timeout timer callback: send whatever is batched, then drop
 * the instance reference that was taken when the timer was armed
 * (see nfulnl_log_packet()).  @data is the instance pointer. */
static void
nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
358 | 358 | ||
359 | /* This is an inline function, we don't really care about a long | 359 | /* This is an inline function, we don't really care about a long |
360 | * list of arguments */ | 360 | * list of arguments */ |
361 | static inline int | 361 | static inline int |
362 | __build_packet_message(struct nfulnl_instance *inst, | 362 | __build_packet_message(struct nfulnl_instance *inst, |
363 | const struct sk_buff *skb, | 363 | const struct sk_buff *skb, |
364 | unsigned int data_len, | 364 | unsigned int data_len, |
365 | u_int8_t pf, | 365 | u_int8_t pf, |
366 | unsigned int hooknum, | 366 | unsigned int hooknum, |
367 | const struct net_device *indev, | 367 | const struct net_device *indev, |
368 | const struct net_device *outdev, | 368 | const struct net_device *outdev, |
369 | const struct nf_loginfo *li, | 369 | const struct nf_loginfo *li, |
370 | const char *prefix, unsigned int plen) | 370 | const char *prefix, unsigned int plen) |
371 | { | 371 | { |
372 | struct nfulnl_msg_packet_hdr pmsg; | 372 | struct nfulnl_msg_packet_hdr pmsg; |
373 | struct nlmsghdr *nlh; | 373 | struct nlmsghdr *nlh; |
374 | struct nfgenmsg *nfmsg; | 374 | struct nfgenmsg *nfmsg; |
375 | __be32 tmp_uint; | 375 | __be32 tmp_uint; |
376 | sk_buff_data_t old_tail = inst->skb->tail; | 376 | sk_buff_data_t old_tail = inst->skb->tail; |
377 | 377 | ||
378 | nlh = NLMSG_PUT(inst->skb, 0, 0, | 378 | nlh = NLMSG_PUT(inst->skb, 0, 0, |
379 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, | 379 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, |
380 | sizeof(struct nfgenmsg)); | 380 | sizeof(struct nfgenmsg)); |
381 | nfmsg = NLMSG_DATA(nlh); | 381 | nfmsg = NLMSG_DATA(nlh); |
382 | nfmsg->nfgen_family = pf; | 382 | nfmsg->nfgen_family = pf; |
383 | nfmsg->version = NFNETLINK_V0; | 383 | nfmsg->version = NFNETLINK_V0; |
384 | nfmsg->res_id = htons(inst->group_num); | 384 | nfmsg->res_id = htons(inst->group_num); |
385 | 385 | ||
386 | pmsg.hw_protocol = skb->protocol; | 386 | pmsg.hw_protocol = skb->protocol; |
387 | pmsg.hook = hooknum; | 387 | pmsg.hook = hooknum; |
388 | 388 | ||
389 | NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); | 389 | NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); |
390 | 390 | ||
391 | if (prefix) | 391 | if (prefix) |
392 | NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); | 392 | NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); |
393 | 393 | ||
394 | if (indev) { | 394 | if (indev) { |
395 | #ifndef CONFIG_BRIDGE_NETFILTER | 395 | #ifndef CONFIG_BRIDGE_NETFILTER |
396 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, | 396 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, |
397 | htonl(indev->ifindex)); | 397 | htonl(indev->ifindex)); |
398 | #else | 398 | #else |
399 | if (pf == PF_BRIDGE) { | 399 | if (pf == PF_BRIDGE) { |
400 | /* Case 1: outdev is physical input device, we need to | 400 | /* Case 1: outdev is physical input device, we need to |
401 | * look for bridge group (when called from | 401 | * look for bridge group (when called from |
402 | * netfilter_bridge) */ | 402 | * netfilter_bridge) */ |
403 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, | 403 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, |
404 | htonl(indev->ifindex)); | 404 | htonl(indev->ifindex)); |
405 | /* this is the bridge group "brX" */ | 405 | /* this is the bridge group "brX" */ |
406 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, | 406 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, |
407 | htonl(indev->br_port->br->dev->ifindex)); | 407 | htonl(indev->br_port->br->dev->ifindex)); |
408 | } else { | 408 | } else { |
409 | /* Case 2: indev is bridge group, we need to look for | 409 | /* Case 2: indev is bridge group, we need to look for |
410 | * physical device (when called from ipv4) */ | 410 | * physical device (when called from ipv4) */ |
411 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, | 411 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, |
412 | htonl(indev->ifindex)); | 412 | htonl(indev->ifindex)); |
413 | if (skb->nf_bridge && skb->nf_bridge->physindev) | 413 | if (skb->nf_bridge && skb->nf_bridge->physindev) |
414 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, | 414 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, |
415 | htonl(skb->nf_bridge->physindev->ifindex)); | 415 | htonl(skb->nf_bridge->physindev->ifindex)); |
416 | } | 416 | } |
417 | #endif | 417 | #endif |
418 | } | 418 | } |
419 | 419 | ||
420 | if (outdev) { | 420 | if (outdev) { |
421 | tmp_uint = htonl(outdev->ifindex); | 421 | tmp_uint = htonl(outdev->ifindex); |
422 | #ifndef CONFIG_BRIDGE_NETFILTER | 422 | #ifndef CONFIG_BRIDGE_NETFILTER |
423 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, | 423 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, |
424 | htonl(outdev->ifindex)); | 424 | htonl(outdev->ifindex)); |
425 | #else | 425 | #else |
426 | if (pf == PF_BRIDGE) { | 426 | if (pf == PF_BRIDGE) { |
427 | /* Case 1: outdev is physical output device, we need to | 427 | /* Case 1: outdev is physical output device, we need to |
428 | * look for bridge group (when called from | 428 | * look for bridge group (when called from |
429 | * netfilter_bridge) */ | 429 | * netfilter_bridge) */ |
430 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, | 430 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, |
431 | htonl(outdev->ifindex)); | 431 | htonl(outdev->ifindex)); |
432 | /* this is the bridge group "brX" */ | 432 | /* this is the bridge group "brX" */ |
433 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, | 433 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, |
434 | htonl(outdev->br_port->br->dev->ifindex)); | 434 | htonl(outdev->br_port->br->dev->ifindex)); |
435 | } else { | 435 | } else { |
436 | /* Case 2: indev is a bridge group, we need to look | 436 | /* Case 2: indev is a bridge group, we need to look |
437 | * for physical device (when called from ipv4) */ | 437 | * for physical device (when called from ipv4) */ |
438 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, | 438 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, |
439 | htonl(outdev->ifindex)); | 439 | htonl(outdev->ifindex)); |
440 | if (skb->nf_bridge && skb->nf_bridge->physoutdev) | 440 | if (skb->nf_bridge && skb->nf_bridge->physoutdev) |
441 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, | 441 | NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, |
442 | htonl(skb->nf_bridge->physoutdev->ifindex)); | 442 | htonl(skb->nf_bridge->physoutdev->ifindex)); |
443 | } | 443 | } |
444 | #endif | 444 | #endif |
445 | } | 445 | } |
446 | 446 | ||
447 | if (skb->mark) | 447 | if (skb->mark) |
448 | NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); | 448 | NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); |
449 | 449 | ||
450 | if (indev && skb->dev) { | 450 | if (indev && skb->dev) { |
451 | struct nfulnl_msg_packet_hw phw; | 451 | struct nfulnl_msg_packet_hw phw; |
452 | int len = dev_parse_header(skb, phw.hw_addr); | 452 | int len = dev_parse_header(skb, phw.hw_addr); |
453 | if (len > 0) { | 453 | if (len > 0) { |
454 | phw.hw_addrlen = htons(len); | 454 | phw.hw_addrlen = htons(len); |
455 | NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); | 455 | NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); |
456 | } | 456 | } |
457 | } | 457 | } |
458 | 458 | ||
459 | if (indev && skb_mac_header_was_set(skb)) { | 459 | if (indev && skb_mac_header_was_set(skb)) { |
460 | NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); | 460 | NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); |
461 | NLA_PUT_BE16(inst->skb, NFULA_HWLEN, | 461 | NLA_PUT_BE16(inst->skb, NFULA_HWLEN, |
462 | htons(skb->dev->hard_header_len)); | 462 | htons(skb->dev->hard_header_len)); |
463 | NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, | 463 | NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, |
464 | skb_mac_header(skb)); | 464 | skb_mac_header(skb)); |
465 | } | 465 | } |
466 | 466 | ||
467 | if (skb->tstamp.tv64) { | 467 | if (skb->tstamp.tv64) { |
468 | struct nfulnl_msg_packet_timestamp ts; | 468 | struct nfulnl_msg_packet_timestamp ts; |
469 | struct timeval tv = ktime_to_timeval(skb->tstamp); | 469 | struct timeval tv = ktime_to_timeval(skb->tstamp); |
470 | ts.sec = cpu_to_be64(tv.tv_sec); | 470 | ts.sec = cpu_to_be64(tv.tv_sec); |
471 | ts.usec = cpu_to_be64(tv.tv_usec); | 471 | ts.usec = cpu_to_be64(tv.tv_usec); |
472 | 472 | ||
473 | NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); | 473 | NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); |
474 | } | 474 | } |
475 | 475 | ||
476 | /* UID */ | 476 | /* UID */ |
477 | if (skb->sk) { | 477 | if (skb->sk) { |
478 | read_lock_bh(&skb->sk->sk_callback_lock); | 478 | read_lock_bh(&skb->sk->sk_callback_lock); |
479 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) { | 479 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) { |
480 | struct file *file = skb->sk->sk_socket->file; | 480 | struct file *file = skb->sk->sk_socket->file; |
481 | __be32 uid = htonl(file->f_cred->fsuid); | 481 | __be32 uid = htonl(file->f_cred->fsuid); |
482 | __be32 gid = htonl(file->f_cred->fsgid); | 482 | __be32 gid = htonl(file->f_cred->fsgid); |
483 | /* need to unlock here since NLA_PUT may goto */ | 483 | /* need to unlock here since NLA_PUT may goto */ |
484 | read_unlock_bh(&skb->sk->sk_callback_lock); | 484 | read_unlock_bh(&skb->sk->sk_callback_lock); |
485 | NLA_PUT_BE32(inst->skb, NFULA_UID, uid); | 485 | NLA_PUT_BE32(inst->skb, NFULA_UID, uid); |
486 | NLA_PUT_BE32(inst->skb, NFULA_GID, gid); | 486 | NLA_PUT_BE32(inst->skb, NFULA_GID, gid); |
487 | } else | 487 | } else |
488 | read_unlock_bh(&skb->sk->sk_callback_lock); | 488 | read_unlock_bh(&skb->sk->sk_callback_lock); |
489 | } | 489 | } |
490 | 490 | ||
491 | /* local sequence number */ | 491 | /* local sequence number */ |
492 | if (inst->flags & NFULNL_CFG_F_SEQ) | 492 | if (inst->flags & NFULNL_CFG_F_SEQ) |
493 | NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); | 493 | NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); |
494 | 494 | ||
495 | /* global sequence number */ | 495 | /* global sequence number */ |
496 | if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) | 496 | if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) |
497 | NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, | 497 | NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, |
498 | htonl(atomic_inc_return(&global_seq))); | 498 | htonl(atomic_inc_return(&global_seq))); |
499 | 499 | ||
500 | if (data_len) { | 500 | if (data_len) { |
501 | struct nlattr *nla; | 501 | struct nlattr *nla; |
502 | int size = nla_attr_size(data_len); | 502 | int size = nla_attr_size(data_len); |
503 | 503 | ||
504 | if (skb_tailroom(inst->skb) < nla_total_size(data_len)) { | 504 | if (skb_tailroom(inst->skb) < nla_total_size(data_len)) { |
505 | printk(KERN_WARNING "nfnetlink_log: no tailroom!\n"); | 505 | printk(KERN_WARNING "nfnetlink_log: no tailroom!\n"); |
506 | goto nlmsg_failure; | 506 | goto nlmsg_failure; |
507 | } | 507 | } |
508 | 508 | ||
509 | nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len)); | 509 | nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len)); |
510 | nla->nla_type = NFULA_PAYLOAD; | 510 | nla->nla_type = NFULA_PAYLOAD; |
511 | nla->nla_len = size; | 511 | nla->nla_len = size; |
512 | 512 | ||
513 | if (skb_copy_bits(skb, 0, nla_data(nla), data_len)) | 513 | if (skb_copy_bits(skb, 0, nla_data(nla), data_len)) |
514 | BUG(); | 514 | BUG(); |
515 | } | 515 | } |
516 | 516 | ||
517 | nlh->nlmsg_len = inst->skb->tail - old_tail; | 517 | nlh->nlmsg_len = inst->skb->tail - old_tail; |
518 | return 0; | 518 | return 0; |
519 | 519 | ||
520 | nlmsg_failure: | 520 | nlmsg_failure: |
521 | nla_put_failure: | 521 | nla_put_failure: |
522 | PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n"); | 522 | PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n"); |
523 | return -1; | 523 | return -1; |
524 | } | 524 | } |
525 | 525 | ||
/* ack a bad request with @err and bail out of the netlink rcv handler */
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

/* loginfo used when the caller passes none (or one that is not of
 * NF_LOG_TYPE_ULOG): copy up to 0xffff bytes of each packet to group 0
 * and flush after every message (qthreshold 1) */
static struct nf_loginfo default_loginfo = {
	.type =		NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.copy_len	= 0xffff,
			.group		= 0,
			.qthreshold	= 1,
		},
	},
};
538 | 538 | ||
/* log handler for internal netfilter logging api */
/* Queue one packet for delivery to the userspace log group selected by
 * @li_user (falling back to default_loginfo when none / wrong type).
 *
 * Computes a worst-case upper bound for the netlink message size, then
 * under inst->lock: flushes the current batch skb if the new message
 * would not fit, (re)allocates the batch skb as needed, builds the
 * message, and either flushes immediately (queue threshold reached) or
 * arms the flush timer.  Silently returns when no instance is bound to
 * the group or when the batch skb cannot be allocated. */
void
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	/* takes a reference on the instance; dropped at unlock_and_release */
	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;	/* include the NUL terminator */

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present? way more branches and checks, but more
	 * memory efficient... */
	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	if (in && skb_mac_header_was_set(skb)) {
		size += nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;


	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		goto unlock_and_release;
	}

	if (inst->skb &&
	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		/* the armed timer keeps its own reference on the instance */
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
EXPORT_SYMBOL_GPL(nfulnl_log_packet);
664 | 664 | ||
/* Netlink notifier callback: when a NETLINK_NETFILTER socket in the
 * init namespace is released, tear down every log instance bound to
 * that socket's pid so no instance outlives its userspace peer. */
static int
nfulnl_rcv_nl_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			/* _safe variant: __instance_destroy unlinks inst */
			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((net_eq(n->net, &init_net)) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}
691 | 691 | ||
/* registered on the netlink notifier chain to catch socket releases */
static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,
};
695 | 695 | ||
/* NFULNL_MSG_PACKET is kernel-to-userspace only; reject any attempt by
 * userspace to send one to us. */
static int
nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}
703 | 703 | ||
/* nf_log backend: routes packets from the internal netfilter logging
 * API (nf_log_packet) into nfulnl_log_packet() */
static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.logfn	= &nfulnl_log_packet,
	.me	= THIS_MODULE,
};
709 | 709 | ||
/* attribute validation policy for NFULNL_MSG_CONFIG requests; enforced
 * by nfnetlink before nfulnl_recv_config() runs */
static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
};
718 | 718 | ||
/* Handle an NFULNL_MSG_CONFIG request from userspace.
 *
 * PF_(UN)BIND commands (no group context) are dispatched first and
 * return directly.  Otherwise the group instance is looked up (taking a
 * reference, dropped at out_put), ownership is checked against the
 * sender's pid, the BIND/UNBIND command (if any) is applied, and then
 * each optional per-instance parameter attribute (mode, timeout,
 * buffer size, queue threshold, flags) is applied in turn.  Returns 0
 * or a negative errno. */
static int
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfula[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
	struct nfulnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(pf);
			return 0;
		}
	}

	inst = instance_lookup_get(group_num);
	/* only the socket that bound the group may reconfigure it */
	if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto out_put;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
				ret = -EBUSY;
				goto out_put;
			}

			/* instance_create returns it with a reference held,
			 * balanced by the instance_put at out_put */
			inst = instance_create(group_num,
					       NETLINK_CB(skb).pid);
			if (IS_ERR(inst)) {
				ret = PTR_ERR(inst);
				goto out;
			}
			break;
		case NFULNL_CFG_CMD_UNBIND:
			if (!inst) {
				ret = -ENODEV;
				goto out;
			}

			instance_destroy(inst);
			goto out_put;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params;
		params = nla_data(nfula[NFULA_CFG_MODE]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_timeout(inst, ntohl(timeout));
	}

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
	}

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_qthresh(inst, ntohl(qthresh));
	}

	if (nfula[NFULA_CFG_FLAGS]) {
		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_flags(inst, ntohs(flags));
	}

out_put:
	instance_put(inst);
out:
	return ret;
}
836 | 836 | ||
/* Dispatch table for NFNL_SUBSYS_ULOG messages.  Packets only flow
 * kernel->userspace, so an incoming NFULNL_MSG_PACKET is rejected by
 * nfulnl_recv_unsupp(); only NFULNL_MSG_CONFIG is accepted, validated
 * against nfula_cfg_policy. */
static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
	[NFULNL_MSG_PACKET]	= { .call = nfulnl_recv_unsupp,
				    .attr_count = NFULA_MAX, },
	[NFULNL_MSG_CONFIG]	= { .call = nfulnl_recv_config,
				    .attr_count = NFULA_CFG_MAX,
				    .policy = nfula_cfg_policy },
};
844 | 844 | ||
/* nfnetlink subsystem descriptor: registers the "log" subsystem
 * (NFNL_SUBSYS_ULOG) and routes its messages through nfulnl_cb. */
static const struct nfnetlink_subsystem nfulnl_subsys = {
	.name		= "log",
	.subsys_id	= NFNL_SUBSYS_ULOG,
	.cb_count	= NFULNL_MSG_MAX,
	.cb		= nfulnl_cb,
};
851 | 851 | ||
852 | #ifdef CONFIG_PROC_FS | 852 | #ifdef CONFIG_PROC_FS |
/* Per-open seq_file iterator state for walking instance_table. */
struct iter_state {
	unsigned int bucket;	/* current hash-bucket index */
};
856 | 856 | ||
857 | static struct hlist_node *get_first(struct iter_state *st) | 857 | static struct hlist_node *get_first(struct iter_state *st) |
858 | { | 858 | { |
859 | if (!st) | 859 | if (!st) |
860 | return NULL; | 860 | return NULL; |
861 | 861 | ||
862 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { | 862 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { |
863 | if (!hlist_empty(&instance_table[st->bucket])) | 863 | if (!hlist_empty(&instance_table[st->bucket])) |
864 | return instance_table[st->bucket].first; | 864 | return instance_table[st->bucket].first; |
865 | } | 865 | } |
866 | return NULL; | 866 | return NULL; |
867 | } | 867 | } |
868 | 868 | ||
869 | static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h) | 869 | static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h) |
870 | { | 870 | { |
871 | h = h->next; | 871 | h = h->next; |
872 | while (!h) { | 872 | while (!h) { |
873 | if (++st->bucket >= INSTANCE_BUCKETS) | 873 | if (++st->bucket >= INSTANCE_BUCKETS) |
874 | return NULL; | 874 | return NULL; |
875 | 875 | ||
876 | h = instance_table[st->bucket].first; | 876 | h = instance_table[st->bucket].first; |
877 | } | 877 | } |
878 | return h; | 878 | return h; |
879 | } | 879 | } |
880 | 880 | ||
881 | static struct hlist_node *get_idx(struct iter_state *st, loff_t pos) | 881 | static struct hlist_node *get_idx(struct iter_state *st, loff_t pos) |
882 | { | 882 | { |
883 | struct hlist_node *head; | 883 | struct hlist_node *head; |
884 | head = get_first(st); | 884 | head = get_first(st); |
885 | 885 | ||
886 | if (head) | 886 | if (head) |
887 | while (pos && (head = get_next(st, head))) | 887 | while (pos && (head = get_next(st, head))) |
888 | pos--; | 888 | pos--; |
889 | return pos ? NULL : head; | 889 | return pos ? NULL : head; |
890 | } | 890 | } |
891 | 891 | ||
/* seq_file .start: take the instance-table read lock (held until
 * seq_stop()) and position the iterator at *pos. */
static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(instances_lock)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq->private, *pos);
}
898 | 898 | ||
899 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) | 899 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) |
900 | { | 900 | { |
901 | (*pos)++; | 901 | (*pos)++; |
902 | return get_next(s->private, v); | 902 | return get_next(s->private, v); |
903 | } | 903 | } |
904 | 904 | ||
/* seq_file .stop: release the read lock taken in seq_start(). */
static void seq_stop(struct seq_file *s, void *v)
	__releases(instances_lock)
{
	read_unlock_bh(&instances_lock);
}
910 | 910 | ||
/* seq_file .show: emit one line per logging instance:
 * group_num peer_pid qlen copy_mode copy_range flushtimeout use-count.
 * NOTE(review): propagates seq_printf()'s return value — presumably
 * int (non-zero on overflow) in this kernel version; confirm if
 * forward-porting, as seq_printf() later became void. */
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfulnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
			  inst->group_num,
			  inst->peer_pid, inst->qlen,
			  inst->copy_mode, inst->copy_range,
			  inst->flushtimeout, atomic_read(&inst->use));
}
921 | 921 | ||
/* seq_file iterator operations for /proc/net/netfilter/nfnetlink_log */
static const struct seq_operations nful_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
928 | 928 | ||
/* open() handler: allocate a zeroed iter_state as the seq_file's
 * private data and bind the iterator ops. */
static int nful_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nful_seq_ops,
				sizeof(struct iter_state));
}
934 | 934 | ||
/* file operations for the read-only /proc entry; seq_release_private
 * frees the iter_state allocated in nful_open(). */
static const struct file_operations nful_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nful_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
942 | 942 | ||
943 | #endif /* PROC_FS */ | 943 | #endif /* PROC_FS */ |
944 | 944 | ||
945 | static int __init nfnetlink_log_init(void) | 945 | static int __init nfnetlink_log_init(void) |
946 | { | 946 | { |
947 | int i, status = -ENOMEM; | 947 | int i, status = -ENOMEM; |
948 | 948 | ||
949 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 949 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
950 | INIT_HLIST_HEAD(&instance_table[i]); | 950 | INIT_HLIST_HEAD(&instance_table[i]); |
951 | 951 | ||
952 | /* it's not really all that important to have a random value, so | 952 | /* it's not really all that important to have a random value, so |
953 | * we can do this from the init function, even if there hasn't | 953 | * we can do this from the init function, even if there hasn't |
954 | * been that much entropy yet */ | 954 | * been that much entropy yet */ |
955 | get_random_bytes(&hash_init, sizeof(hash_init)); | 955 | get_random_bytes(&hash_init, sizeof(hash_init)); |
956 | 956 | ||
957 | netlink_register_notifier(&nfulnl_rtnl_notifier); | 957 | netlink_register_notifier(&nfulnl_rtnl_notifier); |
958 | status = nfnetlink_subsys_register(&nfulnl_subsys); | 958 | status = nfnetlink_subsys_register(&nfulnl_subsys); |
959 | if (status < 0) { | 959 | if (status < 0) { |
960 | printk(KERN_ERR "log: failed to create netlink socket\n"); | 960 | printk(KERN_ERR "log: failed to create netlink socket\n"); |
961 | goto cleanup_netlink_notifier; | 961 | goto cleanup_netlink_notifier; |
962 | } | 962 | } |
963 | 963 | ||
964 | status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger); | 964 | status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger); |
965 | if (status < 0) { | 965 | if (status < 0) { |
966 | printk(KERN_ERR "log: failed to register logger\n"); | 966 | printk(KERN_ERR "log: failed to register logger\n"); |
967 | goto cleanup_subsys; | 967 | goto cleanup_subsys; |
968 | } | 968 | } |
969 | 969 | ||
970 | #ifdef CONFIG_PROC_FS | 970 | #ifdef CONFIG_PROC_FS |
971 | if (!proc_create("nfnetlink_log", 0440, | 971 | if (!proc_create("nfnetlink_log", 0440, |
972 | proc_net_netfilter, &nful_file_ops)) | 972 | proc_net_netfilter, &nful_file_ops)) |
973 | goto cleanup_logger; | 973 | goto cleanup_logger; |
974 | #endif | 974 | #endif |
975 | return status; | 975 | return status; |
976 | 976 | ||
977 | #ifdef CONFIG_PROC_FS | 977 | #ifdef CONFIG_PROC_FS |
978 | cleanup_logger: | 978 | cleanup_logger: |
979 | nf_log_unregister(&nfulnl_logger); | 979 | nf_log_unregister(&nfulnl_logger); |
980 | #endif | 980 | #endif |
981 | cleanup_subsys: | 981 | cleanup_subsys: |
982 | nfnetlink_subsys_unregister(&nfulnl_subsys); | 982 | nfnetlink_subsys_unregister(&nfulnl_subsys); |
983 | cleanup_netlink_notifier: | 983 | cleanup_netlink_notifier: |
984 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); | 984 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); |
985 | return status; | 985 | return status; |
986 | } | 986 | } |
987 | 987 | ||
/* Module exit: tear everything down in reverse order of
 * nfnetlink_log_init(). */
static void __exit nfnetlink_log_fini(void)
{
	nf_log_unregister(&nfulnl_logger);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_log", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
}
997 | 997 | ||
MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
/* auto-load this module when userspace binds NFNL_SUBSYS_ULOG */
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);
1005 | 1005 |