Commit 914afea84e3e20cdbcd040f8387a0e6ef20ffc97
Committed by David S. Miller
1 parent b7047a1c88
Exists in master and in 7 other branches
[NETFILTER]: nfnetlink_queue: fix EPERM when binding/unbinding and instance 0 exists
Similar to the nfnetlink_log problem, nfnetlink_queue incorrectly returns -EPERM when binding to or unbinding from an address family while queueing instance 0 exists and is owned by a different process. Unlike nfnetlink_log, it previously completed the operation despite the error, but the spurious -EPERM is still incorrect.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
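For context, here is a minimal userspace sketch of the affected operation, using the standard libnetfilter_queue API; this is an illustration, not part of the commit. It performs the canonical open/unbind/bind startup sequence that nfqueue programs use. The reproduction setup is assumed, not shown: some other process must already hold queue instance 0. Before this fix, the PF-level calls below reported EPERM in that situation; after it, they succeed regardless of who owns queue 0.

    /* Sketch: rebind the AF_INET queue handler from a process that does NOT
     * own queue instance 0. Assumes libnetfilter_queue is installed and that
     * another process has already created queue 0.
     * Build with: gcc repro.c -lnetfilter_queue
     */
    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <libnetfilter_queue/libnetfilter_queue.h>

    int main(void)
    {
        struct nfq_handle *h = nfq_open();
        if (!h) {
            perror("nfq_open");
            return 1;
        }
        /* These PF-level commands reach nfqnl_recv_config() as
         * NFQNL_CFG_CMD_PF_UNBIND/PF_BIND; they must not be subject to the
         * per-queue-instance ownership check. */
        if (nfq_unbind_pf(h, AF_INET) < 0)
            fprintf(stderr, "nfq_unbind_pf: %s\n", strerror(errno));
        if (nfq_bind_pf(h, AF_INET) < 0)
            fprintf(stderr, "nfq_bind_pf: %s\n", strerror(errno));
        nfq_close(h);
        return 0;
    }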
Showing 1 changed file with 4 additions and 11 deletions (inline diff)
net/netfilter/nfnetlink_queue.c
1 | /* | 1 | /* |
2 | * This is a module which is used for queueing packets and communicating with | 2 | * This is a module which is used for queueing packets and communicating with |
3 | * userspace via nfnetlink. | 3 | * userspace via nfnetlink. |
4 | * | 4 | * |
5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> | 6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> |
7 | * | 7 | * |
8 | * Based on the old ipv4-only ip_queue.c: | 8 | * Based on the old ipv4-only ip_queue.c: |
9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> | 9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> |
10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> | 10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/notifier.h> | 21 | #include <linux/notifier.h> |
22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
23 | #include <linux/netfilter.h> | 23 | #include <linux/netfilter.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/netfilter_ipv4.h> | 25 | #include <linux/netfilter_ipv4.h> |
26 | #include <linux/netfilter_ipv6.h> | 26 | #include <linux/netfilter_ipv6.h> |
27 | #include <linux/netfilter/nfnetlink.h> | 27 | #include <linux/netfilter/nfnetlink.h> |
28 | #include <linux/netfilter/nfnetlink_queue.h> | 28 | #include <linux/netfilter/nfnetlink_queue.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <net/sock.h> | 30 | #include <net/sock.h> |
31 | #include <net/netfilter/nf_queue.h> | 31 | #include <net/netfilter/nf_queue.h> |
32 | 32 | ||
33 | #include <asm/atomic.h> | 33 | #include <asm/atomic.h> |
34 | 34 | ||
35 | #ifdef CONFIG_BRIDGE_NETFILTER | 35 | #ifdef CONFIG_BRIDGE_NETFILTER |
36 | #include "../bridge/br_private.h" | 36 | #include "../bridge/br_private.h" |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #define NFQNL_QMAX_DEFAULT 1024 | 39 | #define NFQNL_QMAX_DEFAULT 1024 |
40 | 40 | ||
41 | struct nfqnl_instance { | 41 | struct nfqnl_instance { |
42 | struct hlist_node hlist; /* global list of queues */ | 42 | struct hlist_node hlist; /* global list of queues */ |
43 | struct rcu_head rcu; | 43 | struct rcu_head rcu; |
44 | 44 | ||
45 | int peer_pid; | 45 | int peer_pid; |
46 | unsigned int queue_maxlen; | 46 | unsigned int queue_maxlen; |
47 | unsigned int copy_range; | 47 | unsigned int copy_range; |
48 | unsigned int queue_total; | 48 | unsigned int queue_total; |
49 | unsigned int queue_dropped; | 49 | unsigned int queue_dropped; |
50 | unsigned int queue_user_dropped; | 50 | unsigned int queue_user_dropped; |
51 | 51 | ||
52 | unsigned int id_sequence; /* 'sequence' of pkt ids */ | 52 | unsigned int id_sequence; /* 'sequence' of pkt ids */ |
53 | 53 | ||
54 | u_int16_t queue_num; /* number of this queue */ | 54 | u_int16_t queue_num; /* number of this queue */ |
55 | u_int8_t copy_mode; | 55 | u_int8_t copy_mode; |
56 | 56 | ||
57 | spinlock_t lock; | 57 | spinlock_t lock; |
58 | 58 | ||
59 | struct list_head queue_list; /* packets in queue */ | 59 | struct list_head queue_list; /* packets in queue */ |
60 | }; | 60 | }; |
61 | 61 | ||
62 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); | 62 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); |
63 | 63 | ||
64 | static DEFINE_SPINLOCK(instances_lock); | 64 | static DEFINE_SPINLOCK(instances_lock); |
65 | 65 | ||
66 | #define INSTANCE_BUCKETS 16 | 66 | #define INSTANCE_BUCKETS 16 |
67 | static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; | 67 | static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; |
68 | 68 | ||
69 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) | 69 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) |
70 | { | 70 | { |
71 | return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; | 71 | return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; |
72 | } | 72 | } |
73 | 73 | ||
74 | static struct nfqnl_instance * | 74 | static struct nfqnl_instance * |
75 | instance_lookup(u_int16_t queue_num) | 75 | instance_lookup(u_int16_t queue_num) |
76 | { | 76 | { |
77 | struct hlist_head *head; | 77 | struct hlist_head *head; |
78 | struct hlist_node *pos; | 78 | struct hlist_node *pos; |
79 | struct nfqnl_instance *inst; | 79 | struct nfqnl_instance *inst; |
80 | 80 | ||
81 | head = &instance_table[instance_hashfn(queue_num)]; | 81 | head = &instance_table[instance_hashfn(queue_num)]; |
82 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { | 82 | hlist_for_each_entry_rcu(inst, pos, head, hlist) { |
83 | if (inst->queue_num == queue_num) | 83 | if (inst->queue_num == queue_num) |
84 | return inst; | 84 | return inst; |
85 | } | 85 | } |
86 | return NULL; | 86 | return NULL; |
87 | } | 87 | } |
88 | 88 | ||
89 | static struct nfqnl_instance * | 89 | static struct nfqnl_instance * |
90 | instance_create(u_int16_t queue_num, int pid) | 90 | instance_create(u_int16_t queue_num, int pid) |
91 | { | 91 | { |
92 | struct nfqnl_instance *inst; | 92 | struct nfqnl_instance *inst; |
93 | unsigned int h; | 93 | unsigned int h; |
94 | int err; | 94 | int err; |
95 | 95 | ||
96 | spin_lock(&instances_lock); | 96 | spin_lock(&instances_lock); |
97 | if (instance_lookup(queue_num)) { | 97 | if (instance_lookup(queue_num)) { |
98 | err = -EEXIST; | 98 | err = -EEXIST; |
99 | goto out_unlock; | 99 | goto out_unlock; |
100 | } | 100 | } |
101 | 101 | ||
102 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); | 102 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); |
103 | if (!inst) { | 103 | if (!inst) { |
104 | err = -ENOMEM; | 104 | err = -ENOMEM; |
105 | goto out_unlock; | 105 | goto out_unlock; |
106 | } | 106 | } |
107 | 107 | ||
108 | inst->queue_num = queue_num; | 108 | inst->queue_num = queue_num; |
109 | inst->peer_pid = pid; | 109 | inst->peer_pid = pid; |
110 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 110 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
111 | inst->copy_range = 0xfffff; | 111 | inst->copy_range = 0xfffff; |
112 | inst->copy_mode = NFQNL_COPY_NONE; | 112 | inst->copy_mode = NFQNL_COPY_NONE; |
113 | spin_lock_init(&inst->lock); | 113 | spin_lock_init(&inst->lock); |
114 | INIT_LIST_HEAD(&inst->queue_list); | 114 | INIT_LIST_HEAD(&inst->queue_list); |
115 | INIT_RCU_HEAD(&inst->rcu); | 115 | INIT_RCU_HEAD(&inst->rcu); |
116 | 116 | ||
117 | if (!try_module_get(THIS_MODULE)) { | 117 | if (!try_module_get(THIS_MODULE)) { |
118 | err = -EAGAIN; | 118 | err = -EAGAIN; |
119 | goto out_free; | 119 | goto out_free; |
120 | } | 120 | } |
121 | 121 | ||
122 | h = instance_hashfn(queue_num); | 122 | h = instance_hashfn(queue_num); |
123 | hlist_add_head_rcu(&inst->hlist, &instance_table[h]); | 123 | hlist_add_head_rcu(&inst->hlist, &instance_table[h]); |
124 | 124 | ||
125 | spin_unlock(&instances_lock); | 125 | spin_unlock(&instances_lock); |
126 | 126 | ||
127 | return inst; | 127 | return inst; |
128 | 128 | ||
129 | out_free: | 129 | out_free: |
130 | kfree(inst); | 130 | kfree(inst); |
131 | out_unlock: | 131 | out_unlock: |
132 | spin_unlock(&instances_lock); | 132 | spin_unlock(&instances_lock); |
133 | return ERR_PTR(err); | 133 | return ERR_PTR(err); |
134 | } | 134 | } |
135 | 135 | ||
136 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, | 136 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, |
137 | unsigned long data); | 137 | unsigned long data); |
138 | 138 | ||
139 | static void | 139 | static void |
140 | instance_destroy_rcu(struct rcu_head *head) | 140 | instance_destroy_rcu(struct rcu_head *head) |
141 | { | 141 | { |
142 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, | 142 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, |
143 | rcu); | 143 | rcu); |
144 | 144 | ||
145 | nfqnl_flush(inst, NULL, 0); | 145 | nfqnl_flush(inst, NULL, 0); |
146 | kfree(inst); | 146 | kfree(inst); |
147 | module_put(THIS_MODULE); | 147 | module_put(THIS_MODULE); |
148 | } | 148 | } |
149 | 149 | ||
150 | static void | 150 | static void |
151 | __instance_destroy(struct nfqnl_instance *inst) | 151 | __instance_destroy(struct nfqnl_instance *inst) |
152 | { | 152 | { |
153 | hlist_del_rcu(&inst->hlist); | 153 | hlist_del_rcu(&inst->hlist); |
154 | call_rcu(&inst->rcu, instance_destroy_rcu); | 154 | call_rcu(&inst->rcu, instance_destroy_rcu); |
155 | } | 155 | } |
156 | 156 | ||
157 | static void | 157 | static void |
158 | instance_destroy(struct nfqnl_instance *inst) | 158 | instance_destroy(struct nfqnl_instance *inst) |
159 | { | 159 | { |
160 | spin_lock(&instances_lock); | 160 | spin_lock(&instances_lock); |
161 | __instance_destroy(inst); | 161 | __instance_destroy(inst); |
162 | spin_unlock(&instances_lock); | 162 | spin_unlock(&instances_lock); |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline void | 165 | static inline void |
166 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | 166 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) |
167 | { | 167 | { |
168 | list_add_tail(&entry->list, &queue->queue_list); | 168 | list_add_tail(&entry->list, &queue->queue_list); |
169 | queue->queue_total++; | 169 | queue->queue_total++; |
170 | } | 170 | } |
171 | 171 | ||
172 | static struct nf_queue_entry * | 172 | static struct nf_queue_entry * |
173 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) | 173 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) |
174 | { | 174 | { |
175 | struct nf_queue_entry *entry = NULL, *i; | 175 | struct nf_queue_entry *entry = NULL, *i; |
176 | 176 | ||
177 | spin_lock_bh(&queue->lock); | 177 | spin_lock_bh(&queue->lock); |
178 | 178 | ||
179 | list_for_each_entry(i, &queue->queue_list, list) { | 179 | list_for_each_entry(i, &queue->queue_list, list) { |
180 | if (i->id == id) { | 180 | if (i->id == id) { |
181 | entry = i; | 181 | entry = i; |
182 | break; | 182 | break; |
183 | } | 183 | } |
184 | } | 184 | } |
185 | 185 | ||
186 | if (entry) { | 186 | if (entry) { |
187 | list_del(&entry->list); | 187 | list_del(&entry->list); |
188 | queue->queue_total--; | 188 | queue->queue_total--; |
189 | } | 189 | } |
190 | 190 | ||
191 | spin_unlock_bh(&queue->lock); | 191 | spin_unlock_bh(&queue->lock); |
192 | 192 | ||
193 | return entry; | 193 | return entry; |
194 | } | 194 | } |
195 | 195 | ||
196 | static void | 196 | static void |
197 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) | 197 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) |
198 | { | 198 | { |
199 | struct nf_queue_entry *entry, *next; | 199 | struct nf_queue_entry *entry, *next; |
200 | 200 | ||
201 | spin_lock_bh(&queue->lock); | 201 | spin_lock_bh(&queue->lock); |
202 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { | 202 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { |
203 | if (!cmpfn || cmpfn(entry, data)) { | 203 | if (!cmpfn || cmpfn(entry, data)) { |
204 | list_del(&entry->list); | 204 | list_del(&entry->list); |
205 | queue->queue_total--; | 205 | queue->queue_total--; |
206 | nf_reinject(entry, NF_DROP); | 206 | nf_reinject(entry, NF_DROP); |
207 | } | 207 | } |
208 | } | 208 | } |
209 | spin_unlock_bh(&queue->lock); | 209 | spin_unlock_bh(&queue->lock); |
210 | } | 210 | } |
211 | 211 | ||
212 | static struct sk_buff * | 212 | static struct sk_buff * |
213 | nfqnl_build_packet_message(struct nfqnl_instance *queue, | 213 | nfqnl_build_packet_message(struct nfqnl_instance *queue, |
214 | struct nf_queue_entry *entry) | 214 | struct nf_queue_entry *entry) |
215 | { | 215 | { |
216 | sk_buff_data_t old_tail; | 216 | sk_buff_data_t old_tail; |
217 | size_t size; | 217 | size_t size; |
218 | size_t data_len = 0; | 218 | size_t data_len = 0; |
219 | struct sk_buff *skb; | 219 | struct sk_buff *skb; |
220 | struct nfqnl_msg_packet_hdr pmsg; | 220 | struct nfqnl_msg_packet_hdr pmsg; |
221 | struct nlmsghdr *nlh; | 221 | struct nlmsghdr *nlh; |
222 | struct nfgenmsg *nfmsg; | 222 | struct nfgenmsg *nfmsg; |
223 | struct sk_buff *entskb = entry->skb; | 223 | struct sk_buff *entskb = entry->skb; |
224 | struct net_device *indev; | 224 | struct net_device *indev; |
225 | struct net_device *outdev; | 225 | struct net_device *outdev; |
226 | 226 | ||
227 | size = NLMSG_SPACE(sizeof(struct nfgenmsg)) | 227 | size = NLMSG_SPACE(sizeof(struct nfgenmsg)) |
228 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) | 228 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) |
229 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 229 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
230 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 230 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
231 | #ifdef CONFIG_BRIDGE_NETFILTER | 231 | #ifdef CONFIG_BRIDGE_NETFILTER |
232 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 232 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
233 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 233 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
234 | #endif | 234 | #endif |
235 | + nla_total_size(sizeof(u_int32_t)) /* mark */ | 235 | + nla_total_size(sizeof(u_int32_t)) /* mark */ |
236 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) | 236 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) |
237 | + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); | 237 | + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); |
238 | 238 | ||
239 | outdev = entry->outdev; | 239 | outdev = entry->outdev; |
240 | 240 | ||
241 | spin_lock_bh(&queue->lock); | 241 | spin_lock_bh(&queue->lock); |
242 | 242 | ||
243 | switch ((enum nfqnl_config_mode)queue->copy_mode) { | 243 | switch ((enum nfqnl_config_mode)queue->copy_mode) { |
244 | case NFQNL_COPY_META: | 244 | case NFQNL_COPY_META: |
245 | case NFQNL_COPY_NONE: | 245 | case NFQNL_COPY_NONE: |
246 | data_len = 0; | 246 | data_len = 0; |
247 | break; | 247 | break; |
248 | 248 | ||
249 | case NFQNL_COPY_PACKET: | 249 | case NFQNL_COPY_PACKET: |
250 | if ((entskb->ip_summed == CHECKSUM_PARTIAL || | 250 | if ((entskb->ip_summed == CHECKSUM_PARTIAL || |
251 | entskb->ip_summed == CHECKSUM_COMPLETE) && | 251 | entskb->ip_summed == CHECKSUM_COMPLETE) && |
252 | skb_checksum_help(entskb)) { | 252 | skb_checksum_help(entskb)) { |
253 | spin_unlock_bh(&queue->lock); | 253 | spin_unlock_bh(&queue->lock); |
254 | return NULL; | 254 | return NULL; |
255 | } | 255 | } |
256 | if (queue->copy_range == 0 | 256 | if (queue->copy_range == 0 |
257 | || queue->copy_range > entskb->len) | 257 | || queue->copy_range > entskb->len) |
258 | data_len = entskb->len; | 258 | data_len = entskb->len; |
259 | else | 259 | else |
260 | data_len = queue->copy_range; | 260 | data_len = queue->copy_range; |
261 | 261 | ||
262 | size += nla_total_size(data_len); | 262 | size += nla_total_size(data_len); |
263 | break; | 263 | break; |
264 | } | 264 | } |
265 | 265 | ||
266 | entry->id = queue->id_sequence++; | 266 | entry->id = queue->id_sequence++; |
267 | 267 | ||
268 | spin_unlock_bh(&queue->lock); | 268 | spin_unlock_bh(&queue->lock); |
269 | 269 | ||
270 | skb = alloc_skb(size, GFP_ATOMIC); | 270 | skb = alloc_skb(size, GFP_ATOMIC); |
271 | if (!skb) | 271 | if (!skb) |
272 | goto nlmsg_failure; | 272 | goto nlmsg_failure; |
273 | 273 | ||
274 | old_tail = skb->tail; | 274 | old_tail = skb->tail; |
275 | nlh = NLMSG_PUT(skb, 0, 0, | 275 | nlh = NLMSG_PUT(skb, 0, 0, |
276 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 276 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
277 | sizeof(struct nfgenmsg)); | 277 | sizeof(struct nfgenmsg)); |
278 | nfmsg = NLMSG_DATA(nlh); | 278 | nfmsg = NLMSG_DATA(nlh); |
279 | nfmsg->nfgen_family = entry->pf; | 279 | nfmsg->nfgen_family = entry->pf; |
280 | nfmsg->version = NFNETLINK_V0; | 280 | nfmsg->version = NFNETLINK_V0; |
281 | nfmsg->res_id = htons(queue->queue_num); | 281 | nfmsg->res_id = htons(queue->queue_num); |
282 | 282 | ||
283 | pmsg.packet_id = htonl(entry->id); | 283 | pmsg.packet_id = htonl(entry->id); |
284 | pmsg.hw_protocol = entskb->protocol; | 284 | pmsg.hw_protocol = entskb->protocol; |
285 | pmsg.hook = entry->hook; | 285 | pmsg.hook = entry->hook; |
286 | 286 | ||
287 | NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); | 287 | NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); |
288 | 288 | ||
289 | indev = entry->indev; | 289 | indev = entry->indev; |
290 | if (indev) { | 290 | if (indev) { |
291 | #ifndef CONFIG_BRIDGE_NETFILTER | 291 | #ifndef CONFIG_BRIDGE_NETFILTER |
292 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); | 292 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); |
293 | #else | 293 | #else |
294 | if (entry->pf == PF_BRIDGE) { | 294 | if (entry->pf == PF_BRIDGE) { |
295 | /* Case 1: indev is physical input device, we need to | 295 | /* Case 1: indev is physical input device, we need to |
296 | * look for bridge group (when called from | 296 | * look for bridge group (when called from |
297 | * netfilter_bridge) */ | 297 | * netfilter_bridge) */ |
298 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, | 298 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, |
299 | htonl(indev->ifindex)); | 299 | htonl(indev->ifindex)); |
300 | /* this is the bridge group "brX" */ | 300 | /* this is the bridge group "brX" */ |
301 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, | 301 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, |
302 | htonl(indev->br_port->br->dev->ifindex)); | 302 | htonl(indev->br_port->br->dev->ifindex)); |
303 | } else { | 303 | } else { |
304 | /* Case 2: indev is bridge group, we need to look for | 304 | /* Case 2: indev is bridge group, we need to look for |
305 | * physical device (when called from ipv4) */ | 305 | * physical device (when called from ipv4) */ |
306 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, | 306 | NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, |
307 | htonl(indev->ifindex)); | 307 | htonl(indev->ifindex)); |
308 | if (entskb->nf_bridge && entskb->nf_bridge->physindev) | 308 | if (entskb->nf_bridge && entskb->nf_bridge->physindev) |
309 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, | 309 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, |
310 | htonl(entskb->nf_bridge->physindev->ifindex)); | 310 | htonl(entskb->nf_bridge->physindev->ifindex)); |
311 | } | 311 | } |
312 | #endif | 312 | #endif |
313 | } | 313 | } |
314 | 314 | ||
315 | if (outdev) { | 315 | if (outdev) { |
316 | #ifndef CONFIG_BRIDGE_NETFILTER | 316 | #ifndef CONFIG_BRIDGE_NETFILTER |
317 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); | 317 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); |
318 | #else | 318 | #else |
319 | if (entry->pf == PF_BRIDGE) { | 319 | if (entry->pf == PF_BRIDGE) { |
320 | /* Case 1: outdev is physical output device, we need to | 320 | /* Case 1: outdev is physical output device, we need to |
321 | * look for bridge group (when called from | 321 | * look for bridge group (when called from |
322 | * netfilter_bridge) */ | 322 | * netfilter_bridge) */ |
323 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 323 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
324 | htonl(outdev->ifindex)); | 324 | htonl(outdev->ifindex)); |
325 | /* this is the bridge group "brX" */ | 325 | /* this is the bridge group "brX" */ |
326 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, | 326 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, |
327 | htonl(outdev->br_port->br->dev->ifindex)); | 327 | htonl(outdev->br_port->br->dev->ifindex)); |
328 | } else { | 328 | } else { |
329 | /* Case 2: outdev is bridge group, we need to look for | 329 | /* Case 2: outdev is bridge group, we need to look for |
330 | * physical output device (when called from ipv4) */ | 330 | * physical output device (when called from ipv4) */ |
331 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, | 331 | NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, |
332 | htonl(outdev->ifindex)); | 332 | htonl(outdev->ifindex)); |
333 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) | 333 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) |
334 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 334 | NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
335 | htonl(entskb->nf_bridge->physoutdev->ifindex)); | 335 | htonl(entskb->nf_bridge->physoutdev->ifindex)); |
336 | } | 336 | } |
337 | #endif | 337 | #endif |
338 | } | 338 | } |
339 | 339 | ||
340 | if (entskb->mark) | 340 | if (entskb->mark) |
341 | NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); | 341 | NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); |
342 | 342 | ||
343 | if (indev && entskb->dev) { | 343 | if (indev && entskb->dev) { |
344 | struct nfqnl_msg_packet_hw phw; | 344 | struct nfqnl_msg_packet_hw phw; |
345 | int len = dev_parse_header(entskb, phw.hw_addr); | 345 | int len = dev_parse_header(entskb, phw.hw_addr); |
346 | if (len) { | 346 | if (len) { |
347 | phw.hw_addrlen = htons(len); | 347 | phw.hw_addrlen = htons(len); |
348 | NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); | 348 | NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); |
349 | } | 349 | } |
350 | } | 350 | } |
351 | 351 | ||
352 | if (entskb->tstamp.tv64) { | 352 | if (entskb->tstamp.tv64) { |
353 | struct nfqnl_msg_packet_timestamp ts; | 353 | struct nfqnl_msg_packet_timestamp ts; |
354 | struct timeval tv = ktime_to_timeval(entskb->tstamp); | 354 | struct timeval tv = ktime_to_timeval(entskb->tstamp); |
355 | ts.sec = cpu_to_be64(tv.tv_sec); | 355 | ts.sec = cpu_to_be64(tv.tv_sec); |
356 | ts.usec = cpu_to_be64(tv.tv_usec); | 356 | ts.usec = cpu_to_be64(tv.tv_usec); |
357 | 357 | ||
358 | NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); | 358 | NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); |
359 | } | 359 | } |
360 | 360 | ||
361 | if (data_len) { | 361 | if (data_len) { |
362 | struct nlattr *nla; | 362 | struct nlattr *nla; |
363 | int sz = nla_attr_size(data_len); | 363 | int sz = nla_attr_size(data_len); |
364 | 364 | ||
365 | if (skb_tailroom(skb) < nla_total_size(data_len)) { | 365 | if (skb_tailroom(skb) < nla_total_size(data_len)) { |
366 | printk(KERN_WARNING "nf_queue: no tailroom!\n"); | 366 | printk(KERN_WARNING "nf_queue: no tailroom!\n"); |
367 | goto nlmsg_failure; | 367 | goto nlmsg_failure; |
368 | } | 368 | } |
369 | 369 | ||
370 | nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); | 370 | nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); |
371 | nla->nla_type = NFQA_PAYLOAD; | 371 | nla->nla_type = NFQA_PAYLOAD; |
372 | nla->nla_len = sz; | 372 | nla->nla_len = sz; |
373 | 373 | ||
374 | if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) | 374 | if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) |
375 | BUG(); | 375 | BUG(); |
376 | } | 376 | } |
377 | 377 | ||
378 | nlh->nlmsg_len = skb->tail - old_tail; | 378 | nlh->nlmsg_len = skb->tail - old_tail; |
379 | return skb; | 379 | return skb; |
380 | 380 | ||
381 | nlmsg_failure: | 381 | nlmsg_failure: |
382 | nla_put_failure: | 382 | nla_put_failure: |
383 | if (skb) | 383 | if (skb) |
384 | kfree_skb(skb); | 384 | kfree_skb(skb); |
385 | if (net_ratelimit()) | 385 | if (net_ratelimit()) |
386 | printk(KERN_ERR "nf_queue: error creating packet message\n"); | 386 | printk(KERN_ERR "nf_queue: error creating packet message\n"); |
387 | return NULL; | 387 | return NULL; |
388 | } | 388 | } |
389 | 389 | ||
390 | static int | 390 | static int |
391 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | 391 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) |
392 | { | 392 | { |
393 | struct sk_buff *nskb; | 393 | struct sk_buff *nskb; |
394 | struct nfqnl_instance *queue; | 394 | struct nfqnl_instance *queue; |
395 | int err; | 395 | int err; |
396 | 396 | ||
397 | /* rcu_read_lock()ed by nf_hook_slow() */ | 397 | /* rcu_read_lock()ed by nf_hook_slow() */ |
398 | queue = instance_lookup(queuenum); | 398 | queue = instance_lookup(queuenum); |
399 | if (!queue) | 399 | if (!queue) |
400 | goto err_out; | 400 | goto err_out; |
401 | 401 | ||
402 | if (queue->copy_mode == NFQNL_COPY_NONE) | 402 | if (queue->copy_mode == NFQNL_COPY_NONE) |
403 | goto err_out; | 403 | goto err_out; |
404 | 404 | ||
405 | nskb = nfqnl_build_packet_message(queue, entry); | 405 | nskb = nfqnl_build_packet_message(queue, entry); |
406 | if (nskb == NULL) | 406 | if (nskb == NULL) |
407 | goto err_out; | 407 | goto err_out; |
408 | 408 | ||
409 | spin_lock_bh(&queue->lock); | 409 | spin_lock_bh(&queue->lock); |
410 | 410 | ||
411 | if (!queue->peer_pid) | 411 | if (!queue->peer_pid) |
412 | goto err_out_free_nskb; | 412 | goto err_out_free_nskb; |
413 | 413 | ||
414 | if (queue->queue_total >= queue->queue_maxlen) { | 414 | if (queue->queue_total >= queue->queue_maxlen) { |
415 | queue->queue_dropped++; | 415 | queue->queue_dropped++; |
416 | if (net_ratelimit()) | 416 | if (net_ratelimit()) |
417 | printk(KERN_WARNING "nf_queue: full at %d entries, " | 417 | printk(KERN_WARNING "nf_queue: full at %d entries, " |
418 | "dropping packets(s). Dropped: %d\n", | 418 | "dropping packets(s). Dropped: %d\n", |
419 | queue->queue_total, queue->queue_dropped); | 419 | queue->queue_total, queue->queue_dropped); |
420 | goto err_out_free_nskb; | 420 | goto err_out_free_nskb; |
421 | } | 421 | } |
422 | 422 | ||
423 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 423 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
424 | err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); | 424 | err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); |
425 | if (err < 0) { | 425 | if (err < 0) { |
426 | queue->queue_user_dropped++; | 426 | queue->queue_user_dropped++; |
427 | goto err_out_unlock; | 427 | goto err_out_unlock; |
428 | } | 428 | } |
429 | 429 | ||
430 | __enqueue_entry(queue, entry); | 430 | __enqueue_entry(queue, entry); |
431 | 431 | ||
432 | spin_unlock_bh(&queue->lock); | 432 | spin_unlock_bh(&queue->lock); |
433 | return 0; | 433 | return 0; |
434 | 434 | ||
435 | err_out_free_nskb: | 435 | err_out_free_nskb: |
436 | kfree_skb(nskb); | 436 | kfree_skb(nskb); |
437 | err_out_unlock: | 437 | err_out_unlock: |
438 | spin_unlock_bh(&queue->lock); | 438 | spin_unlock_bh(&queue->lock); |
439 | err_out: | 439 | err_out: |
440 | return -1; | 440 | return -1; |
441 | } | 441 | } |
442 | 442 | ||
443 | static int | 443 | static int |
444 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) | 444 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) |
445 | { | 445 | { |
446 | struct sk_buff *nskb; | 446 | struct sk_buff *nskb; |
447 | int diff; | 447 | int diff; |
448 | 448 | ||
449 | diff = data_len - e->skb->len; | 449 | diff = data_len - e->skb->len; |
450 | if (diff < 0) { | 450 | if (diff < 0) { |
451 | if (pskb_trim(e->skb, data_len)) | 451 | if (pskb_trim(e->skb, data_len)) |
452 | return -ENOMEM; | 452 | return -ENOMEM; |
453 | } else if (diff > 0) { | 453 | } else if (diff > 0) { |
454 | if (data_len > 0xFFFF) | 454 | if (data_len > 0xFFFF) |
455 | return -EINVAL; | 455 | return -EINVAL; |
456 | if (diff > skb_tailroom(e->skb)) { | 456 | if (diff > skb_tailroom(e->skb)) { |
457 | nskb = skb_copy_expand(e->skb, 0, | 457 | nskb = skb_copy_expand(e->skb, 0, |
458 | diff - skb_tailroom(e->skb), | 458 | diff - skb_tailroom(e->skb), |
459 | GFP_ATOMIC); | 459 | GFP_ATOMIC); |
460 | if (!nskb) { | 460 | if (!nskb) { |
461 | printk(KERN_WARNING "nf_queue: OOM " | 461 | printk(KERN_WARNING "nf_queue: OOM " |
462 | "in mangle, dropping packet\n"); | 462 | "in mangle, dropping packet\n"); |
463 | return -ENOMEM; | 463 | return -ENOMEM; |
464 | } | 464 | } |
465 | kfree_skb(e->skb); | 465 | kfree_skb(e->skb); |
466 | e->skb = nskb; | 466 | e->skb = nskb; |
467 | } | 467 | } |
468 | skb_put(e->skb, diff); | 468 | skb_put(e->skb, diff); |
469 | } | 469 | } |
470 | if (!skb_make_writable(e->skb, data_len)) | 470 | if (!skb_make_writable(e->skb, data_len)) |
471 | return -ENOMEM; | 471 | return -ENOMEM; |
472 | skb_copy_to_linear_data(e->skb, data, data_len); | 472 | skb_copy_to_linear_data(e->skb, data, data_len); |
473 | e->skb->ip_summed = CHECKSUM_NONE; | 473 | e->skb->ip_summed = CHECKSUM_NONE; |
474 | return 0; | 474 | return 0; |
475 | } | 475 | } |
476 | 476 | ||
477 | static int | 477 | static int |
478 | nfqnl_set_mode(struct nfqnl_instance *queue, | 478 | nfqnl_set_mode(struct nfqnl_instance *queue, |
479 | unsigned char mode, unsigned int range) | 479 | unsigned char mode, unsigned int range) |
480 | { | 480 | { |
481 | int status = 0; | 481 | int status = 0; |
482 | 482 | ||
483 | spin_lock_bh(&queue->lock); | 483 | spin_lock_bh(&queue->lock); |
484 | switch (mode) { | 484 | switch (mode) { |
485 | case NFQNL_COPY_NONE: | 485 | case NFQNL_COPY_NONE: |
486 | case NFQNL_COPY_META: | 486 | case NFQNL_COPY_META: |
487 | queue->copy_mode = mode; | 487 | queue->copy_mode = mode; |
488 | queue->copy_range = 0; | 488 | queue->copy_range = 0; |
489 | break; | 489 | break; |
490 | 490 | ||
491 | case NFQNL_COPY_PACKET: | 491 | case NFQNL_COPY_PACKET: |
492 | queue->copy_mode = mode; | 492 | queue->copy_mode = mode; |
493 | /* we're using struct nlattr which has 16bit nla_len */ | 493 | /* we're using struct nlattr which has 16bit nla_len */ |
494 | if (range > 0xffff) | 494 | if (range > 0xffff) |
495 | queue->copy_range = 0xffff; | 495 | queue->copy_range = 0xffff; |
496 | else | 496 | else |
497 | queue->copy_range = range; | 497 | queue->copy_range = range; |
498 | break; | 498 | break; |
499 | 499 | ||
500 | default: | 500 | default: |
501 | status = -EINVAL; | 501 | status = -EINVAL; |
502 | 502 | ||
503 | } | 503 | } |
504 | spin_unlock_bh(&queue->lock); | 504 | spin_unlock_bh(&queue->lock); |
505 | 505 | ||
506 | return status; | 506 | return status; |
507 | } | 507 | } |
508 | 508 | ||
509 | static int | 509 | static int |
510 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) | 510 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) |
511 | { | 511 | { |
512 | if (entry->indev) | 512 | if (entry->indev) |
513 | if (entry->indev->ifindex == ifindex) | 513 | if (entry->indev->ifindex == ifindex) |
514 | return 1; | 514 | return 1; |
515 | if (entry->outdev) | 515 | if (entry->outdev) |
516 | if (entry->outdev->ifindex == ifindex) | 516 | if (entry->outdev->ifindex == ifindex) |
517 | return 1; | 517 | return 1; |
518 | #ifdef CONFIG_BRIDGE_NETFILTER | 518 | #ifdef CONFIG_BRIDGE_NETFILTER |
519 | if (entry->skb->nf_bridge) { | 519 | if (entry->skb->nf_bridge) { |
520 | if (entry->skb->nf_bridge->physindev && | 520 | if (entry->skb->nf_bridge->physindev && |
521 | entry->skb->nf_bridge->physindev->ifindex == ifindex) | 521 | entry->skb->nf_bridge->physindev->ifindex == ifindex) |
522 | return 1; | 522 | return 1; |
523 | if (entry->skb->nf_bridge->physoutdev && | 523 | if (entry->skb->nf_bridge->physoutdev && |
524 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) | 524 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) |
525 | return 1; | 525 | return 1; |
526 | } | 526 | } |
527 | #endif | 527 | #endif |
528 | return 0; | 528 | return 0; |
529 | } | 529 | } |
530 | 530 | ||
531 | /* drop all packets with either indev or outdev == ifindex from all queue | 531 | /* drop all packets with either indev or outdev == ifindex from all queue |
532 | * instances */ | 532 | * instances */ |
533 | static void | 533 | static void |
534 | nfqnl_dev_drop(int ifindex) | 534 | nfqnl_dev_drop(int ifindex) |
535 | { | 535 | { |
536 | int i; | 536 | int i; |
537 | 537 | ||
538 | rcu_read_lock(); | 538 | rcu_read_lock(); |
539 | 539 | ||
540 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 540 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
541 | struct hlist_node *tmp; | 541 | struct hlist_node *tmp; |
542 | struct nfqnl_instance *inst; | 542 | struct nfqnl_instance *inst; |
543 | struct hlist_head *head = &instance_table[i]; | 543 | struct hlist_head *head = &instance_table[i]; |
544 | 544 | ||
545 | hlist_for_each_entry_rcu(inst, tmp, head, hlist) | 545 | hlist_for_each_entry_rcu(inst, tmp, head, hlist) |
546 | nfqnl_flush(inst, dev_cmp, ifindex); | 546 | nfqnl_flush(inst, dev_cmp, ifindex); |
547 | } | 547 | } |
548 | 548 | ||
549 | rcu_read_unlock(); | 549 | rcu_read_unlock(); |
550 | } | 550 | } |
551 | 551 | ||
552 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) | 552 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) |
553 | 553 | ||
554 | static int | 554 | static int |
555 | nfqnl_rcv_dev_event(struct notifier_block *this, | 555 | nfqnl_rcv_dev_event(struct notifier_block *this, |
556 | unsigned long event, void *ptr) | 556 | unsigned long event, void *ptr) |
557 | { | 557 | { |
558 | struct net_device *dev = ptr; | 558 | struct net_device *dev = ptr; |
559 | 559 | ||
560 | if (dev->nd_net != &init_net) | 560 | if (dev->nd_net != &init_net) |
561 | return NOTIFY_DONE; | 561 | return NOTIFY_DONE; |
562 | 562 | ||
563 | /* Drop any packets associated with the downed device */ | 563 | /* Drop any packets associated with the downed device */ |
564 | if (event == NETDEV_DOWN) | 564 | if (event == NETDEV_DOWN) |
565 | nfqnl_dev_drop(dev->ifindex); | 565 | nfqnl_dev_drop(dev->ifindex); |
566 | return NOTIFY_DONE; | 566 | return NOTIFY_DONE; |
567 | } | 567 | } |
568 | 568 | ||
569 | static struct notifier_block nfqnl_dev_notifier = { | 569 | static struct notifier_block nfqnl_dev_notifier = { |
570 | .notifier_call = nfqnl_rcv_dev_event, | 570 | .notifier_call = nfqnl_rcv_dev_event, |
571 | }; | 571 | }; |
572 | 572 | ||
573 | static int | 573 | static int |
574 | nfqnl_rcv_nl_event(struct notifier_block *this, | 574 | nfqnl_rcv_nl_event(struct notifier_block *this, |
575 | unsigned long event, void *ptr) | 575 | unsigned long event, void *ptr) |
576 | { | 576 | { |
577 | struct netlink_notify *n = ptr; | 577 | struct netlink_notify *n = ptr; |
578 | 578 | ||
579 | if (event == NETLINK_URELEASE && | 579 | if (event == NETLINK_URELEASE && |
580 | n->protocol == NETLINK_NETFILTER && n->pid) { | 580 | n->protocol == NETLINK_NETFILTER && n->pid) { |
581 | int i; | 581 | int i; |
582 | 582 | ||
583 | /* destroy all instances for this pid */ | 583 | /* destroy all instances for this pid */ |
584 | spin_lock(&instances_lock); | 584 | spin_lock(&instances_lock); |
585 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 585 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
586 | struct hlist_node *tmp, *t2; | 586 | struct hlist_node *tmp, *t2; |
587 | struct nfqnl_instance *inst; | 587 | struct nfqnl_instance *inst; |
588 | struct hlist_head *head = &instance_table[i]; | 588 | struct hlist_head *head = &instance_table[i]; |
589 | 589 | ||
590 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { | 590 | hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { |
591 | if ((n->net == &init_net) && | 591 | if ((n->net == &init_net) && |
592 | (n->pid == inst->peer_pid)) | 592 | (n->pid == inst->peer_pid)) |
593 | __instance_destroy(inst); | 593 | __instance_destroy(inst); |
594 | } | 594 | } |
595 | } | 595 | } |
596 | spin_unlock(&instances_lock); | 596 | spin_unlock(&instances_lock); |
597 | } | 597 | } |
598 | return NOTIFY_DONE; | 598 | return NOTIFY_DONE; |
599 | } | 599 | } |
600 | 600 | ||
601 | static struct notifier_block nfqnl_rtnl_notifier = { | 601 | static struct notifier_block nfqnl_rtnl_notifier = { |
602 | .notifier_call = nfqnl_rcv_nl_event, | 602 | .notifier_call = nfqnl_rcv_nl_event, |
603 | }; | 603 | }; |
604 | 604 | ||
605 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { | 605 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { |
606 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | 606 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, |
607 | [NFQA_MARK] = { .type = NLA_U32 }, | 607 | [NFQA_MARK] = { .type = NLA_U32 }, |
608 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, | 608 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, |
609 | }; | 609 | }; |
610 | 610 | ||
611 | static int | 611 | static int |
612 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | 612 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, |
613 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 613 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) |
614 | { | 614 | { |
615 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 615 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
616 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 616 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
617 | 617 | ||
618 | struct nfqnl_msg_verdict_hdr *vhdr; | 618 | struct nfqnl_msg_verdict_hdr *vhdr; |
619 | struct nfqnl_instance *queue; | 619 | struct nfqnl_instance *queue; |
620 | unsigned int verdict; | 620 | unsigned int verdict; |
621 | struct nf_queue_entry *entry; | 621 | struct nf_queue_entry *entry; |
622 | int err; | 622 | int err; |
623 | 623 | ||
624 | rcu_read_lock(); | 624 | rcu_read_lock(); |
625 | queue = instance_lookup(queue_num); | 625 | queue = instance_lookup(queue_num); |
626 | if (!queue) { | 626 | if (!queue) { |
627 | err = -ENODEV; | 627 | err = -ENODEV; |
628 | goto err_out_unlock; | 628 | goto err_out_unlock; |
629 | } | 629 | } |
630 | 630 | ||
631 | if (queue->peer_pid != NETLINK_CB(skb).pid) { | 631 | if (queue->peer_pid != NETLINK_CB(skb).pid) { |
632 | err = -EPERM; | 632 | err = -EPERM; |
633 | goto err_out_unlock; | 633 | goto err_out_unlock; |
634 | } | 634 | } |
635 | 635 | ||
636 | if (!nfqa[NFQA_VERDICT_HDR]) { | 636 | if (!nfqa[NFQA_VERDICT_HDR]) { |
637 | err = -EINVAL; | 637 | err = -EINVAL; |
638 | goto err_out_unlock; | 638 | goto err_out_unlock; |
639 | } | 639 | } |
640 | 640 | ||
641 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | 641 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); |
642 | verdict = ntohl(vhdr->verdict); | 642 | verdict = ntohl(vhdr->verdict); |
643 | 643 | ||
644 | if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { | 644 | if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { |
645 | err = -EINVAL; | 645 | err = -EINVAL; |
646 | goto err_out_unlock; | 646 | goto err_out_unlock; |
647 | } | 647 | } |
648 | 648 | ||
649 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); | 649 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); |
650 | if (entry == NULL) { | 650 | if (entry == NULL) { |
651 | err = -ENOENT; | 651 | err = -ENOENT; |
652 | goto err_out_unlock; | 652 | goto err_out_unlock; |
653 | } | 653 | } |
654 | rcu_read_unlock(); | 654 | rcu_read_unlock(); |
655 | 655 | ||
656 | if (nfqa[NFQA_PAYLOAD]) { | 656 | if (nfqa[NFQA_PAYLOAD]) { |
657 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), | 657 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), |
658 | nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) | 658 | nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) |
659 | verdict = NF_DROP; | 659 | verdict = NF_DROP; |
660 | } | 660 | } |
661 | 661 | ||
662 | if (nfqa[NFQA_MARK]) | 662 | if (nfqa[NFQA_MARK]) |
663 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | 663 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); |
664 | 664 | ||
665 | nf_reinject(entry, verdict); | 665 | nf_reinject(entry, verdict); |
666 | return 0; | 666 | return 0; |
667 | 667 | ||
668 | err_out_unlock: | 668 | err_out_unlock: |
669 | rcu_read_unlock(); | 669 | rcu_read_unlock(); |
670 | return err; | 670 | return err; |
671 | } | 671 | } |
672 | 672 | ||
673 | static int | 673 | static int |
674 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, | 674 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, |
675 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 675 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) |
676 | { | 676 | { |
677 | return -ENOTSUPP; | 677 | return -ENOTSUPP; |
678 | } | 678 | } |
679 | 679 | ||
680 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { | 680 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { |
681 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, | 681 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, |
682 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, | 682 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, |
683 | }; | 683 | }; |
684 | 684 | ||
685 | static const struct nf_queue_handler nfqh = { | 685 | static const struct nf_queue_handler nfqh = { |
686 | .name = "nf_queue", | 686 | .name = "nf_queue", |
687 | .outfn = &nfqnl_enqueue_packet, | 687 | .outfn = &nfqnl_enqueue_packet, |
688 | }; | 688 | }; |
689 | 689 | ||
690 | static int | 690 | static int |
691 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | 691 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, |
692 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 692 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) |
693 | { | 693 | { |
694 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 694 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
695 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 695 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
696 | struct nfqnl_instance *queue; | 696 | struct nfqnl_instance *queue; |
697 | struct nfqnl_msg_config_cmd *cmd = NULL; | 697 | struct nfqnl_msg_config_cmd *cmd = NULL; |
698 | int ret = 0; | 698 | int ret = 0; |
699 | 699 | ||
700 | if (nfqa[NFQA_CFG_CMD]) { | 700 | if (nfqa[NFQA_CFG_CMD]) { |
701 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); | 701 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); |
702 | 702 | ||
703 | /* Commands without queue context - might sleep */ | 703 | /* Commands without queue context - might sleep */ |
704 | switch (cmd->command) { | 704 | switch (cmd->command) { |
705 | case NFQNL_CFG_CMD_PF_BIND: | 705 | case NFQNL_CFG_CMD_PF_BIND: |
706 | ret = nf_register_queue_handler(ntohs(cmd->pf), | 706 | return nf_register_queue_handler(ntohs(cmd->pf), |
707 | &nfqh); | 707 | &nfqh); |
708 | break; | ||
709 | case NFQNL_CFG_CMD_PF_UNBIND: | 708 | case NFQNL_CFG_CMD_PF_UNBIND: |
710 | ret = nf_unregister_queue_handler(ntohs(cmd->pf), | 709 | return nf_unregister_queue_handler(ntohs(cmd->pf), |
711 | &nfqh); | 710 | &nfqh); |
712 | break; | ||
713 | default: | ||
714 | break; | ||
715 | } | 711 | } |
716 | |||
717 | if (ret < 0) | ||
718 | return ret; | ||
719 | } | 712 | } |
720 | 713 | ||
721 | rcu_read_lock(); | 714 | rcu_read_lock(); |
722 | queue = instance_lookup(queue_num); | 715 | queue = instance_lookup(queue_num); |
723 | if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { | 716 | if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { |
724 | ret = -EPERM; | 717 | ret = -EPERM; |
725 | goto err_out_unlock; | 718 | goto err_out_unlock; |
726 | } | 719 | } |
727 | 720 | ||
728 | if (cmd != NULL) { | 721 | if (cmd != NULL) { |
729 | switch (cmd->command) { | 722 | switch (cmd->command) { |
730 | case NFQNL_CFG_CMD_BIND: | 723 | case NFQNL_CFG_CMD_BIND: |
731 | if (queue) { | 724 | if (queue) { |
732 | ret = -EBUSY; | 725 | ret = -EBUSY; |
733 | goto err_out_unlock; | 726 | goto err_out_unlock; |
734 | } | 727 | } |
735 | queue = instance_create(queue_num, NETLINK_CB(skb).pid); | 728 | queue = instance_create(queue_num, NETLINK_CB(skb).pid); |
736 | if (IS_ERR(queue)) { | 729 | if (IS_ERR(queue)) { |
737 | ret = PTR_ERR(queue); | 730 | ret = PTR_ERR(queue); |
738 | goto err_out_unlock; | 731 | goto err_out_unlock; |
739 | } | 732 | } |
740 | break; | 733 | break; |
741 | case NFQNL_CFG_CMD_UNBIND: | 734 | case NFQNL_CFG_CMD_UNBIND: |
742 | if (!queue) { | 735 | if (!queue) { |
743 | ret = -ENODEV; | 736 | ret = -ENODEV; |
744 | goto err_out_unlock; | 737 | goto err_out_unlock; |
745 | } | 738 | } |
746 | instance_destroy(queue); | 739 | instance_destroy(queue); |
747 | break; | 740 | break; |
748 | case NFQNL_CFG_CMD_PF_BIND: | 741 | case NFQNL_CFG_CMD_PF_BIND: |
749 | case NFQNL_CFG_CMD_PF_UNBIND: | 742 | case NFQNL_CFG_CMD_PF_UNBIND: |
750 | break; | 743 | break; |
751 | default: | 744 | default: |
752 | ret = -ENOTSUPP; | 745 | ret = -ENOTSUPP; |
753 | break; | 746 | break; |
754 | } | 747 | } |
755 | } | 748 | } |
756 | 749 | ||
757 | if (nfqa[NFQA_CFG_PARAMS]) { | 750 | if (nfqa[NFQA_CFG_PARAMS]) { |
758 | struct nfqnl_msg_config_params *params; | 751 | struct nfqnl_msg_config_params *params; |
759 | 752 | ||
760 | if (!queue) { | 753 | if (!queue) { |
761 | ret = -ENODEV; | 754 | ret = -ENODEV; |
762 | goto err_out_unlock; | 755 | goto err_out_unlock; |
763 | } | 756 | } |
764 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); | 757 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); |
765 | nfqnl_set_mode(queue, params->copy_mode, | 758 | nfqnl_set_mode(queue, params->copy_mode, |
766 | ntohl(params->copy_range)); | 759 | ntohl(params->copy_range)); |
767 | } | 760 | } |
768 | 761 | ||
769 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { | 762 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { |
770 | __be32 *queue_maxlen; | 763 | __be32 *queue_maxlen; |
771 | 764 | ||
772 | if (!queue) { | 765 | if (!queue) { |
773 | ret = -ENODEV; | 766 | ret = -ENODEV; |
774 | goto err_out_unlock; | 767 | goto err_out_unlock; |
775 | } | 768 | } |
776 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); | 769 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); |
777 | spin_lock_bh(&queue->lock); | 770 | spin_lock_bh(&queue->lock); |
778 | queue->queue_maxlen = ntohl(*queue_maxlen); | 771 | queue->queue_maxlen = ntohl(*queue_maxlen); |
779 | spin_unlock_bh(&queue->lock); | 772 | spin_unlock_bh(&queue->lock); |
780 | } | 773 | } |
781 | 774 | ||
782 | err_out_unlock: | 775 | err_out_unlock: |
783 | rcu_read_unlock(); | 776 | rcu_read_unlock(); |
784 | return ret; | 777 | return ret; |
785 | } | 778 | } |
786 | 779 | ||
787 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { | 780 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { |
788 | [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, | 781 | [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, |
789 | .attr_count = NFQA_MAX, }, | 782 | .attr_count = NFQA_MAX, }, |
790 | [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, | 783 | [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, |
791 | .attr_count = NFQA_MAX, | 784 | .attr_count = NFQA_MAX, |
792 | .policy = nfqa_verdict_policy }, | 785 | .policy = nfqa_verdict_policy }, |
793 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, | 786 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, |
794 | .attr_count = NFQA_CFG_MAX, | 787 | .attr_count = NFQA_CFG_MAX, |
795 | .policy = nfqa_cfg_policy }, | 788 | .policy = nfqa_cfg_policy }, |
796 | }; | 789 | }; |
797 | 790 | ||
798 | static const struct nfnetlink_subsystem nfqnl_subsys = { | 791 | static const struct nfnetlink_subsystem nfqnl_subsys = { |
799 | .name = "nf_queue", | 792 | .name = "nf_queue", |
800 | .subsys_id = NFNL_SUBSYS_QUEUE, | 793 | .subsys_id = NFNL_SUBSYS_QUEUE, |
801 | .cb_count = NFQNL_MSG_MAX, | 794 | .cb_count = NFQNL_MSG_MAX, |
802 | .cb = nfqnl_cb, | 795 | .cb = nfqnl_cb, |
803 | }; | 796 | }; |
804 | 797 | ||
805 | #ifdef CONFIG_PROC_FS | 798 | #ifdef CONFIG_PROC_FS |
806 | struct iter_state { | 799 | struct iter_state { |
807 | unsigned int bucket; | 800 | unsigned int bucket; |
808 | }; | 801 | }; |
809 | 802 | ||
810 | static struct hlist_node *get_first(struct seq_file *seq) | 803 | static struct hlist_node *get_first(struct seq_file *seq) |
811 | { | 804 | { |
812 | struct iter_state *st = seq->private; | 805 | struct iter_state *st = seq->private; |
813 | 806 | ||
814 | if (!st) | 807 | if (!st) |
815 | return NULL; | 808 | return NULL; |
816 | 809 | ||
817 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { | 810 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { |
818 | if (!hlist_empty(&instance_table[st->bucket])) | 811 | if (!hlist_empty(&instance_table[st->bucket])) |
819 | return instance_table[st->bucket].first; | 812 | return instance_table[st->bucket].first; |
820 | } | 813 | } |
821 | return NULL; | 814 | return NULL; |
822 | } | 815 | } |
823 | 816 | ||
824 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) | 817 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) |
825 | { | 818 | { |
826 | struct iter_state *st = seq->private; | 819 | struct iter_state *st = seq->private; |
827 | 820 | ||
828 | h = h->next; | 821 | h = h->next; |
829 | while (!h) { | 822 | while (!h) { |
830 | if (++st->bucket >= INSTANCE_BUCKETS) | 823 | if (++st->bucket >= INSTANCE_BUCKETS) |
831 | return NULL; | 824 | return NULL; |
832 | 825 | ||
833 | h = instance_table[st->bucket].first; | 826 | h = instance_table[st->bucket].first; |
834 | } | 827 | } |
835 | return h; | 828 | return h; |
836 | } | 829 | } |
837 | 830 | ||
838 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) | 831 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) |
839 | { | 832 | { |
840 | struct hlist_node *head; | 833 | struct hlist_node *head; |
841 | head = get_first(seq); | 834 | head = get_first(seq); |
842 | 835 | ||
843 | if (head) | 836 | if (head) |
844 | while (pos && (head = get_next(seq, head))) | 837 | while (pos && (head = get_next(seq, head))) |
845 | pos--; | 838 | pos--; |
846 | return pos ? NULL : head; | 839 | return pos ? NULL : head; |
847 | } | 840 | } |
848 | 841 | ||
849 | static void *seq_start(struct seq_file *seq, loff_t *pos) | 842 | static void *seq_start(struct seq_file *seq, loff_t *pos) |
850 | __acquires(instances_lock) | 843 | __acquires(instances_lock) |
851 | { | 844 | { |
852 | spin_lock(&instances_lock); | 845 | spin_lock(&instances_lock); |
853 | return get_idx(seq, *pos); | 846 | return get_idx(seq, *pos); |
854 | } | 847 | } |
855 | 848 | ||
856 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) | 849 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) |
857 | { | 850 | { |
858 | (*pos)++; | 851 | (*pos)++; |
859 | return get_next(s, v); | 852 | return get_next(s, v); |
860 | } | 853 | } |
861 | 854 | ||
862 | static void seq_stop(struct seq_file *s, void *v) | 855 | static void seq_stop(struct seq_file *s, void *v) |
863 | __releases(instances_lock) | 856 | __releases(instances_lock) |
864 | { | 857 | { |
865 | spin_unlock(&instances_lock); | 858 | spin_unlock(&instances_lock); |
866 | } | 859 | } |
867 | 860 | ||
868 | static int seq_show(struct seq_file *s, void *v) | 861 | static int seq_show(struct seq_file *s, void *v) |
869 | { | 862 | { |
870 | const struct nfqnl_instance *inst = v; | 863 | const struct nfqnl_instance *inst = v; |
871 | 864 | ||
872 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", | 865 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", |
873 | inst->queue_num, | 866 | inst->queue_num, |
874 | inst->peer_pid, inst->queue_total, | 867 | inst->peer_pid, inst->queue_total, |
875 | inst->copy_mode, inst->copy_range, | 868 | inst->copy_mode, inst->copy_range, |
876 | inst->queue_dropped, inst->queue_user_dropped, | 869 | inst->queue_dropped, inst->queue_user_dropped, |
877 | inst->id_sequence, 1); | 870 | inst->id_sequence, 1); |
878 | } | 871 | } |
879 | 872 | ||
880 | static const struct seq_operations nfqnl_seq_ops = { | 873 | static const struct seq_operations nfqnl_seq_ops = { |
881 | .start = seq_start, | 874 | .start = seq_start, |
882 | .next = seq_next, | 875 | .next = seq_next, |
883 | .stop = seq_stop, | 876 | .stop = seq_stop, |
884 | .show = seq_show, | 877 | .show = seq_show, |
885 | }; | 878 | }; |
886 | 879 | ||
887 | static int nfqnl_open(struct inode *inode, struct file *file) | 880 | static int nfqnl_open(struct inode *inode, struct file *file) |
888 | { | 881 | { |
889 | return seq_open_private(file, &nfqnl_seq_ops, | 882 | return seq_open_private(file, &nfqnl_seq_ops, |
890 | sizeof(struct iter_state)); | 883 | sizeof(struct iter_state)); |
891 | } | 884 | } |
892 | 885 | ||
893 | static const struct file_operations nfqnl_file_ops = { | 886 | static const struct file_operations nfqnl_file_ops = { |
894 | .owner = THIS_MODULE, | 887 | .owner = THIS_MODULE, |
895 | .open = nfqnl_open, | 888 | .open = nfqnl_open, |
896 | .read = seq_read, | 889 | .read = seq_read, |
897 | .llseek = seq_lseek, | 890 | .llseek = seq_lseek, |
898 | .release = seq_release_private, | 891 | .release = seq_release_private, |
899 | }; | 892 | }; |
900 | 893 | ||
901 | #endif /* PROC_FS */ | 894 | #endif /* PROC_FS */ |
902 | 895 | ||
903 | static int __init nfnetlink_queue_init(void) | 896 | static int __init nfnetlink_queue_init(void) |
904 | { | 897 | { |
905 | int i, status = -ENOMEM; | 898 | int i, status = -ENOMEM; |
906 | #ifdef CONFIG_PROC_FS | 899 | #ifdef CONFIG_PROC_FS |
907 | struct proc_dir_entry *proc_nfqueue; | 900 | struct proc_dir_entry *proc_nfqueue; |
908 | #endif | 901 | #endif |
909 | 902 | ||
910 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 903 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
911 | INIT_HLIST_HEAD(&instance_table[i]); | 904 | INIT_HLIST_HEAD(&instance_table[i]); |
912 | 905 | ||
913 | netlink_register_notifier(&nfqnl_rtnl_notifier); | 906 | netlink_register_notifier(&nfqnl_rtnl_notifier); |
914 | status = nfnetlink_subsys_register(&nfqnl_subsys); | 907 | status = nfnetlink_subsys_register(&nfqnl_subsys); |
915 | if (status < 0) { | 908 | if (status < 0) { |
916 | printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); | 909 | printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); |
917 | goto cleanup_netlink_notifier; | 910 | goto cleanup_netlink_notifier; |
918 | } | 911 | } |
919 | 912 | ||
920 | #ifdef CONFIG_PROC_FS | 913 | #ifdef CONFIG_PROC_FS |
921 | proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440, | 914 | proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440, |
922 | proc_net_netfilter); | 915 | proc_net_netfilter); |
923 | if (!proc_nfqueue) | 916 | if (!proc_nfqueue) |
924 | goto cleanup_subsys; | 917 | goto cleanup_subsys; |
925 | proc_nfqueue->proc_fops = &nfqnl_file_ops; | 918 | proc_nfqueue->proc_fops = &nfqnl_file_ops; |
926 | #endif | 919 | #endif |
927 | 920 | ||
928 | register_netdevice_notifier(&nfqnl_dev_notifier); | 921 | register_netdevice_notifier(&nfqnl_dev_notifier); |
929 | return status; | 922 | return status; |
930 | 923 | ||
931 | #ifdef CONFIG_PROC_FS | 924 | #ifdef CONFIG_PROC_FS |
932 | cleanup_subsys: | 925 | cleanup_subsys: |
933 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 926 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
934 | #endif | 927 | #endif |
935 | cleanup_netlink_notifier: | 928 | cleanup_netlink_notifier: |
936 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 929 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
937 | return status; | 930 | return status; |
938 | } | 931 | } |
939 | 932 | ||
940 | static void __exit nfnetlink_queue_fini(void) | 933 | static void __exit nfnetlink_queue_fini(void) |
941 | { | 934 | { |
942 | nf_unregister_queue_handlers(&nfqh); | 935 | nf_unregister_queue_handlers(&nfqh); |
943 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 936 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
944 | #ifdef CONFIG_PROC_FS | 937 | #ifdef CONFIG_PROC_FS |
945 | remove_proc_entry("nfnetlink_queue", proc_net_netfilter); | 938 | remove_proc_entry("nfnetlink_queue", proc_net_netfilter); |
946 | #endif | 939 | #endif |
947 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 940 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
948 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 941 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
949 | } | 942 | } |
950 | 943 | ||
951 | MODULE_DESCRIPTION("netfilter packet queue handler"); | 944 | MODULE_DESCRIPTION("netfilter packet queue handler"); |
952 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 945 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
953 | MODULE_LICENSE("GPL"); | 946 | MODULE_LICENSE("GPL"); |
954 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); | 947 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); |
955 | 948 | ||
956 | module_init(nfnetlink_queue_init); | 949 | module_init(nfnetlink_queue_init); |
957 | module_exit(nfnetlink_queue_fini); | 950 | module_exit(nfnetlink_queue_fini); |
958 | 951 |