Commit 08c0cad69f32ad1e881fa3fb7f5e0a25db5b07ce
Committed by: Pablo Neira Ayuso
Parent: 534473c608
Exists in: smarc-imx_3.14.28_1.0.0_ga and 1 other branch
netfilter: nfnetlink_queue: enable UID/GID socket info retrieval
Thanks to commits 41063e9 ("ipv4: Early TCP socket demux") and 421b388 ("udp: ipv4: Add udp early demux"), it is now possible to parse UID and GID socket info also for incoming TCP and UDP connections. Having this info available, it is convenient to let NFQUEUE parse it in order to improve and refine the traffic analysis in userspace.

Signed-off-by: Valentina Giusti <valentina.giusti@bmw-carit.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
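For context, a minimal userspace sketch of how the new metadata would be consumed, assuming a libnetfilter_queue new enough to ship nfq_set_queue_flags() plus the nfq_get_uid()/nfq_get_gid() accessors added alongside this kernel change, and kernel headers that define NFQA_CFG_F_UID_GID; the queue number and the blanket NF_ACCEPT verdict are illustrative choices, not part of this commit:

/* Sketch only: bind to NFQUEUE 0, ask the kernel for UID/GID metadata,
 * and print it for each queued packet.
 * Build: gcc uidgid.c -lnetfilter_queue */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter.h>	/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;
	uint32_t uid, gid;

	/* NFQA_UID/NFQA_GID are optional: absent when the skb carries no
	 * socket, the socket is in TIME_WAIT, or the flag is not enabled.
	 * The accessors are assumed to return nonzero when present. */
	if (nfq_get_uid(nfa, &uid))
		printf("packet %u: uid=%u\n", id, uid);
	if (nfq_get_gid(nfa, &gid))
		printf("packet %u: gid=%u\n", id, gid);

	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[4096];
	int fd, n;

	if (!h)
		exit(1);
	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
	if (!qh)
		exit(1);
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	/* Ask the kernel to attach NFQA_UID/NFQA_GID; this is expected to
	 * fail on kernels that predate NFQA_CFG_F_UID_GID. */
	if (nfq_set_queue_flags(qh, NFQA_CFG_F_UID_GID, NFQA_CFG_F_UID_GID) < 0)
		fprintf(stderr, "warning: kernel may lack NFQA_CFG_F_UID_GID\n");

	fd = nfq_fd(h);
	while ((n = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, n);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}

A matching rule such as iptables -A INPUT -p tcp --dport 80 -j NFQUEUE --queue-num 0 steers traffic into the queue.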
Showing 2 changed files with 38 additions and 1 deletion
include/uapi/linux/netfilter/nfnetlink_queue.h
 #ifndef _NFNETLINK_QUEUE_H
 #define _NFNETLINK_QUEUE_H
 
 #include <linux/types.h>
 #include <linux/netfilter/nfnetlink.h>
 
 enum nfqnl_msg_types {
 	NFQNL_MSG_PACKET,		/* packet from kernel to userspace */
 	NFQNL_MSG_VERDICT,		/* verdict from userspace to kernel */
 	NFQNL_MSG_CONFIG,		/* connect to a particular queue */
 	NFQNL_MSG_VERDICT_BATCH,	/* batchv from userspace to kernel */
 
 	NFQNL_MSG_MAX
 };
 
 struct nfqnl_msg_packet_hdr {
 	__be32		packet_id;	/* unique ID of packet in queue */
 	__be16		hw_protocol;	/* hw protocol (network order) */
 	__u8		hook;		/* netfilter hook */
 } __attribute__ ((packed));
 
 struct nfqnl_msg_packet_hw {
 	__be16		hw_addrlen;
 	__u16		_pad;
 	__u8		hw_addr[8];
 };
 
 struct nfqnl_msg_packet_timestamp {
 	__aligned_be64	sec;
 	__aligned_be64	usec;
 };
 
 enum nfqnl_attr_type {
 	NFQA_UNSPEC,
 	NFQA_PACKET_HDR,
 	NFQA_VERDICT_HDR,		/* nfqnl_msg_verdict_hrd */
 	NFQA_MARK,			/* __u32 nfmark */
 	NFQA_TIMESTAMP,			/* nfqnl_msg_packet_timestamp */
 	NFQA_IFINDEX_INDEV,		/* __u32 ifindex */
 	NFQA_IFINDEX_OUTDEV,		/* __u32 ifindex */
 	NFQA_IFINDEX_PHYSINDEV,		/* __u32 ifindex */
 	NFQA_IFINDEX_PHYSOUTDEV,	/* __u32 ifindex */
 	NFQA_HWADDR,			/* nfqnl_msg_packet_hw */
 	NFQA_PAYLOAD,			/* opaque data payload */
 	NFQA_CT,			/* nf_conntrack_netlink.h */
 	NFQA_CT_INFO,			/* enum ip_conntrack_info */
 	NFQA_CAP_LEN,			/* __u32 length of captured packet */
 	NFQA_SKB_INFO,			/* __u32 skb meta information */
 	NFQA_EXP,			/* nf_conntrack_netlink.h */
+	NFQA_UID,			/* __u32 sk uid */
+	NFQA_GID,			/* __u32 sk gid */
 
 	__NFQA_MAX
 };
 #define NFQA_MAX (__NFQA_MAX - 1)
 
 struct nfqnl_msg_verdict_hdr {
 	__be32 verdict;
 	__be32 id;
 };
 
 
 enum nfqnl_msg_config_cmds {
 	NFQNL_CFG_CMD_NONE,
 	NFQNL_CFG_CMD_BIND,
 	NFQNL_CFG_CMD_UNBIND,
 	NFQNL_CFG_CMD_PF_BIND,
 	NFQNL_CFG_CMD_PF_UNBIND,
 };
 
 struct nfqnl_msg_config_cmd {
 	__u8	command;	/* nfqnl_msg_config_cmds */
 	__u8	_pad;
 	__be16	pf;		/* AF_xxx for PF_[UN]BIND */
 };
 
 enum nfqnl_config_mode {
 	NFQNL_COPY_NONE,
 	NFQNL_COPY_META,
 	NFQNL_COPY_PACKET,
 };
 
 struct nfqnl_msg_config_params {
 	__be32	copy_range;
 	__u8	copy_mode;	/* enum nfqnl_config_mode */
 } __attribute__ ((packed));
 
 
 enum nfqnl_attr_config {
 	NFQA_CFG_UNSPEC,
 	NFQA_CFG_CMD,			/* nfqnl_msg_config_cmd */
 	NFQA_CFG_PARAMS,		/* nfqnl_msg_config_params */
 	NFQA_CFG_QUEUE_MAXLEN,		/* __u32 */
 	NFQA_CFG_MASK,			/* identify which flags to change */
 	NFQA_CFG_FLAGS,			/* value of these flags (__u32) */
 	__NFQA_CFG_MAX
 };
 #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
 
 /* Flags for NFQA_CFG_FLAGS */
 #define NFQA_CFG_F_FAIL_OPEN			(1 << 0)
 #define NFQA_CFG_F_CONNTRACK			(1 << 1)
 #define NFQA_CFG_F_GSO				(1 << 2)
-#define NFQA_CFG_F_MAX				(1 << 3)
+#define NFQA_CFG_F_UID_GID			(1 << 3)
+#define NFQA_CFG_F_MAX				(1 << 4)
 
 /* flags for NFQA_SKB_INFO */
 /* packet appears to have wrong checksums, but they are ok */
 #define NFQA_SKB_CSUMNOTREADY (1 << 0)
 /* packet is GSO (i.e., exceeds device mtu) */
 #define NFQA_SKB_GSO (1 << 1)
 /* csum not validated (incoming device doesn't support hw checksum, etc.) */
 #define NFQA_SKB_CSUM_NOTVERIFIED (1 << 2)
 
 #endif /* _NFNETLINK_QUEUE_H */
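The two new attributes ride in the NFQNL_MSG_PACKET attribute block, each carrying a 32-bit big-endian value: the kernel writes them with nla_put_be32() after translating the socket credentials through from_kuid_munged()/from_kgid_munged() against init_user_ns. For raw-netlink consumers that do not use libnetfilter_queue, a sketch of pulling them out of the attribute stream; parse_uid_gid() is a hypothetical helper, not part of any library:

/* Hypothetical helper: scan the nlattr block of one NFQNL_MSG_PACKET
 * message for NFQA_UID/NFQA_GID. Both values are big-endian on the wire. */
#include <stdint.h>
#include <arpa/inet.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink_queue.h>

static void parse_uid_gid(const struct nlattr *attr, int len,
			  uint32_t *uid, uint32_t *gid)
{
	while (len >= (int)sizeof(*attr) &&
	       attr->nla_len >= sizeof(*attr) && attr->nla_len <= len) {
		const void *data = (const char *)attr + NLA_HDRLEN;

		switch (attr->nla_type & NLA_TYPE_MASK) {
		case NFQA_UID:
			*uid = ntohl(*(const uint32_t *)data);
			break;
		case NFQA_GID:
			*gid = ntohl(*(const uint32_t *)data);
			break;
		}
		len -= NLA_ALIGN(attr->nla_len);
		attr = (const struct nlattr *)((const char *)attr +
					       NLA_ALIGN(attr->nla_len));
	}
}

Note that both attributes are simply absent when the queued skb carries no socket, when the socket is in TCP_TIME_WAIT (no credentials there, see nfqnl_put_sk_uidgid() below), or when NFQA_CFG_F_UID_GID was not set via NFQA_CFG_FLAGS/NFQA_CFG_MASK.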
net/netfilter/nfnetlink_queue_core.c
1 | /* | 1 | /* |
2 | * This is a module which is used for queueing packets and communicating with | 2 | * This is a module which is used for queueing packets and communicating with |
3 | * userspace via nfnetlink. | 3 | * userspace via nfnetlink. |
4 | * | 4 | * |
5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> | 6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> |
7 | * | 7 | * |
8 | * Based on the old ipv4-only ip_queue.c: | 8 | * Based on the old ipv4-only ip_queue.c: |
9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> | 9 | * (C) 2000-2002 James Morris <jmorris@intercode.com.au> |
10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> | 10 | * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/notifier.h> | 22 | #include <linux/notifier.h> |
23 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
24 | #include <linux/netfilter.h> | 24 | #include <linux/netfilter.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/netfilter_ipv4.h> | 26 | #include <linux/netfilter_ipv4.h> |
27 | #include <linux/netfilter_ipv6.h> | 27 | #include <linux/netfilter_ipv6.h> |
28 | #include <linux/netfilter/nfnetlink.h> | 28 | #include <linux/netfilter/nfnetlink.h> |
29 | #include <linux/netfilter/nfnetlink_queue.h> | 29 | #include <linux/netfilter/nfnetlink_queue.h> |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <net/sock.h> | 31 | #include <net/sock.h> |
32 | #include <net/netfilter/nf_queue.h> | 32 | #include <net/netfilter/nf_queue.h> |
33 | #include <net/netns/generic.h> | 33 | #include <net/netns/generic.h> |
34 | #include <net/netfilter/nfnetlink_queue.h> | 34 | #include <net/netfilter/nfnetlink_queue.h> |
35 | 35 | ||
36 | #include <linux/atomic.h> | 36 | #include <linux/atomic.h> |
37 | 37 | ||
38 | #ifdef CONFIG_BRIDGE_NETFILTER | 38 | #ifdef CONFIG_BRIDGE_NETFILTER |
39 | #include "../bridge/br_private.h" | 39 | #include "../bridge/br_private.h" |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #define NFQNL_QMAX_DEFAULT 1024 | 42 | #define NFQNL_QMAX_DEFAULT 1024 |
43 | 43 | ||
44 | /* We're using struct nlattr which has 16bit nla_len. Note that nla_len | 44 | /* We're using struct nlattr which has 16bit nla_len. Note that nla_len |
45 | * includes the header length. Thus, the maximum packet length that we | 45 | * includes the header length. Thus, the maximum packet length that we |
46 | * support is 65531 bytes. We send truncated packets if the specified length | 46 | * support is 65531 bytes. We send truncated packets if the specified length |
47 | * is larger than that. Userspace can check for presence of NFQA_CAP_LEN | 47 | * is larger than that. Userspace can check for presence of NFQA_CAP_LEN |
48 | * attribute to detect truncation. | 48 | * attribute to detect truncation. |
49 | */ | 49 | */ |
50 | #define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN) | 50 | #define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN) |
51 | 51 | ||
52 | struct nfqnl_instance { | 52 | struct nfqnl_instance { |
53 | struct hlist_node hlist; /* global list of queues */ | 53 | struct hlist_node hlist; /* global list of queues */ |
54 | struct rcu_head rcu; | 54 | struct rcu_head rcu; |
55 | 55 | ||
56 | int peer_portid; | 56 | int peer_portid; |
57 | unsigned int queue_maxlen; | 57 | unsigned int queue_maxlen; |
58 | unsigned int copy_range; | 58 | unsigned int copy_range; |
59 | unsigned int queue_dropped; | 59 | unsigned int queue_dropped; |
60 | unsigned int queue_user_dropped; | 60 | unsigned int queue_user_dropped; |
61 | 61 | ||
62 | 62 | ||
63 | u_int16_t queue_num; /* number of this queue */ | 63 | u_int16_t queue_num; /* number of this queue */ |
64 | u_int8_t copy_mode; | 64 | u_int8_t copy_mode; |
65 | u_int32_t flags; /* Set using NFQA_CFG_FLAGS */ | 65 | u_int32_t flags; /* Set using NFQA_CFG_FLAGS */ |
66 | /* | 66 | /* |
67 | * Following fields are dirtied for each queued packet, | 67 | * Following fields are dirtied for each queued packet, |
68 | * keep them in same cache line if possible. | 68 | * keep them in same cache line if possible. |
69 | */ | 69 | */ |
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | unsigned int queue_total; | 71 | unsigned int queue_total; |
72 | unsigned int id_sequence; /* 'sequence' of pkt ids */ | 72 | unsigned int id_sequence; /* 'sequence' of pkt ids */ |
73 | struct list_head queue_list; /* packets in queue */ | 73 | struct list_head queue_list; /* packets in queue */ |
74 | }; | 74 | }; |
75 | 75 | ||
76 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); | 76 | typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); |
77 | 77 | ||
78 | static int nfnl_queue_net_id __read_mostly; | 78 | static int nfnl_queue_net_id __read_mostly; |
79 | 79 | ||
80 | #define INSTANCE_BUCKETS 16 | 80 | #define INSTANCE_BUCKETS 16 |
81 | struct nfnl_queue_net { | 81 | struct nfnl_queue_net { |
82 | spinlock_t instances_lock; | 82 | spinlock_t instances_lock; |
83 | struct hlist_head instance_table[INSTANCE_BUCKETS]; | 83 | struct hlist_head instance_table[INSTANCE_BUCKETS]; |
84 | }; | 84 | }; |
85 | 85 | ||
86 | static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net) | 86 | static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net) |
87 | { | 87 | { |
88 | return net_generic(net, nfnl_queue_net_id); | 88 | return net_generic(net, nfnl_queue_net_id); |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) | 91 | static inline u_int8_t instance_hashfn(u_int16_t queue_num) |
92 | { | 92 | { |
93 | return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS; | 93 | return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS; |
94 | } | 94 | } |
95 | 95 | ||
96 | static struct nfqnl_instance * | 96 | static struct nfqnl_instance * |
97 | instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num) | 97 | instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num) |
98 | { | 98 | { |
99 | struct hlist_head *head; | 99 | struct hlist_head *head; |
100 | struct nfqnl_instance *inst; | 100 | struct nfqnl_instance *inst; |
101 | 101 | ||
102 | head = &q->instance_table[instance_hashfn(queue_num)]; | 102 | head = &q->instance_table[instance_hashfn(queue_num)]; |
103 | hlist_for_each_entry_rcu(inst, head, hlist) { | 103 | hlist_for_each_entry_rcu(inst, head, hlist) { |
104 | if (inst->queue_num == queue_num) | 104 | if (inst->queue_num == queue_num) |
105 | return inst; | 105 | return inst; |
106 | } | 106 | } |
107 | return NULL; | 107 | return NULL; |
108 | } | 108 | } |
109 | 109 | ||
110 | static struct nfqnl_instance * | 110 | static struct nfqnl_instance * |
111 | instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, | 111 | instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, |
112 | int portid) | 112 | int portid) |
113 | { | 113 | { |
114 | struct nfqnl_instance *inst; | 114 | struct nfqnl_instance *inst; |
115 | unsigned int h; | 115 | unsigned int h; |
116 | int err; | 116 | int err; |
117 | 117 | ||
118 | spin_lock(&q->instances_lock); | 118 | spin_lock(&q->instances_lock); |
119 | if (instance_lookup(q, queue_num)) { | 119 | if (instance_lookup(q, queue_num)) { |
120 | err = -EEXIST; | 120 | err = -EEXIST; |
121 | goto out_unlock; | 121 | goto out_unlock; |
122 | } | 122 | } |
123 | 123 | ||
124 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); | 124 | inst = kzalloc(sizeof(*inst), GFP_ATOMIC); |
125 | if (!inst) { | 125 | if (!inst) { |
126 | err = -ENOMEM; | 126 | err = -ENOMEM; |
127 | goto out_unlock; | 127 | goto out_unlock; |
128 | } | 128 | } |
129 | 129 | ||
130 | inst->queue_num = queue_num; | 130 | inst->queue_num = queue_num; |
131 | inst->peer_portid = portid; | 131 | inst->peer_portid = portid; |
132 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 132 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
133 | inst->copy_range = NFQNL_MAX_COPY_RANGE; | 133 | inst->copy_range = NFQNL_MAX_COPY_RANGE; |
134 | inst->copy_mode = NFQNL_COPY_NONE; | 134 | inst->copy_mode = NFQNL_COPY_NONE; |
135 | spin_lock_init(&inst->lock); | 135 | spin_lock_init(&inst->lock); |
136 | INIT_LIST_HEAD(&inst->queue_list); | 136 | INIT_LIST_HEAD(&inst->queue_list); |
137 | 137 | ||
138 | if (!try_module_get(THIS_MODULE)) { | 138 | if (!try_module_get(THIS_MODULE)) { |
139 | err = -EAGAIN; | 139 | err = -EAGAIN; |
140 | goto out_free; | 140 | goto out_free; |
141 | } | 141 | } |
142 | 142 | ||
143 | h = instance_hashfn(queue_num); | 143 | h = instance_hashfn(queue_num); |
144 | hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]); | 144 | hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]); |
145 | 145 | ||
146 | spin_unlock(&q->instances_lock); | 146 | spin_unlock(&q->instances_lock); |
147 | 147 | ||
148 | return inst; | 148 | return inst; |
149 | 149 | ||
150 | out_free: | 150 | out_free: |
151 | kfree(inst); | 151 | kfree(inst); |
152 | out_unlock: | 152 | out_unlock: |
153 | spin_unlock(&q->instances_lock); | 153 | spin_unlock(&q->instances_lock); |
154 | return ERR_PTR(err); | 154 | return ERR_PTR(err); |
155 | } | 155 | } |
156 | 156 | ||
157 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, | 157 | static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, |
158 | unsigned long data); | 158 | unsigned long data); |
159 | 159 | ||
160 | static void | 160 | static void |
161 | instance_destroy_rcu(struct rcu_head *head) | 161 | instance_destroy_rcu(struct rcu_head *head) |
162 | { | 162 | { |
163 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, | 163 | struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, |
164 | rcu); | 164 | rcu); |
165 | 165 | ||
166 | nfqnl_flush(inst, NULL, 0); | 166 | nfqnl_flush(inst, NULL, 0); |
167 | kfree(inst); | 167 | kfree(inst); |
168 | module_put(THIS_MODULE); | 168 | module_put(THIS_MODULE); |
169 | } | 169 | } |
170 | 170 | ||
171 | static void | 171 | static void |
172 | __instance_destroy(struct nfqnl_instance *inst) | 172 | __instance_destroy(struct nfqnl_instance *inst) |
173 | { | 173 | { |
174 | hlist_del_rcu(&inst->hlist); | 174 | hlist_del_rcu(&inst->hlist); |
175 | call_rcu(&inst->rcu, instance_destroy_rcu); | 175 | call_rcu(&inst->rcu, instance_destroy_rcu); |
176 | } | 176 | } |
177 | 177 | ||
178 | static void | 178 | static void |
179 | instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst) | 179 | instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst) |
180 | { | 180 | { |
181 | spin_lock(&q->instances_lock); | 181 | spin_lock(&q->instances_lock); |
182 | __instance_destroy(inst); | 182 | __instance_destroy(inst); |
183 | spin_unlock(&q->instances_lock); | 183 | spin_unlock(&q->instances_lock); |
184 | } | 184 | } |
185 | 185 | ||
186 | static inline void | 186 | static inline void |
187 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | 187 | __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) |
188 | { | 188 | { |
189 | list_add_tail(&entry->list, &queue->queue_list); | 189 | list_add_tail(&entry->list, &queue->queue_list); |
190 | queue->queue_total++; | 190 | queue->queue_total++; |
191 | } | 191 | } |
192 | 192 | ||
193 | static void | 193 | static void |
194 | __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) | 194 | __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) |
195 | { | 195 | { |
196 | list_del(&entry->list); | 196 | list_del(&entry->list); |
197 | queue->queue_total--; | 197 | queue->queue_total--; |
198 | } | 198 | } |
199 | 199 | ||
200 | static struct nf_queue_entry * | 200 | static struct nf_queue_entry * |
201 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) | 201 | find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) |
202 | { | 202 | { |
203 | struct nf_queue_entry *entry = NULL, *i; | 203 | struct nf_queue_entry *entry = NULL, *i; |
204 | 204 | ||
205 | spin_lock_bh(&queue->lock); | 205 | spin_lock_bh(&queue->lock); |
206 | 206 | ||
207 | list_for_each_entry(i, &queue->queue_list, list) { | 207 | list_for_each_entry(i, &queue->queue_list, list) { |
208 | if (i->id == id) { | 208 | if (i->id == id) { |
209 | entry = i; | 209 | entry = i; |
210 | break; | 210 | break; |
211 | } | 211 | } |
212 | } | 212 | } |
213 | 213 | ||
214 | if (entry) | 214 | if (entry) |
215 | __dequeue_entry(queue, entry); | 215 | __dequeue_entry(queue, entry); |
216 | 216 | ||
217 | spin_unlock_bh(&queue->lock); | 217 | spin_unlock_bh(&queue->lock); |
218 | 218 | ||
219 | return entry; | 219 | return entry; |
220 | } | 220 | } |
221 | 221 | ||
222 | static void | 222 | static void |
223 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) | 223 | nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) |
224 | { | 224 | { |
225 | struct nf_queue_entry *entry, *next; | 225 | struct nf_queue_entry *entry, *next; |
226 | 226 | ||
227 | spin_lock_bh(&queue->lock); | 227 | spin_lock_bh(&queue->lock); |
228 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { | 228 | list_for_each_entry_safe(entry, next, &queue->queue_list, list) { |
229 | if (!cmpfn || cmpfn(entry, data)) { | 229 | if (!cmpfn || cmpfn(entry, data)) { |
230 | list_del(&entry->list); | 230 | list_del(&entry->list); |
231 | queue->queue_total--; | 231 | queue->queue_total--; |
232 | nf_reinject(entry, NF_DROP); | 232 | nf_reinject(entry, NF_DROP); |
233 | } | 233 | } |
234 | } | 234 | } |
235 | spin_unlock_bh(&queue->lock); | 235 | spin_unlock_bh(&queue->lock); |
236 | } | 236 | } |
237 | 237 | ||
238 | static void | 238 | static void |
239 | nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | 239 | nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) |
240 | { | 240 | { |
241 | int i, j = 0; | 241 | int i, j = 0; |
242 | int plen = 0; /* length of skb->head fragment */ | 242 | int plen = 0; /* length of skb->head fragment */ |
243 | struct page *page; | 243 | struct page *page; |
244 | unsigned int offset; | 244 | unsigned int offset; |
245 | 245 | ||
246 | /* dont bother with small payloads */ | 246 | /* dont bother with small payloads */ |
247 | if (len <= skb_tailroom(to)) { | 247 | if (len <= skb_tailroom(to)) { |
248 | skb_copy_bits(from, 0, skb_put(to, len), len); | 248 | skb_copy_bits(from, 0, skb_put(to, len), len); |
249 | return; | 249 | return; |
250 | } | 250 | } |
251 | 251 | ||
252 | if (hlen) { | 252 | if (hlen) { |
253 | skb_copy_bits(from, 0, skb_put(to, hlen), hlen); | 253 | skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
254 | len -= hlen; | 254 | len -= hlen; |
255 | } else { | 255 | } else { |
256 | plen = min_t(int, skb_headlen(from), len); | 256 | plen = min_t(int, skb_headlen(from), len); |
257 | if (plen) { | 257 | if (plen) { |
258 | page = virt_to_head_page(from->head); | 258 | page = virt_to_head_page(from->head); |
259 | offset = from->data - (unsigned char *)page_address(page); | 259 | offset = from->data - (unsigned char *)page_address(page); |
260 | __skb_fill_page_desc(to, 0, page, offset, plen); | 260 | __skb_fill_page_desc(to, 0, page, offset, plen); |
261 | get_page(page); | 261 | get_page(page); |
262 | j = 1; | 262 | j = 1; |
263 | len -= plen; | 263 | len -= plen; |
264 | } | 264 | } |
265 | } | 265 | } |
266 | 266 | ||
267 | to->truesize += len + plen; | 267 | to->truesize += len + plen; |
268 | to->len += len + plen; | 268 | to->len += len + plen; |
269 | to->data_len += len + plen; | 269 | to->data_len += len + plen; |
270 | 270 | ||
271 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { | 271 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
272 | if (!len) | 272 | if (!len) |
273 | break; | 273 | break; |
274 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; | 274 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; |
275 | skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); | 275 | skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); |
276 | len -= skb_shinfo(to)->frags[j].size; | 276 | len -= skb_shinfo(to)->frags[j].size; |
277 | skb_frag_ref(to, j); | 277 | skb_frag_ref(to, j); |
278 | j++; | 278 | j++; |
279 | } | 279 | } |
280 | skb_shinfo(to)->nr_frags = j; | 280 | skb_shinfo(to)->nr_frags = j; |
281 | } | 281 | } |
282 | 282 | ||
283 | static int | 283 | static int |
284 | nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet, | 284 | nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet, |
285 | bool csum_verify) | 285 | bool csum_verify) |
286 | { | 286 | { |
287 | __u32 flags = 0; | 287 | __u32 flags = 0; |
288 | 288 | ||
289 | if (packet->ip_summed == CHECKSUM_PARTIAL) | 289 | if (packet->ip_summed == CHECKSUM_PARTIAL) |
290 | flags = NFQA_SKB_CSUMNOTREADY; | 290 | flags = NFQA_SKB_CSUMNOTREADY; |
291 | else if (csum_verify) | 291 | else if (csum_verify) |
292 | flags = NFQA_SKB_CSUM_NOTVERIFIED; | 292 | flags = NFQA_SKB_CSUM_NOTVERIFIED; |
293 | 293 | ||
294 | if (skb_is_gso(packet)) | 294 | if (skb_is_gso(packet)) |
295 | flags |= NFQA_SKB_GSO; | 295 | flags |= NFQA_SKB_GSO; |
296 | 296 | ||
297 | return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0; | 297 | return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk) | ||
301 | { | ||
302 | const struct cred *cred; | ||
303 | |||
304 | if (sk->sk_state == TCP_TIME_WAIT) | ||
305 | return 0; | ||
306 | |||
307 | read_lock_bh(&sk->sk_callback_lock); | ||
308 | if (sk->sk_socket && sk->sk_socket->file) { | ||
309 | cred = sk->sk_socket->file->f_cred; | ||
310 | if (nla_put_be32(skb, NFQA_UID, | ||
311 | htonl(from_kuid_munged(&init_user_ns, cred->fsuid)))) | ||
312 | goto nla_put_failure; | ||
313 | if (nla_put_be32(skb, NFQA_GID, | ||
314 | htonl(from_kgid_munged(&init_user_ns, cred->fsgid)))) | ||
315 | goto nla_put_failure; | ||
316 | } | ||
317 | read_unlock_bh(&sk->sk_callback_lock); | ||
318 | return 0; | ||
319 | |||
320 | nla_put_failure: | ||
321 | read_unlock_bh(&sk->sk_callback_lock); | ||
322 | return -1; | ||
323 | } | ||
324 | |||
300 | static struct sk_buff * | 325 | static struct sk_buff * |
301 | nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | 326 | nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, |
302 | struct nf_queue_entry *entry, | 327 | struct nf_queue_entry *entry, |
303 | __be32 **packet_id_ptr) | 328 | __be32 **packet_id_ptr) |
304 | { | 329 | { |
305 | size_t size; | 330 | size_t size; |
306 | size_t data_len = 0, cap_len = 0; | 331 | size_t data_len = 0, cap_len = 0; |
307 | int hlen = 0; | 332 | int hlen = 0; |
308 | struct sk_buff *skb; | 333 | struct sk_buff *skb; |
309 | struct nlattr *nla; | 334 | struct nlattr *nla; |
310 | struct nfqnl_msg_packet_hdr *pmsg; | 335 | struct nfqnl_msg_packet_hdr *pmsg; |
311 | struct nlmsghdr *nlh; | 336 | struct nlmsghdr *nlh; |
312 | struct nfgenmsg *nfmsg; | 337 | struct nfgenmsg *nfmsg; |
313 | struct sk_buff *entskb = entry->skb; | 338 | struct sk_buff *entskb = entry->skb; |
314 | struct net_device *indev; | 339 | struct net_device *indev; |
315 | struct net_device *outdev; | 340 | struct net_device *outdev; |
316 | struct nf_conn *ct = NULL; | 341 | struct nf_conn *ct = NULL; |
317 | enum ip_conntrack_info uninitialized_var(ctinfo); | 342 | enum ip_conntrack_info uninitialized_var(ctinfo); |
318 | bool csum_verify; | 343 | bool csum_verify; |
319 | 344 | ||
320 | size = nlmsg_total_size(sizeof(struct nfgenmsg)) | 345 | size = nlmsg_total_size(sizeof(struct nfgenmsg)) |
321 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) | 346 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) |
322 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 347 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
323 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 348 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
324 | #ifdef CONFIG_BRIDGE_NETFILTER | 349 | #ifdef CONFIG_BRIDGE_NETFILTER |
325 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 350 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
326 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ | 351 | + nla_total_size(sizeof(u_int32_t)) /* ifindex */ |
327 | #endif | 352 | #endif |
328 | + nla_total_size(sizeof(u_int32_t)) /* mark */ | 353 | + nla_total_size(sizeof(u_int32_t)) /* mark */ |
329 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) | 354 | + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) |
330 | + nla_total_size(sizeof(u_int32_t)) /* skbinfo */ | 355 | + nla_total_size(sizeof(u_int32_t)) /* skbinfo */ |
331 | + nla_total_size(sizeof(u_int32_t)); /* cap_len */ | 356 | + nla_total_size(sizeof(u_int32_t)); /* cap_len */ |
332 | 357 | ||
333 | if (entskb->tstamp.tv64) | 358 | if (entskb->tstamp.tv64) |
334 | size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); | 359 | size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); |
335 | 360 | ||
336 | if (entry->hook <= NF_INET_FORWARD || | 361 | if (entry->hook <= NF_INET_FORWARD || |
337 | (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) | 362 | (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) |
338 | csum_verify = !skb_csum_unnecessary(entskb); | 363 | csum_verify = !skb_csum_unnecessary(entskb); |
339 | else | 364 | else |
340 | csum_verify = false; | 365 | csum_verify = false; |
341 | 366 | ||
342 | outdev = entry->outdev; | 367 | outdev = entry->outdev; |
343 | 368 | ||
344 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { | 369 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { |
345 | case NFQNL_COPY_META: | 370 | case NFQNL_COPY_META: |
346 | case NFQNL_COPY_NONE: | 371 | case NFQNL_COPY_NONE: |
347 | break; | 372 | break; |
348 | 373 | ||
349 | case NFQNL_COPY_PACKET: | 374 | case NFQNL_COPY_PACKET: |
350 | if (!(queue->flags & NFQA_CFG_F_GSO) && | 375 | if (!(queue->flags & NFQA_CFG_F_GSO) && |
351 | entskb->ip_summed == CHECKSUM_PARTIAL && | 376 | entskb->ip_summed == CHECKSUM_PARTIAL && |
352 | skb_checksum_help(entskb)) | 377 | skb_checksum_help(entskb)) |
353 | return NULL; | 378 | return NULL; |
354 | 379 | ||
355 | data_len = ACCESS_ONCE(queue->copy_range); | 380 | data_len = ACCESS_ONCE(queue->copy_range); |
356 | if (data_len > entskb->len) | 381 | if (data_len > entskb->len) |
357 | data_len = entskb->len; | 382 | data_len = entskb->len; |
358 | 383 | ||
359 | if (!entskb->head_frag || | 384 | if (!entskb->head_frag || |
360 | skb_headlen(entskb) < L1_CACHE_BYTES || | 385 | skb_headlen(entskb) < L1_CACHE_BYTES || |
361 | skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS) | 386 | skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS) |
362 | hlen = skb_headlen(entskb); | 387 | hlen = skb_headlen(entskb); |
363 | 388 | ||
364 | if (skb_has_frag_list(entskb)) | 389 | if (skb_has_frag_list(entskb)) |
365 | hlen = entskb->len; | 390 | hlen = entskb->len; |
366 | hlen = min_t(int, data_len, hlen); | 391 | hlen = min_t(int, data_len, hlen); |
367 | size += sizeof(struct nlattr) + hlen; | 392 | size += sizeof(struct nlattr) + hlen; |
368 | cap_len = entskb->len; | 393 | cap_len = entskb->len; |
369 | break; | 394 | break; |
370 | } | 395 | } |
371 | 396 | ||
372 | if (queue->flags & NFQA_CFG_F_CONNTRACK) | 397 | if (queue->flags & NFQA_CFG_F_CONNTRACK) |
373 | ct = nfqnl_ct_get(entskb, &size, &ctinfo); | 398 | ct = nfqnl_ct_get(entskb, &size, &ctinfo); |
374 | 399 | ||
400 | if (queue->flags & NFQA_CFG_F_UID_GID) { | ||
401 | size += (nla_total_size(sizeof(u_int32_t)) /* uid */ | ||
402 | + nla_total_size(sizeof(u_int32_t))); /* gid */ | ||
403 | } | ||
404 | |||
375 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, | 405 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, |
376 | GFP_ATOMIC); | 406 | GFP_ATOMIC); |
377 | if (!skb) | 407 | if (!skb) |
378 | return NULL; | 408 | return NULL; |
379 | 409 | ||
380 | nlh = nlmsg_put(skb, 0, 0, | 410 | nlh = nlmsg_put(skb, 0, 0, |
381 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 411 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
382 | sizeof(struct nfgenmsg), 0); | 412 | sizeof(struct nfgenmsg), 0); |
383 | if (!nlh) { | 413 | if (!nlh) { |
384 | kfree_skb(skb); | 414 | kfree_skb(skb); |
385 | return NULL; | 415 | return NULL; |
386 | } | 416 | } |
387 | nfmsg = nlmsg_data(nlh); | 417 | nfmsg = nlmsg_data(nlh); |
388 | nfmsg->nfgen_family = entry->pf; | 418 | nfmsg->nfgen_family = entry->pf; |
389 | nfmsg->version = NFNETLINK_V0; | 419 | nfmsg->version = NFNETLINK_V0; |
390 | nfmsg->res_id = htons(queue->queue_num); | 420 | nfmsg->res_id = htons(queue->queue_num); |
391 | 421 | ||
392 | nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); | 422 | nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); |
393 | pmsg = nla_data(nla); | 423 | pmsg = nla_data(nla); |
394 | pmsg->hw_protocol = entskb->protocol; | 424 | pmsg->hw_protocol = entskb->protocol; |
395 | pmsg->hook = entry->hook; | 425 | pmsg->hook = entry->hook; |
396 | *packet_id_ptr = &pmsg->packet_id; | 426 | *packet_id_ptr = &pmsg->packet_id; |
397 | 427 | ||
398 | indev = entry->indev; | 428 | indev = entry->indev; |
399 | if (indev) { | 429 | if (indev) { |
400 | #ifndef CONFIG_BRIDGE_NETFILTER | 430 | #ifndef CONFIG_BRIDGE_NETFILTER |
401 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) | 431 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) |
402 | goto nla_put_failure; | 432 | goto nla_put_failure; |
403 | #else | 433 | #else |
404 | if (entry->pf == PF_BRIDGE) { | 434 | if (entry->pf == PF_BRIDGE) { |
405 | /* Case 1: indev is physical input device, we need to | 435 | /* Case 1: indev is physical input device, we need to |
406 | * look for bridge group (when called from | 436 | * look for bridge group (when called from |
407 | * netfilter_bridge) */ | 437 | * netfilter_bridge) */ |
408 | if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, | 438 | if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, |
409 | htonl(indev->ifindex)) || | 439 | htonl(indev->ifindex)) || |
410 | /* this is the bridge group "brX" */ | 440 | /* this is the bridge group "brX" */ |
411 | /* rcu_read_lock()ed by __nf_queue */ | 441 | /* rcu_read_lock()ed by __nf_queue */ |
412 | nla_put_be32(skb, NFQA_IFINDEX_INDEV, | 442 | nla_put_be32(skb, NFQA_IFINDEX_INDEV, |
413 | htonl(br_port_get_rcu(indev)->br->dev->ifindex))) | 443 | htonl(br_port_get_rcu(indev)->br->dev->ifindex))) |
414 | goto nla_put_failure; | 444 | goto nla_put_failure; |
415 | } else { | 445 | } else { |
416 | /* Case 2: indev is bridge group, we need to look for | 446 | /* Case 2: indev is bridge group, we need to look for |
417 | * physical device (when called from ipv4) */ | 447 | * physical device (when called from ipv4) */ |
418 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, | 448 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, |
419 | htonl(indev->ifindex))) | 449 | htonl(indev->ifindex))) |
420 | goto nla_put_failure; | 450 | goto nla_put_failure; |
421 | if (entskb->nf_bridge && entskb->nf_bridge->physindev && | 451 | if (entskb->nf_bridge && entskb->nf_bridge->physindev && |
422 | nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, | 452 | nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, |
423 | htonl(entskb->nf_bridge->physindev->ifindex))) | 453 | htonl(entskb->nf_bridge->physindev->ifindex))) |
424 | goto nla_put_failure; | 454 | goto nla_put_failure; |
425 | } | 455 | } |
426 | #endif | 456 | #endif |
427 | } | 457 | } |
428 | 458 | ||
429 | if (outdev) { | 459 | if (outdev) { |
430 | #ifndef CONFIG_BRIDGE_NETFILTER | 460 | #ifndef CONFIG_BRIDGE_NETFILTER |
431 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) | 461 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) |
432 | goto nla_put_failure; | 462 | goto nla_put_failure; |
433 | #else | 463 | #else |
434 | if (entry->pf == PF_BRIDGE) { | 464 | if (entry->pf == PF_BRIDGE) { |
435 | /* Case 1: outdev is physical output device, we need to | 465 | /* Case 1: outdev is physical output device, we need to |
436 | * look for bridge group (when called from | 466 | * look for bridge group (when called from |
437 | * netfilter_bridge) */ | 467 | * netfilter_bridge) */ |
438 | if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 468 | if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
439 | htonl(outdev->ifindex)) || | 469 | htonl(outdev->ifindex)) || |
440 | /* this is the bridge group "brX" */ | 470 | /* this is the bridge group "brX" */ |
441 | /* rcu_read_lock()ed by __nf_queue */ | 471 | /* rcu_read_lock()ed by __nf_queue */ |
442 | nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, | 472 | nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, |
443 | htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) | 473 | htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) |
444 | goto nla_put_failure; | 474 | goto nla_put_failure; |
445 | } else { | 475 | } else { |
446 | /* Case 2: outdev is bridge group, we need to look for | 476 | /* Case 2: outdev is bridge group, we need to look for |
447 | * physical output device (when called from ipv4) */ | 477 | * physical output device (when called from ipv4) */ |
448 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, | 478 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, |
449 | htonl(outdev->ifindex))) | 479 | htonl(outdev->ifindex))) |
450 | goto nla_put_failure; | 480 | goto nla_put_failure; |
451 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev && | 481 | if (entskb->nf_bridge && entskb->nf_bridge->physoutdev && |
452 | nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, | 482 | nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, |
453 | htonl(entskb->nf_bridge->physoutdev->ifindex))) | 483 | htonl(entskb->nf_bridge->physoutdev->ifindex))) |
454 | goto nla_put_failure; | 484 | goto nla_put_failure; |
455 | } | 485 | } |
456 | #endif | 486 | #endif |
457 | } | 487 | } |
458 | 488 | ||
459 | if (entskb->mark && | 489 | if (entskb->mark && |
460 | nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark))) | 490 | nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark))) |
461 | goto nla_put_failure; | 491 | goto nla_put_failure; |
462 | 492 | ||
463 | if (indev && entskb->dev && | 493 | if (indev && entskb->dev && |
464 | entskb->mac_header != entskb->network_header) { | 494 | entskb->mac_header != entskb->network_header) { |
465 | struct nfqnl_msg_packet_hw phw; | 495 | struct nfqnl_msg_packet_hw phw; |
466 | int len; | 496 | int len; |
467 | 497 | ||
468 | memset(&phw, 0, sizeof(phw)); | 498 | memset(&phw, 0, sizeof(phw)); |
469 | len = dev_parse_header(entskb, phw.hw_addr); | 499 | len = dev_parse_header(entskb, phw.hw_addr); |
470 | if (len) { | 500 | if (len) { |
471 | phw.hw_addrlen = htons(len); | 501 | phw.hw_addrlen = htons(len); |
472 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) | 502 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) |
473 | goto nla_put_failure; | 503 | goto nla_put_failure; |
474 | } | 504 | } |
475 | } | 505 | } |
476 | 506 | ||
477 | if (entskb->tstamp.tv64) { | 507 | if (entskb->tstamp.tv64) { |
478 | struct nfqnl_msg_packet_timestamp ts; | 508 | struct nfqnl_msg_packet_timestamp ts; |
479 | struct timeval tv = ktime_to_timeval(entskb->tstamp); | 509 | struct timeval tv = ktime_to_timeval(entskb->tstamp); |
480 | ts.sec = cpu_to_be64(tv.tv_sec); | 510 | ts.sec = cpu_to_be64(tv.tv_sec); |
481 | ts.usec = cpu_to_be64(tv.tv_usec); | 511 | ts.usec = cpu_to_be64(tv.tv_usec); |
482 | 512 | ||
483 | if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts)) | 513 | if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts)) |
484 | goto nla_put_failure; | 514 | goto nla_put_failure; |
485 | } | 515 | } |
516 | |||
517 | if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk && | ||
518 | nfqnl_put_sk_uidgid(skb, entskb->sk) < 0) | ||
519 | goto nla_put_failure; | ||
486 | 520 | ||
487 | if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) | 521 | if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) |
488 | goto nla_put_failure; | 522 | goto nla_put_failure; |
489 | 523 | ||
490 | if (cap_len > data_len && | 524 | if (cap_len > data_len && |
491 | nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) | 525 | nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) |
492 | goto nla_put_failure; | 526 | goto nla_put_failure; |
493 | 527 | ||
494 | if (nfqnl_put_packet_info(skb, entskb, csum_verify)) | 528 | if (nfqnl_put_packet_info(skb, entskb, csum_verify)) |
495 | goto nla_put_failure; | 529 | goto nla_put_failure; |
496 | 530 | ||
497 | if (data_len) { | 531 | if (data_len) { |
498 | struct nlattr *nla; | 532 | struct nlattr *nla; |
499 | 533 | ||
500 | if (skb_tailroom(skb) < sizeof(*nla) + hlen) | 534 | if (skb_tailroom(skb) < sizeof(*nla) + hlen) |
501 | goto nla_put_failure; | 535 | goto nla_put_failure; |
502 | 536 | ||
503 | nla = (struct nlattr *)skb_put(skb, sizeof(*nla)); | 537 | nla = (struct nlattr *)skb_put(skb, sizeof(*nla)); |
504 | nla->nla_type = NFQA_PAYLOAD; | 538 | nla->nla_type = NFQA_PAYLOAD; |
505 | nla->nla_len = nla_attr_size(data_len); | 539 | nla->nla_len = nla_attr_size(data_len); |
506 | 540 | ||
507 | nfqnl_zcopy(skb, entskb, data_len, hlen); | 541 | nfqnl_zcopy(skb, entskb, data_len, hlen); |
508 | } | 542 | } |
509 | 543 | ||
510 | nlh->nlmsg_len = skb->len; | 544 | nlh->nlmsg_len = skb->len; |
511 | return skb; | 545 | return skb; |
512 | 546 | ||
513 | nla_put_failure: | 547 | nla_put_failure: |
514 | kfree_skb(skb); | 548 | kfree_skb(skb); |
515 | net_err_ratelimited("nf_queue: error creating packet message\n"); | 549 | net_err_ratelimited("nf_queue: error creating packet message\n"); |
516 | return NULL; | 550 | return NULL; |
517 | } | 551 | } |
518 | 552 | ||
519 | static int | 553 | static int |
520 | __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, | 554 | __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, |
521 | struct nf_queue_entry *entry) | 555 | struct nf_queue_entry *entry) |
522 | { | 556 | { |
523 | struct sk_buff *nskb; | 557 | struct sk_buff *nskb; |
524 | int err = -ENOBUFS; | 558 | int err = -ENOBUFS; |
525 | __be32 *packet_id_ptr; | 559 | __be32 *packet_id_ptr; |
526 | int failopen = 0; | 560 | int failopen = 0; |
527 | 561 | ||
528 | nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr); | 562 | nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr); |
529 | if (nskb == NULL) { | 563 | if (nskb == NULL) { |
530 | err = -ENOMEM; | 564 | err = -ENOMEM; |
531 | goto err_out; | 565 | goto err_out; |
532 | } | 566 | } |
533 | spin_lock_bh(&queue->lock); | 567 | spin_lock_bh(&queue->lock); |
534 | 568 | ||
535 | if (queue->queue_total >= queue->queue_maxlen) { | 569 | if (queue->queue_total >= queue->queue_maxlen) { |
536 | if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { | 570 | if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { |
537 | failopen = 1; | 571 | failopen = 1; |
538 | err = 0; | 572 | err = 0; |
539 | } else { | 573 | } else { |
540 | queue->queue_dropped++; | 574 | queue->queue_dropped++; |
541 | net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n", | 575 | net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n", |
542 | queue->queue_total); | 576 | queue->queue_total); |
543 | } | 577 | } |
544 | goto err_out_free_nskb; | 578 | goto err_out_free_nskb; |
545 | } | 579 | } |
546 | entry->id = ++queue->id_sequence; | 580 | entry->id = ++queue->id_sequence; |
547 | *packet_id_ptr = htonl(entry->id); | 581 | *packet_id_ptr = htonl(entry->id); |
548 | 582 | ||
549 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 583 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
550 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); | 584 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); |
551 | if (err < 0) { | 585 | if (err < 0) { |
552 | queue->queue_user_dropped++; | 586 | queue->queue_user_dropped++; |
553 | goto err_out_unlock; | 587 | goto err_out_unlock; |
554 | } | 588 | } |
555 | 589 | ||
556 | __enqueue_entry(queue, entry); | 590 | __enqueue_entry(queue, entry); |
557 | 591 | ||
558 | spin_unlock_bh(&queue->lock); | 592 | spin_unlock_bh(&queue->lock); |
559 | return 0; | 593 | return 0; |
560 | 594 | ||
561 | err_out_free_nskb: | 595 | err_out_free_nskb: |
562 | kfree_skb(nskb); | 596 | kfree_skb(nskb); |
563 | err_out_unlock: | 597 | err_out_unlock: |
564 | spin_unlock_bh(&queue->lock); | 598 | spin_unlock_bh(&queue->lock); |
565 | if (failopen) | 599 | if (failopen) |
566 | nf_reinject(entry, NF_ACCEPT); | 600 | nf_reinject(entry, NF_ACCEPT); |
567 | err_out: | 601 | err_out: |
568 | return err; | 602 | return err; |
569 | } | 603 | } |
570 | 604 | ||
571 | static struct nf_queue_entry * | 605 | static struct nf_queue_entry * |
572 | nf_queue_entry_dup(struct nf_queue_entry *e) | 606 | nf_queue_entry_dup(struct nf_queue_entry *e) |
573 | { | 607 | { |
574 | struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); | 608 | struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); |
575 | if (entry) { | 609 | if (entry) { |
576 | if (nf_queue_entry_get_refs(entry)) | 610 | if (nf_queue_entry_get_refs(entry)) |
577 | return entry; | 611 | return entry; |
578 | kfree(entry); | 612 | kfree(entry); |
579 | } | 613 | } |
580 | return NULL; | 614 | return NULL; |
581 | } | 615 | } |
582 | 616 | ||
583 | #ifdef CONFIG_BRIDGE_NETFILTER | 617 | #ifdef CONFIG_BRIDGE_NETFILTER |
584 | /* When called from bridge netfilter, skb->data must point to MAC header | 618 | /* When called from bridge netfilter, skb->data must point to MAC header |
585 | * before calling skb_gso_segment(). Else, original MAC header is lost | 619 | * before calling skb_gso_segment(). Else, original MAC header is lost |
586 | * and segmented skbs will be sent to wrong destination. | 620 | * and segmented skbs will be sent to wrong destination. |
587 | */ | 621 | */ |
588 | static void nf_bridge_adjust_skb_data(struct sk_buff *skb) | 622 | static void nf_bridge_adjust_skb_data(struct sk_buff *skb) |
589 | { | 623 | { |
590 | if (skb->nf_bridge) | 624 | if (skb->nf_bridge) |
591 | __skb_push(skb, skb->network_header - skb->mac_header); | 625 | __skb_push(skb, skb->network_header - skb->mac_header); |
592 | } | 626 | } |
593 | 627 | ||
594 | static void nf_bridge_adjust_segmented_data(struct sk_buff *skb) | 628 | static void nf_bridge_adjust_segmented_data(struct sk_buff *skb) |
595 | { | 629 | { |
596 | if (skb->nf_bridge) | 630 | if (skb->nf_bridge) |
597 | __skb_pull(skb, skb->network_header - skb->mac_header); | 631 | __skb_pull(skb, skb->network_header - skb->mac_header); |
598 | } | 632 | } |
599 | #else | 633 | #else |
600 | #define nf_bridge_adjust_skb_data(s) do {} while (0) | 634 | #define nf_bridge_adjust_skb_data(s) do {} while (0) |
601 | #define nf_bridge_adjust_segmented_data(s) do {} while (0) | 635 | #define nf_bridge_adjust_segmented_data(s) do {} while (0) |
602 | #endif | 636 | #endif |
603 | 637 | ||
604 | static void free_entry(struct nf_queue_entry *entry) | 638 | static void free_entry(struct nf_queue_entry *entry) |
605 | { | 639 | { |
606 | nf_queue_entry_release_refs(entry); | 640 | nf_queue_entry_release_refs(entry); |
607 | kfree(entry); | 641 | kfree(entry); |
608 | } | 642 | } |
609 | 643 | ||
610 | static int | 644 | static int |
611 | __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, | 645 | __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, |
612 | struct sk_buff *skb, struct nf_queue_entry *entry) | 646 | struct sk_buff *skb, struct nf_queue_entry *entry) |
613 | { | 647 | { |
614 | int ret = -ENOMEM; | 648 | int ret = -ENOMEM; |
615 | struct nf_queue_entry *entry_seg; | 649 | struct nf_queue_entry *entry_seg; |
616 | 650 | ||
617 | nf_bridge_adjust_segmented_data(skb); | 651 | nf_bridge_adjust_segmented_data(skb); |
618 | 652 | ||
619 | if (skb->next == NULL) { /* last packet, no need to copy entry */ | 653 | if (skb->next == NULL) { /* last packet, no need to copy entry */ |
620 | struct sk_buff *gso_skb = entry->skb; | 654 | struct sk_buff *gso_skb = entry->skb; |
621 | entry->skb = skb; | 655 | entry->skb = skb; |
622 | ret = __nfqnl_enqueue_packet(net, queue, entry); | 656 | ret = __nfqnl_enqueue_packet(net, queue, entry); |
623 | if (ret) | 657 | if (ret) |
624 | entry->skb = gso_skb; | 658 | entry->skb = gso_skb; |
625 | return ret; | 659 | return ret; |
626 | } | 660 | } |
627 | 661 | ||
628 | skb->next = NULL; | 662 | skb->next = NULL; |
629 | 663 | ||
630 | entry_seg = nf_queue_entry_dup(entry); | 664 | entry_seg = nf_queue_entry_dup(entry); |
631 | if (entry_seg) { | 665 | if (entry_seg) { |
632 | entry_seg->skb = skb; | 666 | entry_seg->skb = skb; |
633 | ret = __nfqnl_enqueue_packet(net, queue, entry_seg); | 667 | ret = __nfqnl_enqueue_packet(net, queue, entry_seg); |
634 | if (ret) | 668 | if (ret) |
635 | free_entry(entry_seg); | 669 | free_entry(entry_seg); |
636 | } | 670 | } |
637 | return ret; | 671 | return ret; |
638 | } | 672 | } |
639 | 673 | ||
640 | static int | 674 | static int |
641 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | 675 | nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) |
642 | { | 676 | { |
643 | unsigned int queued; | 677 | unsigned int queued; |
644 | struct nfqnl_instance *queue; | 678 | struct nfqnl_instance *queue; |
645 | struct sk_buff *skb, *segs; | 679 | struct sk_buff *skb, *segs; |
646 | int err = -ENOBUFS; | 680 | int err = -ENOBUFS; |
647 | struct net *net = dev_net(entry->indev ? | 681 | struct net *net = dev_net(entry->indev ? |
648 | entry->indev : entry->outdev); | 682 | entry->indev : entry->outdev); |
649 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 683 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
650 | 684 | ||
651 | /* rcu_read_lock()ed by nf_hook_slow() */ | 685 | /* rcu_read_lock()ed by nf_hook_slow() */ |
652 | queue = instance_lookup(q, queuenum); | 686 | queue = instance_lookup(q, queuenum); |
653 | if (!queue) | 687 | if (!queue) |
654 | return -ESRCH; | 688 | return -ESRCH; |
655 | 689 | ||
656 | if (queue->copy_mode == NFQNL_COPY_NONE) | 690 | if (queue->copy_mode == NFQNL_COPY_NONE) |
657 | return -EINVAL; | 691 | return -EINVAL; |
658 | 692 | ||
659 | skb = entry->skb; | 693 | skb = entry->skb; |
660 | 694 | ||
661 | switch (entry->pf) { | 695 | switch (entry->pf) { |
662 | case NFPROTO_IPV4: | 696 | case NFPROTO_IPV4: |
663 | skb->protocol = htons(ETH_P_IP); | 697 | skb->protocol = htons(ETH_P_IP); |
664 | break; | 698 | break; |
665 | case NFPROTO_IPV6: | 699 | case NFPROTO_IPV6: |
666 | skb->protocol = htons(ETH_P_IPV6); | 700 | skb->protocol = htons(ETH_P_IPV6); |
667 | break; | 701 | break; |
668 | } | 702 | } |
669 | 703 | ||
670 | if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) | 704 | if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) |
671 | return __nfqnl_enqueue_packet(net, queue, entry); | 705 | return __nfqnl_enqueue_packet(net, queue, entry); |
672 | 706 | ||
673 | nf_bridge_adjust_skb_data(skb); | 707 | nf_bridge_adjust_skb_data(skb); |
674 | segs = skb_gso_segment(skb, 0); | 708 | segs = skb_gso_segment(skb, 0); |
675 | /* Does not use PTR_ERR to limit the number of error codes that can be | 709 | /* Does not use PTR_ERR to limit the number of error codes that can be |
676 | * returned by nf_queue. For instance, callers rely on -ECANCELED to | 710 | * returned by nf_queue. For instance, callers rely on -ECANCELED to |
677 | * mean 'ignore this hook'. | 711 | * mean 'ignore this hook'. |
678 | */ | 712 | */ |
679 | if (IS_ERR(segs)) | 713 | if (IS_ERR(segs)) |
680 | goto out_err; | 714 | goto out_err; |
681 | queued = 0; | 715 | queued = 0; |
682 | err = 0; | 716 | err = 0; |
683 | do { | 717 | do { |
684 | struct sk_buff *nskb = segs->next; | 718 | struct sk_buff *nskb = segs->next; |
685 | if (err == 0) | 719 | if (err == 0) |
686 | err = __nfqnl_enqueue_packet_gso(net, queue, | 720 | err = __nfqnl_enqueue_packet_gso(net, queue, |
687 | segs, entry); | 721 | segs, entry); |
688 | if (err == 0) | 722 | if (err == 0) |
689 | queued++; | 723 | queued++; |
690 | else | 724 | else |
691 | kfree_skb(segs); | 725 | kfree_skb(segs); |
692 | segs = nskb; | 726 | segs = nskb; |
693 | } while (segs); | 727 | } while (segs); |
694 | 728 | ||
695 | if (queued) { | 729 | if (queued) { |
696 | if (err) /* some segments are already queued */ | 730 | if (err) /* some segments are already queued */ |
697 | free_entry(entry); | 731 | free_entry(entry); |
698 | kfree_skb(skb); | 732 | kfree_skb(skb); |
699 | return 0; | 733 | return 0; |
700 | } | 734 | } |
701 | out_err: | 735 | out_err: |
702 | nf_bridge_adjust_segmented_data(skb); | 736 | nf_bridge_adjust_segmented_data(skb); |
703 | return err; | 737 | return err; |
704 | } | 738 | } |
705 | 739 | ||
706 | static int | 740 | static int |
707 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff) | 741 | nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff) |
708 | { | 742 | { |
709 | struct sk_buff *nskb; | 743 | struct sk_buff *nskb; |
710 | 744 | ||
711 | if (diff < 0) { | 745 | if (diff < 0) { |
712 | if (pskb_trim(e->skb, data_len)) | 746 | if (pskb_trim(e->skb, data_len)) |
713 | return -ENOMEM; | 747 | return -ENOMEM; |
714 | } else if (diff > 0) { | 748 | } else if (diff > 0) { |
715 | if (data_len > 0xFFFF) | 749 | if (data_len > 0xFFFF) |
716 | return -EINVAL; | 750 | return -EINVAL; |
717 | if (diff > skb_tailroom(e->skb)) { | 751 | if (diff > skb_tailroom(e->skb)) { |
718 | nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), | 752 | nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), |
719 | diff, GFP_ATOMIC); | 753 | diff, GFP_ATOMIC); |
720 | if (!nskb) { | 754 | if (!nskb) { |
721 | printk(KERN_WARNING "nf_queue: OOM " | 755 | printk(KERN_WARNING "nf_queue: OOM " |
722 | "in mangle, dropping packet\n"); | 756 | "in mangle, dropping packet\n"); |
723 | return -ENOMEM; | 757 | return -ENOMEM; |
724 | } | 758 | } |
725 | kfree_skb(e->skb); | 759 | kfree_skb(e->skb); |
726 | e->skb = nskb; | 760 | e->skb = nskb; |
727 | } | 761 | } |
728 | skb_put(e->skb, diff); | 762 | skb_put(e->skb, diff); |
729 | } | 763 | } |
730 | if (!skb_make_writable(e->skb, data_len)) | 764 | if (!skb_make_writable(e->skb, data_len)) |
731 | return -ENOMEM; | 765 | return -ENOMEM; |
732 | skb_copy_to_linear_data(e->skb, data, data_len); | 766 | skb_copy_to_linear_data(e->skb, data, data_len); |
733 | e->skb->ip_summed = CHECKSUM_NONE; | 767 | e->skb->ip_summed = CHECKSUM_NONE; |
734 | return 0; | 768 | return 0; |
735 | } | 769 | } |
736 | 770 | ||
737 | static int | 771 | static int |
738 | nfqnl_set_mode(struct nfqnl_instance *queue, | 772 | nfqnl_set_mode(struct nfqnl_instance *queue, |
739 | unsigned char mode, unsigned int range) | 773 | unsigned char mode, unsigned int range) |
740 | { | 774 | { |
741 | int status = 0; | 775 | int status = 0; |
742 | 776 | ||
743 | spin_lock_bh(&queue->lock); | 777 | spin_lock_bh(&queue->lock); |
744 | switch (mode) { | 778 | switch (mode) { |
745 | case NFQNL_COPY_NONE: | 779 | case NFQNL_COPY_NONE: |
746 | case NFQNL_COPY_META: | 780 | case NFQNL_COPY_META: |
747 | queue->copy_mode = mode; | 781 | queue->copy_mode = mode; |
748 | queue->copy_range = 0; | 782 | queue->copy_range = 0; |
749 | break; | 783 | break; |
750 | 784 | ||
751 | case NFQNL_COPY_PACKET: | 785 | case NFQNL_COPY_PACKET: |
752 | queue->copy_mode = mode; | 786 | queue->copy_mode = mode; |
753 | if (range == 0 || range > NFQNL_MAX_COPY_RANGE) | 787 | if (range == 0 || range > NFQNL_MAX_COPY_RANGE) |
754 | queue->copy_range = NFQNL_MAX_COPY_RANGE; | 788 | queue->copy_range = NFQNL_MAX_COPY_RANGE; |
755 | else | 789 | else |
756 | queue->copy_range = range; | 790 | queue->copy_range = range; |
757 | break; | 791 | break; |
758 | 792 | ||
759 | default: | 793 | default: |
760 | status = -EINVAL; | 794 | status = -EINVAL; |
761 | 795 | ||
762 | } | 796 | } |
763 | spin_unlock_bh(&queue->lock); | 797 | spin_unlock_bh(&queue->lock); |
764 | 798 | ||
765 | return status; | 799 | return status; |
766 | } | 800 | } |
767 | 801 | ||
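nfqnl_set_mode() stores the copy mode and range under the queue lock; NFQNL_COPY_NONE and NFQNL_COPY_META force the range to zero, while NFQNL_COPY_PACKET clamps the requested range to NFQNL_MAX_COPY_RANGE. The userspace counterpart, assuming libnetfilter_queue's nfq_set_mode():

#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter/nfnetlink_queue.h>

static int want_full_packets(struct nfq_q_handle *qh)
{
        /* The kernel clamps oversized ranges, so 0xffff is a safe
         * "as much payload as possible" request. */
        return nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
}
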
768 | static int | 802 | static int |
769 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) | 803 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) |
770 | { | 804 | { |
771 | if (entry->indev) | 805 | if (entry->indev) |
772 | if (entry->indev->ifindex == ifindex) | 806 | if (entry->indev->ifindex == ifindex) |
773 | return 1; | 807 | return 1; |
774 | if (entry->outdev) | 808 | if (entry->outdev) |
775 | if (entry->outdev->ifindex == ifindex) | 809 | if (entry->outdev->ifindex == ifindex) |
776 | return 1; | 810 | return 1; |
777 | #ifdef CONFIG_BRIDGE_NETFILTER | 811 | #ifdef CONFIG_BRIDGE_NETFILTER |
778 | if (entry->skb->nf_bridge) { | 812 | if (entry->skb->nf_bridge) { |
779 | if (entry->skb->nf_bridge->physindev && | 813 | if (entry->skb->nf_bridge->physindev && |
780 | entry->skb->nf_bridge->physindev->ifindex == ifindex) | 814 | entry->skb->nf_bridge->physindev->ifindex == ifindex) |
781 | return 1; | 815 | return 1; |
782 | if (entry->skb->nf_bridge->physoutdev && | 816 | if (entry->skb->nf_bridge->physoutdev && |
783 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) | 817 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) |
784 | return 1; | 818 | return 1; |
785 | } | 819 | } |
786 | #endif | 820 | #endif |
787 | return 0; | 821 | return 0; |
788 | } | 822 | } |
789 | 823 | ||
790 | /* drop all packets with either indev or outdev == ifindex from all queue | 824 | /* drop all packets with either indev or outdev == ifindex from all queue |
791 | * instances */ | 825 | * instances */ |
792 | static void | 826 | static void |
793 | nfqnl_dev_drop(struct net *net, int ifindex) | 827 | nfqnl_dev_drop(struct net *net, int ifindex) |
794 | { | 828 | { |
795 | int i; | 829 | int i; |
796 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 830 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
797 | 831 | ||
798 | rcu_read_lock(); | 832 | rcu_read_lock(); |
799 | 833 | ||
800 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 834 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
801 | struct nfqnl_instance *inst; | 835 | struct nfqnl_instance *inst; |
802 | struct hlist_head *head = &q->instance_table[i]; | 836 | struct hlist_head *head = &q->instance_table[i]; |
803 | 837 | ||
804 | hlist_for_each_entry_rcu(inst, head, hlist) | 838 | hlist_for_each_entry_rcu(inst, head, hlist) |
805 | nfqnl_flush(inst, dev_cmp, ifindex); | 839 | nfqnl_flush(inst, dev_cmp, ifindex); |
806 | } | 840 | } |
807 | 841 | ||
808 | rcu_read_unlock(); | 842 | rcu_read_unlock(); |
809 | } | 843 | } |
810 | 844 | ||
811 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) | 845 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) |
812 | 846 | ||
813 | static int | 847 | static int |
814 | nfqnl_rcv_dev_event(struct notifier_block *this, | 848 | nfqnl_rcv_dev_event(struct notifier_block *this, |
815 | unsigned long event, void *ptr) | 849 | unsigned long event, void *ptr) |
816 | { | 850 | { |
817 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 851 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
818 | 852 | ||
819 | /* Drop any packets associated with the downed device */ | 853 | /* Drop any packets associated with the downed device */ |
820 | if (event == NETDEV_DOWN) | 854 | if (event == NETDEV_DOWN) |
821 | nfqnl_dev_drop(dev_net(dev), dev->ifindex); | 855 | nfqnl_dev_drop(dev_net(dev), dev->ifindex); |
822 | return NOTIFY_DONE; | 856 | return NOTIFY_DONE; |
823 | } | 857 | } |
824 | 858 | ||
825 | static struct notifier_block nfqnl_dev_notifier = { | 859 | static struct notifier_block nfqnl_dev_notifier = { |
826 | .notifier_call = nfqnl_rcv_dev_event, | 860 | .notifier_call = nfqnl_rcv_dev_event, |
827 | }; | 861 | }; |
828 | 862 | ||
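dev_cmp() matches a queued entry's input and output devices, plus the bridge physical devices when CONFIG_BRIDGE_NETFILTER is set, against a given ifindex. When a NETDEV_DOWN event arrives, nfqnl_dev_drop() walks every instance bucket under rcu_read_lock() and flushes the matching entries, so a verdict sent later for one of those packet IDs fails with -ENOENT.
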
829 | static int | 863 | static int |
830 | nfqnl_rcv_nl_event(struct notifier_block *this, | 864 | nfqnl_rcv_nl_event(struct notifier_block *this, |
831 | unsigned long event, void *ptr) | 865 | unsigned long event, void *ptr) |
832 | { | 866 | { |
833 | struct netlink_notify *n = ptr; | 867 | struct netlink_notify *n = ptr; |
834 | struct nfnl_queue_net *q = nfnl_queue_pernet(n->net); | 868 | struct nfnl_queue_net *q = nfnl_queue_pernet(n->net); |
835 | 869 | ||
836 | if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { | 870 | if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { |
837 | int i; | 871 | int i; |
838 | 872 | ||
839 | /* destroy all instances for this portid */ | 873 | /* destroy all instances for this portid */ |
840 | spin_lock(&q->instances_lock); | 874 | spin_lock(&q->instances_lock); |
841 | for (i = 0; i < INSTANCE_BUCKETS; i++) { | 875 | for (i = 0; i < INSTANCE_BUCKETS; i++) { |
842 | struct hlist_node *t2; | 876 | struct hlist_node *t2; |
843 | struct nfqnl_instance *inst; | 877 | struct nfqnl_instance *inst; |
844 | struct hlist_head *head = &q->instance_table[i]; | 878 | struct hlist_head *head = &q->instance_table[i]; |
845 | 879 | ||
846 | hlist_for_each_entry_safe(inst, t2, head, hlist) { | 880 | hlist_for_each_entry_safe(inst, t2, head, hlist) { |
847 | if (n->portid == inst->peer_portid) | 881 | if (n->portid == inst->peer_portid) |
848 | __instance_destroy(inst); | 882 | __instance_destroy(inst); |
849 | } | 883 | } |
850 | } | 884 | } |
851 | spin_unlock(&q->instances_lock); | 885 | spin_unlock(&q->instances_lock); |
852 | } | 886 | } |
853 | return NOTIFY_DONE; | 887 | return NOTIFY_DONE; |
854 | } | 888 | } |
855 | 889 | ||
856 | static struct notifier_block nfqnl_rtnl_notifier = { | 890 | static struct notifier_block nfqnl_rtnl_notifier = { |
857 | .notifier_call = nfqnl_rcv_nl_event, | 891 | .notifier_call = nfqnl_rcv_nl_event, |
858 | }; | 892 | }; |
859 | 893 | ||
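The NETLINK_URELEASE notifier ties instance lifetime to the owning netlink socket: when the socket bound to a queue's peer_portid closes, every instance it owned is destroyed under instances_lock. Explicit teardown from userspace is still good practice; a sketch, assuming libnetfilter_queue:

#include <libnetfilter_queue/libnetfilter_queue.h>

static void teardown(struct nfq_handle *h, struct nfq_q_handle *qh)
{
        /* Explicit unbind. If the process simply exits, the
         * NETLINK_URELEASE notifier above performs the same cleanup
         * for every instance owned by the closed socket. */
        nfq_destroy_queue(qh);
        nfq_close(h);
}
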
860 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { | 894 | static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { |
861 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | 895 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, |
862 | [NFQA_MARK] = { .type = NLA_U32 }, | 896 | [NFQA_MARK] = { .type = NLA_U32 }, |
863 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, | 897 | [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, |
864 | [NFQA_CT] = { .type = NLA_UNSPEC }, | 898 | [NFQA_CT] = { .type = NLA_UNSPEC }, |
865 | [NFQA_EXP] = { .type = NLA_UNSPEC }, | 899 | [NFQA_EXP] = { .type = NLA_UNSPEC }, |
866 | }; | 900 | }; |
867 | 901 | ||
868 | static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { | 902 | static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { |
869 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, | 903 | [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, |
870 | [NFQA_MARK] = { .type = NLA_U32 }, | 904 | [NFQA_MARK] = { .type = NLA_U32 }, |
871 | }; | 905 | }; |
872 | 906 | ||
873 | static struct nfqnl_instance * | 907 | static struct nfqnl_instance * |
874 | verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid) | 908 | verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid) |
875 | { | 909 | { |
876 | struct nfqnl_instance *queue; | 910 | struct nfqnl_instance *queue; |
877 | 911 | ||
878 | queue = instance_lookup(q, queue_num); | 912 | queue = instance_lookup(q, queue_num); |
879 | if (!queue) | 913 | if (!queue) |
880 | return ERR_PTR(-ENODEV); | 914 | return ERR_PTR(-ENODEV); |
881 | 915 | ||
882 | if (queue->peer_portid != nlportid) | 916 | if (queue->peer_portid != nlportid) |
883 | return ERR_PTR(-EPERM); | 917 | return ERR_PTR(-EPERM); |
884 | 918 | ||
885 | return queue; | 919 | return queue; |
886 | } | 920 | } |
887 | 921 | ||
888 | static struct nfqnl_msg_verdict_hdr * | 922 | static struct nfqnl_msg_verdict_hdr * |
889 | verdicthdr_get(const struct nlattr * const nfqa[]) | 923 | verdicthdr_get(const struct nlattr * const nfqa[]) |
890 | { | 924 | { |
891 | struct nfqnl_msg_verdict_hdr *vhdr; | 925 | struct nfqnl_msg_verdict_hdr *vhdr; |
892 | unsigned int verdict; | 926 | unsigned int verdict; |
893 | 927 | ||
894 | if (!nfqa[NFQA_VERDICT_HDR]) | 928 | if (!nfqa[NFQA_VERDICT_HDR]) |
895 | return NULL; | 929 | return NULL; |
896 | 930 | ||
897 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); | 931 | vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); |
898 | verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; | 932 | verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; |
899 | if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN) | 933 | if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN) |
900 | return NULL; | 934 | return NULL; |
901 | return vhdr; | 935 | return vhdr; |
902 | } | 936 | } |
903 | 937 | ||
904 | static int nfq_id_after(unsigned int id, unsigned int max) | 938 | static int nfq_id_after(unsigned int id, unsigned int max) |
905 | { | 939 | { |
906 | return (int)(id - max) > 0; | 940 | return (int)(id - max) > 0; |
907 | } | 941 | } |
908 | 942 | ||
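nfq_id_after() compares packet IDs through an unsigned subtraction cast to int, which stays correct when the 32-bit ID counter wraps around. A standalone userspace restatement with a few worked cases:

#include <assert.h>

/* Same arithmetic as the kernel helper above. */
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}

int main(void)
{
        assert(!nfq_id_after(5, 10));      /* 5 is not after 10 */
        assert(nfq_id_after(10, 5));       /* 10 is after 5 */
        /* 2 counts as "after" 0xfffffffe: the counter wrapped,
         * since (int)(2 - 0xfffffffe) == 4 > 0. */
        assert(nfq_id_after(2, 0xfffffffeu));
        return 0;
}
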
909 | static int | 943 | static int |
910 | nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb, | 944 | nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb, |
911 | const struct nlmsghdr *nlh, | 945 | const struct nlmsghdr *nlh, |
912 | const struct nlattr * const nfqa[]) | 946 | const struct nlattr * const nfqa[]) |
913 | { | 947 | { |
914 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 948 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
915 | struct nf_queue_entry *entry, *tmp; | 949 | struct nf_queue_entry *entry, *tmp; |
916 | unsigned int verdict, maxid; | 950 | unsigned int verdict, maxid; |
917 | struct nfqnl_msg_verdict_hdr *vhdr; | 951 | struct nfqnl_msg_verdict_hdr *vhdr; |
918 | struct nfqnl_instance *queue; | 952 | struct nfqnl_instance *queue; |
919 | LIST_HEAD(batch_list); | 953 | LIST_HEAD(batch_list); |
920 | u16 queue_num = ntohs(nfmsg->res_id); | 954 | u16 queue_num = ntohs(nfmsg->res_id); |
921 | 955 | ||
922 | struct net *net = sock_net(ctnl); | 956 | struct net *net = sock_net(ctnl); |
923 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 957 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
924 | 958 | ||
925 | queue = verdict_instance_lookup(q, queue_num, | 959 | queue = verdict_instance_lookup(q, queue_num, |
926 | NETLINK_CB(skb).portid); | 960 | NETLINK_CB(skb).portid); |
927 | if (IS_ERR(queue)) | 961 | if (IS_ERR(queue)) |
928 | return PTR_ERR(queue); | 962 | return PTR_ERR(queue); |
929 | 963 | ||
930 | vhdr = verdicthdr_get(nfqa); | 964 | vhdr = verdicthdr_get(nfqa); |
931 | if (!vhdr) | 965 | if (!vhdr) |
932 | return -EINVAL; | 966 | return -EINVAL; |
933 | 967 | ||
934 | verdict = ntohl(vhdr->verdict); | 968 | verdict = ntohl(vhdr->verdict); |
935 | maxid = ntohl(vhdr->id); | 969 | maxid = ntohl(vhdr->id); |
936 | 970 | ||
937 | spin_lock_bh(&queue->lock); | 971 | spin_lock_bh(&queue->lock); |
938 | 972 | ||
939 | list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { | 973 | list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { |
940 | if (nfq_id_after(entry->id, maxid)) | 974 | if (nfq_id_after(entry->id, maxid)) |
941 | break; | 975 | break; |
942 | __dequeue_entry(queue, entry); | 976 | __dequeue_entry(queue, entry); |
943 | list_add_tail(&entry->list, &batch_list); | 977 | list_add_tail(&entry->list, &batch_list); |
944 | } | 978 | } |
945 | 979 | ||
946 | spin_unlock_bh(&queue->lock); | 980 | spin_unlock_bh(&queue->lock); |
947 | 981 | ||
948 | if (list_empty(&batch_list)) | 982 | if (list_empty(&batch_list)) |
949 | return -ENOENT; | 983 | return -ENOENT; |
950 | 984 | ||
951 | list_for_each_entry_safe(entry, tmp, &batch_list, list) { | 985 | list_for_each_entry_safe(entry, tmp, &batch_list, list) { |
952 | if (nfqa[NFQA_MARK]) | 986 | if (nfqa[NFQA_MARK]) |
953 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | 987 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); |
954 | nf_reinject(entry, verdict); | 988 | nf_reinject(entry, verdict); |
955 | } | 989 | } |
956 | return 0; | 990 | return 0; |
957 | } | 991 | } |
958 | 992 | ||
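A batch verdict dequeues every entry whose ID is not after vhdr->id (using the wrap-safe comparison above), then applies the optional NFQA_MARK and the single verdict to each. Assuming libnetfilter_queue's nfq_set_verdict_batch(), which builds the NFQNL_MSG_VERDICT_BATCH message, the userspace side is one call:

#include <stdint.h>
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter.h>

static int accept_up_to(struct nfq_q_handle *qh, uint32_t last_id)
{
        /* One message verdicts every queued entry with id <= last_id. */
        return nfq_set_verdict_batch(qh, last_id, NF_ACCEPT);
}
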
959 | static int | 993 | static int |
960 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | 994 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, |
961 | const struct nlmsghdr *nlh, | 995 | const struct nlmsghdr *nlh, |
962 | const struct nlattr * const nfqa[]) | 996 | const struct nlattr * const nfqa[]) |
963 | { | 997 | { |
964 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 998 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
965 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 999 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
966 | 1000 | ||
967 | struct nfqnl_msg_verdict_hdr *vhdr; | 1001 | struct nfqnl_msg_verdict_hdr *vhdr; |
968 | struct nfqnl_instance *queue; | 1002 | struct nfqnl_instance *queue; |
969 | unsigned int verdict; | 1003 | unsigned int verdict; |
970 | struct nf_queue_entry *entry; | 1004 | struct nf_queue_entry *entry; |
971 | enum ip_conntrack_info uninitialized_var(ctinfo); | 1005 | enum ip_conntrack_info uninitialized_var(ctinfo); |
972 | struct nf_conn *ct = NULL; | 1006 | struct nf_conn *ct = NULL; |
973 | 1007 | ||
974 | struct net *net = sock_net(ctnl); | 1008 | struct net *net = sock_net(ctnl); |
975 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 1009 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
976 | 1010 | ||
977 | queue = instance_lookup(q, queue_num); | 1011 | queue = instance_lookup(q, queue_num); |
978 | if (!queue) | 1012 | if (!queue) |
979 | queue = verdict_instance_lookup(q, queue_num, | 1013 | queue = verdict_instance_lookup(q, queue_num, |
980 | NETLINK_CB(skb).portid); | 1014 | NETLINK_CB(skb).portid); |
981 | if (IS_ERR(queue)) | 1015 | if (IS_ERR(queue)) |
982 | return PTR_ERR(queue); | 1016 | return PTR_ERR(queue); |
983 | 1017 | ||
984 | vhdr = verdicthdr_get(nfqa); | 1018 | vhdr = verdicthdr_get(nfqa); |
985 | if (!vhdr) | 1019 | if (!vhdr) |
986 | return -EINVAL; | 1020 | return -EINVAL; |
987 | 1021 | ||
988 | verdict = ntohl(vhdr->verdict); | 1022 | verdict = ntohl(vhdr->verdict); |
989 | 1023 | ||
990 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); | 1024 | entry = find_dequeue_entry(queue, ntohl(vhdr->id)); |
991 | if (entry == NULL) | 1025 | if (entry == NULL) |
992 | return -ENOENT; | 1026 | return -ENOENT; |
993 | 1027 | ||
994 | if (nfqa[NFQA_CT]) { | 1028 | if (nfqa[NFQA_CT]) { |
995 | ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); | 1029 | ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); |
996 | if (ct && nfqa[NFQA_EXP]) { | 1030 | if (ct && nfqa[NFQA_EXP]) { |
997 | nfqnl_attach_expect(ct, nfqa[NFQA_EXP], | 1031 | nfqnl_attach_expect(ct, nfqa[NFQA_EXP], |
998 | NETLINK_CB(skb).portid, | 1032 | NETLINK_CB(skb).portid, |
999 | nlmsg_report(nlh)); | 1033 | nlmsg_report(nlh)); |
1000 | } | 1034 | } |
1001 | } | 1035 | } |
1002 | 1036 | ||
1003 | if (nfqa[NFQA_PAYLOAD]) { | 1037 | if (nfqa[NFQA_PAYLOAD]) { |
1004 | u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]); | 1038 | u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]); |
1005 | int diff = payload_len - entry->skb->len; | 1039 | int diff = payload_len - entry->skb->len; |
1006 | 1040 | ||
1007 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), | 1041 | if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), |
1008 | payload_len, entry, diff) < 0) | 1042 | payload_len, entry, diff) < 0) |
1009 | verdict = NF_DROP; | 1043 | verdict = NF_DROP; |
1010 | 1044 | ||
1011 | if (ct) | 1045 | if (ct) |
1012 | nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff); | 1046 | nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff); |
1013 | } | 1047 | } |
1014 | 1048 | ||
1015 | if (nfqa[NFQA_MARK]) | 1049 | if (nfqa[NFQA_MARK]) |
1016 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); | 1050 | entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); |
1017 | 1051 | ||
1018 | nf_reinject(entry, verdict); | 1052 | nf_reinject(entry, verdict); |
1019 | return 0; | 1053 | return 0; |
1020 | } | 1054 | } |
1021 | 1055 | ||
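The single-packet path looks up the entry by ID (returning -ENOENT if it was already flushed or verdicted), optionally updates conntrack state and expectations, mangles the payload if one was supplied, sets the mark, and reinjects. Note that verdict_instance_lookup() rejects verdicts from any portid other than the queue owner with -EPERM, and verdicthdr_get() refuses NF_STOLEN and out-of-range verdicts. A minimal callback sketch, assuming libnetfilter_queue's documented helpers; the 0x2a mark is an arbitrary example value:

#include <stdint.h>
#include <arpa/inet.h>
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
              struct nfq_data *nfa, void *data)
{
        struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
        uint32_t id = ph ? ntohl(ph->packet_id) : 0;

        /* nfq_set_verdict2() carries a mark, taking the NFQA_MARK
         * branch in nfqnl_recv_verdict() above. */
        return nfq_set_verdict2(qh, id, NF_ACCEPT, 0x2a, 0, NULL);
}
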
1022 | static int | 1056 | static int |
1023 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, | 1057 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, |
1024 | const struct nlmsghdr *nlh, | 1058 | const struct nlmsghdr *nlh, |
1025 | const struct nlattr * const nfqa[]) | 1059 | const struct nlattr * const nfqa[]) |
1026 | { | 1060 | { |
1027 | return -ENOTSUPP; | 1061 | return -ENOTSUPP; |
1028 | } | 1062 | } |
1029 | 1063 | ||
1030 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { | 1064 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { |
1031 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, | 1065 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, |
1032 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, | 1066 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, |
1033 | }; | 1067 | }; |
1034 | 1068 | ||
1035 | static const struct nf_queue_handler nfqh = { | 1069 | static const struct nf_queue_handler nfqh = { |
1036 | .outfn = &nfqnl_enqueue_packet, | 1070 | .outfn = &nfqnl_enqueue_packet, |
1037 | }; | 1071 | }; |
1038 | 1072 | ||
1039 | static int | 1073 | static int |
1040 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | 1074 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, |
1041 | const struct nlmsghdr *nlh, | 1075 | const struct nlmsghdr *nlh, |
1042 | const struct nlattr * const nfqa[]) | 1076 | const struct nlattr * const nfqa[]) |
1043 | { | 1077 | { |
1044 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1078 | struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
1045 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 1079 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
1046 | struct nfqnl_instance *queue; | 1080 | struct nfqnl_instance *queue; |
1047 | struct nfqnl_msg_config_cmd *cmd = NULL; | 1081 | struct nfqnl_msg_config_cmd *cmd = NULL; |
1048 | struct net *net = sock_net(ctnl); | 1082 | struct net *net = sock_net(ctnl); |
1049 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 1083 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
1050 | int ret = 0; | 1084 | int ret = 0; |
1051 | 1085 | ||
1052 | if (nfqa[NFQA_CFG_CMD]) { | 1086 | if (nfqa[NFQA_CFG_CMD]) { |
1053 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); | 1087 | cmd = nla_data(nfqa[NFQA_CFG_CMD]); |
1054 | 1088 | ||
1055 | /* Obsolete commands without queue context */ | 1089 | /* Obsolete commands without queue context */ |
1056 | switch (cmd->command) { | 1090 | switch (cmd->command) { |
1057 | case NFQNL_CFG_CMD_PF_BIND: return 0; | 1091 | case NFQNL_CFG_CMD_PF_BIND: return 0; |
1058 | case NFQNL_CFG_CMD_PF_UNBIND: return 0; | 1092 | case NFQNL_CFG_CMD_PF_UNBIND: return 0; |
1059 | } | 1093 | } |
1060 | } | 1094 | } |
1061 | 1095 | ||
1062 | rcu_read_lock(); | 1096 | rcu_read_lock(); |
1063 | queue = instance_lookup(q, queue_num); | 1097 | queue = instance_lookup(q, queue_num); |
1064 | if (queue && queue->peer_portid != NETLINK_CB(skb).portid) { | 1098 | if (queue && queue->peer_portid != NETLINK_CB(skb).portid) { |
1065 | ret = -EPERM; | 1099 | ret = -EPERM; |
1066 | goto err_out_unlock; | 1100 | goto err_out_unlock; |
1067 | } | 1101 | } |
1068 | 1102 | ||
1069 | if (cmd != NULL) { | 1103 | if (cmd != NULL) { |
1070 | switch (cmd->command) { | 1104 | switch (cmd->command) { |
1071 | case NFQNL_CFG_CMD_BIND: | 1105 | case NFQNL_CFG_CMD_BIND: |
1072 | if (queue) { | 1106 | if (queue) { |
1073 | ret = -EBUSY; | 1107 | ret = -EBUSY; |
1074 | goto err_out_unlock; | 1108 | goto err_out_unlock; |
1075 | } | 1109 | } |
1076 | queue = instance_create(q, queue_num, | 1110 | queue = instance_create(q, queue_num, |
1077 | NETLINK_CB(skb).portid); | 1111 | NETLINK_CB(skb).portid); |
1078 | if (IS_ERR(queue)) { | 1112 | if (IS_ERR(queue)) { |
1079 | ret = PTR_ERR(queue); | 1113 | ret = PTR_ERR(queue); |
1080 | goto err_out_unlock; | 1114 | goto err_out_unlock; |
1081 | } | 1115 | } |
1082 | break; | 1116 | break; |
1083 | case NFQNL_CFG_CMD_UNBIND: | 1117 | case NFQNL_CFG_CMD_UNBIND: |
1084 | if (!queue) { | 1118 | if (!queue) { |
1085 | ret = -ENODEV; | 1119 | ret = -ENODEV; |
1086 | goto err_out_unlock; | 1120 | goto err_out_unlock; |
1087 | } | 1121 | } |
1088 | instance_destroy(q, queue); | 1122 | instance_destroy(q, queue); |
1089 | break; | 1123 | break; |
1090 | case NFQNL_CFG_CMD_PF_BIND: | 1124 | case NFQNL_CFG_CMD_PF_BIND: |
1091 | case NFQNL_CFG_CMD_PF_UNBIND: | 1125 | case NFQNL_CFG_CMD_PF_UNBIND: |
1092 | break; | 1126 | break; |
1093 | default: | 1127 | default: |
1094 | ret = -ENOTSUPP; | 1128 | ret = -ENOTSUPP; |
1095 | break; | 1129 | break; |
1096 | } | 1130 | } |
1097 | } | 1131 | } |
1098 | 1132 | ||
1099 | if (nfqa[NFQA_CFG_PARAMS]) { | 1133 | if (nfqa[NFQA_CFG_PARAMS]) { |
1100 | struct nfqnl_msg_config_params *params; | 1134 | struct nfqnl_msg_config_params *params; |
1101 | 1135 | ||
1102 | if (!queue) { | 1136 | if (!queue) { |
1103 | ret = -ENODEV; | 1137 | ret = -ENODEV; |
1104 | goto err_out_unlock; | 1138 | goto err_out_unlock; |
1105 | } | 1139 | } |
1106 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); | 1140 | params = nla_data(nfqa[NFQA_CFG_PARAMS]); |
1107 | nfqnl_set_mode(queue, params->copy_mode, | 1141 | nfqnl_set_mode(queue, params->copy_mode, |
1108 | ntohl(params->copy_range)); | 1142 | ntohl(params->copy_range)); |
1109 | } | 1143 | } |
1110 | 1144 | ||
1111 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { | 1145 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { |
1112 | __be32 *queue_maxlen; | 1146 | __be32 *queue_maxlen; |
1113 | 1147 | ||
1114 | if (!queue) { | 1148 | if (!queue) { |
1115 | ret = -ENODEV; | 1149 | ret = -ENODEV; |
1116 | goto err_out_unlock; | 1150 | goto err_out_unlock; |
1117 | } | 1151 | } |
1118 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); | 1152 | queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); |
1119 | spin_lock_bh(&queue->lock); | 1153 | spin_lock_bh(&queue->lock); |
1120 | queue->queue_maxlen = ntohl(*queue_maxlen); | 1154 | queue->queue_maxlen = ntohl(*queue_maxlen); |
1121 | spin_unlock_bh(&queue->lock); | 1155 | spin_unlock_bh(&queue->lock); |
1122 | } | 1156 | } |
1123 | 1157 | ||
1124 | if (nfqa[NFQA_CFG_FLAGS]) { | 1158 | if (nfqa[NFQA_CFG_FLAGS]) { |
1125 | __u32 flags, mask; | 1159 | __u32 flags, mask; |
1126 | 1160 | ||
1127 | if (!queue) { | 1161 | if (!queue) { |
1128 | ret = -ENODEV; | 1162 | ret = -ENODEV; |
1129 | goto err_out_unlock; | 1163 | goto err_out_unlock; |
1130 | } | 1164 | } |
1131 | 1165 | ||
1132 | if (!nfqa[NFQA_CFG_MASK]) { | 1166 | if (!nfqa[NFQA_CFG_MASK]) { |
1133 | /* A mask is needed to specify which flags are being | 1167 | /* A mask is needed to specify which flags are being |
1134 | * changed. | 1168 | * changed. |
1135 | */ | 1169 | */ |
1136 | ret = -EINVAL; | 1170 | ret = -EINVAL; |
1137 | goto err_out_unlock; | 1171 | goto err_out_unlock; |
1138 | } | 1172 | } |
1139 | 1173 | ||
1140 | flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS])); | 1174 | flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS])); |
1141 | mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK])); | 1175 | mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK])); |
1142 | 1176 | ||
1143 | if (flags >= NFQA_CFG_F_MAX) { | 1177 | if (flags >= NFQA_CFG_F_MAX) { |
1144 | ret = -EOPNOTSUPP; | 1178 | ret = -EOPNOTSUPP; |
1145 | goto err_out_unlock; | 1179 | goto err_out_unlock; |
1146 | } | 1180 | } |
1147 | 1181 | ||
1148 | spin_lock_bh(&queue->lock); | 1182 | spin_lock_bh(&queue->lock); |
1149 | queue->flags &= ~mask; | 1183 | queue->flags &= ~mask; |
1150 | queue->flags |= flags & mask; | 1184 | queue->flags |= flags & mask; |
1151 | spin_unlock_bh(&queue->lock); | 1185 | spin_unlock_bh(&queue->lock); |
1152 | } | 1186 | } |
1153 | 1187 | ||
1154 | err_out_unlock: | 1188 | err_out_unlock: |
1155 | rcu_read_unlock(); | 1189 | rcu_read_unlock(); |
1156 | return ret; | 1190 | return ret; |
1157 | } | 1191 | } |
1158 | 1192 | ||
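nfqnl_recv_config() accepts the obsolete PF_(UN)BIND commands as no-ops, creates or destroys an instance for BIND/UNBIND (BIND fails with -EBUSY if the queue number is taken, and the new instance is tied to the sender's portid), and then applies the optional params, maxlen, and flags attributes; flags without a mask are rejected with -EINVAL. A typical userspace bind-and-configure sequence, assuming libnetfilter_queue (queue number 0 and maxlen 4096 are example values):

#include <stddef.h>
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter/nfnetlink_queue.h>

static struct nfq_q_handle *bind_queue(struct nfq_handle *h, nfq_callback *cb)
{
        struct nfq_q_handle *qh;

        qh = nfq_create_queue(h, 0, cb, NULL);       /* NFQNL_CFG_CMD_BIND */
        if (!qh)
                return NULL;
        nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff); /* NFQA_CFG_PARAMS */
        nfq_set_queue_maxlen(qh, 4096);              /* NFQA_CFG_QUEUE_MAXLEN */
        return qh;
}
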
1159 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { | 1193 | static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { |
1160 | [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp, | 1194 | [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp, |
1161 | .attr_count = NFQA_MAX, }, | 1195 | .attr_count = NFQA_MAX, }, |
1162 | [NFQNL_MSG_VERDICT] = { .call_rcu = nfqnl_recv_verdict, | 1196 | [NFQNL_MSG_VERDICT] = { .call_rcu = nfqnl_recv_verdict, |
1163 | .attr_count = NFQA_MAX, | 1197 | .attr_count = NFQA_MAX, |
1164 | .policy = nfqa_verdict_policy }, | 1198 | .policy = nfqa_verdict_policy }, |
1165 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, | 1199 | [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, |
1166 | .attr_count = NFQA_CFG_MAX, | 1200 | .attr_count = NFQA_CFG_MAX, |
1167 | .policy = nfqa_cfg_policy }, | 1201 | .policy = nfqa_cfg_policy }, |
1168 | [NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch, | 1202 | [NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch, |
1169 | .attr_count = NFQA_MAX, | 1203 | .attr_count = NFQA_MAX, |
1170 | .policy = nfqa_verdict_batch_policy }, | 1204 | .policy = nfqa_verdict_batch_policy }, |
1171 | }; | 1205 | }; |
1172 | 1206 | ||
1173 | static const struct nfnetlink_subsystem nfqnl_subsys = { | 1207 | static const struct nfnetlink_subsystem nfqnl_subsys = { |
1174 | .name = "nf_queue", | 1208 | .name = "nf_queue", |
1175 | .subsys_id = NFNL_SUBSYS_QUEUE, | 1209 | .subsys_id = NFNL_SUBSYS_QUEUE, |
1176 | .cb_count = NFQNL_MSG_MAX, | 1210 | .cb_count = NFQNL_MSG_MAX, |
1177 | .cb = nfqnl_cb, | 1211 | .cb = nfqnl_cb, |
1178 | }; | 1212 | }; |
1179 | 1213 | ||
1180 | #ifdef CONFIG_PROC_FS | 1214 | #ifdef CONFIG_PROC_FS |
1181 | struct iter_state { | 1215 | struct iter_state { |
1182 | struct seq_net_private p; | 1216 | struct seq_net_private p; |
1183 | unsigned int bucket; | 1217 | unsigned int bucket; |
1184 | }; | 1218 | }; |
1185 | 1219 | ||
1186 | static struct hlist_node *get_first(struct seq_file *seq) | 1220 | static struct hlist_node *get_first(struct seq_file *seq) |
1187 | { | 1221 | { |
1188 | struct iter_state *st = seq->private; | 1222 | struct iter_state *st = seq->private; |
1189 | struct net *net; | 1223 | struct net *net; |
1190 | struct nfnl_queue_net *q; | 1224 | struct nfnl_queue_net *q; |
1191 | 1225 | ||
1192 | if (!st) | 1226 | if (!st) |
1193 | return NULL; | 1227 | return NULL; |
1194 | 1228 | ||
1195 | net = seq_file_net(seq); | 1229 | net = seq_file_net(seq); |
1196 | q = nfnl_queue_pernet(net); | 1230 | q = nfnl_queue_pernet(net); |
1197 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { | 1231 | for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { |
1198 | if (!hlist_empty(&q->instance_table[st->bucket])) | 1232 | if (!hlist_empty(&q->instance_table[st->bucket])) |
1199 | return q->instance_table[st->bucket].first; | 1233 | return q->instance_table[st->bucket].first; |
1200 | } | 1234 | } |
1201 | return NULL; | 1235 | return NULL; |
1202 | } | 1236 | } |
1203 | 1237 | ||
1204 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) | 1238 | static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) |
1205 | { | 1239 | { |
1206 | struct iter_state *st = seq->private; | 1240 | struct iter_state *st = seq->private; |
1207 | struct net *net = seq_file_net(seq); | 1241 | struct net *net = seq_file_net(seq); |
1208 | 1242 | ||
1209 | h = h->next; | 1243 | h = h->next; |
1210 | while (!h) { | 1244 | while (!h) { |
1211 | struct nfnl_queue_net *q; | 1245 | struct nfnl_queue_net *q; |
1212 | 1246 | ||
1213 | if (++st->bucket >= INSTANCE_BUCKETS) | 1247 | if (++st->bucket >= INSTANCE_BUCKETS) |
1214 | return NULL; | 1248 | return NULL; |
1215 | 1249 | ||
1216 | q = nfnl_queue_pernet(net); | 1250 | q = nfnl_queue_pernet(net); |
1217 | h = q->instance_table[st->bucket].first; | 1251 | h = q->instance_table[st->bucket].first; |
1218 | } | 1252 | } |
1219 | return h; | 1253 | return h; |
1220 | } | 1254 | } |
1221 | 1255 | ||
1222 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) | 1256 | static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) |
1223 | { | 1257 | { |
1224 | struct hlist_node *head; | 1258 | struct hlist_node *head; |
1225 | head = get_first(seq); | 1259 | head = get_first(seq); |
1226 | 1260 | ||
1227 | if (head) | 1261 | if (head) |
1228 | while (pos && (head = get_next(seq, head))) | 1262 | while (pos && (head = get_next(seq, head))) |
1229 | pos--; | 1263 | pos--; |
1230 | return pos ? NULL : head; | 1264 | return pos ? NULL : head; |
1231 | } | 1265 | } |
1232 | 1266 | ||
1233 | static void *seq_start(struct seq_file *s, loff_t *pos) | 1267 | static void *seq_start(struct seq_file *s, loff_t *pos) |
1234 | __acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock) | 1268 | __acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock) |
1235 | { | 1269 | { |
1236 | spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock); | 1270 | spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock); |
1237 | return get_idx(s, *pos); | 1271 | return get_idx(s, *pos); |
1238 | } | 1272 | } |
1239 | 1273 | ||
1240 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) | 1274 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) |
1241 | { | 1275 | { |
1242 | (*pos)++; | 1276 | (*pos)++; |
1243 | return get_next(s, v); | 1277 | return get_next(s, v); |
1244 | } | 1278 | } |
1245 | 1279 | ||
1246 | static void seq_stop(struct seq_file *s, void *v) | 1280 | static void seq_stop(struct seq_file *s, void *v) |
1247 | __releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock) | 1281 | __releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock) |
1248 | { | 1282 | { |
1249 | spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock); | 1283 | spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock); |
1250 | } | 1284 | } |
1251 | 1285 | ||
1252 | static int seq_show(struct seq_file *s, void *v) | 1286 | static int seq_show(struct seq_file *s, void *v) |
1253 | { | 1287 | { |
1254 | const struct nfqnl_instance *inst = v; | 1288 | const struct nfqnl_instance *inst = v; |
1255 | 1289 | ||
1256 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", | 1290 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", |
1257 | inst->queue_num, | 1291 | inst->queue_num, |
1258 | inst->peer_portid, inst->queue_total, | 1292 | inst->peer_portid, inst->queue_total, |
1259 | inst->copy_mode, inst->copy_range, | 1293 | inst->copy_mode, inst->copy_range, |
1260 | inst->queue_dropped, inst->queue_user_dropped, | 1294 | inst->queue_dropped, inst->queue_user_dropped, |
1261 | inst->id_sequence, 1); | 1295 | inst->id_sequence, 1); |
1262 | } | 1296 | } |
1263 | 1297 | ||
1264 | static const struct seq_operations nfqnl_seq_ops = { | 1298 | static const struct seq_operations nfqnl_seq_ops = { |
1265 | .start = seq_start, | 1299 | .start = seq_start, |
1266 | .next = seq_next, | 1300 | .next = seq_next, |
1267 | .stop = seq_stop, | 1301 | .stop = seq_stop, |
1268 | .show = seq_show, | 1302 | .show = seq_show, |
1269 | }; | 1303 | }; |
1270 | 1304 | ||
1271 | static int nfqnl_open(struct inode *inode, struct file *file) | 1305 | static int nfqnl_open(struct inode *inode, struct file *file) |
1272 | { | 1306 | { |
1273 | return seq_open_net(inode, file, &nfqnl_seq_ops, | 1307 | return seq_open_net(inode, file, &nfqnl_seq_ops, |
1274 | sizeof(struct iter_state)); | 1308 | sizeof(struct iter_state)); |
1275 | } | 1309 | } |
1276 | 1310 | ||
1277 | static const struct file_operations nfqnl_file_ops = { | 1311 | static const struct file_operations nfqnl_file_ops = { |
1278 | .owner = THIS_MODULE, | 1312 | .owner = THIS_MODULE, |
1279 | .open = nfqnl_open, | 1313 | .open = nfqnl_open, |
1280 | .read = seq_read, | 1314 | .read = seq_read, |
1281 | .llseek = seq_lseek, | 1315 | .llseek = seq_lseek, |
1282 | .release = seq_release_net, | 1316 | .release = seq_release_net, |
1283 | }; | 1317 | }; |
1284 | 1318 | ||
1285 | #endif /* CONFIG_PROC_FS */ | 1319 | #endif /* CONFIG_PROC_FS */ |
1286 | 1320 | ||
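Each line of /proc/net/netfilter/nfnetlink_queue reports, in the order printed by seq_show() above: queue number, peer portid, current queue length, copy mode, copy range, queue_dropped, queue_user_dropped, the packet ID sequence counter, and a trailing constant 1.
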
1287 | static int __net_init nfnl_queue_net_init(struct net *net) | 1321 | static int __net_init nfnl_queue_net_init(struct net *net) |
1288 | { | 1322 | { |
1289 | unsigned int i; | 1323 | unsigned int i; |
1290 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 1324 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
1291 | 1325 | ||
1292 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 1326 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
1293 | INIT_HLIST_HEAD(&q->instance_table[i]); | 1327 | INIT_HLIST_HEAD(&q->instance_table[i]); |
1294 | 1328 | ||
1295 | spin_lock_init(&q->instances_lock); | 1329 | spin_lock_init(&q->instances_lock); |
1296 | 1330 | ||
1297 | #ifdef CONFIG_PROC_FS | 1331 | #ifdef CONFIG_PROC_FS |
1298 | if (!proc_create("nfnetlink_queue", 0440, | 1332 | if (!proc_create("nfnetlink_queue", 0440, |
1299 | net->nf.proc_netfilter, &nfqnl_file_ops)) | 1333 | net->nf.proc_netfilter, &nfqnl_file_ops)) |
1300 | return -ENOMEM; | 1334 | return -ENOMEM; |
1301 | #endif | 1335 | #endif |
1302 | return 0; | 1336 | return 0; |
1303 | } | 1337 | } |
1304 | 1338 | ||
1305 | static void __net_exit nfnl_queue_net_exit(struct net *net) | 1339 | static void __net_exit nfnl_queue_net_exit(struct net *net) |
1306 | { | 1340 | { |
1307 | #ifdef CONFIG_PROC_FS | 1341 | #ifdef CONFIG_PROC_FS |
1308 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); | 1342 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); |
1309 | #endif | 1343 | #endif |
1310 | } | 1344 | } |
1311 | 1345 | ||
1312 | static struct pernet_operations nfnl_queue_net_ops = { | 1346 | static struct pernet_operations nfnl_queue_net_ops = { |
1313 | .init = nfnl_queue_net_init, | 1347 | .init = nfnl_queue_net_init, |
1314 | .exit = nfnl_queue_net_exit, | 1348 | .exit = nfnl_queue_net_exit, |
1315 | .id = &nfnl_queue_net_id, | 1349 | .id = &nfnl_queue_net_id, |
1316 | .size = sizeof(struct nfnl_queue_net), | 1350 | .size = sizeof(struct nfnl_queue_net), |
1317 | }; | 1351 | }; |
1318 | 1352 | ||
1319 | static int __init nfnetlink_queue_init(void) | 1353 | static int __init nfnetlink_queue_init(void) |
1320 | { | 1354 | { |
1321 | int status = -ENOMEM; | 1355 | int status = -ENOMEM; |
1322 | 1356 | ||
1323 | netlink_register_notifier(&nfqnl_rtnl_notifier); | 1357 | netlink_register_notifier(&nfqnl_rtnl_notifier); |
1324 | status = nfnetlink_subsys_register(&nfqnl_subsys); | 1358 | status = nfnetlink_subsys_register(&nfqnl_subsys); |
1325 | if (status < 0) { | 1359 | if (status < 0) { |
1326 | pr_err("nf_queue: failed to create netlink socket\n"); | 1360 | pr_err("nf_queue: failed to create netlink socket\n"); |
1327 | goto cleanup_netlink_notifier; | 1361 | goto cleanup_netlink_notifier; |
1328 | } | 1362 | } |
1329 | 1363 | ||
1330 | status = register_pernet_subsys(&nfnl_queue_net_ops); | 1364 | status = register_pernet_subsys(&nfnl_queue_net_ops); |
1331 | if (status < 0) { | 1365 | if (status < 0) { |
1332 | pr_err("nf_queue: failed to register pernet ops\n"); | 1366 | pr_err("nf_queue: failed to register pernet ops\n"); |
1333 | goto cleanup_subsys; | 1367 | goto cleanup_subsys; |
1334 | } | 1368 | } |
1335 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1369 | register_netdevice_notifier(&nfqnl_dev_notifier); |
1336 | nf_register_queue_handler(&nfqh); | 1370 | nf_register_queue_handler(&nfqh); |
1337 | return status; | 1371 | return status; |
1338 | 1372 | ||
1339 | cleanup_subsys: | 1373 | cleanup_subsys: |
1340 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1374 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
1341 | cleanup_netlink_notifier: | 1375 | cleanup_netlink_notifier: |
1342 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1376 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
1343 | return status; | 1377 | return status; |
1344 | } | 1378 | } |
1345 | 1379 | ||
1346 | static void __exit nfnetlink_queue_fini(void) | 1380 | static void __exit nfnetlink_queue_fini(void) |
1347 | { | 1381 | { |
1348 | nf_unregister_queue_handler(); | 1382 | nf_unregister_queue_handler(); |
1349 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 1383 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
1350 | unregister_pernet_subsys(&nfnl_queue_net_ops); | 1384 | unregister_pernet_subsys(&nfnl_queue_net_ops); |
1351 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1385 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
1352 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1386 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
1353 | 1387 | ||
1354 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | 1388 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
1355 | } | 1389 | } |
1356 | 1390 | ||
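Module teardown mirrors init in reverse: the queue handler is unregistered first so no new packets can be enqueued, then the device notifier, pernet ops, nfnetlink subsystem, and netlink notifier are removed, and the final rcu_barrier() waits for any outstanding call_rcu() callbacks to finish before the module text can go away.
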
1357 | MODULE_DESCRIPTION("netfilter packet queue handler"); | 1391 | MODULE_DESCRIPTION("netfilter packet queue handler"); |
1358 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 1392 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
1359 | MODULE_LICENSE("GPL"); | 1393 | MODULE_LICENSE("GPL"); |
1360 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); | 1394 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); |
1361 | 1395 | ||
1362 | module_init(nfnetlink_queue_init); | 1396 | module_init(nfnetlink_queue_init); |
1363 | module_exit(nfnetlink_queue_fini); | 1397 | module_exit(nfnetlink_queue_fini); |
1364 | 1398 |