Commit a71258d79e3d05632e90c9f7db5ccf929d276529
Committed by: David S. Miller
1 parent: a676847b39
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
net: remove obsolete simple_strto<foo>
This patch removes the redundant occurrences of simple_strto<foo>.

Signed-off-by: Abhijit Pawar <abhi.c.pawar@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
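The redundancy in net/core/netpoll.c sits in netpoll_parse_options(): the destination port was parsed with an unchecked simple_strtol() and then immediately re-parsed by the checked kstrtou16() call, so the first conversion did nothing but discard errors. The relevant before/after, excerpted from the hunk shown in the diff below:

    /* before: unchecked parse whose result is immediately overwritten */
    np->remote_port = simple_strtol(cur, NULL, 10);
    if (kstrtou16(cur, 10, &np->remote_port))
            goto parse_failed;

    /* after: a single, error-checked conversion is sufficient */
    if (kstrtou16(cur, 10, &np->remote_port))
            goto parse_failed;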
Showing 3 changed files with 0 additions and 3 deletions (inline diff)
net/core/netpoll.c
1 | /* | 1 | /* |
2 | * Common framework for low-level network console, dump, and debugger code | 2 | * Common framework for low-level network console, dump, and debugger code |
3 | * | 3 | * |
4 | * Sep 8 2003 Matt Mackall <mpm@selenic.com> | 4 | * Sep 8 2003 Matt Mackall <mpm@selenic.com> |
5 | * | 5 | * |
6 | * based on the netconsole code from: | 6 | * based on the netconsole code from: |
7 | * | 7 | * |
8 | * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com> | 8 | * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com> |
9 | * Copyright (C) 2002 Red Hat, Inc. | 9 | * Copyright (C) 2002 Red Hat, Inc. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
16 | #include <linux/etherdevice.h> | 16 | #include <linux/etherdevice.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/if_arp.h> | 18 | #include <linux/if_arp.h> |
19 | #include <linux/inetdevice.h> | 19 | #include <linux/inetdevice.h> |
20 | #include <linux/inet.h> | 20 | #include <linux/inet.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/netpoll.h> | 22 | #include <linux/netpoll.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/export.h> | 28 | #include <linux/export.h> |
29 | #include <linux/if_vlan.h> | 29 | #include <linux/if_vlan.h> |
30 | #include <net/tcp.h> | 30 | #include <net/tcp.h> |
31 | #include <net/udp.h> | 31 | #include <net/udp.h> |
32 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
33 | #include <trace/events/napi.h> | 33 | #include <trace/events/napi.h> |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * We maintain a small pool of fully-sized skbs, to make sure the | 36 | * We maintain a small pool of fully-sized skbs, to make sure the |
37 | * message gets out even in extreme OOM situations. | 37 | * message gets out even in extreme OOM situations. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define MAX_UDP_CHUNK 1460 | 40 | #define MAX_UDP_CHUNK 1460 |
41 | #define MAX_SKBS 32 | 41 | #define MAX_SKBS 32 |
42 | 42 | ||
43 | static struct sk_buff_head skb_pool; | 43 | static struct sk_buff_head skb_pool; |
44 | 44 | ||
45 | static atomic_t trapped; | 45 | static atomic_t trapped; |
46 | 46 | ||
47 | #define USEC_PER_POLL 50 | 47 | #define USEC_PER_POLL 50 |
48 | #define NETPOLL_RX_ENABLED 1 | 48 | #define NETPOLL_RX_ENABLED 1 |
49 | #define NETPOLL_RX_DROP 2 | 49 | #define NETPOLL_RX_DROP 2 |
50 | 50 | ||
51 | #define MAX_SKB_SIZE \ | 51 | #define MAX_SKB_SIZE \ |
52 | (sizeof(struct ethhdr) + \ | 52 | (sizeof(struct ethhdr) + \ |
53 | sizeof(struct iphdr) + \ | 53 | sizeof(struct iphdr) + \ |
54 | sizeof(struct udphdr) + \ | 54 | sizeof(struct udphdr) + \ |
55 | MAX_UDP_CHUNK) | 55 | MAX_UDP_CHUNK) |
56 | 56 | ||
57 | static void zap_completion_queue(void); | 57 | static void zap_completion_queue(void); |
58 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); | 58 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); |
59 | 59 | ||
60 | static unsigned int carrier_timeout = 4; | 60 | static unsigned int carrier_timeout = 4; |
61 | module_param(carrier_timeout, uint, 0644); | 61 | module_param(carrier_timeout, uint, 0644); |
62 | 62 | ||
63 | #define np_info(np, fmt, ...) \ | 63 | #define np_info(np, fmt, ...) \ |
64 | pr_info("%s: " fmt, np->name, ##__VA_ARGS__) | 64 | pr_info("%s: " fmt, np->name, ##__VA_ARGS__) |
65 | #define np_err(np, fmt, ...) \ | 65 | #define np_err(np, fmt, ...) \ |
66 | pr_err("%s: " fmt, np->name, ##__VA_ARGS__) | 66 | pr_err("%s: " fmt, np->name, ##__VA_ARGS__) |
67 | #define np_notice(np, fmt, ...) \ | 67 | #define np_notice(np, fmt, ...) \ |
68 | pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) | 68 | pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) |
69 | 69 | ||
70 | static void queue_process(struct work_struct *work) | 70 | static void queue_process(struct work_struct *work) |
71 | { | 71 | { |
72 | struct netpoll_info *npinfo = | 72 | struct netpoll_info *npinfo = |
73 | container_of(work, struct netpoll_info, tx_work.work); | 73 | container_of(work, struct netpoll_info, tx_work.work); |
74 | struct sk_buff *skb; | 74 | struct sk_buff *skb; |
75 | unsigned long flags; | 75 | unsigned long flags; |
76 | 76 | ||
77 | while ((skb = skb_dequeue(&npinfo->txq))) { | 77 | while ((skb = skb_dequeue(&npinfo->txq))) { |
78 | struct net_device *dev = skb->dev; | 78 | struct net_device *dev = skb->dev; |
79 | const struct net_device_ops *ops = dev->netdev_ops; | 79 | const struct net_device_ops *ops = dev->netdev_ops; |
80 | struct netdev_queue *txq; | 80 | struct netdev_queue *txq; |
81 | 81 | ||
82 | if (!netif_device_present(dev) || !netif_running(dev)) { | 82 | if (!netif_device_present(dev) || !netif_running(dev)) { |
83 | __kfree_skb(skb); | 83 | __kfree_skb(skb); |
84 | continue; | 84 | continue; |
85 | } | 85 | } |
86 | 86 | ||
87 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 87 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
88 | 88 | ||
89 | local_irq_save(flags); | 89 | local_irq_save(flags); |
90 | __netif_tx_lock(txq, smp_processor_id()); | 90 | __netif_tx_lock(txq, smp_processor_id()); |
91 | if (netif_xmit_frozen_or_stopped(txq) || | 91 | if (netif_xmit_frozen_or_stopped(txq) || |
92 | ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { | 92 | ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { |
93 | skb_queue_head(&npinfo->txq, skb); | 93 | skb_queue_head(&npinfo->txq, skb); |
94 | __netif_tx_unlock(txq); | 94 | __netif_tx_unlock(txq); |
95 | local_irq_restore(flags); | 95 | local_irq_restore(flags); |
96 | 96 | ||
97 | schedule_delayed_work(&npinfo->tx_work, HZ/10); | 97 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
98 | return; | 98 | return; |
99 | } | 99 | } |
100 | __netif_tx_unlock(txq); | 100 | __netif_tx_unlock(txq); |
101 | local_irq_restore(flags); | 101 | local_irq_restore(flags); |
102 | } | 102 | } |
103 | } | 103 | } |
104 | 104 | ||
105 | static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, | 105 | static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, |
106 | unsigned short ulen, __be32 saddr, __be32 daddr) | 106 | unsigned short ulen, __be32 saddr, __be32 daddr) |
107 | { | 107 | { |
108 | __wsum psum; | 108 | __wsum psum; |
109 | 109 | ||
110 | if (uh->check == 0 || skb_csum_unnecessary(skb)) | 110 | if (uh->check == 0 || skb_csum_unnecessary(skb)) |
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); | 113 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); |
114 | 114 | ||
115 | if (skb->ip_summed == CHECKSUM_COMPLETE && | 115 | if (skb->ip_summed == CHECKSUM_COMPLETE && |
116 | !csum_fold(csum_add(psum, skb->csum))) | 116 | !csum_fold(csum_add(psum, skb->csum))) |
117 | return 0; | 117 | return 0; |
118 | 118 | ||
119 | skb->csum = psum; | 119 | skb->csum = psum; |
120 | 120 | ||
121 | return __skb_checksum_complete(skb); | 121 | return __skb_checksum_complete(skb); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Check whether delayed processing was scheduled for our NIC. If so, | 125 | * Check whether delayed processing was scheduled for our NIC. If so, |
126 | * we attempt to grab the poll lock and use ->poll() to pump the card. | 126 | * we attempt to grab the poll lock and use ->poll() to pump the card. |
127 | * If this fails, either we've recursed in ->poll() or it's already | 127 | * If this fails, either we've recursed in ->poll() or it's already |
128 | * running on another CPU. | 128 | * running on another CPU. |
129 | * | 129 | * |
130 | * Note: we don't mask interrupts with this lock because we're using | 130 | * Note: we don't mask interrupts with this lock because we're using |
131 | * trylock here and interrupts are already disabled in the softirq | 131 | * trylock here and interrupts are already disabled in the softirq |
132 | * case. Further, we test the poll_owner to avoid recursion on UP | 132 | * case. Further, we test the poll_owner to avoid recursion on UP |
133 | * systems where the lock doesn't exist. | 133 | * systems where the lock doesn't exist. |
134 | * | 134 | * |
135 | * In cases where there is bi-directional communications, reading only | 135 | * In cases where there is bi-directional communications, reading only |
136 | * one message at a time can lead to packets being dropped by the | 136 | * one message at a time can lead to packets being dropped by the |
137 | * network adapter, forcing superfluous retries and possibly timeouts. | 137 | * network adapter, forcing superfluous retries and possibly timeouts. |
138 | * Thus, we set our budget to greater than 1. | 138 | * Thus, we set our budget to greater than 1. |
139 | */ | 139 | */ |
140 | static int poll_one_napi(struct netpoll_info *npinfo, | 140 | static int poll_one_napi(struct netpoll_info *npinfo, |
141 | struct napi_struct *napi, int budget) | 141 | struct napi_struct *napi, int budget) |
142 | { | 142 | { |
143 | int work; | 143 | int work; |
144 | 144 | ||
145 | /* net_rx_action's ->poll() invocations and our's are | 145 | /* net_rx_action's ->poll() invocations and our's are |
146 | * synchronized by this test which is only made while | 146 | * synchronized by this test which is only made while |
147 | * holding the napi->poll_lock. | 147 | * holding the napi->poll_lock. |
148 | */ | 148 | */ |
149 | if (!test_bit(NAPI_STATE_SCHED, &napi->state)) | 149 | if (!test_bit(NAPI_STATE_SCHED, &napi->state)) |
150 | return budget; | 150 | return budget; |
151 | 151 | ||
152 | npinfo->rx_flags |= NETPOLL_RX_DROP; | 152 | npinfo->rx_flags |= NETPOLL_RX_DROP; |
153 | atomic_inc(&trapped); | 153 | atomic_inc(&trapped); |
154 | set_bit(NAPI_STATE_NPSVC, &napi->state); | 154 | set_bit(NAPI_STATE_NPSVC, &napi->state); |
155 | 155 | ||
156 | work = napi->poll(napi, budget); | 156 | work = napi->poll(napi, budget); |
157 | trace_napi_poll(napi); | 157 | trace_napi_poll(napi); |
158 | 158 | ||
159 | clear_bit(NAPI_STATE_NPSVC, &napi->state); | 159 | clear_bit(NAPI_STATE_NPSVC, &napi->state); |
160 | atomic_dec(&trapped); | 160 | atomic_dec(&trapped); |
161 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; | 161 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; |
162 | 162 | ||
163 | return budget - work; | 163 | return budget - work; |
164 | } | 164 | } |
165 | 165 | ||
166 | static void poll_napi(struct net_device *dev) | 166 | static void poll_napi(struct net_device *dev) |
167 | { | 167 | { |
168 | struct napi_struct *napi; | 168 | struct napi_struct *napi; |
169 | int budget = 16; | 169 | int budget = 16; |
170 | 170 | ||
171 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | 171 | list_for_each_entry(napi, &dev->napi_list, dev_list) { |
172 | if (napi->poll_owner != smp_processor_id() && | 172 | if (napi->poll_owner != smp_processor_id() && |
173 | spin_trylock(&napi->poll_lock)) { | 173 | spin_trylock(&napi->poll_lock)) { |
174 | budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), | 174 | budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), |
175 | napi, budget); | 175 | napi, budget); |
176 | spin_unlock(&napi->poll_lock); | 176 | spin_unlock(&napi->poll_lock); |
177 | 177 | ||
178 | if (!budget) | 178 | if (!budget) |
179 | break; | 179 | break; |
180 | } | 180 | } |
181 | } | 181 | } |
182 | } | 182 | } |
183 | 183 | ||
184 | static void service_arp_queue(struct netpoll_info *npi) | 184 | static void service_arp_queue(struct netpoll_info *npi) |
185 | { | 185 | { |
186 | if (npi) { | 186 | if (npi) { |
187 | struct sk_buff *skb; | 187 | struct sk_buff *skb; |
188 | 188 | ||
189 | while ((skb = skb_dequeue(&npi->arp_tx))) | 189 | while ((skb = skb_dequeue(&npi->arp_tx))) |
190 | netpoll_arp_reply(skb, npi); | 190 | netpoll_arp_reply(skb, npi); |
191 | } | 191 | } |
192 | } | 192 | } |
193 | 193 | ||
194 | static void netpoll_poll_dev(struct net_device *dev) | 194 | static void netpoll_poll_dev(struct net_device *dev) |
195 | { | 195 | { |
196 | const struct net_device_ops *ops; | 196 | const struct net_device_ops *ops; |
197 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); | 197 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); |
198 | 198 | ||
199 | if (!dev || !netif_running(dev)) | 199 | if (!dev || !netif_running(dev)) |
200 | return; | 200 | return; |
201 | 201 | ||
202 | ops = dev->netdev_ops; | 202 | ops = dev->netdev_ops; |
203 | if (!ops->ndo_poll_controller) | 203 | if (!ops->ndo_poll_controller) |
204 | return; | 204 | return; |
205 | 205 | ||
206 | /* Process pending work on NIC */ | 206 | /* Process pending work on NIC */ |
207 | ops->ndo_poll_controller(dev); | 207 | ops->ndo_poll_controller(dev); |
208 | 208 | ||
209 | poll_napi(dev); | 209 | poll_napi(dev); |
210 | 210 | ||
211 | if (dev->flags & IFF_SLAVE) { | 211 | if (dev->flags & IFF_SLAVE) { |
212 | if (ni) { | 212 | if (ni) { |
213 | struct net_device *bond_dev = dev->master; | 213 | struct net_device *bond_dev = dev->master; |
214 | struct sk_buff *skb; | 214 | struct sk_buff *skb; |
215 | struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); | 215 | struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); |
216 | while ((skb = skb_dequeue(&ni->arp_tx))) { | 216 | while ((skb = skb_dequeue(&ni->arp_tx))) { |
217 | skb->dev = bond_dev; | 217 | skb->dev = bond_dev; |
218 | skb_queue_tail(&bond_ni->arp_tx, skb); | 218 | skb_queue_tail(&bond_ni->arp_tx, skb); |
219 | } | 219 | } |
220 | } | 220 | } |
221 | } | 221 | } |
222 | 222 | ||
223 | service_arp_queue(ni); | 223 | service_arp_queue(ni); |
224 | 224 | ||
225 | zap_completion_queue(); | 225 | zap_completion_queue(); |
226 | } | 226 | } |
227 | 227 | ||
228 | static void refill_skbs(void) | 228 | static void refill_skbs(void) |
229 | { | 229 | { |
230 | struct sk_buff *skb; | 230 | struct sk_buff *skb; |
231 | unsigned long flags; | 231 | unsigned long flags; |
232 | 232 | ||
233 | spin_lock_irqsave(&skb_pool.lock, flags); | 233 | spin_lock_irqsave(&skb_pool.lock, flags); |
234 | while (skb_pool.qlen < MAX_SKBS) { | 234 | while (skb_pool.qlen < MAX_SKBS) { |
235 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); | 235 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); |
236 | if (!skb) | 236 | if (!skb) |
237 | break; | 237 | break; |
238 | 238 | ||
239 | __skb_queue_tail(&skb_pool, skb); | 239 | __skb_queue_tail(&skb_pool, skb); |
240 | } | 240 | } |
241 | spin_unlock_irqrestore(&skb_pool.lock, flags); | 241 | spin_unlock_irqrestore(&skb_pool.lock, flags); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void zap_completion_queue(void) | 244 | static void zap_completion_queue(void) |
245 | { | 245 | { |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | struct softnet_data *sd = &get_cpu_var(softnet_data); | 247 | struct softnet_data *sd = &get_cpu_var(softnet_data); |
248 | 248 | ||
249 | if (sd->completion_queue) { | 249 | if (sd->completion_queue) { |
250 | struct sk_buff *clist; | 250 | struct sk_buff *clist; |
251 | 251 | ||
252 | local_irq_save(flags); | 252 | local_irq_save(flags); |
253 | clist = sd->completion_queue; | 253 | clist = sd->completion_queue; |
254 | sd->completion_queue = NULL; | 254 | sd->completion_queue = NULL; |
255 | local_irq_restore(flags); | 255 | local_irq_restore(flags); |
256 | 256 | ||
257 | while (clist != NULL) { | 257 | while (clist != NULL) { |
258 | struct sk_buff *skb = clist; | 258 | struct sk_buff *skb = clist; |
259 | clist = clist->next; | 259 | clist = clist->next; |
260 | if (skb->destructor) { | 260 | if (skb->destructor) { |
261 | atomic_inc(&skb->users); | 261 | atomic_inc(&skb->users); |
262 | dev_kfree_skb_any(skb); /* put this one back */ | 262 | dev_kfree_skb_any(skb); /* put this one back */ |
263 | } else { | 263 | } else { |
264 | __kfree_skb(skb); | 264 | __kfree_skb(skb); |
265 | } | 265 | } |
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | put_cpu_var(softnet_data); | 269 | put_cpu_var(softnet_data); |
270 | } | 270 | } |
271 | 271 | ||
272 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) | 272 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) |
273 | { | 273 | { |
274 | int count = 0; | 274 | int count = 0; |
275 | struct sk_buff *skb; | 275 | struct sk_buff *skb; |
276 | 276 | ||
277 | zap_completion_queue(); | 277 | zap_completion_queue(); |
278 | refill_skbs(); | 278 | refill_skbs(); |
279 | repeat: | 279 | repeat: |
280 | 280 | ||
281 | skb = alloc_skb(len, GFP_ATOMIC); | 281 | skb = alloc_skb(len, GFP_ATOMIC); |
282 | if (!skb) | 282 | if (!skb) |
283 | skb = skb_dequeue(&skb_pool); | 283 | skb = skb_dequeue(&skb_pool); |
284 | 284 | ||
285 | if (!skb) { | 285 | if (!skb) { |
286 | if (++count < 10) { | 286 | if (++count < 10) { |
287 | netpoll_poll_dev(np->dev); | 287 | netpoll_poll_dev(np->dev); |
288 | goto repeat; | 288 | goto repeat; |
289 | } | 289 | } |
290 | return NULL; | 290 | return NULL; |
291 | } | 291 | } |
292 | 292 | ||
293 | atomic_set(&skb->users, 1); | 293 | atomic_set(&skb->users, 1); |
294 | skb_reserve(skb, reserve); | 294 | skb_reserve(skb, reserve); |
295 | return skb; | 295 | return skb; |
296 | } | 296 | } |
297 | 297 | ||
298 | static int netpoll_owner_active(struct net_device *dev) | 298 | static int netpoll_owner_active(struct net_device *dev) |
299 | { | 299 | { |
300 | struct napi_struct *napi; | 300 | struct napi_struct *napi; |
301 | 301 | ||
302 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | 302 | list_for_each_entry(napi, &dev->napi_list, dev_list) { |
303 | if (napi->poll_owner == smp_processor_id()) | 303 | if (napi->poll_owner == smp_processor_id()) |
304 | return 1; | 304 | return 1; |
305 | } | 305 | } |
306 | return 0; | 306 | return 0; |
307 | } | 307 | } |
308 | 308 | ||
309 | /* call with IRQ disabled */ | 309 | /* call with IRQ disabled */ |
310 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | 310 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, |
311 | struct net_device *dev) | 311 | struct net_device *dev) |
312 | { | 312 | { |
313 | int status = NETDEV_TX_BUSY; | 313 | int status = NETDEV_TX_BUSY; |
314 | unsigned long tries; | 314 | unsigned long tries; |
315 | const struct net_device_ops *ops = dev->netdev_ops; | 315 | const struct net_device_ops *ops = dev->netdev_ops; |
316 | /* It is up to the caller to keep npinfo alive. */ | 316 | /* It is up to the caller to keep npinfo alive. */ |
317 | struct netpoll_info *npinfo; | 317 | struct netpoll_info *npinfo; |
318 | 318 | ||
319 | WARN_ON_ONCE(!irqs_disabled()); | 319 | WARN_ON_ONCE(!irqs_disabled()); |
320 | 320 | ||
321 | npinfo = rcu_dereference_bh(np->dev->npinfo); | 321 | npinfo = rcu_dereference_bh(np->dev->npinfo); |
322 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { | 322 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { |
323 | __kfree_skb(skb); | 323 | __kfree_skb(skb); |
324 | return; | 324 | return; |
325 | } | 325 | } |
326 | 326 | ||
327 | /* don't get messages out of order, and no recursion */ | 327 | /* don't get messages out of order, and no recursion */ |
328 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 328 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
329 | struct netdev_queue *txq; | 329 | struct netdev_queue *txq; |
330 | 330 | ||
331 | txq = netdev_pick_tx(dev, skb); | 331 | txq = netdev_pick_tx(dev, skb); |
332 | 332 | ||
333 | /* try until next clock tick */ | 333 | /* try until next clock tick */ |
334 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 334 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
335 | tries > 0; --tries) { | 335 | tries > 0; --tries) { |
336 | if (__netif_tx_trylock(txq)) { | 336 | if (__netif_tx_trylock(txq)) { |
337 | if (!netif_xmit_stopped(txq)) { | 337 | if (!netif_xmit_stopped(txq)) { |
338 | if (vlan_tx_tag_present(skb) && | 338 | if (vlan_tx_tag_present(skb) && |
339 | !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { | 339 | !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { |
340 | skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); | 340 | skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); |
341 | if (unlikely(!skb)) | 341 | if (unlikely(!skb)) |
342 | break; | 342 | break; |
343 | skb->vlan_tci = 0; | 343 | skb->vlan_tci = 0; |
344 | } | 344 | } |
345 | 345 | ||
346 | status = ops->ndo_start_xmit(skb, dev); | 346 | status = ops->ndo_start_xmit(skb, dev); |
347 | if (status == NETDEV_TX_OK) | 347 | if (status == NETDEV_TX_OK) |
348 | txq_trans_update(txq); | 348 | txq_trans_update(txq); |
349 | } | 349 | } |
350 | __netif_tx_unlock(txq); | 350 | __netif_tx_unlock(txq); |
351 | 351 | ||
352 | if (status == NETDEV_TX_OK) | 352 | if (status == NETDEV_TX_OK) |
353 | break; | 353 | break; |
354 | 354 | ||
355 | } | 355 | } |
356 | 356 | ||
357 | /* tickle device maybe there is some cleanup */ | 357 | /* tickle device maybe there is some cleanup */ |
358 | netpoll_poll_dev(np->dev); | 358 | netpoll_poll_dev(np->dev); |
359 | 359 | ||
360 | udelay(USEC_PER_POLL); | 360 | udelay(USEC_PER_POLL); |
361 | } | 361 | } |
362 | 362 | ||
363 | WARN_ONCE(!irqs_disabled(), | 363 | WARN_ONCE(!irqs_disabled(), |
364 | "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", | 364 | "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", |
365 | dev->name, ops->ndo_start_xmit); | 365 | dev->name, ops->ndo_start_xmit); |
366 | 366 | ||
367 | } | 367 | } |
368 | 368 | ||
369 | if (status != NETDEV_TX_OK) { | 369 | if (status != NETDEV_TX_OK) { |
370 | skb_queue_tail(&npinfo->txq, skb); | 370 | skb_queue_tail(&npinfo->txq, skb); |
371 | schedule_delayed_work(&npinfo->tx_work,0); | 371 | schedule_delayed_work(&npinfo->tx_work,0); |
372 | } | 372 | } |
373 | } | 373 | } |
374 | EXPORT_SYMBOL(netpoll_send_skb_on_dev); | 374 | EXPORT_SYMBOL(netpoll_send_skb_on_dev); |
375 | 375 | ||
376 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 376 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
377 | { | 377 | { |
378 | int total_len, ip_len, udp_len; | 378 | int total_len, ip_len, udp_len; |
379 | struct sk_buff *skb; | 379 | struct sk_buff *skb; |
380 | struct udphdr *udph; | 380 | struct udphdr *udph; |
381 | struct iphdr *iph; | 381 | struct iphdr *iph; |
382 | struct ethhdr *eth; | 382 | struct ethhdr *eth; |
383 | static atomic_t ip_ident; | 383 | static atomic_t ip_ident; |
384 | 384 | ||
385 | udp_len = len + sizeof(*udph); | 385 | udp_len = len + sizeof(*udph); |
386 | ip_len = udp_len + sizeof(*iph); | 386 | ip_len = udp_len + sizeof(*iph); |
387 | total_len = ip_len + LL_RESERVED_SPACE(np->dev); | 387 | total_len = ip_len + LL_RESERVED_SPACE(np->dev); |
388 | 388 | ||
389 | skb = find_skb(np, total_len + np->dev->needed_tailroom, | 389 | skb = find_skb(np, total_len + np->dev->needed_tailroom, |
390 | total_len - len); | 390 | total_len - len); |
391 | if (!skb) | 391 | if (!skb) |
392 | return; | 392 | return; |
393 | 393 | ||
394 | skb_copy_to_linear_data(skb, msg, len); | 394 | skb_copy_to_linear_data(skb, msg, len); |
395 | skb_put(skb, len); | 395 | skb_put(skb, len); |
396 | 396 | ||
397 | skb_push(skb, sizeof(*udph)); | 397 | skb_push(skb, sizeof(*udph)); |
398 | skb_reset_transport_header(skb); | 398 | skb_reset_transport_header(skb); |
399 | udph = udp_hdr(skb); | 399 | udph = udp_hdr(skb); |
400 | udph->source = htons(np->local_port); | 400 | udph->source = htons(np->local_port); |
401 | udph->dest = htons(np->remote_port); | 401 | udph->dest = htons(np->remote_port); |
402 | udph->len = htons(udp_len); | 402 | udph->len = htons(udp_len); |
403 | udph->check = 0; | 403 | udph->check = 0; |
404 | udph->check = csum_tcpudp_magic(np->local_ip, | 404 | udph->check = csum_tcpudp_magic(np->local_ip, |
405 | np->remote_ip, | 405 | np->remote_ip, |
406 | udp_len, IPPROTO_UDP, | 406 | udp_len, IPPROTO_UDP, |
407 | csum_partial(udph, udp_len, 0)); | 407 | csum_partial(udph, udp_len, 0)); |
408 | if (udph->check == 0) | 408 | if (udph->check == 0) |
409 | udph->check = CSUM_MANGLED_0; | 409 | udph->check = CSUM_MANGLED_0; |
410 | 410 | ||
411 | skb_push(skb, sizeof(*iph)); | 411 | skb_push(skb, sizeof(*iph)); |
412 | skb_reset_network_header(skb); | 412 | skb_reset_network_header(skb); |
413 | iph = ip_hdr(skb); | 413 | iph = ip_hdr(skb); |
414 | 414 | ||
415 | /* iph->version = 4; iph->ihl = 5; */ | 415 | /* iph->version = 4; iph->ihl = 5; */ |
416 | put_unaligned(0x45, (unsigned char *)iph); | 416 | put_unaligned(0x45, (unsigned char *)iph); |
417 | iph->tos = 0; | 417 | iph->tos = 0; |
418 | put_unaligned(htons(ip_len), &(iph->tot_len)); | 418 | put_unaligned(htons(ip_len), &(iph->tot_len)); |
419 | iph->id = htons(atomic_inc_return(&ip_ident)); | 419 | iph->id = htons(atomic_inc_return(&ip_ident)); |
420 | iph->frag_off = 0; | 420 | iph->frag_off = 0; |
421 | iph->ttl = 64; | 421 | iph->ttl = 64; |
422 | iph->protocol = IPPROTO_UDP; | 422 | iph->protocol = IPPROTO_UDP; |
423 | iph->check = 0; | 423 | iph->check = 0; |
424 | put_unaligned(np->local_ip, &(iph->saddr)); | 424 | put_unaligned(np->local_ip, &(iph->saddr)); |
425 | put_unaligned(np->remote_ip, &(iph->daddr)); | 425 | put_unaligned(np->remote_ip, &(iph->daddr)); |
426 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 426 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
427 | 427 | ||
428 | eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); | 428 | eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); |
429 | skb_reset_mac_header(skb); | 429 | skb_reset_mac_header(skb); |
430 | skb->protocol = eth->h_proto = htons(ETH_P_IP); | 430 | skb->protocol = eth->h_proto = htons(ETH_P_IP); |
431 | memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN); | 431 | memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN); |
432 | memcpy(eth->h_dest, np->remote_mac, ETH_ALEN); | 432 | memcpy(eth->h_dest, np->remote_mac, ETH_ALEN); |
433 | 433 | ||
434 | skb->dev = np->dev; | 434 | skb->dev = np->dev; |
435 | 435 | ||
436 | netpoll_send_skb(np, skb); | 436 | netpoll_send_skb(np, skb); |
437 | } | 437 | } |
438 | EXPORT_SYMBOL(netpoll_send_udp); | 438 | EXPORT_SYMBOL(netpoll_send_udp); |
439 | 439 | ||
440 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) | 440 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) |
441 | { | 441 | { |
442 | struct arphdr *arp; | 442 | struct arphdr *arp; |
443 | unsigned char *arp_ptr; | 443 | unsigned char *arp_ptr; |
444 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; | 444 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; |
445 | __be32 sip, tip; | 445 | __be32 sip, tip; |
446 | unsigned char *sha; | 446 | unsigned char *sha; |
447 | struct sk_buff *send_skb; | 447 | struct sk_buff *send_skb; |
448 | struct netpoll *np, *tmp; | 448 | struct netpoll *np, *tmp; |
449 | unsigned long flags; | 449 | unsigned long flags; |
450 | int hlen, tlen; | 450 | int hlen, tlen; |
451 | int hits = 0; | 451 | int hits = 0; |
452 | 452 | ||
453 | if (list_empty(&npinfo->rx_np)) | 453 | if (list_empty(&npinfo->rx_np)) |
454 | return; | 454 | return; |
455 | 455 | ||
456 | /* Before checking the packet, we do some early | 456 | /* Before checking the packet, we do some early |
457 | inspection whether this is interesting at all */ | 457 | inspection whether this is interesting at all */ |
458 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 458 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
459 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { | 459 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
460 | if (np->dev == skb->dev) | 460 | if (np->dev == skb->dev) |
461 | hits++; | 461 | hits++; |
462 | } | 462 | } |
463 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 463 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
464 | 464 | ||
465 | /* No netpoll struct is using this dev */ | 465 | /* No netpoll struct is using this dev */ |
466 | if (!hits) | 466 | if (!hits) |
467 | return; | 467 | return; |
468 | 468 | ||
469 | /* No arp on this interface */ | 469 | /* No arp on this interface */ |
470 | if (skb->dev->flags & IFF_NOARP) | 470 | if (skb->dev->flags & IFF_NOARP) |
471 | return; | 471 | return; |
472 | 472 | ||
473 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) | 473 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
474 | return; | 474 | return; |
475 | 475 | ||
476 | skb_reset_network_header(skb); | 476 | skb_reset_network_header(skb); |
477 | skb_reset_transport_header(skb); | 477 | skb_reset_transport_header(skb); |
478 | arp = arp_hdr(skb); | 478 | arp = arp_hdr(skb); |
479 | 479 | ||
480 | if ((arp->ar_hrd != htons(ARPHRD_ETHER) && | 480 | if ((arp->ar_hrd != htons(ARPHRD_ETHER) && |
481 | arp->ar_hrd != htons(ARPHRD_IEEE802)) || | 481 | arp->ar_hrd != htons(ARPHRD_IEEE802)) || |
482 | arp->ar_pro != htons(ETH_P_IP) || | 482 | arp->ar_pro != htons(ETH_P_IP) || |
483 | arp->ar_op != htons(ARPOP_REQUEST)) | 483 | arp->ar_op != htons(ARPOP_REQUEST)) |
484 | return; | 484 | return; |
485 | 485 | ||
486 | arp_ptr = (unsigned char *)(arp+1); | 486 | arp_ptr = (unsigned char *)(arp+1); |
487 | /* save the location of the src hw addr */ | 487 | /* save the location of the src hw addr */ |
488 | sha = arp_ptr; | 488 | sha = arp_ptr; |
489 | arp_ptr += skb->dev->addr_len; | 489 | arp_ptr += skb->dev->addr_len; |
490 | memcpy(&sip, arp_ptr, 4); | 490 | memcpy(&sip, arp_ptr, 4); |
491 | arp_ptr += 4; | 491 | arp_ptr += 4; |
492 | /* If we actually cared about dst hw addr, | 492 | /* If we actually cared about dst hw addr, |
493 | it would get copied here */ | 493 | it would get copied here */ |
494 | arp_ptr += skb->dev->addr_len; | 494 | arp_ptr += skb->dev->addr_len; |
495 | memcpy(&tip, arp_ptr, 4); | 495 | memcpy(&tip, arp_ptr, 4); |
496 | 496 | ||
497 | /* Should we ignore arp? */ | 497 | /* Should we ignore arp? */ |
498 | if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) | 498 | if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) |
499 | return; | 499 | return; |
500 | 500 | ||
501 | size = arp_hdr_len(skb->dev); | 501 | size = arp_hdr_len(skb->dev); |
502 | 502 | ||
503 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 503 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
504 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { | 504 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
505 | if (tip != np->local_ip) | 505 | if (tip != np->local_ip) |
506 | continue; | 506 | continue; |
507 | 507 | ||
508 | hlen = LL_RESERVED_SPACE(np->dev); | 508 | hlen = LL_RESERVED_SPACE(np->dev); |
509 | tlen = np->dev->needed_tailroom; | 509 | tlen = np->dev->needed_tailroom; |
510 | send_skb = find_skb(np, size + hlen + tlen, hlen); | 510 | send_skb = find_skb(np, size + hlen + tlen, hlen); |
511 | if (!send_skb) | 511 | if (!send_skb) |
512 | continue; | 512 | continue; |
513 | 513 | ||
514 | skb_reset_network_header(send_skb); | 514 | skb_reset_network_header(send_skb); |
515 | arp = (struct arphdr *) skb_put(send_skb, size); | 515 | arp = (struct arphdr *) skb_put(send_skb, size); |
516 | send_skb->dev = skb->dev; | 516 | send_skb->dev = skb->dev; |
517 | send_skb->protocol = htons(ETH_P_ARP); | 517 | send_skb->protocol = htons(ETH_P_ARP); |
518 | 518 | ||
519 | /* Fill the device header for the ARP frame */ | 519 | /* Fill the device header for the ARP frame */ |
520 | if (dev_hard_header(send_skb, skb->dev, ptype, | 520 | if (dev_hard_header(send_skb, skb->dev, ptype, |
521 | sha, np->dev->dev_addr, | 521 | sha, np->dev->dev_addr, |
522 | send_skb->len) < 0) { | 522 | send_skb->len) < 0) { |
523 | kfree_skb(send_skb); | 523 | kfree_skb(send_skb); |
524 | continue; | 524 | continue; |
525 | } | 525 | } |
526 | 526 | ||
527 | /* | 527 | /* |
528 | * Fill out the arp protocol part. | 528 | * Fill out the arp protocol part. |
529 | * | 529 | * |
530 | * we only support ethernet device type, | 530 | * we only support ethernet device type, |
531 | * which (according to RFC 1390) should | 531 | * which (according to RFC 1390) should |
532 | * always equal 1 (Ethernet). | 532 | * always equal 1 (Ethernet). |
533 | */ | 533 | */ |
534 | 534 | ||
535 | arp->ar_hrd = htons(np->dev->type); | 535 | arp->ar_hrd = htons(np->dev->type); |
536 | arp->ar_pro = htons(ETH_P_IP); | 536 | arp->ar_pro = htons(ETH_P_IP); |
537 | arp->ar_hln = np->dev->addr_len; | 537 | arp->ar_hln = np->dev->addr_len; |
538 | arp->ar_pln = 4; | 538 | arp->ar_pln = 4; |
539 | arp->ar_op = htons(type); | 539 | arp->ar_op = htons(type); |
540 | 540 | ||
541 | arp_ptr = (unsigned char *)(arp + 1); | 541 | arp_ptr = (unsigned char *)(arp + 1); |
542 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); | 542 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); |
543 | arp_ptr += np->dev->addr_len; | 543 | arp_ptr += np->dev->addr_len; |
544 | memcpy(arp_ptr, &tip, 4); | 544 | memcpy(arp_ptr, &tip, 4); |
545 | arp_ptr += 4; | 545 | arp_ptr += 4; |
546 | memcpy(arp_ptr, sha, np->dev->addr_len); | 546 | memcpy(arp_ptr, sha, np->dev->addr_len); |
547 | arp_ptr += np->dev->addr_len; | 547 | arp_ptr += np->dev->addr_len; |
548 | memcpy(arp_ptr, &sip, 4); | 548 | memcpy(arp_ptr, &sip, 4); |
549 | 549 | ||
550 | netpoll_send_skb(np, send_skb); | 550 | netpoll_send_skb(np, send_skb); |
551 | 551 | ||
552 | /* If there are several rx_hooks for the same address, | 552 | /* If there are several rx_hooks for the same address, |
553 | we're fine by sending a single reply */ | 553 | we're fine by sending a single reply */ |
554 | break; | 554 | break; |
555 | } | 555 | } |
556 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 556 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
557 | } | 557 | } |
558 | 558 | ||
559 | int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | 559 | int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) |
560 | { | 560 | { |
561 | int proto, len, ulen; | 561 | int proto, len, ulen; |
562 | int hits = 0; | 562 | int hits = 0; |
563 | const struct iphdr *iph; | 563 | const struct iphdr *iph; |
564 | struct udphdr *uh; | 564 | struct udphdr *uh; |
565 | struct netpoll *np, *tmp; | 565 | struct netpoll *np, *tmp; |
566 | 566 | ||
567 | if (list_empty(&npinfo->rx_np)) | 567 | if (list_empty(&npinfo->rx_np)) |
568 | goto out; | 568 | goto out; |
569 | 569 | ||
570 | if (skb->dev->type != ARPHRD_ETHER) | 570 | if (skb->dev->type != ARPHRD_ETHER) |
571 | goto out; | 571 | goto out; |
572 | 572 | ||
573 | /* check if netpoll clients need ARP */ | 573 | /* check if netpoll clients need ARP */ |
574 | if (skb->protocol == htons(ETH_P_ARP) && | 574 | if (skb->protocol == htons(ETH_P_ARP) && |
575 | atomic_read(&trapped)) { | 575 | atomic_read(&trapped)) { |
576 | skb_queue_tail(&npinfo->arp_tx, skb); | 576 | skb_queue_tail(&npinfo->arp_tx, skb); |
577 | return 1; | 577 | return 1; |
578 | } | 578 | } |
579 | 579 | ||
580 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { | 580 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { |
581 | skb = vlan_untag(skb); | 581 | skb = vlan_untag(skb); |
582 | if (unlikely(!skb)) | 582 | if (unlikely(!skb)) |
583 | goto out; | 583 | goto out; |
584 | } | 584 | } |
585 | 585 | ||
586 | proto = ntohs(eth_hdr(skb)->h_proto); | 586 | proto = ntohs(eth_hdr(skb)->h_proto); |
587 | if (proto != ETH_P_IP) | 587 | if (proto != ETH_P_IP) |
588 | goto out; | 588 | goto out; |
589 | if (skb->pkt_type == PACKET_OTHERHOST) | 589 | if (skb->pkt_type == PACKET_OTHERHOST) |
590 | goto out; | 590 | goto out; |
591 | if (skb_shared(skb)) | 591 | if (skb_shared(skb)) |
592 | goto out; | 592 | goto out; |
593 | 593 | ||
594 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) | 594 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) |
595 | goto out; | 595 | goto out; |
596 | iph = (struct iphdr *)skb->data; | 596 | iph = (struct iphdr *)skb->data; |
597 | if (iph->ihl < 5 || iph->version != 4) | 597 | if (iph->ihl < 5 || iph->version != 4) |
598 | goto out; | 598 | goto out; |
599 | if (!pskb_may_pull(skb, iph->ihl*4)) | 599 | if (!pskb_may_pull(skb, iph->ihl*4)) |
600 | goto out; | 600 | goto out; |
601 | iph = (struct iphdr *)skb->data; | 601 | iph = (struct iphdr *)skb->data; |
602 | if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) | 602 | if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) |
603 | goto out; | 603 | goto out; |
604 | 604 | ||
605 | len = ntohs(iph->tot_len); | 605 | len = ntohs(iph->tot_len); |
606 | if (skb->len < len || len < iph->ihl*4) | 606 | if (skb->len < len || len < iph->ihl*4) |
607 | goto out; | 607 | goto out; |
608 | 608 | ||
609 | /* | 609 | /* |
610 | * Our transport medium may have padded the buffer out. | 610 | * Our transport medium may have padded the buffer out. |
611 | * Now We trim to the true length of the frame. | 611 | * Now We trim to the true length of the frame. |
612 | */ | 612 | */ |
613 | if (pskb_trim_rcsum(skb, len)) | 613 | if (pskb_trim_rcsum(skb, len)) |
614 | goto out; | 614 | goto out; |
615 | 615 | ||
616 | iph = (struct iphdr *)skb->data; | 616 | iph = (struct iphdr *)skb->data; |
617 | if (iph->protocol != IPPROTO_UDP) | 617 | if (iph->protocol != IPPROTO_UDP) |
618 | goto out; | 618 | goto out; |
619 | 619 | ||
620 | len -= iph->ihl*4; | 620 | len -= iph->ihl*4; |
621 | uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); | 621 | uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); |
622 | ulen = ntohs(uh->len); | 622 | ulen = ntohs(uh->len); |
623 | 623 | ||
624 | if (ulen != len) | 624 | if (ulen != len) |
625 | goto out; | 625 | goto out; |
626 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) | 626 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) |
627 | goto out; | 627 | goto out; |
628 | 628 | ||
629 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { | 629 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
630 | if (np->local_ip && np->local_ip != iph->daddr) | 630 | if (np->local_ip && np->local_ip != iph->daddr) |
631 | continue; | 631 | continue; |
632 | if (np->remote_ip && np->remote_ip != iph->saddr) | 632 | if (np->remote_ip && np->remote_ip != iph->saddr) |
633 | continue; | 633 | continue; |
634 | if (np->local_port && np->local_port != ntohs(uh->dest)) | 634 | if (np->local_port && np->local_port != ntohs(uh->dest)) |
635 | continue; | 635 | continue; |
636 | 636 | ||
637 | np->rx_hook(np, ntohs(uh->source), | 637 | np->rx_hook(np, ntohs(uh->source), |
638 | (char *)(uh+1), | 638 | (char *)(uh+1), |
639 | ulen - sizeof(struct udphdr)); | 639 | ulen - sizeof(struct udphdr)); |
640 | hits++; | 640 | hits++; |
641 | } | 641 | } |
642 | 642 | ||
643 | if (!hits) | 643 | if (!hits) |
644 | goto out; | 644 | goto out; |
645 | 645 | ||
646 | kfree_skb(skb); | 646 | kfree_skb(skb); |
647 | return 1; | 647 | return 1; |
648 | 648 | ||
649 | out: | 649 | out: |
650 | if (atomic_read(&trapped)) { | 650 | if (atomic_read(&trapped)) { |
651 | kfree_skb(skb); | 651 | kfree_skb(skb); |
652 | return 1; | 652 | return 1; |
653 | } | 653 | } |
654 | 654 | ||
655 | return 0; | 655 | return 0; |
656 | } | 656 | } |
657 | 657 | ||
658 | void netpoll_print_options(struct netpoll *np) | 658 | void netpoll_print_options(struct netpoll *np) |
659 | { | 659 | { |
660 | np_info(np, "local port %d\n", np->local_port); | 660 | np_info(np, "local port %d\n", np->local_port); |
661 | np_info(np, "local IP %pI4\n", &np->local_ip); | 661 | np_info(np, "local IP %pI4\n", &np->local_ip); |
662 | np_info(np, "interface '%s'\n", np->dev_name); | 662 | np_info(np, "interface '%s'\n", np->dev_name); |
663 | np_info(np, "remote port %d\n", np->remote_port); | 663 | np_info(np, "remote port %d\n", np->remote_port); |
664 | np_info(np, "remote IP %pI4\n", &np->remote_ip); | 664 | np_info(np, "remote IP %pI4\n", &np->remote_ip); |
665 | np_info(np, "remote ethernet address %pM\n", np->remote_mac); | 665 | np_info(np, "remote ethernet address %pM\n", np->remote_mac); |
666 | } | 666 | } |
667 | EXPORT_SYMBOL(netpoll_print_options); | 667 | EXPORT_SYMBOL(netpoll_print_options); |
668 | 668 | ||
669 | int netpoll_parse_options(struct netpoll *np, char *opt) | 669 | int netpoll_parse_options(struct netpoll *np, char *opt) |
670 | { | 670 | { |
671 | char *cur=opt, *delim; | 671 | char *cur=opt, *delim; |
672 | 672 | ||
673 | if (*cur != '@') { | 673 | if (*cur != '@') { |
674 | if ((delim = strchr(cur, '@')) == NULL) | 674 | if ((delim = strchr(cur, '@')) == NULL) |
675 | goto parse_failed; | 675 | goto parse_failed; |
676 | *delim = 0; | 676 | *delim = 0; |
677 | if (kstrtou16(cur, 10, &np->local_port)) | 677 | if (kstrtou16(cur, 10, &np->local_port)) |
678 | goto parse_failed; | 678 | goto parse_failed; |
679 | cur = delim; | 679 | cur = delim; |
680 | } | 680 | } |
681 | cur++; | 681 | cur++; |
682 | 682 | ||
683 | if (*cur != '/') { | 683 | if (*cur != '/') { |
684 | if ((delim = strchr(cur, '/')) == NULL) | 684 | if ((delim = strchr(cur, '/')) == NULL) |
685 | goto parse_failed; | 685 | goto parse_failed; |
686 | *delim = 0; | 686 | *delim = 0; |
687 | np->local_ip = in_aton(cur); | 687 | np->local_ip = in_aton(cur); |
688 | cur = delim; | 688 | cur = delim; |
689 | } | 689 | } |
690 | cur++; | 690 | cur++; |
691 | 691 | ||
692 | if (*cur != ',') { | 692 | if (*cur != ',') { |
693 | /* parse out dev name */ | 693 | /* parse out dev name */ |
694 | if ((delim = strchr(cur, ',')) == NULL) | 694 | if ((delim = strchr(cur, ',')) == NULL) |
695 | goto parse_failed; | 695 | goto parse_failed; |
696 | *delim = 0; | 696 | *delim = 0; |
697 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); | 697 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); |
698 | cur = delim; | 698 | cur = delim; |
699 | } | 699 | } |
700 | cur++; | 700 | cur++; |
701 | 701 | ||
702 | if (*cur != '@') { | 702 | if (*cur != '@') { |
703 | /* dst port */ | 703 | /* dst port */ |
704 | if ((delim = strchr(cur, '@')) == NULL) | 704 | if ((delim = strchr(cur, '@')) == NULL) |
705 | goto parse_failed; | 705 | goto parse_failed; |
706 | *delim = 0; | 706 | *delim = 0; |
707 | if (*cur == ' ' || *cur == '\t') | 707 | if (*cur == ' ' || *cur == '\t') |
708 | np_info(np, "warning: whitespace is not allowed\n"); | 708 | np_info(np, "warning: whitespace is not allowed\n"); |
709 | np->remote_port = simple_strtol(cur, NULL, 10); | ||
710 | if (kstrtou16(cur, 10, &np->remote_port)) | 709 | if (kstrtou16(cur, 10, &np->remote_port)) |
711 | goto parse_failed; | 710 | goto parse_failed; |
712 | cur = delim; | 711 | cur = delim; |
713 | } | 712 | } |
714 | cur++; | 713 | cur++; |
715 | 714 | ||
716 | /* dst ip */ | 715 | /* dst ip */ |
717 | if ((delim = strchr(cur, '/')) == NULL) | 716 | if ((delim = strchr(cur, '/')) == NULL) |
718 | goto parse_failed; | 717 | goto parse_failed; |
719 | *delim = 0; | 718 | *delim = 0; |
720 | np->remote_ip = in_aton(cur); | 719 | np->remote_ip = in_aton(cur); |
721 | cur = delim + 1; | 720 | cur = delim + 1; |
722 | 721 | ||
723 | if (*cur != 0) { | 722 | if (*cur != 0) { |
724 | /* MAC address */ | 723 | /* MAC address */ |
725 | if (!mac_pton(cur, np->remote_mac)) | 724 | if (!mac_pton(cur, np->remote_mac)) |
726 | goto parse_failed; | 725 | goto parse_failed; |
727 | } | 726 | } |
728 | 727 | ||
729 | netpoll_print_options(np); | 728 | netpoll_print_options(np); |
730 | 729 | ||
731 | return 0; | 730 | return 0; |
732 | 731 | ||
733 | parse_failed: | 732 | parse_failed: |
734 | np_info(np, "couldn't parse config at '%s'!\n", cur); | 733 | np_info(np, "couldn't parse config at '%s'!\n", cur); |
735 | return -1; | 734 | return -1; |
736 | } | 735 | } |
737 | EXPORT_SYMBOL(netpoll_parse_options); | 736 | EXPORT_SYMBOL(netpoll_parse_options); |
738 | 737 | ||
739 | int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) | 738 | int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) |
740 | { | 739 | { |
741 | struct netpoll_info *npinfo; | 740 | struct netpoll_info *npinfo; |
742 | const struct net_device_ops *ops; | 741 | const struct net_device_ops *ops; |
743 | unsigned long flags; | 742 | unsigned long flags; |
744 | int err; | 743 | int err; |
745 | 744 | ||
746 | np->dev = ndev; | 745 | np->dev = ndev; |
747 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); | 746 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); |
748 | 747 | ||
749 | if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || | 748 | if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || |
750 | !ndev->netdev_ops->ndo_poll_controller) { | 749 | !ndev->netdev_ops->ndo_poll_controller) { |
751 | np_err(np, "%s doesn't support polling, aborting\n", | 750 | np_err(np, "%s doesn't support polling, aborting\n", |
752 | np->dev_name); | 751 | np->dev_name); |
753 | err = -ENOTSUPP; | 752 | err = -ENOTSUPP; |
754 | goto out; | 753 | goto out; |
755 | } | 754 | } |
756 | 755 | ||
757 | if (!ndev->npinfo) { | 756 | if (!ndev->npinfo) { |
758 | npinfo = kmalloc(sizeof(*npinfo), gfp); | 757 | npinfo = kmalloc(sizeof(*npinfo), gfp); |
759 | if (!npinfo) { | 758 | if (!npinfo) { |
760 | err = -ENOMEM; | 759 | err = -ENOMEM; |
761 | goto out; | 760 | goto out; |
762 | } | 761 | } |
763 | 762 | ||
764 | npinfo->rx_flags = 0; | 763 | npinfo->rx_flags = 0; |
765 | INIT_LIST_HEAD(&npinfo->rx_np); | 764 | INIT_LIST_HEAD(&npinfo->rx_np); |
766 | 765 | ||
767 | spin_lock_init(&npinfo->rx_lock); | 766 | spin_lock_init(&npinfo->rx_lock); |
768 | skb_queue_head_init(&npinfo->arp_tx); | 767 | skb_queue_head_init(&npinfo->arp_tx); |
769 | skb_queue_head_init(&npinfo->txq); | 768 | skb_queue_head_init(&npinfo->txq); |
770 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); | 769 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); |
771 | 770 | ||
772 | atomic_set(&npinfo->refcnt, 1); | 771 | atomic_set(&npinfo->refcnt, 1); |
773 | 772 | ||
774 | ops = np->dev->netdev_ops; | 773 | ops = np->dev->netdev_ops; |
775 | if (ops->ndo_netpoll_setup) { | 774 | if (ops->ndo_netpoll_setup) { |
776 | err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); | 775 | err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); |
777 | if (err) | 776 | if (err) |
778 | goto free_npinfo; | 777 | goto free_npinfo; |
779 | } | 778 | } |
780 | } else { | 779 | } else { |
781 | npinfo = ndev->npinfo; | 780 | npinfo = ndev->npinfo; |
782 | atomic_inc(&npinfo->refcnt); | 781 | atomic_inc(&npinfo->refcnt); |
783 | } | 782 | } |
784 | 783 | ||
785 | npinfo->netpoll = np; | 784 | npinfo->netpoll = np; |
786 | 785 | ||
787 | if (np->rx_hook) { | 786 | if (np->rx_hook) { |
788 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 787 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
789 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; | 788 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; |
790 | list_add_tail(&np->rx, &npinfo->rx_np); | 789 | list_add_tail(&np->rx, &npinfo->rx_np); |
791 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 790 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
792 | } | 791 | } |
793 | 792 | ||
794 | /* last thing to do is link it to the net device structure */ | 793 | /* last thing to do is link it to the net device structure */ |
795 | rcu_assign_pointer(ndev->npinfo, npinfo); | 794 | rcu_assign_pointer(ndev->npinfo, npinfo); |
796 | 795 | ||
797 | return 0; | 796 | return 0; |
798 | 797 | ||
799 | free_npinfo: | 798 | free_npinfo: |
800 | kfree(npinfo); | 799 | kfree(npinfo); |
801 | out: | 800 | out: |
802 | return err; | 801 | return err; |
803 | } | 802 | } |
804 | EXPORT_SYMBOL_GPL(__netpoll_setup); | 803 | EXPORT_SYMBOL_GPL(__netpoll_setup); |
805 | 804 | ||
806 | int netpoll_setup(struct netpoll *np) | 805 | int netpoll_setup(struct netpoll *np) |
807 | { | 806 | { |
808 | struct net_device *ndev = NULL; | 807 | struct net_device *ndev = NULL; |
809 | struct in_device *in_dev; | 808 | struct in_device *in_dev; |
810 | int err; | 809 | int err; |
811 | 810 | ||
812 | if (np->dev_name) | 811 | if (np->dev_name) |
813 | ndev = dev_get_by_name(&init_net, np->dev_name); | 812 | ndev = dev_get_by_name(&init_net, np->dev_name); |
814 | if (!ndev) { | 813 | if (!ndev) { |
815 | np_err(np, "%s doesn't exist, aborting\n", np->dev_name); | 814 | np_err(np, "%s doesn't exist, aborting\n", np->dev_name); |
816 | return -ENODEV; | 815 | return -ENODEV; |
817 | } | 816 | } |
818 | 817 | ||
819 | if (ndev->master) { | 818 | if (ndev->master) { |
820 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); | 819 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); |
821 | err = -EBUSY; | 820 | err = -EBUSY; |
822 | goto put; | 821 | goto put; |
823 | } | 822 | } |
824 | 823 | ||
825 | if (!netif_running(ndev)) { | 824 | if (!netif_running(ndev)) { |
826 | unsigned long atmost, atleast; | 825 | unsigned long atmost, atleast; |
827 | 826 | ||
828 | np_info(np, "device %s not up yet, forcing it\n", np->dev_name); | 827 | np_info(np, "device %s not up yet, forcing it\n", np->dev_name); |
829 | 828 | ||
830 | rtnl_lock(); | 829 | rtnl_lock(); |
831 | err = dev_open(ndev); | 830 | err = dev_open(ndev); |
832 | rtnl_unlock(); | 831 | rtnl_unlock(); |
833 | 832 | ||
834 | if (err) { | 833 | if (err) { |
835 | np_err(np, "failed to open %s\n", ndev->name); | 834 | np_err(np, "failed to open %s\n", ndev->name); |
836 | goto put; | 835 | goto put; |
837 | } | 836 | } |
838 | 837 | ||
839 | atleast = jiffies + HZ/10; | 838 | atleast = jiffies + HZ/10; |
840 | atmost = jiffies + carrier_timeout * HZ; | 839 | atmost = jiffies + carrier_timeout * HZ; |
841 | while (!netif_carrier_ok(ndev)) { | 840 | while (!netif_carrier_ok(ndev)) { |
842 | if (time_after(jiffies, atmost)) { | 841 | if (time_after(jiffies, atmost)) { |
843 | np_notice(np, "timeout waiting for carrier\n"); | 842 | np_notice(np, "timeout waiting for carrier\n"); |
844 | break; | 843 | break; |
845 | } | 844 | } |
846 | msleep(1); | 845 | msleep(1); |
847 | } | 846 | } |
848 | 847 | ||
849 | /* If carrier appears to come up instantly, we don't | 848 | /* If carrier appears to come up instantly, we don't |
850 | * trust it and pause so that we don't pump all our | 849 | * trust it and pause so that we don't pump all our |
851 | * queued console messages into the bitbucket. | 850 | * queued console messages into the bitbucket. |
852 | */ | 851 | */ |
853 | 852 | ||
854 | if (time_before(jiffies, atleast)) { | 853 | if (time_before(jiffies, atleast)) { |
855 | np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n"); | 854 | np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n"); |
856 | msleep(4000); | 855 | msleep(4000); |
857 | } | 856 | } |
858 | } | 857 | } |
859 | 858 | ||
860 | if (!np->local_ip) { | 859 | if (!np->local_ip) { |
861 | rcu_read_lock(); | 860 | rcu_read_lock(); |
862 | in_dev = __in_dev_get_rcu(ndev); | 861 | in_dev = __in_dev_get_rcu(ndev); |
863 | 862 | ||
864 | if (!in_dev || !in_dev->ifa_list) { | 863 | if (!in_dev || !in_dev->ifa_list) { |
865 | rcu_read_unlock(); | 864 | rcu_read_unlock(); |
866 | np_err(np, "no IP address for %s, aborting\n", | 865 | np_err(np, "no IP address for %s, aborting\n", |
867 | np->dev_name); | 866 | np->dev_name); |
868 | err = -EDESTADDRREQ; | 867 | err = -EDESTADDRREQ; |
869 | goto put; | 868 | goto put; |
870 | } | 869 | } |
871 | 870 | ||
872 | np->local_ip = in_dev->ifa_list->ifa_local; | 871 | np->local_ip = in_dev->ifa_list->ifa_local; |
873 | rcu_read_unlock(); | 872 | rcu_read_unlock(); |
874 | np_info(np, "local IP %pI4\n", &np->local_ip); | 873 | np_info(np, "local IP %pI4\n", &np->local_ip); |
875 | } | 874 | } |
876 | 875 | ||
877 | /* fill up the skb queue */ | 876 | /* fill up the skb queue */ |
878 | refill_skbs(); | 877 | refill_skbs(); |
879 | 878 | ||
880 | rtnl_lock(); | 879 | rtnl_lock(); |
881 | err = __netpoll_setup(np, ndev, GFP_KERNEL); | 880 | err = __netpoll_setup(np, ndev, GFP_KERNEL); |
882 | rtnl_unlock(); | 881 | rtnl_unlock(); |
883 | 882 | ||
884 | if (err) | 883 | if (err) |
885 | goto put; | 884 | goto put; |
886 | 885 | ||
887 | return 0; | 886 | return 0; |
888 | 887 | ||
889 | put: | 888 | put: |
890 | dev_put(ndev); | 889 | dev_put(ndev); |
891 | return err; | 890 | return err; |
892 | } | 891 | } |
893 | EXPORT_SYMBOL(netpoll_setup); | 892 | EXPORT_SYMBOL(netpoll_setup); |
894 | 893 | ||
895 | static int __init netpoll_init(void) | 894 | static int __init netpoll_init(void) |
896 | { | 895 | { |
897 | skb_queue_head_init(&skb_pool); | 896 | skb_queue_head_init(&skb_pool); |
898 | return 0; | 897 | return 0; |
899 | } | 898 | } |
900 | core_initcall(netpoll_init); | 899 | core_initcall(netpoll_init); |
901 | 900 | ||
902 | static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) | 901 | static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) |
903 | { | 902 | { |
904 | struct netpoll_info *npinfo = | 903 | struct netpoll_info *npinfo = |
905 | container_of(rcu_head, struct netpoll_info, rcu); | 904 | container_of(rcu_head, struct netpoll_info, rcu); |
906 | 905 | ||
907 | skb_queue_purge(&npinfo->arp_tx); | 906 | skb_queue_purge(&npinfo->arp_tx); |
908 | skb_queue_purge(&npinfo->txq); | 907 | skb_queue_purge(&npinfo->txq); |
909 | 908 | ||
910 | /* we can't call cancel_delayed_work_sync here, as we are in softirq */ | 909 | /* we can't call cancel_delayed_work_sync here, as we are in softirq */ |
911 | cancel_delayed_work(&npinfo->tx_work); | 910 | cancel_delayed_work(&npinfo->tx_work); |
912 | 911 | ||
913 | /* clean after last, unfinished work */ | 912 | /* clean after last, unfinished work */ |
914 | __skb_queue_purge(&npinfo->txq); | 913 | __skb_queue_purge(&npinfo->txq); |
915 | /* now cancel it again */ | 914 | /* now cancel it again */ |
916 | cancel_delayed_work(&npinfo->tx_work); | 915 | cancel_delayed_work(&npinfo->tx_work); |
917 | kfree(npinfo); | 916 | kfree(npinfo); |
918 | } | 917 | } |
919 | 918 | ||
920 | void __netpoll_cleanup(struct netpoll *np) | 919 | void __netpoll_cleanup(struct netpoll *np) |
921 | { | 920 | { |
922 | struct netpoll_info *npinfo; | 921 | struct netpoll_info *npinfo; |
923 | unsigned long flags; | 922 | unsigned long flags; |
924 | 923 | ||
925 | npinfo = np->dev->npinfo; | 924 | npinfo = np->dev->npinfo; |
926 | if (!npinfo) | 925 | if (!npinfo) |
927 | return; | 926 | return; |
928 | 927 | ||
929 | if (!list_empty(&npinfo->rx_np)) { | 928 | if (!list_empty(&npinfo->rx_np)) { |
930 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 929 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
931 | list_del(&np->rx); | 930 | list_del(&np->rx); |
932 | if (list_empty(&npinfo->rx_np)) | 931 | if (list_empty(&npinfo->rx_np)) |
933 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | 932 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; |
934 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 933 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
935 | } | 934 | } |
936 | 935 | ||
937 | if (atomic_dec_and_test(&npinfo->refcnt)) { | 936 | if (atomic_dec_and_test(&npinfo->refcnt)) { |
938 | const struct net_device_ops *ops; | 937 | const struct net_device_ops *ops; |
939 | 938 | ||
940 | ops = np->dev->netdev_ops; | 939 | ops = np->dev->netdev_ops; |
941 | if (ops->ndo_netpoll_cleanup) | 940 | if (ops->ndo_netpoll_cleanup) |
942 | ops->ndo_netpoll_cleanup(np->dev); | 941 | ops->ndo_netpoll_cleanup(np->dev); |
943 | 942 | ||
944 | RCU_INIT_POINTER(np->dev->npinfo, NULL); | 943 | RCU_INIT_POINTER(np->dev->npinfo, NULL); |
945 | call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); | 944 | call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); |
946 | } | 945 | } |
947 | } | 946 | } |
948 | EXPORT_SYMBOL_GPL(__netpoll_cleanup); | 947 | EXPORT_SYMBOL_GPL(__netpoll_cleanup); |
949 | 948 | ||
950 | static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) | 949 | static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) |
951 | { | 950 | { |
952 | struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); | 951 | struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); |
953 | 952 | ||
954 | __netpoll_cleanup(np); | 953 | __netpoll_cleanup(np); |
955 | kfree(np); | 954 | kfree(np); |
956 | } | 955 | } |
957 | 956 | ||
958 | void __netpoll_free_rcu(struct netpoll *np) | 957 | void __netpoll_free_rcu(struct netpoll *np) |
959 | { | 958 | { |
960 | call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); | 959 | call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); |
961 | } | 960 | } |
962 | EXPORT_SYMBOL_GPL(__netpoll_free_rcu); | 961 | EXPORT_SYMBOL_GPL(__netpoll_free_rcu); |
963 | 962 | ||
964 | void netpoll_cleanup(struct netpoll *np) | 963 | void netpoll_cleanup(struct netpoll *np) |
965 | { | 964 | { |
966 | if (!np->dev) | 965 | if (!np->dev) |
967 | return; | 966 | return; |
968 | 967 | ||
969 | rtnl_lock(); | 968 | rtnl_lock(); |
970 | __netpoll_cleanup(np); | 969 | __netpoll_cleanup(np); |
971 | rtnl_unlock(); | 970 | rtnl_unlock(); |
972 | 971 | ||
973 | dev_put(np->dev); | 972 | dev_put(np->dev); |
974 | np->dev = NULL; | 973 | np->dev = NULL; |
975 | } | 974 | } |
976 | EXPORT_SYMBOL(netpoll_cleanup); | 975 | EXPORT_SYMBOL(netpoll_cleanup); |
977 | 976 | ||
978 | int netpoll_trap(void) | 977 | int netpoll_trap(void) |
979 | { | 978 | { |
980 | return atomic_read(&trapped); | 979 | return atomic_read(&trapped); |
981 | } | 980 | } |
982 | EXPORT_SYMBOL(netpoll_trap); | 981 | EXPORT_SYMBOL(netpoll_trap); |
983 | 982 | ||
984 | void netpoll_set_trap(int trap) | 983 | void netpoll_set_trap(int trap) |
985 | { | 984 | { |
986 | if (trap) | 985 | if (trap) |
987 | atomic_inc(&trapped); | 986 | atomic_inc(&trapped); |
988 | else | 987 | else |
989 | atomic_dec(&trapped); | 988 | atomic_dec(&trapped); |
990 | } | 989 | } |
991 | EXPORT_SYMBOL(netpoll_set_trap); | 990 | EXPORT_SYMBOL(netpoll_set_trap); |
992 | 991 |
net/mac80211/debugfs_sta.c
1 | /* | 1 | /* |
2 | * Copyright 2003-2005 Devicescape Software, Inc. | 2 | * Copyright 2003-2005 Devicescape Software, Inc. |
3 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> | 3 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> |
4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/debugfs.h> | 11 | #include <linux/debugfs.h> |
12 | #include <linux/ieee80211.h> | 12 | #include <linux/ieee80211.h> |
13 | #include "ieee80211_i.h" | 13 | #include "ieee80211_i.h" |
14 | #include "debugfs.h" | 14 | #include "debugfs.h" |
15 | #include "debugfs_sta.h" | 15 | #include "debugfs_sta.h" |
16 | #include "sta_info.h" | 16 | #include "sta_info.h" |
17 | #include "driver-ops.h" | 17 | #include "driver-ops.h" |
18 | 18 | ||
19 | /* sta attributes */ | 19 | /* sta attributes */ |
20 | 20 | ||
21 | #define STA_READ(name, field, format_string) \ | 21 | #define STA_READ(name, field, format_string) \ |
22 | static ssize_t sta_ ##name## _read(struct file *file, \ | 22 | static ssize_t sta_ ##name## _read(struct file *file, \ |
23 | char __user *userbuf, \ | 23 | char __user *userbuf, \ |
24 | size_t count, loff_t *ppos) \ | 24 | size_t count, loff_t *ppos) \ |
25 | { \ | 25 | { \ |
26 | struct sta_info *sta = file->private_data; \ | 26 | struct sta_info *sta = file->private_data; \ |
27 | return mac80211_format_buffer(userbuf, count, ppos, \ | 27 | return mac80211_format_buffer(userbuf, count, ppos, \ |
28 | format_string, sta->field); \ | 28 | format_string, sta->field); \ |
29 | } | 29 | } |
30 | #define STA_READ_D(name, field) STA_READ(name, field, "%d\n") | 30 | #define STA_READ_D(name, field) STA_READ(name, field, "%d\n") |
31 | #define STA_READ_U(name, field) STA_READ(name, field, "%u\n") | 31 | #define STA_READ_U(name, field) STA_READ(name, field, "%u\n") |
32 | #define STA_READ_S(name, field) STA_READ(name, field, "%s\n") | 32 | #define STA_READ_S(name, field) STA_READ(name, field, "%s\n") |
33 | 33 | ||
34 | #define STA_OPS(name) \ | 34 | #define STA_OPS(name) \ |
35 | static const struct file_operations sta_ ##name## _ops = { \ | 35 | static const struct file_operations sta_ ##name## _ops = { \ |
36 | .read = sta_##name##_read, \ | 36 | .read = sta_##name##_read, \ |
37 | .open = simple_open, \ | 37 | .open = simple_open, \ |
38 | .llseek = generic_file_llseek, \ | 38 | .llseek = generic_file_llseek, \ |
39 | } | 39 | } |
40 | 40 | ||
41 | #define STA_OPS_RW(name) \ | 41 | #define STA_OPS_RW(name) \ |
42 | static const struct file_operations sta_ ##name## _ops = { \ | 42 | static const struct file_operations sta_ ##name## _ops = { \ |
43 | .read = sta_##name##_read, \ | 43 | .read = sta_##name##_read, \ |
44 | .write = sta_##name##_write, \ | 44 | .write = sta_##name##_write, \ |
45 | .open = simple_open, \ | 45 | .open = simple_open, \ |
46 | .llseek = generic_file_llseek, \ | 46 | .llseek = generic_file_llseek, \ |
47 | } | 47 | } |
48 | 48 | ||
49 | #define STA_FILE(name, field, format) \ | 49 | #define STA_FILE(name, field, format) \ |
50 | STA_READ_##format(name, field) \ | 50 | STA_READ_##format(name, field) \ |
51 | STA_OPS(name) | 51 | STA_OPS(name) |
52 | 52 | ||
53 | STA_FILE(aid, sta.aid, D); | 53 | STA_FILE(aid, sta.aid, D); |
54 | STA_FILE(dev, sdata->name, S); | 54 | STA_FILE(dev, sdata->name, S); |
55 | STA_FILE(last_signal, last_signal, D); | 55 | STA_FILE(last_signal, last_signal, D); |
56 | STA_FILE(last_ack_signal, last_ack_signal, D); | 56 | STA_FILE(last_ack_signal, last_ack_signal, D); |
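Note (illustration only, not part of the diff): under the STA_READ/STA_OPS/STA_FILE macros above, a line such as STA_FILE(aid, sta.aid, D) expands to roughly the following read handler and file_operations pair. This is an approximate hand expansion for orientation, not code taken from the kernel tree:

	/* approximate expansion of STA_FILE(aid, sta.aid, D) */
	static ssize_t sta_aid_read(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
	{
		struct sta_info *sta = file->private_data;
		return mac80211_format_buffer(userbuf, count, ppos,
					      "%d\n", sta->sta.aid);
	}

	static const struct file_operations sta_aid_ops = {
		.read	= sta_aid_read,
		.open	= simple_open,
		.llseek	= generic_file_llseek,
	};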
57 | 57 | ||
58 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 58 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
59 | size_t count, loff_t *ppos) | 59 | size_t count, loff_t *ppos) |
60 | { | 60 | { |
61 | char buf[121]; | 61 | char buf[121]; |
62 | struct sta_info *sta = file->private_data; | 62 | struct sta_info *sta = file->private_data; |
63 | 63 | ||
64 | #define TEST(flg) \ | 64 | #define TEST(flg) \ |
65 | test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" | 65 | test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" |
66 | 66 | ||
67 | int res = scnprintf(buf, sizeof(buf), | 67 | int res = scnprintf(buf, sizeof(buf), |
68 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | 68 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", |
69 | TEST(AUTH), TEST(ASSOC), TEST(PS_STA), | 69 | TEST(AUTH), TEST(ASSOC), TEST(PS_STA), |
70 | TEST(PS_DRIVER), TEST(AUTHORIZED), | 70 | TEST(PS_DRIVER), TEST(AUTHORIZED), |
71 | TEST(SHORT_PREAMBLE), | 71 | TEST(SHORT_PREAMBLE), |
72 | TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), | 72 | TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), |
73 | TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), | 73 | TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), |
74 | TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), | 74 | TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), |
75 | TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), | 75 | TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), |
76 | TEST(INSERTED), TEST(RATE_CONTROL), | 76 | TEST(INSERTED), TEST(RATE_CONTROL), |
77 | TEST(TOFFSET_KNOWN)); | 77 | TEST(TOFFSET_KNOWN)); |
78 | #undef TEST | 78 | #undef TEST |
79 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); | 79 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); |
80 | } | 80 | } |
81 | STA_OPS(flags); | 81 | STA_OPS(flags); |
82 | 82 | ||
83 | static ssize_t sta_num_ps_buf_frames_read(struct file *file, | 83 | static ssize_t sta_num_ps_buf_frames_read(struct file *file, |
84 | char __user *userbuf, | 84 | char __user *userbuf, |
85 | size_t count, loff_t *ppos) | 85 | size_t count, loff_t *ppos) |
86 | { | 86 | { |
87 | struct sta_info *sta = file->private_data; | 87 | struct sta_info *sta = file->private_data; |
88 | char buf[17*IEEE80211_NUM_ACS], *p = buf; | 88 | char buf[17*IEEE80211_NUM_ACS], *p = buf; |
89 | int ac; | 89 | int ac; |
90 | 90 | ||
91 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) | 91 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) |
92 | p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac, | 92 | p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac, |
93 | skb_queue_len(&sta->ps_tx_buf[ac]) + | 93 | skb_queue_len(&sta->ps_tx_buf[ac]) + |
94 | skb_queue_len(&sta->tx_filtered[ac])); | 94 | skb_queue_len(&sta->tx_filtered[ac])); |
95 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | 95 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); |
96 | } | 96 | } |
97 | STA_OPS(num_ps_buf_frames); | 97 | STA_OPS(num_ps_buf_frames); |
98 | 98 | ||
99 | static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, | 99 | static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, |
100 | size_t count, loff_t *ppos) | 100 | size_t count, loff_t *ppos) |
101 | { | 101 | { |
102 | struct sta_info *sta = file->private_data; | 102 | struct sta_info *sta = file->private_data; |
103 | return mac80211_format_buffer(userbuf, count, ppos, "%d\n", | 103 | return mac80211_format_buffer(userbuf, count, ppos, "%d\n", |
104 | jiffies_to_msecs(jiffies - sta->last_rx)); | 104 | jiffies_to_msecs(jiffies - sta->last_rx)); |
105 | } | 105 | } |
106 | STA_OPS(inactive_ms); | 106 | STA_OPS(inactive_ms); |
107 | 107 | ||
108 | 108 | ||
109 | static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf, | 109 | static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf, |
110 | size_t count, loff_t *ppos) | 110 | size_t count, loff_t *ppos) |
111 | { | 111 | { |
112 | struct sta_info *sta = file->private_data; | 112 | struct sta_info *sta = file->private_data; |
113 | struct timespec uptime; | 113 | struct timespec uptime; |
114 | struct tm result; | 114 | struct tm result; |
115 | long connected_time_secs; | 115 | long connected_time_secs; |
116 | char buf[100]; | 116 | char buf[100]; |
117 | int res; | 117 | int res; |
118 | do_posix_clock_monotonic_gettime(&uptime); | 118 | do_posix_clock_monotonic_gettime(&uptime); |
119 | connected_time_secs = uptime.tv_sec - sta->last_connected; | 119 | connected_time_secs = uptime.tv_sec - sta->last_connected; |
120 | time_to_tm(connected_time_secs, 0, &result); | 120 | time_to_tm(connected_time_secs, 0, &result); |
121 | result.tm_year -= 70; | 121 | result.tm_year -= 70; |
122 | result.tm_mday -= 1; | 122 | result.tm_mday -= 1; |
123 | res = scnprintf(buf, sizeof(buf), | 123 | res = scnprintf(buf, sizeof(buf), |
124 | "years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n", | 124 | "years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n", |
125 | result.tm_year, result.tm_mon, result.tm_mday, | 125 | result.tm_year, result.tm_mon, result.tm_mday, |
126 | result.tm_hour, result.tm_min, result.tm_sec); | 126 | result.tm_hour, result.tm_min, result.tm_sec); |
127 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); | 127 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); |
128 | } | 128 | } |
129 | STA_OPS(connected_time); | 129 | STA_OPS(connected_time); |
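As a sanity check on the tm_year/tm_mday adjustments above (illustration only): a station connected for 90061 seconds (one day, one hour, one minute, one second) gives time_to_tm() results of tm_year = 70, tm_mon = 0, tm_mday = 2; after subtracting 70 from the year and 1 from the day, the file reads "years - 0", "months - 0", "days - 1", "clock - 1:1:1".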
130 | 130 | ||
131 | 131 | ||
132 | 132 | ||
133 | static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, | 133 | static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, |
134 | size_t count, loff_t *ppos) | 134 | size_t count, loff_t *ppos) |
135 | { | 135 | { |
136 | char buf[15*IEEE80211_NUM_TIDS], *p = buf; | 136 | char buf[15*IEEE80211_NUM_TIDS], *p = buf; |
137 | int i; | 137 | int i; |
138 | struct sta_info *sta = file->private_data; | 138 | struct sta_info *sta = file->private_data; |
139 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) | 139 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
140 | p += scnprintf(p, sizeof(buf)+buf-p, "%x ", | 140 | p += scnprintf(p, sizeof(buf)+buf-p, "%x ", |
141 | le16_to_cpu(sta->last_seq_ctrl[i])); | 141 | le16_to_cpu(sta->last_seq_ctrl[i])); |
142 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | 142 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); |
143 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | 143 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); |
144 | } | 144 | } |
145 | STA_OPS(last_seq_ctrl); | 145 | STA_OPS(last_seq_ctrl); |
146 | 146 | ||
147 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | 147 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
148 | size_t count, loff_t *ppos) | 148 | size_t count, loff_t *ppos) |
149 | { | 149 | { |
150 | char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf; | 150 | char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf; |
151 | int i; | 151 | int i; |
152 | struct sta_info *sta = file->private_data; | 152 | struct sta_info *sta = file->private_data; |
153 | struct tid_ampdu_rx *tid_rx; | 153 | struct tid_ampdu_rx *tid_rx; |
154 | struct tid_ampdu_tx *tid_tx; | 154 | struct tid_ampdu_tx *tid_tx; |
155 | 155 | ||
156 | rcu_read_lock(); | 156 | rcu_read_lock(); |
157 | 157 | ||
158 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", | 158 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", |
159 | sta->ampdu_mlme.dialog_token_allocator + 1); | 159 | sta->ampdu_mlme.dialog_token_allocator + 1); |
160 | p += scnprintf(p, sizeof(buf) + buf - p, | 160 | p += scnprintf(p, sizeof(buf) + buf - p, |
161 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); | 161 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); |
162 | 162 | ||
163 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { | 163 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
164 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); | 164 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); |
165 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); | 165 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); |
166 | 166 | ||
167 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); | 167 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); |
168 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_rx); | 168 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_rx); |
169 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 169 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
170 | tid_rx ? tid_rx->dialog_token : 0); | 170 | tid_rx ? tid_rx->dialog_token : 0); |
171 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | 171 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", |
172 | tid_rx ? tid_rx->ssn : 0); | 172 | tid_rx ? tid_rx->ssn : 0); |
173 | 173 | ||
174 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx); | 174 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx); |
175 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 175 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
176 | tid_tx ? tid_tx->dialog_token : 0); | 176 | tid_tx ? tid_tx->dialog_token : 0); |
177 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", | 177 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", |
178 | tid_tx ? skb_queue_len(&tid_tx->pending) : 0); | 178 | tid_tx ? skb_queue_len(&tid_tx->pending) : 0); |
179 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); | 179 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); |
180 | } | 180 | } |
181 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
182 | 182 | ||
183 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | 183 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); |
184 | } | 184 | } |
185 | 185 | ||
186 | static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, | 186 | static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, |
187 | size_t count, loff_t *ppos) | 187 | size_t count, loff_t *ppos) |
188 | { | 188 | { |
189 | char _buf[12], *buf = _buf; | 189 | char _buf[12], *buf = _buf; |
190 | struct sta_info *sta = file->private_data; | 190 | struct sta_info *sta = file->private_data; |
191 | bool start, tx; | 191 | bool start, tx; |
192 | unsigned long tid; | 192 | unsigned long tid; |
193 | int ret; | 193 | int ret; |
194 | 194 | ||
195 | if (count > sizeof(_buf)) | 195 | if (count > sizeof(_buf)) |
196 | return -EINVAL; | 196 | return -EINVAL; |
197 | 197 | ||
198 | if (copy_from_user(buf, userbuf, count)) | 198 | if (copy_from_user(buf, userbuf, count)) |
199 | return -EFAULT; | 199 | return -EFAULT; |
200 | 200 | ||
201 | buf[sizeof(_buf) - 1] = '\0'; | 201 | buf[sizeof(_buf) - 1] = '\0'; |
202 | 202 | ||
203 | if (strncmp(buf, "tx ", 3) == 0) { | 203 | if (strncmp(buf, "tx ", 3) == 0) { |
204 | buf += 3; | 204 | buf += 3; |
205 | tx = true; | 205 | tx = true; |
206 | } else if (strncmp(buf, "rx ", 3) == 0) { | 206 | } else if (strncmp(buf, "rx ", 3) == 0) { |
207 | buf += 3; | 207 | buf += 3; |
208 | tx = false; | 208 | tx = false; |
209 | } else | 209 | } else |
210 | return -EINVAL; | 210 | return -EINVAL; |
211 | 211 | ||
212 | if (strncmp(buf, "start ", 6) == 0) { | 212 | if (strncmp(buf, "start ", 6) == 0) { |
213 | buf += 6; | 213 | buf += 6; |
214 | start = true; | 214 | start = true; |
215 | if (!tx) | 215 | if (!tx) |
216 | return -EINVAL; | 216 | return -EINVAL; |
217 | } else if (strncmp(buf, "stop ", 5) == 0) { | 217 | } else if (strncmp(buf, "stop ", 5) == 0) { |
218 | buf += 5; | 218 | buf += 5; |
219 | start = false; | 219 | start = false; |
220 | } else | 220 | } else |
221 | return -EINVAL; | 221 | return -EINVAL; |
222 | 222 | ||
223 | tid = simple_strtoul(buf, NULL, 0); | ||
224 | ret = kstrtoul(buf, 0, &tid); | 223 | ret = kstrtoul(buf, 0, &tid); |
225 | if (ret) | 224 | if (ret) |
226 | return ret; | 225 | return ret; |
227 | 226 | ||
228 | if (tid >= IEEE80211_NUM_TIDS) | 227 | if (tid >= IEEE80211_NUM_TIDS) |
229 | return -EINVAL; | 228 | return -EINVAL; |
230 | 229 | ||
231 | if (tx) { | 230 | if (tx) { |
232 | if (start) | 231 | if (start) |
233 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000); | 232 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000); |
234 | else | 233 | else |
235 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); | 234 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); |
236 | } else { | 235 | } else { |
237 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, | 236 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, |
238 | 3, true); | 237 | 3, true); |
239 | ret = 0; | 238 | ret = 0; |
240 | } | 239 | } |
241 | 240 | ||
242 | return ret ?: count; | 241 | return ret ?: count; |
243 | } | 242 | } |
244 | STA_OPS_RW(agg_status); | 243 | STA_OPS_RW(agg_status); |
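The hunk above carries the only functional change in this file: the leftover simple_strtoul() call was redundant because the kstrtoul() on the next line already performs the conversion, and unlike simple_strtoul() it reports malformed or out-of-range input. A minimal sketch of the checked-conversion pattern (hypothetical helper, not part of this commit):

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/ieee80211.h>

	/* Parse a TID string written to debugfs; kstrtoul() rejects garbage
	 * and overflow on its own, so no simple_strtoul() pre-pass is needed. */
	static int parse_tid(const char *buf, unsigned long *tid)
	{
		int ret;

		ret = kstrtoul(buf, 0, tid);	/* base 0: accepts 0x.., 0.., decimal */
		if (ret)
			return ret;		/* -EINVAL or -ERANGE */

		if (*tid >= IEEE80211_NUM_TIDS)
			return -EINVAL;		/* range check stays with the caller */

		return 0;
	}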
245 | 244 | ||
246 | static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, | 245 | static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, |
247 | size_t count, loff_t *ppos) | 246 | size_t count, loff_t *ppos) |
248 | { | 247 | { |
249 | #define PRINT_HT_CAP(_cond, _str) \ | 248 | #define PRINT_HT_CAP(_cond, _str) \ |
250 | do { \ | 249 | do { \ |
251 | if (_cond) \ | 250 | if (_cond) \ |
252 | p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ | 251 | p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ |
253 | } while (0) | 252 | } while (0) |
254 | char buf[512], *p = buf; | 253 | char buf[512], *p = buf; |
255 | int i; | 254 | int i; |
256 | struct sta_info *sta = file->private_data; | 255 | struct sta_info *sta = file->private_data; |
257 | struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; | 256 | struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; |
258 | 257 | ||
259 | p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", | 258 | p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", |
260 | htc->ht_supported ? "" : "not "); | 259 | htc->ht_supported ? "" : "not "); |
261 | if (htc->ht_supported) { | 260 | if (htc->ht_supported) { |
262 | p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); | 261 | p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); |
263 | 262 | ||
264 | PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); | 263 | PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); |
265 | PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); | 264 | PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); |
266 | PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); | 265 | PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); |
267 | 266 | ||
268 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); | 267 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); |
269 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); | 268 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); |
270 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); | 269 | PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); |
271 | 270 | ||
272 | PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); | 271 | PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); |
273 | PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); | 272 | PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); |
274 | PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); | 273 | PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); |
275 | PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); | 274 | PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); |
276 | 275 | ||
277 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); | 276 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); |
278 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); | 277 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); |
279 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); | 278 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); |
280 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); | 279 | PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); |
281 | 280 | ||
282 | PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); | 281 | PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); |
283 | 282 | ||
284 | PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " | 283 | PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " |
285 | "3839 bytes"); | 284 | "3839 bytes"); |
286 | PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " | 285 | PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " |
287 | "7935 bytes"); | 286 | "7935 bytes"); |
288 | 287 | ||
289 | /* | 288 | /* |
290 | * For beacons and probe response this would mean the BSS | 289 | * For beacons and probe response this would mean the BSS |
291 | * does or does not allow the usage of DSSS/CCK HT40. | 290 | * does or does not allow the usage of DSSS/CCK HT40. |
292 | * Otherwise it means the STA does or does not use | 291 | * Otherwise it means the STA does or does not use |
293 | * DSSS/CCK HT40. | 292 | * DSSS/CCK HT40. |
294 | */ | 293 | */ |
295 | PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); | 294 | PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); |
296 | PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); | 295 | PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); |
297 | 296 | ||
298 | /* BIT(13) is reserved */ | 297 | /* BIT(13) is reserved */ |
299 | 298 | ||
300 | PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); | 299 | PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); |
301 | 300 | ||
302 | PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); | 301 | PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); |
303 | 302 | ||
304 | p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", | 303 | p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", |
305 | htc->ampdu_factor, htc->ampdu_density); | 304 | htc->ampdu_factor, htc->ampdu_density); |
306 | p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); | 305 | p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); |
307 | 306 | ||
308 | for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) | 307 | for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) |
309 | p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", | 308 | p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", |
310 | htc->mcs.rx_mask[i]); | 309 | htc->mcs.rx_mask[i]); |
311 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | 310 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); |
312 | 311 | ||
313 | /* If not set this is meaningless */ | 312 | /* If not set this is meaningless */ |
314 | if (le16_to_cpu(htc->mcs.rx_highest)) { | 313 | if (le16_to_cpu(htc->mcs.rx_highest)) { |
315 | p += scnprintf(p, sizeof(buf)+buf-p, | 314 | p += scnprintf(p, sizeof(buf)+buf-p, |
316 | "MCS rx highest: %d Mbps\n", | 315 | "MCS rx highest: %d Mbps\n", |
317 | le16_to_cpu(htc->mcs.rx_highest)); | 316 | le16_to_cpu(htc->mcs.rx_highest)); |
318 | } | 317 | } |
319 | 318 | ||
320 | p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", | 319 | p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", |
321 | htc->mcs.tx_params); | 320 | htc->mcs.tx_params); |
322 | } | 321 | } |
323 | 322 | ||
324 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | 323 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); |
325 | } | 324 | } |
326 | STA_OPS(ht_capa); | 325 | STA_OPS(ht_capa); |
327 | 326 | ||
328 | static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf, | 327 | static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf, |
329 | size_t count, loff_t *ppos) | 328 | size_t count, loff_t *ppos) |
330 | { | 329 | { |
331 | struct sta_info *sta = file->private_data; | 330 | struct sta_info *sta = file->private_data; |
332 | struct rate_info rinfo; | 331 | struct rate_info rinfo; |
333 | u16 rate; | 332 | u16 rate; |
334 | sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo); | 333 | sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo); |
335 | rate = cfg80211_calculate_bitrate(&rinfo); | 334 | rate = cfg80211_calculate_bitrate(&rinfo); |
336 | 335 | ||
337 | return mac80211_format_buffer(userbuf, count, ppos, | 336 | return mac80211_format_buffer(userbuf, count, ppos, |
338 | "%d.%d MBit/s\n", | 337 | "%d.%d MBit/s\n", |
339 | rate/10, rate%10); | 338 | rate/10, rate%10); |
340 | } | 339 | } |
341 | STA_OPS(current_tx_rate); | 340 | STA_OPS(current_tx_rate); |
342 | 341 | ||
343 | static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf, | 342 | static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf, |
344 | size_t count, loff_t *ppos) | 343 | size_t count, loff_t *ppos) |
345 | { | 344 | { |
346 | struct sta_info *sta = file->private_data; | 345 | struct sta_info *sta = file->private_data; |
347 | struct rate_info rinfo; | 346 | struct rate_info rinfo; |
348 | u16 rate; | 347 | u16 rate; |
349 | 348 | ||
350 | sta_set_rate_info_rx(sta, &rinfo); | 349 | sta_set_rate_info_rx(sta, &rinfo); |
351 | 350 | ||
352 | rate = cfg80211_calculate_bitrate(&rinfo); | 351 | rate = cfg80211_calculate_bitrate(&rinfo); |
353 | 352 | ||
354 | return mac80211_format_buffer(userbuf, count, ppos, | 353 | return mac80211_format_buffer(userbuf, count, ppos, |
355 | "%d.%d MBit/s\n", | 354 | "%d.%d MBit/s\n", |
356 | rate/10, rate%10); | 355 | rate/10, rate%10); |
357 | } | 356 | } |
358 | STA_OPS(last_rx_rate); | 357 | STA_OPS(last_rx_rate); |
359 | 358 | ||
360 | #define DEBUGFS_ADD(name) \ | 359 | #define DEBUGFS_ADD(name) \ |
361 | debugfs_create_file(#name, 0400, \ | 360 | debugfs_create_file(#name, 0400, \ |
362 | sta->debugfs.dir, sta, &sta_ ##name## _ops); | 361 | sta->debugfs.dir, sta, &sta_ ##name## _ops); |
363 | 362 | ||
364 | #define DEBUGFS_ADD_COUNTER(name, field) \ | 363 | #define DEBUGFS_ADD_COUNTER(name, field) \ |
365 | if (sizeof(sta->field) == sizeof(u32)) \ | 364 | if (sizeof(sta->field) == sizeof(u32)) \ |
366 | debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ | 365 | debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ |
367 | (u32 *) &sta->field); \ | 366 | (u32 *) &sta->field); \ |
368 | else \ | 367 | else \ |
369 | debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ | 368 | debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ |
370 | (u64 *) &sta->field); | 369 | (u64 *) &sta->field); |
371 | 370 | ||
372 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 371 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
373 | { | 372 | { |
374 | struct ieee80211_local *local = sta->local; | 373 | struct ieee80211_local *local = sta->local; |
375 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 374 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
376 | struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; | 375 | struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; |
377 | u8 mac[3*ETH_ALEN]; | 376 | u8 mac[3*ETH_ALEN]; |
378 | 377 | ||
379 | sta->debugfs.add_has_run = true; | 378 | sta->debugfs.add_has_run = true; |
380 | 379 | ||
381 | if (!stations_dir) | 380 | if (!stations_dir) |
382 | return; | 381 | return; |
383 | 382 | ||
384 | snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); | 383 | snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); |
385 | 384 | ||
386 | /* | 385 | /* |
387 | * This might fail due to a race condition: | 386 | * This might fail due to a race condition: |
388 | * When mac80211 unlinks a station, the debugfs entries | 387 | * When mac80211 unlinks a station, the debugfs entries |
389 | * remain, but it is already possible to link a new | 388 | * remain, but it is already possible to link a new |
390 | * station with the same address which triggers adding | 389 | * station with the same address which triggers adding |
391 | * it to debugfs; therefore, if the old station isn't | 390 | * it to debugfs; therefore, if the old station isn't |
392 | * destroyed quickly enough the old station's debugfs | 391 | * destroyed quickly enough the old station's debugfs |
393 | * dir might still be around. | 392 | * dir might still be around. |
394 | */ | 393 | */ |
395 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); | 394 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); |
396 | if (!sta->debugfs.dir) | 395 | if (!sta->debugfs.dir) |
397 | return; | 396 | return; |
398 | 397 | ||
399 | DEBUGFS_ADD(flags); | 398 | DEBUGFS_ADD(flags); |
400 | DEBUGFS_ADD(num_ps_buf_frames); | 399 | DEBUGFS_ADD(num_ps_buf_frames); |
401 | DEBUGFS_ADD(inactive_ms); | 400 | DEBUGFS_ADD(inactive_ms); |
402 | DEBUGFS_ADD(connected_time); | 401 | DEBUGFS_ADD(connected_time); |
403 | DEBUGFS_ADD(last_seq_ctrl); | 402 | DEBUGFS_ADD(last_seq_ctrl); |
404 | DEBUGFS_ADD(agg_status); | 403 | DEBUGFS_ADD(agg_status); |
405 | DEBUGFS_ADD(dev); | 404 | DEBUGFS_ADD(dev); |
406 | DEBUGFS_ADD(last_signal); | 405 | DEBUGFS_ADD(last_signal); |
407 | DEBUGFS_ADD(ht_capa); | 406 | DEBUGFS_ADD(ht_capa); |
408 | DEBUGFS_ADD(last_ack_signal); | 407 | DEBUGFS_ADD(last_ack_signal); |
409 | DEBUGFS_ADD(current_tx_rate); | 408 | DEBUGFS_ADD(current_tx_rate); |
410 | DEBUGFS_ADD(last_rx_rate); | 409 | DEBUGFS_ADD(last_rx_rate); |
411 | 410 | ||
412 | DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); | 411 | DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); |
413 | DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); | 412 | DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); |
414 | DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes); | 413 | DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes); |
415 | DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes); | 414 | DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes); |
416 | DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); | 415 | DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); |
417 | DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); | 416 | DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); |
418 | DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped); | 417 | DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped); |
419 | DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments); | 418 | DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments); |
420 | DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); | 419 | DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); |
421 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); | 420 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); |
422 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); | 421 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); |
423 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); | 422 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); |
424 | 423 | ||
425 | drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); | 424 | drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); |
426 | } | 425 | } |
427 | 426 | ||
428 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 427 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
429 | { | 428 | { |
430 | struct ieee80211_local *local = sta->local; | 429 | struct ieee80211_local *local = sta->local; |
431 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 430 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
432 | 431 | ||
433 | drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); | 432 | drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); |
434 | debugfs_remove_recursive(sta->debugfs.dir); | 433 | debugfs_remove_recursive(sta->debugfs.dir); |
435 | sta->debugfs.dir = NULL; | 434 | sta->debugfs.dir = NULL; |
436 | } | 435 | } |
437 | 436 |
net/netfilter/nf_conntrack_core.c
1 | /* Connection state tracking for netfilter. This is separated from, | 1 | /* Connection state tracking for netfilter. This is separated from, |
2 | but required by, the NAT layer; it can also be used by an iptables | 2 | but required by, the NAT layer; it can also be used by an iptables |
3 | extension. */ | 3 | extension. */ |
4 | 4 | ||
5 | /* (C) 1999-2001 Paul `Rusty' Russell | 5 | /* (C) 1999-2001 Paul `Rusty' Russell |
6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | 6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
7 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | 7 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/random.h> | 23 | #include <linux/random.h> |
24 | #include <linux/jhash.h> | 24 | #include <linux/jhash.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/percpu.h> | 26 | #include <linux/percpu.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/notifier.h> | 28 | #include <linux/notifier.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/socket.h> | 31 | #include <linux/socket.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/nsproxy.h> | 33 | #include <linux/nsproxy.h> |
34 | #include <linux/rculist_nulls.h> | 34 | #include <linux/rculist_nulls.h> |
35 | 35 | ||
36 | #include <net/netfilter/nf_conntrack.h> | 36 | #include <net/netfilter/nf_conntrack.h> |
37 | #include <net/netfilter/nf_conntrack_l3proto.h> | 37 | #include <net/netfilter/nf_conntrack_l3proto.h> |
38 | #include <net/netfilter/nf_conntrack_l4proto.h> | 38 | #include <net/netfilter/nf_conntrack_l4proto.h> |
39 | #include <net/netfilter/nf_conntrack_expect.h> | 39 | #include <net/netfilter/nf_conntrack_expect.h> |
40 | #include <net/netfilter/nf_conntrack_helper.h> | 40 | #include <net/netfilter/nf_conntrack_helper.h> |
41 | #include <net/netfilter/nf_conntrack_core.h> | 41 | #include <net/netfilter/nf_conntrack_core.h> |
42 | #include <net/netfilter/nf_conntrack_extend.h> | 42 | #include <net/netfilter/nf_conntrack_extend.h> |
43 | #include <net/netfilter/nf_conntrack_acct.h> | 43 | #include <net/netfilter/nf_conntrack_acct.h> |
44 | #include <net/netfilter/nf_conntrack_ecache.h> | 44 | #include <net/netfilter/nf_conntrack_ecache.h> |
45 | #include <net/netfilter/nf_conntrack_zones.h> | 45 | #include <net/netfilter/nf_conntrack_zones.h> |
46 | #include <net/netfilter/nf_conntrack_timestamp.h> | 46 | #include <net/netfilter/nf_conntrack_timestamp.h> |
47 | #include <net/netfilter/nf_conntrack_timeout.h> | 47 | #include <net/netfilter/nf_conntrack_timeout.h> |
48 | #include <net/netfilter/nf_nat.h> | 48 | #include <net/netfilter/nf_nat.h> |
49 | #include <net/netfilter/nf_nat_core.h> | 49 | #include <net/netfilter/nf_nat_core.h> |
50 | 50 | ||
51 | #define NF_CONNTRACK_VERSION "0.5.0" | 51 | #define NF_CONNTRACK_VERSION "0.5.0" |
52 | 52 | ||
53 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, | 53 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, |
54 | enum nf_nat_manip_type manip, | 54 | enum nf_nat_manip_type manip, |
55 | const struct nlattr *attr) __read_mostly; | 55 | const struct nlattr *attr) __read_mostly; |
56 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); | 56 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); |
57 | 57 | ||
58 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, | 58 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, |
59 | struct nf_conn *ct, | 59 | struct nf_conn *ct, |
60 | enum ip_conntrack_info ctinfo, | 60 | enum ip_conntrack_info ctinfo, |
61 | unsigned int protoff); | 61 | unsigned int protoff); |
62 | EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook); | 62 | EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook); |
63 | 63 | ||
64 | DEFINE_SPINLOCK(nf_conntrack_lock); | 64 | DEFINE_SPINLOCK(nf_conntrack_lock); |
65 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); | 65 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); |
66 | 66 | ||
67 | unsigned int nf_conntrack_htable_size __read_mostly; | 67 | unsigned int nf_conntrack_htable_size __read_mostly; |
68 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); | 68 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); |
69 | 69 | ||
70 | unsigned int nf_conntrack_max __read_mostly; | 70 | unsigned int nf_conntrack_max __read_mostly; |
71 | EXPORT_SYMBOL_GPL(nf_conntrack_max); | 71 | EXPORT_SYMBOL_GPL(nf_conntrack_max); |
72 | 72 | ||
73 | DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); | 73 | DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); |
74 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); | 74 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); |
75 | 75 | ||
76 | unsigned int nf_conntrack_hash_rnd __read_mostly; | 76 | unsigned int nf_conntrack_hash_rnd __read_mostly; |
77 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); | 77 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); |
78 | 78 | ||
79 | static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) | 79 | static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) |
80 | { | 80 | { |
81 | unsigned int n; | 81 | unsigned int n; |
82 | 82 | ||
83 | /* The direction must be ignored, so we hash everything up to the | 83 | /* The direction must be ignored, so we hash everything up to the |
84 | * destination ports (which is a multiple of 4) and treat the last | 84 | * destination ports (which is a multiple of 4) and treat the last |
85 | * three bytes manually. | 85 | * three bytes manually. |
86 | */ | 86 | */ |
87 | n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); | 87 | n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); |
88 | return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^ | 88 | return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^ |
89 | (((__force __u16)tuple->dst.u.all << 16) | | 89 | (((__force __u16)tuple->dst.u.all << 16) | |
90 | tuple->dst.protonum)); | 90 | tuple->dst.protonum)); |
91 | } | 91 | } |
92 | 92 | ||
93 | static u32 __hash_bucket(u32 hash, unsigned int size) | 93 | static u32 __hash_bucket(u32 hash, unsigned int size) |
94 | { | 94 | { |
95 | return ((u64)hash * size) >> 32; | 95 | return ((u64)hash * size) >> 32; |
96 | } | 96 | } |
97 | 97 | ||
98 | static u32 hash_bucket(u32 hash, const struct net *net) | 98 | static u32 hash_bucket(u32 hash, const struct net *net) |
99 | { | 99 | { |
100 | return __hash_bucket(hash, net->ct.htable_size); | 100 | return __hash_bucket(hash, net->ct.htable_size); |
101 | } | 101 | } |
102 | 102 | ||
103 | static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, | 103 | static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, |
104 | u16 zone, unsigned int size) | 104 | u16 zone, unsigned int size) |
105 | { | 105 | { |
106 | return __hash_bucket(hash_conntrack_raw(tuple, zone), size); | 106 | return __hash_bucket(hash_conntrack_raw(tuple, zone), size); |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, | 109 | static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, |
110 | const struct nf_conntrack_tuple *tuple) | 110 | const struct nf_conntrack_tuple *tuple) |
111 | { | 111 | { |
112 | return __hash_conntrack(tuple, zone, net->ct.htable_size); | 112 | return __hash_conntrack(tuple, zone, net->ct.htable_size); |
113 | } | 113 | } |
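A side note on __hash_bucket() above (illustration only): the multiply-and-shift ((u64)hash * size) >> 32 maps a 32-bit hash onto [0, size) without a division; for hash = 0x80000000 and size = 16384 it yields (2^31 * 2^14) >> 32 = 8192, the middle bucket. A self-contained userspace sketch of the same mapping:

	#include <stdint.h>
	#include <stdio.h>

	/* Same bucket mapping as __hash_bucket(): scale a 32-bit hash into [0, size). */
	static uint32_t bucket_of(uint32_t hash, uint32_t size)
	{
		return (uint32_t)(((uint64_t)hash * size) >> 32);
	}

	int main(void)
	{
		printf("%u\n", bucket_of(0x80000000u, 16384));	/* prints 8192 */
		return 0;
	}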
114 | 114 | ||
115 | bool | 115 | bool |
116 | nf_ct_get_tuple(const struct sk_buff *skb, | 116 | nf_ct_get_tuple(const struct sk_buff *skb, |
117 | unsigned int nhoff, | 117 | unsigned int nhoff, |
118 | unsigned int dataoff, | 118 | unsigned int dataoff, |
119 | u_int16_t l3num, | 119 | u_int16_t l3num, |
120 | u_int8_t protonum, | 120 | u_int8_t protonum, |
121 | struct nf_conntrack_tuple *tuple, | 121 | struct nf_conntrack_tuple *tuple, |
122 | const struct nf_conntrack_l3proto *l3proto, | 122 | const struct nf_conntrack_l3proto *l3proto, |
123 | const struct nf_conntrack_l4proto *l4proto) | 123 | const struct nf_conntrack_l4proto *l4proto) |
124 | { | 124 | { |
125 | memset(tuple, 0, sizeof(*tuple)); | 125 | memset(tuple, 0, sizeof(*tuple)); |
126 | 126 | ||
127 | tuple->src.l3num = l3num; | 127 | tuple->src.l3num = l3num; |
128 | if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) | 128 | if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) |
129 | return false; | 129 | return false; |
130 | 130 | ||
131 | tuple->dst.protonum = protonum; | 131 | tuple->dst.protonum = protonum; |
132 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; | 132 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; |
133 | 133 | ||
134 | return l4proto->pkt_to_tuple(skb, dataoff, tuple); | 134 | return l4proto->pkt_to_tuple(skb, dataoff, tuple); |
135 | } | 135 | } |
136 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); | 136 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); |
137 | 137 | ||
138 | bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, | 138 | bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, |
139 | u_int16_t l3num, struct nf_conntrack_tuple *tuple) | 139 | u_int16_t l3num, struct nf_conntrack_tuple *tuple) |
140 | { | 140 | { |
141 | struct nf_conntrack_l3proto *l3proto; | 141 | struct nf_conntrack_l3proto *l3proto; |
142 | struct nf_conntrack_l4proto *l4proto; | 142 | struct nf_conntrack_l4proto *l4proto; |
143 | unsigned int protoff; | 143 | unsigned int protoff; |
144 | u_int8_t protonum; | 144 | u_int8_t protonum; |
145 | int ret; | 145 | int ret; |
146 | 146 | ||
147 | rcu_read_lock(); | 147 | rcu_read_lock(); |
148 | 148 | ||
149 | l3proto = __nf_ct_l3proto_find(l3num); | 149 | l3proto = __nf_ct_l3proto_find(l3num); |
150 | ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); | 150 | ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); |
151 | if (ret != NF_ACCEPT) { | 151 | if (ret != NF_ACCEPT) { |
152 | rcu_read_unlock(); | 152 | rcu_read_unlock(); |
153 | return false; | 153 | return false; |
154 | } | 154 | } |
155 | 155 | ||
156 | l4proto = __nf_ct_l4proto_find(l3num, protonum); | 156 | l4proto = __nf_ct_l4proto_find(l3num, protonum); |
157 | 157 | ||
158 | ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, | 158 | ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple, |
159 | l3proto, l4proto); | 159 | l3proto, l4proto); |
160 | 160 | ||
161 | rcu_read_unlock(); | 161 | rcu_read_unlock(); |
162 | return ret; | 162 | return ret; |
163 | } | 163 | } |
164 | EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); | 164 | EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); |
165 | 165 | ||
166 | bool | 166 | bool |
167 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | 167 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, |
168 | const struct nf_conntrack_tuple *orig, | 168 | const struct nf_conntrack_tuple *orig, |
169 | const struct nf_conntrack_l3proto *l3proto, | 169 | const struct nf_conntrack_l3proto *l3proto, |
170 | const struct nf_conntrack_l4proto *l4proto) | 170 | const struct nf_conntrack_l4proto *l4proto) |
171 | { | 171 | { |
172 | memset(inverse, 0, sizeof(*inverse)); | 172 | memset(inverse, 0, sizeof(*inverse)); |
173 | 173 | ||
174 | inverse->src.l3num = orig->src.l3num; | 174 | inverse->src.l3num = orig->src.l3num; |
175 | if (l3proto->invert_tuple(inverse, orig) == 0) | 175 | if (l3proto->invert_tuple(inverse, orig) == 0) |
176 | return false; | 176 | return false; |
177 | 177 | ||
178 | inverse->dst.dir = !orig->dst.dir; | 178 | inverse->dst.dir = !orig->dst.dir; |
179 | 179 | ||
180 | inverse->dst.protonum = orig->dst.protonum; | 180 | inverse->dst.protonum = orig->dst.protonum; |
181 | return l4proto->invert_tuple(inverse, orig); | 181 | return l4proto->invert_tuple(inverse, orig); |
182 | } | 182 | } |
183 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); | 183 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); |
184 | 184 | ||
185 | static void | 185 | static void |
186 | clean_from_lists(struct nf_conn *ct) | 186 | clean_from_lists(struct nf_conn *ct) |
187 | { | 187 | { |
188 | pr_debug("clean_from_lists(%p)\n", ct); | 188 | pr_debug("clean_from_lists(%p)\n", ct); |
189 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | 189 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); |
190 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); | 190 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); |
191 | 191 | ||
192 | /* Destroy all pending expectations */ | 192 | /* Destroy all pending expectations */ |
193 | nf_ct_remove_expectations(ct); | 193 | nf_ct_remove_expectations(ct); |
194 | } | 194 | } |
195 | 195 | ||
196 | static void | 196 | static void |
197 | destroy_conntrack(struct nf_conntrack *nfct) | 197 | destroy_conntrack(struct nf_conntrack *nfct) |
198 | { | 198 | { |
199 | struct nf_conn *ct = (struct nf_conn *)nfct; | 199 | struct nf_conn *ct = (struct nf_conn *)nfct; |
200 | struct net *net = nf_ct_net(ct); | 200 | struct net *net = nf_ct_net(ct); |
201 | struct nf_conntrack_l4proto *l4proto; | 201 | struct nf_conntrack_l4proto *l4proto; |
202 | 202 | ||
203 | pr_debug("destroy_conntrack(%p)\n", ct); | 203 | pr_debug("destroy_conntrack(%p)\n", ct); |
204 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); | 204 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); |
205 | NF_CT_ASSERT(!timer_pending(&ct->timeout)); | 205 | NF_CT_ASSERT(!timer_pending(&ct->timeout)); |
206 | 206 | ||
207 | /* To make sure we don't get any weird locking issues here: | 207 | /* To make sure we don't get any weird locking issues here: |
208 | * destroy_conntrack() MUST NOT be called with a write lock | 208 | * destroy_conntrack() MUST NOT be called with a write lock |
209 | * to nf_conntrack_lock!!! -HW */ | 209 | * to nf_conntrack_lock!!! -HW */ |
210 | rcu_read_lock(); | 210 | rcu_read_lock(); |
211 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); | 211 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); |
212 | if (l4proto && l4proto->destroy) | 212 | if (l4proto && l4proto->destroy) |
213 | l4proto->destroy(ct); | 213 | l4proto->destroy(ct); |
214 | 214 | ||
215 | rcu_read_unlock(); | 215 | rcu_read_unlock(); |
216 | 216 | ||
217 | spin_lock_bh(&nf_conntrack_lock); | 217 | spin_lock_bh(&nf_conntrack_lock); |
218 | /* Expectations will have been removed in clean_from_lists, | 218 | /* Expectations will have been removed in clean_from_lists, |
219 | * except TFTP can create an expectation on the first packet, | 219 | * except TFTP can create an expectation on the first packet, |
220 | * before connection is in the list, so we need to clean here, | 220 | * before connection is in the list, so we need to clean here, |
221 | * too. */ | 221 | * too. */ |
222 | nf_ct_remove_expectations(ct); | 222 | nf_ct_remove_expectations(ct); |
223 | 223 | ||
224 | /* We overload first tuple to link into unconfirmed or dying list.*/ | 224 | /* We overload first tuple to link into unconfirmed or dying list.*/ |
225 | BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); | 225 | BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); |
226 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | 226 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); |
227 | 227 | ||
228 | NF_CT_STAT_INC(net, delete); | 228 | NF_CT_STAT_INC(net, delete); |
229 | spin_unlock_bh(&nf_conntrack_lock); | 229 | spin_unlock_bh(&nf_conntrack_lock); |
230 | 230 | ||
231 | if (ct->master) | 231 | if (ct->master) |
232 | nf_ct_put(ct->master); | 232 | nf_ct_put(ct->master); |
233 | 233 | ||
234 | pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); | 234 | pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); |
235 | nf_conntrack_free(ct); | 235 | nf_conntrack_free(ct); |
236 | } | 236 | } |
237 | 237 | ||
238 | void nf_ct_delete_from_lists(struct nf_conn *ct) | 238 | void nf_ct_delete_from_lists(struct nf_conn *ct) |
239 | { | 239 | { |
240 | struct net *net = nf_ct_net(ct); | 240 | struct net *net = nf_ct_net(ct); |
241 | 241 | ||
242 | nf_ct_helper_destroy(ct); | 242 | nf_ct_helper_destroy(ct); |
243 | spin_lock_bh(&nf_conntrack_lock); | 243 | spin_lock_bh(&nf_conntrack_lock); |
244 | /* Inside lock so preempt is disabled on module removal path. | 244 | /* Inside lock so preempt is disabled on module removal path. |
245 | * Otherwise we can get spurious warnings. */ | 245 | * Otherwise we can get spurious warnings. */ |
246 | NF_CT_STAT_INC(net, delete_list); | 246 | NF_CT_STAT_INC(net, delete_list); |
247 | clean_from_lists(ct); | 247 | clean_from_lists(ct); |
248 | /* add this conntrack to the dying list */ | 248 | /* add this conntrack to the dying list */ |
249 | hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 249 | hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
250 | &net->ct.dying); | 250 | &net->ct.dying); |
251 | spin_unlock_bh(&nf_conntrack_lock); | 251 | spin_unlock_bh(&nf_conntrack_lock); |
252 | } | 252 | } |
253 | EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists); | 253 | EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists); |
254 | 254 | ||
255 | static void death_by_event(unsigned long ul_conntrack) | 255 | static void death_by_event(unsigned long ul_conntrack) |
256 | { | 256 | { |
257 | struct nf_conn *ct = (void *)ul_conntrack; | 257 | struct nf_conn *ct = (void *)ul_conntrack; |
258 | struct net *net = nf_ct_net(ct); | 258 | struct net *net = nf_ct_net(ct); |
259 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | 259 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); |
260 | 260 | ||
261 | BUG_ON(ecache == NULL); | 261 | BUG_ON(ecache == NULL); |
262 | 262 | ||
263 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { | 263 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { |
264 | /* bad luck, let's retry again */ | 264 | /* bad luck, let's retry again */ |
265 | ecache->timeout.expires = jiffies + | 265 | ecache->timeout.expires = jiffies + |
266 | (random32() % net->ct.sysctl_events_retry_timeout); | 266 | (random32() % net->ct.sysctl_events_retry_timeout); |
267 | add_timer(&ecache->timeout); | 267 | add_timer(&ecache->timeout); |
268 | return; | 268 | return; |
269 | } | 269 | } |
270 | /* we've got the event delivered, now it's dying */ | 270 | /* we've got the event delivered, now it's dying */ |
271 | set_bit(IPS_DYING_BIT, &ct->status); | 271 | set_bit(IPS_DYING_BIT, &ct->status); |
272 | nf_ct_put(ct); | 272 | nf_ct_put(ct); |
273 | } | 273 | } |
274 | 274 | ||
275 | void nf_ct_dying_timeout(struct nf_conn *ct) | 275 | void nf_ct_dying_timeout(struct nf_conn *ct) |
276 | { | 276 | { |
277 | struct net *net = nf_ct_net(ct); | 277 | struct net *net = nf_ct_net(ct); |
278 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | 278 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); |
279 | 279 | ||
280 | BUG_ON(ecache == NULL); | 280 | BUG_ON(ecache == NULL); |
281 | 281 | ||
282 | /* set a new timer to retry event delivery */ | 282 | /* set a new timer to retry event delivery */ |
283 | setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); | 283 | setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); |
284 | ecache->timeout.expires = jiffies + | 284 | ecache->timeout.expires = jiffies + |
285 | (random32() % net->ct.sysctl_events_retry_timeout); | 285 | (random32() % net->ct.sysctl_events_retry_timeout); |
286 | add_timer(&ecache->timeout); | 286 | add_timer(&ecache->timeout); |
287 | } | 287 | } |
288 | EXPORT_SYMBOL_GPL(nf_ct_dying_timeout); | 288 | EXPORT_SYMBOL_GPL(nf_ct_dying_timeout); |
289 | 289 | ||
290 | static void death_by_timeout(unsigned long ul_conntrack) | 290 | static void death_by_timeout(unsigned long ul_conntrack) |
291 | { | 291 | { |
292 | struct nf_conn *ct = (void *)ul_conntrack; | 292 | struct nf_conn *ct = (void *)ul_conntrack; |
293 | struct nf_conn_tstamp *tstamp; | 293 | struct nf_conn_tstamp *tstamp; |
294 | 294 | ||
295 | tstamp = nf_conn_tstamp_find(ct); | 295 | tstamp = nf_conn_tstamp_find(ct); |
296 | if (tstamp && tstamp->stop == 0) | 296 | if (tstamp && tstamp->stop == 0) |
297 | tstamp->stop = ktime_to_ns(ktime_get_real()); | 297 | tstamp->stop = ktime_to_ns(ktime_get_real()); |
298 | 298 | ||
299 | if (!test_bit(IPS_DYING_BIT, &ct->status) && | 299 | if (!test_bit(IPS_DYING_BIT, &ct->status) && |
300 | unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { | 300 | unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { |
301 | /* destroy event was not delivered */ | 301 | /* destroy event was not delivered */ |
302 | nf_ct_delete_from_lists(ct); | 302 | nf_ct_delete_from_lists(ct); |
303 | nf_ct_dying_timeout(ct); | 303 | nf_ct_dying_timeout(ct); |
304 | return; | 304 | return; |
305 | } | 305 | } |
306 | set_bit(IPS_DYING_BIT, &ct->status); | 306 | set_bit(IPS_DYING_BIT, &ct->status); |
307 | nf_ct_delete_from_lists(ct); | 307 | nf_ct_delete_from_lists(ct); |
308 | nf_ct_put(ct); | 308 | nf_ct_put(ct); |
309 | } | 309 | } |
310 | 310 | ||
311 | /* | 311 | /* |
312 | * Warning : | 312 | * Warning : |
313 | * - Caller must take a reference on returned object | 313 | * - Caller must take a reference on returned object |
314 | * and recheck nf_ct_tuple_equal(tuple, &h->tuple) | 314 | * and recheck nf_ct_tuple_equal(tuple, &h->tuple) |
315 | * OR | 315 | * OR |
316 | * - Caller must lock nf_conntrack_lock before calling this function | 316 | * - Caller must lock nf_conntrack_lock before calling this function |
317 | */ | 317 | */ |
318 | static struct nf_conntrack_tuple_hash * | 318 | static struct nf_conntrack_tuple_hash * |
319 | ____nf_conntrack_find(struct net *net, u16 zone, | 319 | ____nf_conntrack_find(struct net *net, u16 zone, |
320 | const struct nf_conntrack_tuple *tuple, u32 hash) | 320 | const struct nf_conntrack_tuple *tuple, u32 hash) |
321 | { | 321 | { |
322 | struct nf_conntrack_tuple_hash *h; | 322 | struct nf_conntrack_tuple_hash *h; |
323 | struct hlist_nulls_node *n; | 323 | struct hlist_nulls_node *n; |
324 | unsigned int bucket = hash_bucket(hash, net); | 324 | unsigned int bucket = hash_bucket(hash, net); |
325 | 325 | ||
326 | /* Disable BHs the entire time since we normally need to disable them | 326 | /* Disable BHs the entire time since we normally need to disable them |
327 | * at least once for the stats anyway. | 327 | * at least once for the stats anyway. |
328 | */ | 328 | */ |
329 | local_bh_disable(); | 329 | local_bh_disable(); |
330 | begin: | 330 | begin: |
331 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { | 331 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { |
332 | if (nf_ct_tuple_equal(tuple, &h->tuple) && | 332 | if (nf_ct_tuple_equal(tuple, &h->tuple) && |
333 | nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { | 333 | nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { |
334 | NF_CT_STAT_INC(net, found); | 334 | NF_CT_STAT_INC(net, found); |
335 | local_bh_enable(); | 335 | local_bh_enable(); |
336 | return h; | 336 | return h; |
337 | } | 337 | } |
338 | NF_CT_STAT_INC(net, searched); | 338 | NF_CT_STAT_INC(net, searched); |
339 | } | 339 | } |
340 | /* | 340 | /* |
341 | * if the nulls value we got at the end of this lookup is | 341 | * if the nulls value we got at the end of this lookup is |
342 | * not the expected one, we must restart lookup. | 342 | * not the expected one, we must restart lookup. |
343 | * We probably met an item that was moved to another chain. | 343 | * We probably met an item that was moved to another chain. |
344 | */ | 344 | */ |
345 | if (get_nulls_value(n) != bucket) { | 345 | if (get_nulls_value(n) != bucket) { |
346 | NF_CT_STAT_INC(net, search_restart); | 346 | NF_CT_STAT_INC(net, search_restart); |
347 | goto begin; | 347 | goto begin; |
348 | } | 348 | } |
349 | local_bh_enable(); | 349 | local_bh_enable(); |
350 | 350 | ||
351 | return NULL; | 351 | return NULL; |
352 | } | 352 | } |
353 | 353 | ||
354 | struct nf_conntrack_tuple_hash * | 354 | struct nf_conntrack_tuple_hash * |
355 | __nf_conntrack_find(struct net *net, u16 zone, | 355 | __nf_conntrack_find(struct net *net, u16 zone, |
356 | const struct nf_conntrack_tuple *tuple) | 356 | const struct nf_conntrack_tuple *tuple) |
357 | { | 357 | { |
358 | return ____nf_conntrack_find(net, zone, tuple, | 358 | return ____nf_conntrack_find(net, zone, tuple, |
359 | hash_conntrack_raw(tuple, zone)); | 359 | hash_conntrack_raw(tuple, zone)); |
360 | } | 360 | } |
361 | EXPORT_SYMBOL_GPL(__nf_conntrack_find); | 361 | EXPORT_SYMBOL_GPL(__nf_conntrack_find); |
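/*
 * Editorial sketch (not part of this file): one way a caller could honour the
 * contract documented above __nf_conntrack_find() -- take a reference on the
 * returned entry and re-check the tuple, since SLAB_DESTROY_BY_RCU means the
 * object may have been recycled under us. The function name is made up for
 * illustration; __nf_conntrack_find_get() below is the real in-tree user of
 * this pattern.
 */
#if 0
static struct nf_conn *example_lookup_get(struct net *net, u16 zone,
					  const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL;

	rcu_read_lock();
	h = __nf_conntrack_find(net, zone, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (!atomic_inc_not_zero(&ct->ct_general.use)) {
			/* refcount already hit zero: entry is being freed */
			ct = NULL;
		} else if (!nf_ct_tuple_equal(tuple, &h->tuple)) {
			/* object was recycled for a different tuple */
			nf_ct_put(ct);
			ct = NULL;
		}
	}
	rcu_read_unlock();
	return ct;
}
#endif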
362 | 362 | ||
363 | /* Find a connection corresponding to a tuple. */ | 363 | /* Find a connection corresponding to a tuple. */ |
364 | static struct nf_conntrack_tuple_hash * | 364 | static struct nf_conntrack_tuple_hash * |
365 | __nf_conntrack_find_get(struct net *net, u16 zone, | 365 | __nf_conntrack_find_get(struct net *net, u16 zone, |
366 | const struct nf_conntrack_tuple *tuple, u32 hash) | 366 | const struct nf_conntrack_tuple *tuple, u32 hash) |
367 | { | 367 | { |
368 | struct nf_conntrack_tuple_hash *h; | 368 | struct nf_conntrack_tuple_hash *h; |
369 | struct nf_conn *ct; | 369 | struct nf_conn *ct; |
370 | 370 | ||
371 | rcu_read_lock(); | 371 | rcu_read_lock(); |
372 | begin: | 372 | begin: |
373 | h = ____nf_conntrack_find(net, zone, tuple, hash); | 373 | h = ____nf_conntrack_find(net, zone, tuple, hash); |
374 | if (h) { | 374 | if (h) { |
375 | ct = nf_ct_tuplehash_to_ctrack(h); | 375 | ct = nf_ct_tuplehash_to_ctrack(h); |
376 | if (unlikely(nf_ct_is_dying(ct) || | 376 | if (unlikely(nf_ct_is_dying(ct) || |
377 | !atomic_inc_not_zero(&ct->ct_general.use))) | 377 | !atomic_inc_not_zero(&ct->ct_general.use))) |
378 | h = NULL; | 378 | h = NULL; |
379 | else { | 379 | else { |
380 | if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || | 380 | if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || |
381 | nf_ct_zone(ct) != zone)) { | 381 | nf_ct_zone(ct) != zone)) { |
382 | nf_ct_put(ct); | 382 | nf_ct_put(ct); |
383 | goto begin; | 383 | goto begin; |
384 | } | 384 | } |
385 | } | 385 | } |
386 | } | 386 | } |
387 | rcu_read_unlock(); | 387 | rcu_read_unlock(); |
388 | 388 | ||
389 | return h; | 389 | return h; |
390 | } | 390 | } |
391 | 391 | ||
392 | struct nf_conntrack_tuple_hash * | 392 | struct nf_conntrack_tuple_hash * |
393 | nf_conntrack_find_get(struct net *net, u16 zone, | 393 | nf_conntrack_find_get(struct net *net, u16 zone, |
394 | const struct nf_conntrack_tuple *tuple) | 394 | const struct nf_conntrack_tuple *tuple) |
395 | { | 395 | { |
396 | return __nf_conntrack_find_get(net, zone, tuple, | 396 | return __nf_conntrack_find_get(net, zone, tuple, |
397 | hash_conntrack_raw(tuple, zone)); | 397 | hash_conntrack_raw(tuple, zone)); |
398 | } | 398 | } |
399 | EXPORT_SYMBOL_GPL(nf_conntrack_find_get); | 399 | EXPORT_SYMBOL_GPL(nf_conntrack_find_get); |
400 | 400 | ||
401 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, | 401 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, |
402 | unsigned int hash, | 402 | unsigned int hash, |
403 | unsigned int repl_hash) | 403 | unsigned int repl_hash) |
404 | { | 404 | { |
405 | struct net *net = nf_ct_net(ct); | 405 | struct net *net = nf_ct_net(ct); |
406 | 406 | ||
407 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 407 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
408 | &net->ct.hash[hash]); | 408 | &net->ct.hash[hash]); |
409 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, | 409 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, |
410 | &net->ct.hash[repl_hash]); | 410 | &net->ct.hash[repl_hash]); |
411 | } | 411 | } |
412 | 412 | ||
413 | int | 413 | int |
414 | nf_conntrack_hash_check_insert(struct nf_conn *ct) | 414 | nf_conntrack_hash_check_insert(struct nf_conn *ct) |
415 | { | 415 | { |
416 | struct net *net = nf_ct_net(ct); | 416 | struct net *net = nf_ct_net(ct); |
417 | unsigned int hash, repl_hash; | 417 | unsigned int hash, repl_hash; |
418 | struct nf_conntrack_tuple_hash *h; | 418 | struct nf_conntrack_tuple_hash *h; |
419 | struct hlist_nulls_node *n; | 419 | struct hlist_nulls_node *n; |
420 | u16 zone; | 420 | u16 zone; |
421 | 421 | ||
422 | zone = nf_ct_zone(ct); | 422 | zone = nf_ct_zone(ct); |
423 | hash = hash_conntrack(net, zone, | 423 | hash = hash_conntrack(net, zone, |
424 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 424 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
425 | repl_hash = hash_conntrack(net, zone, | 425 | repl_hash = hash_conntrack(net, zone, |
426 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | 426 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
427 | 427 | ||
428 | spin_lock_bh(&nf_conntrack_lock); | 428 | spin_lock_bh(&nf_conntrack_lock); |
429 | 429 | ||
430 | /* See if there's one in the list already, including reverse */ | 430 | /* See if there's one in the list already, including reverse */ |
431 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) | 431 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) |
432 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 432 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
433 | &h->tuple) && | 433 | &h->tuple) && |
434 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) | 434 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
435 | goto out; | 435 | goto out; |
436 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) | 436 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) |
437 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, | 437 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, |
438 | &h->tuple) && | 438 | &h->tuple) && |
439 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) | 439 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
440 | goto out; | 440 | goto out; |
441 | 441 | ||
442 | add_timer(&ct->timeout); | 442 | add_timer(&ct->timeout); |
443 | nf_conntrack_get(&ct->ct_general); | 443 | nf_conntrack_get(&ct->ct_general); |
444 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | 444 | __nf_conntrack_hash_insert(ct, hash, repl_hash); |
445 | NF_CT_STAT_INC(net, insert); | 445 | NF_CT_STAT_INC(net, insert); |
446 | spin_unlock_bh(&nf_conntrack_lock); | 446 | spin_unlock_bh(&nf_conntrack_lock); |
447 | 447 | ||
448 | return 0; | 448 | return 0; |
449 | 449 | ||
450 | out: | 450 | out: |
451 | NF_CT_STAT_INC(net, insert_failed); | 451 | NF_CT_STAT_INC(net, insert_failed); |
452 | spin_unlock_bh(&nf_conntrack_lock); | 452 | spin_unlock_bh(&nf_conntrack_lock); |
453 | return -EEXIST; | 453 | return -EEXIST; |
454 | } | 454 | } |
455 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); | 455 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); |
456 | 456 | ||
457 | /* Confirm a connection given skb; places it in hash table */ | 457 | /* Confirm a connection given skb; places it in hash table */ |
458 | int | 458 | int |
459 | __nf_conntrack_confirm(struct sk_buff *skb) | 459 | __nf_conntrack_confirm(struct sk_buff *skb) |
460 | { | 460 | { |
461 | unsigned int hash, repl_hash; | 461 | unsigned int hash, repl_hash; |
462 | struct nf_conntrack_tuple_hash *h; | 462 | struct nf_conntrack_tuple_hash *h; |
463 | struct nf_conn *ct; | 463 | struct nf_conn *ct; |
464 | struct nf_conn_help *help; | 464 | struct nf_conn_help *help; |
465 | struct nf_conn_tstamp *tstamp; | 465 | struct nf_conn_tstamp *tstamp; |
466 | struct hlist_nulls_node *n; | 466 | struct hlist_nulls_node *n; |
467 | enum ip_conntrack_info ctinfo; | 467 | enum ip_conntrack_info ctinfo; |
468 | struct net *net; | 468 | struct net *net; |
469 | u16 zone; | 469 | u16 zone; |
470 | 470 | ||
471 | ct = nf_ct_get(skb, &ctinfo); | 471 | ct = nf_ct_get(skb, &ctinfo); |
472 | net = nf_ct_net(ct); | 472 | net = nf_ct_net(ct); |
473 | 473 | ||
474 | /* ipt_REJECT uses nf_conntrack_attach to attach related | 474 | /* ipt_REJECT uses nf_conntrack_attach to attach related |
475 | ICMP/TCP RST packets in the other direction. The actual | 475 | ICMP/TCP RST packets in the other direction. The actual |
476 | packet which created the connection will be IP_CT_NEW or, | 476 | packet which created the connection will be IP_CT_NEW or, |
477 | for an expected connection, IP_CT_RELATED. */ | 477 | for an expected connection, IP_CT_RELATED. */ |
478 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | 478 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) |
479 | return NF_ACCEPT; | 479 | return NF_ACCEPT; |
480 | 480 | ||
481 | zone = nf_ct_zone(ct); | 481 | zone = nf_ct_zone(ct); |
482 | /* reuse the hash saved before */ | 482 | /* reuse the hash saved before */ |
483 | hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; | 483 | hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; |
484 | hash = hash_bucket(hash, net); | 484 | hash = hash_bucket(hash, net); |
485 | repl_hash = hash_conntrack(net, zone, | 485 | repl_hash = hash_conntrack(net, zone, |
486 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | 486 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
487 | 487 | ||
488 | /* We're not in hash table, and we refuse to set up related | 488 | /* We're not in hash table, and we refuse to set up related |
489 | connections for unconfirmed conns. But packet copies and | 489 | connections for unconfirmed conns. But packet copies and |
490 | REJECT will give spurious warnings here. */ | 490 | REJECT will give spurious warnings here. */ |
491 | /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ | 491 | /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ |
492 | 492 | ||
493 | /* No external references means no one else could have | 493 | /* No external references means no one else could have |
494 | confirmed us. */ | 494 | confirmed us. */ |
495 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | 495 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); |
496 | pr_debug("Confirming conntrack %p\n", ct); | 496 | pr_debug("Confirming conntrack %p\n", ct); |
497 | 497 | ||
498 | spin_lock_bh(&nf_conntrack_lock); | 498 | spin_lock_bh(&nf_conntrack_lock); |
499 | 499 | ||
500 | /* We have to check the DYING flag inside the lock to prevent | 500 | /* We have to check the DYING flag inside the lock to prevent |
501 | a race against nf_ct_get_next_corpse() possibly called from | 501 | a race against nf_ct_get_next_corpse() possibly called from |
502 | user context, else we insert an already 'dead' hash, blocking | 502 | user context, else we insert an already 'dead' hash, blocking |
503 | further use of that particular connection -JM */ | 503 | further use of that particular connection -JM */ |
504 | 504 | ||
505 | if (unlikely(nf_ct_is_dying(ct))) { | 505 | if (unlikely(nf_ct_is_dying(ct))) { |
506 | spin_unlock_bh(&nf_conntrack_lock); | 506 | spin_unlock_bh(&nf_conntrack_lock); |
507 | return NF_ACCEPT; | 507 | return NF_ACCEPT; |
508 | } | 508 | } |
509 | 509 | ||
510 | /* See if there's one in the list already, including reverse: | 510 | /* See if there's one in the list already, including reverse: |
511 | NAT could have grabbed it without realizing, since we're | 511 | NAT could have grabbed it without realizing, since we're |
512 | not in the hash. If there is, we lost the race. */ | 512 | not in the hash. If there is, we lost the race. */ |
513 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) | 513 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) |
514 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 514 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
515 | &h->tuple) && | 515 | &h->tuple) && |
516 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) | 516 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
517 | goto out; | 517 | goto out; |
518 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) | 518 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) |
519 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, | 519 | if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, |
520 | &h->tuple) && | 520 | &h->tuple) && |
521 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) | 521 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
522 | goto out; | 522 | goto out; |
523 | 523 | ||
524 | /* Remove from unconfirmed list */ | 524 | /* Remove from unconfirmed list */ |
525 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); | 525 | hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); |
526 | 526 | ||
527 | /* Timer relative to confirmation time, not original | 527 | /* Timer relative to confirmation time, not original |
528 | setting time, otherwise we'd get timer wrap in | 528 | setting time, otherwise we'd get timer wrap in |
529 | weird delay cases. */ | 529 | weird delay cases. */ |
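	/*
	 * Editorial note with assumed numbers: if the L4 tracker left a
	 * relative value such as 60*HZ in ct->timeout.expires when the
	 * conntrack was set up, the line below turns it into an absolute
	 * expiry of "now + 60 seconds", counted from confirmation time.
	 */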
530 | ct->timeout.expires += jiffies; | 530 | ct->timeout.expires += jiffies; |
531 | add_timer(&ct->timeout); | 531 | add_timer(&ct->timeout); |
532 | atomic_inc(&ct->ct_general.use); | 532 | atomic_inc(&ct->ct_general.use); |
533 | ct->status |= IPS_CONFIRMED; | 533 | ct->status |= IPS_CONFIRMED; |
534 | 534 | ||
535 | /* set conntrack timestamp, if enabled. */ | 535 | /* set conntrack timestamp, if enabled. */ |
536 | tstamp = nf_conn_tstamp_find(ct); | 536 | tstamp = nf_conn_tstamp_find(ct); |
537 | if (tstamp) { | 537 | if (tstamp) { |
538 | if (skb->tstamp.tv64 == 0) | 538 | if (skb->tstamp.tv64 == 0) |
539 | __net_timestamp(skb); | 539 | __net_timestamp(skb); |
540 | 540 | ||
541 | tstamp->start = ktime_to_ns(skb->tstamp); | 541 | tstamp->start = ktime_to_ns(skb->tstamp); |
542 | } | 542 | } |
543 | /* Since the lookup is lockless, hash insertion must be done after | 543 | /* Since the lookup is lockless, hash insertion must be done after |
544 | * starting the timer and setting the CONFIRMED bit. The RCU barriers | 544 | * starting the timer and setting the CONFIRMED bit. The RCU barriers |
545 | * guarantee that no other CPU can find the conntrack before the above | 545 | * guarantee that no other CPU can find the conntrack before the above |
546 | * stores are visible. | 546 | * stores are visible. |
547 | */ | 547 | */ |
548 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | 548 | __nf_conntrack_hash_insert(ct, hash, repl_hash); |
549 | NF_CT_STAT_INC(net, insert); | 549 | NF_CT_STAT_INC(net, insert); |
550 | spin_unlock_bh(&nf_conntrack_lock); | 550 | spin_unlock_bh(&nf_conntrack_lock); |
551 | 551 | ||
552 | help = nfct_help(ct); | 552 | help = nfct_help(ct); |
553 | if (help && help->helper) | 553 | if (help && help->helper) |
554 | nf_conntrack_event_cache(IPCT_HELPER, ct); | 554 | nf_conntrack_event_cache(IPCT_HELPER, ct); |
555 | 555 | ||
556 | nf_conntrack_event_cache(master_ct(ct) ? | 556 | nf_conntrack_event_cache(master_ct(ct) ? |
557 | IPCT_RELATED : IPCT_NEW, ct); | 557 | IPCT_RELATED : IPCT_NEW, ct); |
558 | return NF_ACCEPT; | 558 | return NF_ACCEPT; |
559 | 559 | ||
560 | out: | 560 | out: |
561 | NF_CT_STAT_INC(net, insert_failed); | 561 | NF_CT_STAT_INC(net, insert_failed); |
562 | spin_unlock_bh(&nf_conntrack_lock); | 562 | spin_unlock_bh(&nf_conntrack_lock); |
563 | return NF_DROP; | 563 | return NF_DROP; |
564 | } | 564 | } |
565 | EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); | 565 | EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); |
566 | 566 | ||
567 | /* Returns true if a connection corresponds to the tuple (required | 567 | /* Returns true if a connection corresponds to the tuple (required |
568 | for NAT). */ | 568 | for NAT). */ |
569 | int | 569 | int |
570 | nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | 570 | nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, |
571 | const struct nf_conn *ignored_conntrack) | 571 | const struct nf_conn *ignored_conntrack) |
572 | { | 572 | { |
573 | struct net *net = nf_ct_net(ignored_conntrack); | 573 | struct net *net = nf_ct_net(ignored_conntrack); |
574 | struct nf_conntrack_tuple_hash *h; | 574 | struct nf_conntrack_tuple_hash *h; |
575 | struct hlist_nulls_node *n; | 575 | struct hlist_nulls_node *n; |
576 | struct nf_conn *ct; | 576 | struct nf_conn *ct; |
577 | u16 zone = nf_ct_zone(ignored_conntrack); | 577 | u16 zone = nf_ct_zone(ignored_conntrack); |
578 | unsigned int hash = hash_conntrack(net, zone, tuple); | 578 | unsigned int hash = hash_conntrack(net, zone, tuple); |
579 | 579 | ||
580 | /* Disable BHs the entire time since we need to disable them at | 580 | /* Disable BHs the entire time since we need to disable them at |
581 | * least once for the stats anyway. | 581 | * least once for the stats anyway. |
582 | */ | 582 | */ |
583 | rcu_read_lock_bh(); | 583 | rcu_read_lock_bh(); |
584 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { | 584 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { |
585 | ct = nf_ct_tuplehash_to_ctrack(h); | 585 | ct = nf_ct_tuplehash_to_ctrack(h); |
586 | if (ct != ignored_conntrack && | 586 | if (ct != ignored_conntrack && |
587 | nf_ct_tuple_equal(tuple, &h->tuple) && | 587 | nf_ct_tuple_equal(tuple, &h->tuple) && |
588 | nf_ct_zone(ct) == zone) { | 588 | nf_ct_zone(ct) == zone) { |
589 | NF_CT_STAT_INC(net, found); | 589 | NF_CT_STAT_INC(net, found); |
590 | rcu_read_unlock_bh(); | 590 | rcu_read_unlock_bh(); |
591 | return 1; | 591 | return 1; |
592 | } | 592 | } |
593 | NF_CT_STAT_INC(net, searched); | 593 | NF_CT_STAT_INC(net, searched); |
594 | } | 594 | } |
595 | rcu_read_unlock_bh(); | 595 | rcu_read_unlock_bh(); |
596 | 596 | ||
597 | return 0; | 597 | return 0; |
598 | } | 598 | } |
599 | EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); | 599 | EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); |
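/*
 * Editorial sketch (assumed helper, not from this file): roughly how a NAT
 * port allocator can use nf_conntrack_tuple_taken() to probe candidate
 * tuples until it finds one that no other connection is using.
 */
#if 0
static bool example_pick_unique_port(struct nf_conntrack_tuple *tuple,
				     const struct nf_conn *ct,
				     unsigned int first, unsigned int last)
{
	unsigned int port;

	for (port = first; port <= last; port++) {
		tuple->src.u.tcp.port = htons(port);
		if (!nf_conntrack_tuple_taken(tuple, ct))
			return true;	/* tuple is free, keep this port */
	}
	return false;	/* every candidate clashed with an existing entry */
}
#endif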
600 | 600 | ||
601 | #define NF_CT_EVICTION_RANGE 8 | 601 | #define NF_CT_EVICTION_RANGE 8 |
602 | 602 | ||
603 | /* There's a small race here where we may free a just-assured | 603 | /* There's a small race here where we may free a just-assured |
604 | connection. Too bad: we're in trouble anyway. */ | 604 | connection. Too bad: we're in trouble anyway. */ |
605 | static noinline int early_drop(struct net *net, unsigned int hash) | 605 | static noinline int early_drop(struct net *net, unsigned int hash) |
606 | { | 606 | { |
607 | /* Use oldest entry, which is roughly LRU */ | 607 | /* Use oldest entry, which is roughly LRU */ |
608 | struct nf_conntrack_tuple_hash *h; | 608 | struct nf_conntrack_tuple_hash *h; |
609 | struct nf_conn *ct = NULL, *tmp; | 609 | struct nf_conn *ct = NULL, *tmp; |
610 | struct hlist_nulls_node *n; | 610 | struct hlist_nulls_node *n; |
611 | unsigned int i, cnt = 0; | 611 | unsigned int i, cnt = 0; |
612 | int dropped = 0; | 612 | int dropped = 0; |
613 | 613 | ||
614 | rcu_read_lock(); | 614 | rcu_read_lock(); |
615 | for (i = 0; i < net->ct.htable_size; i++) { | 615 | for (i = 0; i < net->ct.htable_size; i++) { |
616 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], | 616 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], |
617 | hnnode) { | 617 | hnnode) { |
618 | tmp = nf_ct_tuplehash_to_ctrack(h); | 618 | tmp = nf_ct_tuplehash_to_ctrack(h); |
619 | if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) | 619 | if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) |
620 | ct = tmp; | 620 | ct = tmp; |
621 | cnt++; | 621 | cnt++; |
622 | } | 622 | } |
623 | 623 | ||
624 | if (ct != NULL) { | 624 | if (ct != NULL) { |
625 | if (likely(!nf_ct_is_dying(ct) && | 625 | if (likely(!nf_ct_is_dying(ct) && |
626 | atomic_inc_not_zero(&ct->ct_general.use))) | 626 | atomic_inc_not_zero(&ct->ct_general.use))) |
627 | break; | 627 | break; |
628 | else | 628 | else |
629 | ct = NULL; | 629 | ct = NULL; |
630 | } | 630 | } |
631 | 631 | ||
632 | if (cnt >= NF_CT_EVICTION_RANGE) | 632 | if (cnt >= NF_CT_EVICTION_RANGE) |
633 | break; | 633 | break; |
634 | 634 | ||
635 | hash = (hash + 1) % net->ct.htable_size; | 635 | hash = (hash + 1) % net->ct.htable_size; |
636 | } | 636 | } |
637 | rcu_read_unlock(); | 637 | rcu_read_unlock(); |
638 | 638 | ||
639 | if (!ct) | 639 | if (!ct) |
640 | return dropped; | 640 | return dropped; |
641 | 641 | ||
642 | if (del_timer(&ct->timeout)) { | 642 | if (del_timer(&ct->timeout)) { |
643 | death_by_timeout((unsigned long)ct); | 643 | death_by_timeout((unsigned long)ct); |
644 | /* Check if we indeed killed this entry. Reliable event | 644 | /* Check if we indeed killed this entry. Reliable event |
645 | delivery may have inserted it into the dying list. */ | 645 | delivery may have inserted it into the dying list. */ |
646 | if (test_bit(IPS_DYING_BIT, &ct->status)) { | 646 | if (test_bit(IPS_DYING_BIT, &ct->status)) { |
647 | dropped = 1; | 647 | dropped = 1; |
648 | NF_CT_STAT_INC_ATOMIC(net, early_drop); | 648 | NF_CT_STAT_INC_ATOMIC(net, early_drop); |
649 | } | 649 | } |
650 | } | 650 | } |
651 | nf_ct_put(ct); | 651 | nf_ct_put(ct); |
652 | return dropped; | 652 | return dropped; |
653 | } | 653 | } |
654 | 654 | ||
655 | void init_nf_conntrack_hash_rnd(void) | 655 | void init_nf_conntrack_hash_rnd(void) |
656 | { | 656 | { |
657 | unsigned int rand; | 657 | unsigned int rand; |
658 | 658 | ||
659 | /* | 659 | /* |
660 | * Why not initialize nf_conntrack_hash_rnd in an "init()" function? | 660 | * Why not initialize nf_conntrack_hash_rnd in an "init()" function? |
661 | * Because there isn't enough entropy while the system is initializing, | 661 | * Because there isn't enough entropy while the system is initializing, |
662 | * so we initialize it as late as possible. | 662 | * so we initialize it as late as possible. |
663 | */ | 663 | */ |
664 | do { | 664 | do { |
665 | get_random_bytes(&rand, sizeof(rand)); | 665 | get_random_bytes(&rand, sizeof(rand)); |
666 | } while (!rand); | 666 | } while (!rand); |
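	/*
	 * Editorial note: cmpxchg() below only installs the new value if the
	 * seed is still zero, so concurrent callers race harmlessly -- the
	 * losers simply keep whatever value the winner stored.
	 */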
667 | cmpxchg(&nf_conntrack_hash_rnd, 0, rand); | 667 | cmpxchg(&nf_conntrack_hash_rnd, 0, rand); |
668 | } | 668 | } |
669 | 669 | ||
670 | static struct nf_conn * | 670 | static struct nf_conn * |
671 | __nf_conntrack_alloc(struct net *net, u16 zone, | 671 | __nf_conntrack_alloc(struct net *net, u16 zone, |
672 | const struct nf_conntrack_tuple *orig, | 672 | const struct nf_conntrack_tuple *orig, |
673 | const struct nf_conntrack_tuple *repl, | 673 | const struct nf_conntrack_tuple *repl, |
674 | gfp_t gfp, u32 hash) | 674 | gfp_t gfp, u32 hash) |
675 | { | 675 | { |
676 | struct nf_conn *ct; | 676 | struct nf_conn *ct; |
677 | 677 | ||
678 | if (unlikely(!nf_conntrack_hash_rnd)) { | 678 | if (unlikely(!nf_conntrack_hash_rnd)) { |
679 | init_nf_conntrack_hash_rnd(); | 679 | init_nf_conntrack_hash_rnd(); |
680 | /* recompute the hash as nf_conntrack_hash_rnd is initialized */ | 680 | /* recompute the hash as nf_conntrack_hash_rnd is initialized */ |
681 | hash = hash_conntrack_raw(orig, zone); | 681 | hash = hash_conntrack_raw(orig, zone); |
682 | } | 682 | } |
683 | 683 | ||
684 | /* We don't want any race condition at early drop stage */ | 684 | /* We don't want any race condition at early drop stage */ |
685 | atomic_inc(&net->ct.count); | 685 | atomic_inc(&net->ct.count); |
686 | 686 | ||
687 | if (nf_conntrack_max && | 687 | if (nf_conntrack_max && |
688 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { | 688 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { |
689 | if (!early_drop(net, hash_bucket(hash, net))) { | 689 | if (!early_drop(net, hash_bucket(hash, net))) { |
690 | atomic_dec(&net->ct.count); | 690 | atomic_dec(&net->ct.count); |
691 | net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); | 691 | net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); |
692 | return ERR_PTR(-ENOMEM); | 692 | return ERR_PTR(-ENOMEM); |
693 | } | 693 | } |
694 | } | 694 | } |
695 | 695 | ||
696 | /* | 696 | /* |
697 | * Do not use kmem_cache_zalloc(), as this cache uses | 697 | * Do not use kmem_cache_zalloc(), as this cache uses |
698 | * SLAB_DESTROY_BY_RCU. | 698 | * SLAB_DESTROY_BY_RCU. |
699 | */ | 699 | */ |
700 | ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); | 700 | ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); |
701 | if (ct == NULL) { | 701 | if (ct == NULL) { |
702 | atomic_dec(&net->ct.count); | 702 | atomic_dec(&net->ct.count); |
703 | return ERR_PTR(-ENOMEM); | 703 | return ERR_PTR(-ENOMEM); |
704 | } | 704 | } |
705 | /* | 705 | /* |
706 | * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next | 706 | * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next |
707 | * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. | 707 | * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. |
708 | */ | 708 | */ |
709 | memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, | 709 | memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, |
710 | offsetof(struct nf_conn, proto) - | 710 | offsetof(struct nf_conn, proto) - |
711 | offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); | 711 | offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); |
712 | spin_lock_init(&ct->lock); | 712 | spin_lock_init(&ct->lock); |
713 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; | 713 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; |
714 | ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; | 714 | ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; |
715 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; | 715 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; |
716 | /* save hash for reusing when confirming */ | 716 | /* save hash for reusing when confirming */ |
717 | *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; | 717 | *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; |
718 | /* Don't set timer yet: wait for confirmation */ | 718 | /* Don't set timer yet: wait for confirmation */ |
719 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); | 719 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); |
720 | write_pnet(&ct->ct_net, net); | 720 | write_pnet(&ct->ct_net, net); |
721 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 721 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
722 | if (zone) { | 722 | if (zone) { |
723 | struct nf_conntrack_zone *nf_ct_zone; | 723 | struct nf_conntrack_zone *nf_ct_zone; |
724 | 724 | ||
725 | nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); | 725 | nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); |
726 | if (!nf_ct_zone) | 726 | if (!nf_ct_zone) |
727 | goto out_free; | 727 | goto out_free; |
728 | nf_ct_zone->id = zone; | 728 | nf_ct_zone->id = zone; |
729 | } | 729 | } |
730 | #endif | 730 | #endif |
731 | /* | 731 | /* |
732 | * changes to lookup keys must be done before setting refcnt to 1 | 732 | * changes to lookup keys must be done before setting refcnt to 1 |
733 | */ | 733 | */ |
734 | smp_wmb(); | 734 | smp_wmb(); |
735 | atomic_set(&ct->ct_general.use, 1); | 735 | atomic_set(&ct->ct_general.use, 1); |
736 | return ct; | 736 | return ct; |
737 | 737 | ||
738 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 738 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
739 | out_free: | 739 | out_free: |
740 | atomic_dec(&net->ct.count); | 740 | atomic_dec(&net->ct.count); |
741 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | 741 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); |
742 | return ERR_PTR(-ENOMEM); | 742 | return ERR_PTR(-ENOMEM); |
743 | #endif | 743 | #endif |
744 | } | 744 | } |
745 | 745 | ||
746 | struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, | 746 | struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, |
747 | const struct nf_conntrack_tuple *orig, | 747 | const struct nf_conntrack_tuple *orig, |
748 | const struct nf_conntrack_tuple *repl, | 748 | const struct nf_conntrack_tuple *repl, |
749 | gfp_t gfp) | 749 | gfp_t gfp) |
750 | { | 750 | { |
751 | return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); | 751 | return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); |
752 | } | 752 | } |
753 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | 753 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); |
754 | 754 | ||
755 | void nf_conntrack_free(struct nf_conn *ct) | 755 | void nf_conntrack_free(struct nf_conn *ct) |
756 | { | 756 | { |
757 | struct net *net = nf_ct_net(ct); | 757 | struct net *net = nf_ct_net(ct); |
758 | 758 | ||
759 | nf_ct_ext_destroy(ct); | 759 | nf_ct_ext_destroy(ct); |
760 | atomic_dec(&net->ct.count); | 760 | atomic_dec(&net->ct.count); |
761 | nf_ct_ext_free(ct); | 761 | nf_ct_ext_free(ct); |
762 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | 762 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); |
763 | } | 763 | } |
764 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | 764 | EXPORT_SYMBOL_GPL(nf_conntrack_free); |
765 | 765 | ||
766 | /* Allocate a new conntrack: we return -ENOMEM if classification | 766 | /* Allocate a new conntrack: we return -ENOMEM if classification |
767 | failed due to stress. Otherwise it really is unclassifiable. */ | 767 | failed due to stress. Otherwise it really is unclassifiable. */ |
768 | static struct nf_conntrack_tuple_hash * | 768 | static struct nf_conntrack_tuple_hash * |
769 | init_conntrack(struct net *net, struct nf_conn *tmpl, | 769 | init_conntrack(struct net *net, struct nf_conn *tmpl, |
770 | const struct nf_conntrack_tuple *tuple, | 770 | const struct nf_conntrack_tuple *tuple, |
771 | struct nf_conntrack_l3proto *l3proto, | 771 | struct nf_conntrack_l3proto *l3proto, |
772 | struct nf_conntrack_l4proto *l4proto, | 772 | struct nf_conntrack_l4proto *l4proto, |
773 | struct sk_buff *skb, | 773 | struct sk_buff *skb, |
774 | unsigned int dataoff, u32 hash) | 774 | unsigned int dataoff, u32 hash) |
775 | { | 775 | { |
776 | struct nf_conn *ct; | 776 | struct nf_conn *ct; |
777 | struct nf_conn_help *help; | 777 | struct nf_conn_help *help; |
778 | struct nf_conntrack_tuple repl_tuple; | 778 | struct nf_conntrack_tuple repl_tuple; |
779 | struct nf_conntrack_ecache *ecache; | 779 | struct nf_conntrack_ecache *ecache; |
780 | struct nf_conntrack_expect *exp; | 780 | struct nf_conntrack_expect *exp; |
781 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; | 781 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; |
782 | struct nf_conn_timeout *timeout_ext; | 782 | struct nf_conn_timeout *timeout_ext; |
783 | unsigned int *timeouts; | 783 | unsigned int *timeouts; |
784 | 784 | ||
785 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { | 785 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { |
786 | pr_debug("Can't invert tuple.\n"); | 786 | pr_debug("Can't invert tuple.\n"); |
787 | return NULL; | 787 | return NULL; |
788 | } | 788 | } |
789 | 789 | ||
790 | ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, | 790 | ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, |
791 | hash); | 791 | hash); |
792 | if (IS_ERR(ct)) | 792 | if (IS_ERR(ct)) |
793 | return (struct nf_conntrack_tuple_hash *)ct; | 793 | return (struct nf_conntrack_tuple_hash *)ct; |
794 | 794 | ||
795 | timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; | 795 | timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; |
796 | if (timeout_ext) | 796 | if (timeout_ext) |
797 | timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); | 797 | timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); |
798 | else | 798 | else |
799 | timeouts = l4proto->get_timeouts(net); | 799 | timeouts = l4proto->get_timeouts(net); |
800 | 800 | ||
801 | if (!l4proto->new(ct, skb, dataoff, timeouts)) { | 801 | if (!l4proto->new(ct, skb, dataoff, timeouts)) { |
802 | nf_conntrack_free(ct); | 802 | nf_conntrack_free(ct); |
803 | pr_debug("init conntrack: can't track with proto module\n"); | 803 | pr_debug("init conntrack: can't track with proto module\n"); |
804 | return NULL; | 804 | return NULL; |
805 | } | 805 | } |
806 | 806 | ||
807 | if (timeout_ext) | 807 | if (timeout_ext) |
808 | nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC); | 808 | nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC); |
809 | 809 | ||
810 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); | 810 | nf_ct_acct_ext_add(ct, GFP_ATOMIC); |
811 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); | 811 | nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); |
812 | 812 | ||
813 | ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; | 813 | ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; |
814 | nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, | 814 | nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, |
815 | ecache ? ecache->expmask : 0, | 815 | ecache ? ecache->expmask : 0, |
816 | GFP_ATOMIC); | 816 | GFP_ATOMIC); |
817 | 817 | ||
818 | spin_lock_bh(&nf_conntrack_lock); | 818 | spin_lock_bh(&nf_conntrack_lock); |
819 | exp = nf_ct_find_expectation(net, zone, tuple); | 819 | exp = nf_ct_find_expectation(net, zone, tuple); |
820 | if (exp) { | 820 | if (exp) { |
821 | pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", | 821 | pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", |
822 | ct, exp); | 822 | ct, exp); |
823 | /* Welcome, Mr. Bond. We've been expecting you... */ | 823 | /* Welcome, Mr. Bond. We've been expecting you... */ |
824 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | 824 | __set_bit(IPS_EXPECTED_BIT, &ct->status); |
825 | ct->master = exp->master; | 825 | ct->master = exp->master; |
826 | if (exp->helper) { | 826 | if (exp->helper) { |
827 | help = nf_ct_helper_ext_add(ct, exp->helper, | 827 | help = nf_ct_helper_ext_add(ct, exp->helper, |
828 | GFP_ATOMIC); | 828 | GFP_ATOMIC); |
829 | if (help) | 829 | if (help) |
830 | rcu_assign_pointer(help->helper, exp->helper); | 830 | rcu_assign_pointer(help->helper, exp->helper); |
831 | } | 831 | } |
832 | 832 | ||
833 | #ifdef CONFIG_NF_CONNTRACK_MARK | 833 | #ifdef CONFIG_NF_CONNTRACK_MARK |
834 | ct->mark = exp->master->mark; | 834 | ct->mark = exp->master->mark; |
835 | #endif | 835 | #endif |
836 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 836 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
837 | ct->secmark = exp->master->secmark; | 837 | ct->secmark = exp->master->secmark; |
838 | #endif | 838 | #endif |
839 | nf_conntrack_get(&ct->master->ct_general); | 839 | nf_conntrack_get(&ct->master->ct_general); |
840 | NF_CT_STAT_INC(net, expect_new); | 840 | NF_CT_STAT_INC(net, expect_new); |
841 | } else { | 841 | } else { |
842 | __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); | 842 | __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); |
843 | NF_CT_STAT_INC(net, new); | 843 | NF_CT_STAT_INC(net, new); |
844 | } | 844 | } |
845 | 845 | ||
846 | /* Overload tuple linked list to put us in unconfirmed list. */ | 846 | /* Overload tuple linked list to put us in unconfirmed list. */ |
847 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 847 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
848 | &net->ct.unconfirmed); | 848 | &net->ct.unconfirmed); |
849 | 849 | ||
850 | spin_unlock_bh(&nf_conntrack_lock); | 850 | spin_unlock_bh(&nf_conntrack_lock); |
851 | 851 | ||
852 | if (exp) { | 852 | if (exp) { |
853 | if (exp->expectfn) | 853 | if (exp->expectfn) |
854 | exp->expectfn(ct, exp); | 854 | exp->expectfn(ct, exp); |
855 | nf_ct_expect_put(exp); | 855 | nf_ct_expect_put(exp); |
856 | } | 856 | } |
857 | 857 | ||
858 | return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; | 858 | return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; |
859 | } | 859 | } |
860 | 860 | ||
861 | /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ | 861 | /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ |
862 | static inline struct nf_conn * | 862 | static inline struct nf_conn * |
863 | resolve_normal_ct(struct net *net, struct nf_conn *tmpl, | 863 | resolve_normal_ct(struct net *net, struct nf_conn *tmpl, |
864 | struct sk_buff *skb, | 864 | struct sk_buff *skb, |
865 | unsigned int dataoff, | 865 | unsigned int dataoff, |
866 | u_int16_t l3num, | 866 | u_int16_t l3num, |
867 | u_int8_t protonum, | 867 | u_int8_t protonum, |
868 | struct nf_conntrack_l3proto *l3proto, | 868 | struct nf_conntrack_l3proto *l3proto, |
869 | struct nf_conntrack_l4proto *l4proto, | 869 | struct nf_conntrack_l4proto *l4proto, |
870 | int *set_reply, | 870 | int *set_reply, |
871 | enum ip_conntrack_info *ctinfo) | 871 | enum ip_conntrack_info *ctinfo) |
872 | { | 872 | { |
873 | struct nf_conntrack_tuple tuple; | 873 | struct nf_conntrack_tuple tuple; |
874 | struct nf_conntrack_tuple_hash *h; | 874 | struct nf_conntrack_tuple_hash *h; |
875 | struct nf_conn *ct; | 875 | struct nf_conn *ct; |
876 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; | 876 | u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; |
877 | u32 hash; | 877 | u32 hash; |
878 | 878 | ||
879 | if (!nf_ct_get_tuple(skb, skb_network_offset(skb), | 879 | if (!nf_ct_get_tuple(skb, skb_network_offset(skb), |
880 | dataoff, l3num, protonum, &tuple, l3proto, | 880 | dataoff, l3num, protonum, &tuple, l3proto, |
881 | l4proto)) { | 881 | l4proto)) { |
882 | pr_debug("resolve_normal_ct: Can't get tuple\n"); | 882 | pr_debug("resolve_normal_ct: Can't get tuple\n"); |
883 | return NULL; | 883 | return NULL; |
884 | } | 884 | } |
885 | 885 | ||
886 | /* look for tuple match */ | 886 | /* look for tuple match */ |
887 | hash = hash_conntrack_raw(&tuple, zone); | 887 | hash = hash_conntrack_raw(&tuple, zone); |
888 | h = __nf_conntrack_find_get(net, zone, &tuple, hash); | 888 | h = __nf_conntrack_find_get(net, zone, &tuple, hash); |
889 | if (!h) { | 889 | if (!h) { |
890 | h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, | 890 | h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, |
891 | skb, dataoff, hash); | 891 | skb, dataoff, hash); |
892 | if (!h) | 892 | if (!h) |
893 | return NULL; | 893 | return NULL; |
894 | if (IS_ERR(h)) | 894 | if (IS_ERR(h)) |
895 | return (void *)h; | 895 | return (void *)h; |
896 | } | 896 | } |
897 | ct = nf_ct_tuplehash_to_ctrack(h); | 897 | ct = nf_ct_tuplehash_to_ctrack(h); |
898 | 898 | ||
899 | /* It exists; we have (non-exclusive) reference. */ | 899 | /* It exists; we have (non-exclusive) reference. */ |
900 | if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { | 900 | if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { |
901 | *ctinfo = IP_CT_ESTABLISHED_REPLY; | 901 | *ctinfo = IP_CT_ESTABLISHED_REPLY; |
902 | /* Please set the reply bit if this packet is OK */ | 902 | /* Please set the reply bit if this packet is OK */ |
903 | *set_reply = 1; | 903 | *set_reply = 1; |
904 | } else { | 904 | } else { |
905 | /* Once we've had two-way comms, always ESTABLISHED. */ | 905 | /* Once we've had two-way comms, always ESTABLISHED. */ |
906 | if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { | 906 | if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { |
907 | pr_debug("nf_conntrack_in: normal packet for %p\n", ct); | 907 | pr_debug("nf_conntrack_in: normal packet for %p\n", ct); |
908 | *ctinfo = IP_CT_ESTABLISHED; | 908 | *ctinfo = IP_CT_ESTABLISHED; |
909 | } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { | 909 | } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { |
910 | pr_debug("nf_conntrack_in: related packet for %p\n", | 910 | pr_debug("nf_conntrack_in: related packet for %p\n", |
911 | ct); | 911 | ct); |
912 | *ctinfo = IP_CT_RELATED; | 912 | *ctinfo = IP_CT_RELATED; |
913 | } else { | 913 | } else { |
914 | pr_debug("nf_conntrack_in: new packet for %p\n", ct); | 914 | pr_debug("nf_conntrack_in: new packet for %p\n", ct); |
915 | *ctinfo = IP_CT_NEW; | 915 | *ctinfo = IP_CT_NEW; |
916 | } | 916 | } |
917 | *set_reply = 0; | 917 | *set_reply = 0; |
918 | } | 918 | } |
919 | skb->nfct = &ct->ct_general; | 919 | skb->nfct = &ct->ct_general; |
920 | skb->nfctinfo = *ctinfo; | 920 | skb->nfctinfo = *ctinfo; |
921 | return ct; | 921 | return ct; |
922 | } | 922 | } |
923 | 923 | ||
924 | unsigned int | 924 | unsigned int |
925 | nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, | 925 | nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, |
926 | struct sk_buff *skb) | 926 | struct sk_buff *skb) |
927 | { | 927 | { |
928 | struct nf_conn *ct, *tmpl = NULL; | 928 | struct nf_conn *ct, *tmpl = NULL; |
929 | enum ip_conntrack_info ctinfo; | 929 | enum ip_conntrack_info ctinfo; |
930 | struct nf_conntrack_l3proto *l3proto; | 930 | struct nf_conntrack_l3proto *l3proto; |
931 | struct nf_conntrack_l4proto *l4proto; | 931 | struct nf_conntrack_l4proto *l4proto; |
932 | unsigned int *timeouts; | 932 | unsigned int *timeouts; |
933 | unsigned int dataoff; | 933 | unsigned int dataoff; |
934 | u_int8_t protonum; | 934 | u_int8_t protonum; |
935 | int set_reply = 0; | 935 | int set_reply = 0; |
936 | int ret; | 936 | int ret; |
937 | 937 | ||
938 | if (skb->nfct) { | 938 | if (skb->nfct) { |
939 | /* Previously seen (loopback or untracked)? Ignore. */ | 939 | /* Previously seen (loopback or untracked)? Ignore. */ |
940 | tmpl = (struct nf_conn *)skb->nfct; | 940 | tmpl = (struct nf_conn *)skb->nfct; |
941 | if (!nf_ct_is_template(tmpl)) { | 941 | if (!nf_ct_is_template(tmpl)) { |
942 | NF_CT_STAT_INC_ATOMIC(net, ignore); | 942 | NF_CT_STAT_INC_ATOMIC(net, ignore); |
943 | return NF_ACCEPT; | 943 | return NF_ACCEPT; |
944 | } | 944 | } |
945 | skb->nfct = NULL; | 945 | skb->nfct = NULL; |
946 | } | 946 | } |
947 | 947 | ||
948 | /* rcu_read_lock()ed by nf_hook_slow */ | 948 | /* rcu_read_lock()ed by nf_hook_slow */ |
949 | l3proto = __nf_ct_l3proto_find(pf); | 949 | l3proto = __nf_ct_l3proto_find(pf); |
950 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), | 950 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), |
951 | &dataoff, &protonum); | 951 | &dataoff, &protonum); |
952 | if (ret <= 0) { | 952 | if (ret <= 0) { |
953 | pr_debug("not prepared to track yet or error occurred\n"); | 953 | pr_debug("not prepared to track yet or error occurred\n"); |
954 | NF_CT_STAT_INC_ATOMIC(net, error); | 954 | NF_CT_STAT_INC_ATOMIC(net, error); |
955 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 955 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
956 | ret = -ret; | 956 | ret = -ret; |
957 | goto out; | 957 | goto out; |
958 | } | 958 | } |
959 | 959 | ||
960 | l4proto = __nf_ct_l4proto_find(pf, protonum); | 960 | l4proto = __nf_ct_l4proto_find(pf, protonum); |
961 | 961 | ||
962 | /* It may be a special packet, error, unclean... | 962 | /* It may be a special packet, error, unclean... |
963 | * the inverse of the return code tells the netfilter | 963 | * the inverse of the return code tells the netfilter |
964 | * core what to do with the packet. */ | 964 | * core what to do with the packet. */ |
965 | if (l4proto->error != NULL) { | 965 | if (l4proto->error != NULL) { |
966 | ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, | 966 | ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, |
967 | pf, hooknum); | 967 | pf, hooknum); |
968 | if (ret <= 0) { | 968 | if (ret <= 0) { |
969 | NF_CT_STAT_INC_ATOMIC(net, error); | 969 | NF_CT_STAT_INC_ATOMIC(net, error); |
970 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 970 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
971 | ret = -ret; | 971 | ret = -ret; |
972 | goto out; | 972 | goto out; |
973 | } | 973 | } |
974 | /* ICMP[v6] protocol trackers may assign one conntrack. */ | 974 | /* ICMP[v6] protocol trackers may assign one conntrack. */ |
975 | if (skb->nfct) | 975 | if (skb->nfct) |
976 | goto out; | 976 | goto out; |
977 | } | 977 | } |
978 | 978 | ||
979 | ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, | 979 | ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, |
980 | l3proto, l4proto, &set_reply, &ctinfo); | 980 | l3proto, l4proto, &set_reply, &ctinfo); |
981 | if (!ct) { | 981 | if (!ct) { |
982 | /* Not valid part of a connection */ | 982 | /* Not valid part of a connection */ |
983 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 983 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
984 | ret = NF_ACCEPT; | 984 | ret = NF_ACCEPT; |
985 | goto out; | 985 | goto out; |
986 | } | 986 | } |
987 | 987 | ||
988 | if (IS_ERR(ct)) { | 988 | if (IS_ERR(ct)) { |
989 | /* Too stressed to deal. */ | 989 | /* Too stressed to deal. */ |
990 | NF_CT_STAT_INC_ATOMIC(net, drop); | 990 | NF_CT_STAT_INC_ATOMIC(net, drop); |
991 | ret = NF_DROP; | 991 | ret = NF_DROP; |
992 | goto out; | 992 | goto out; |
993 | } | 993 | } |
994 | 994 | ||
995 | NF_CT_ASSERT(skb->nfct); | 995 | NF_CT_ASSERT(skb->nfct); |
996 | 996 | ||
997 | /* Decide what timeout policy we want to apply to this flow. */ | 997 | /* Decide what timeout policy we want to apply to this flow. */ |
998 | timeouts = nf_ct_timeout_lookup(net, ct, l4proto); | 998 | timeouts = nf_ct_timeout_lookup(net, ct, l4proto); |
999 | 999 | ||
1000 | ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); | 1000 | ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); |
1001 | if (ret <= 0) { | 1001 | if (ret <= 0) { |
1002 | /* Invalid: inverse of the return code tells | 1002 | /* Invalid: inverse of the return code tells |
1003 | * the netfilter core what to do */ | 1003 | * the netfilter core what to do */ |
1004 | pr_debug("nf_conntrack_in: Can't track with proto module\n"); | 1004 | pr_debug("nf_conntrack_in: Can't track with proto module\n"); |
1005 | nf_conntrack_put(skb->nfct); | 1005 | nf_conntrack_put(skb->nfct); |
1006 | skb->nfct = NULL; | 1006 | skb->nfct = NULL; |
1007 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 1007 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
1008 | if (ret == -NF_DROP) | 1008 | if (ret == -NF_DROP) |
1009 | NF_CT_STAT_INC_ATOMIC(net, drop); | 1009 | NF_CT_STAT_INC_ATOMIC(net, drop); |
1010 | ret = -ret; | 1010 | ret = -ret; |
1011 | goto out; | 1011 | goto out; |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) | 1014 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) |
1015 | nf_conntrack_event_cache(IPCT_REPLY, ct); | 1015 | nf_conntrack_event_cache(IPCT_REPLY, ct); |
1016 | out: | 1016 | out: |
1017 | if (tmpl) { | 1017 | if (tmpl) { |
1018 | /* Special case: we have to repeat this hook, assign the | 1018 | /* Special case: we have to repeat this hook, assign the |
1019 | * template again to this packet. We assume that this packet | 1019 | * template again to this packet. We assume that this packet |
1020 | * has no conntrack assigned. This is used by nf_ct_tcp. */ | 1020 | * has no conntrack assigned. This is used by nf_ct_tcp. */ |
1021 | if (ret == NF_REPEAT) | 1021 | if (ret == NF_REPEAT) |
1022 | skb->nfct = (struct nf_conntrack *)tmpl; | 1022 | skb->nfct = (struct nf_conntrack *)tmpl; |
1023 | else | 1023 | else |
1024 | nf_ct_put(tmpl); | 1024 | nf_ct_put(tmpl); |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | return ret; | 1027 | return ret; |
1028 | } | 1028 | } |
1029 | EXPORT_SYMBOL_GPL(nf_conntrack_in); | 1029 | EXPORT_SYMBOL_GPL(nf_conntrack_in); |
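/*
 * Editorial sketch (hook prototype assumed for this kernel generation; see
 * net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c for the real thing): the
 * IPv4 conntrack hooks are thin wrappers that simply feed every packet into
 * nf_conntrack_in().
 */
#if 0
static unsigned int example_ipv4_conntrack_in(unsigned int hooknum,
					      struct sk_buff *skb,
					      const struct net_device *in,
					      const struct net_device *out,
					      int (*okfn)(struct sk_buff *))
{
	return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
}
#endif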
1030 | 1030 | ||
1031 | bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | 1031 | bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, |
1032 | const struct nf_conntrack_tuple *orig) | 1032 | const struct nf_conntrack_tuple *orig) |
1033 | { | 1033 | { |
1034 | bool ret; | 1034 | bool ret; |
1035 | 1035 | ||
1036 | rcu_read_lock(); | 1036 | rcu_read_lock(); |
1037 | ret = nf_ct_invert_tuple(inverse, orig, | 1037 | ret = nf_ct_invert_tuple(inverse, orig, |
1038 | __nf_ct_l3proto_find(orig->src.l3num), | 1038 | __nf_ct_l3proto_find(orig->src.l3num), |
1039 | __nf_ct_l4proto_find(orig->src.l3num, | 1039 | __nf_ct_l4proto_find(orig->src.l3num, |
1040 | orig->dst.protonum)); | 1040 | orig->dst.protonum)); |
1041 | rcu_read_unlock(); | 1041 | rcu_read_unlock(); |
1042 | return ret; | 1042 | return ret; |
1043 | } | 1043 | } |
1044 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); | 1044 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); |
1045 | 1045 | ||
1046 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is | 1046 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is |
1047 | implicitly racy: see __nf_conntrack_confirm */ | 1047 | implicitly racy: see __nf_conntrack_confirm */ |
1048 | void nf_conntrack_alter_reply(struct nf_conn *ct, | 1048 | void nf_conntrack_alter_reply(struct nf_conn *ct, |
1049 | const struct nf_conntrack_tuple *newreply) | 1049 | const struct nf_conntrack_tuple *newreply) |
1050 | { | 1050 | { |
1051 | struct nf_conn_help *help = nfct_help(ct); | 1051 | struct nf_conn_help *help = nfct_help(ct); |
1052 | 1052 | ||
1053 | /* Should be unconfirmed, so not in hash table yet */ | 1053 | /* Should be unconfirmed, so not in hash table yet */ |
1054 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | 1054 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); |
1055 | 1055 | ||
1056 | pr_debug("Altering reply tuple of %p to ", ct); | 1056 | pr_debug("Altering reply tuple of %p to ", ct); |
1057 | nf_ct_dump_tuple(newreply); | 1057 | nf_ct_dump_tuple(newreply); |
1058 | 1058 | ||
1059 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; | 1059 | ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; |
1060 | if (ct->master || (help && !hlist_empty(&help->expectations))) | 1060 | if (ct->master || (help && !hlist_empty(&help->expectations))) |
1061 | return; | 1061 | return; |
1062 | 1062 | ||
1063 | rcu_read_lock(); | 1063 | rcu_read_lock(); |
1064 | __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); | 1064 | __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); |
1065 | rcu_read_unlock(); | 1065 | rcu_read_unlock(); |
1066 | } | 1066 | } |
1067 | EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); | 1067 | EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); |
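/*
 * Editorial sketch (variable names assumed): how a NAT setup path typically
 * uses the two helpers above -- once a new tuple has been chosen for the
 * original direction, invert it and install it as the reply tuple.
 */
#if 0
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, &new_tuple);
	nf_conntrack_alter_reply(ct, &reply);
#endif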
1068 | 1068 | ||
1069 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ | 1069 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ |
1070 | void __nf_ct_refresh_acct(struct nf_conn *ct, | 1070 | void __nf_ct_refresh_acct(struct nf_conn *ct, |
1071 | enum ip_conntrack_info ctinfo, | 1071 | enum ip_conntrack_info ctinfo, |
1072 | const struct sk_buff *skb, | 1072 | const struct sk_buff *skb, |
1073 | unsigned long extra_jiffies, | 1073 | unsigned long extra_jiffies, |
1074 | int do_acct) | 1074 | int do_acct) |
1075 | { | 1075 | { |
1076 | NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); | 1076 | NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); |
1077 | NF_CT_ASSERT(skb); | 1077 | NF_CT_ASSERT(skb); |
1078 | 1078 | ||
1079 | /* Only update if this is not a fixed timeout */ | 1079 | /* Only update if this is not a fixed timeout */ |
1080 | if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) | 1080 | if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) |
1081 | goto acct; | 1081 | goto acct; |
1082 | 1082 | ||
1083 | /* If not in hash table, timer will not be active yet */ | 1083 | /* If not in hash table, timer will not be active yet */ |
1084 | if (!nf_ct_is_confirmed(ct)) { | 1084 | if (!nf_ct_is_confirmed(ct)) { |
1085 | ct->timeout.expires = extra_jiffies; | 1085 | ct->timeout.expires = extra_jiffies; |
1086 | } else { | 1086 | } else { |
1087 | unsigned long newtime = jiffies + extra_jiffies; | 1087 | unsigned long newtime = jiffies + extra_jiffies; |
1088 | 1088 | ||
1089 | /* Only update the timeout if the new timeout is at least | 1089 | /* Only update the timeout if the new timeout is at least |
1090 | HZ jiffies from the old timeout. Need del_timer for race | 1090 | HZ jiffies from the old timeout. Need del_timer for race |
1091 | avoidance (may already be dying). */ | 1091 | avoidance (may already be dying). */ |
1092 | if (newtime - ct->timeout.expires >= HZ) | 1092 | if (newtime - ct->timeout.expires >= HZ) |
1093 | mod_timer_pending(&ct->timeout, newtime); | 1093 | mod_timer_pending(&ct->timeout, newtime); |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | acct: | 1096 | acct: |
1097 | if (do_acct) { | 1097 | if (do_acct) { |
1098 | struct nf_conn_counter *acct; | 1098 | struct nf_conn_counter *acct; |
1099 | 1099 | ||
1100 | acct = nf_conn_acct_find(ct); | 1100 | acct = nf_conn_acct_find(ct); |
1101 | if (acct) { | 1101 | if (acct) { |
1102 | atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); | 1102 | atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); |
1103 | atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes); | 1103 | atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes); |
1104 | } | 1104 | } |
1105 | } | 1105 | } |
1106 | } | 1106 | } |
1107 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); | 1107 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); |
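/*
 * Editorial sketch (state index assumed): an L4 tracker's ->packet() handler
 * typically ends by calling the nf_ct_refresh_acct() wrapper, which invokes
 * __nf_ct_refresh_acct() with do_acct == 1 so the byte/packet counters are
 * updated alongside the timeout.
 */
#if 0
	/* inside a protocol's ->packet() callback, after classifying skb */
	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
	return NF_ACCEPT;
#endif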
1108 | 1108 | ||
1109 | bool __nf_ct_kill_acct(struct nf_conn *ct, | 1109 | bool __nf_ct_kill_acct(struct nf_conn *ct, |
1110 | enum ip_conntrack_info ctinfo, | 1110 | enum ip_conntrack_info ctinfo, |
1111 | const struct sk_buff *skb, | 1111 | const struct sk_buff *skb, |
1112 | int do_acct) | 1112 | int do_acct) |
1113 | { | 1113 | { |
1114 | if (do_acct) { | 1114 | if (do_acct) { |
1115 | struct nf_conn_counter *acct; | 1115 | struct nf_conn_counter *acct; |
1116 | 1116 | ||
1117 | acct = nf_conn_acct_find(ct); | 1117 | acct = nf_conn_acct_find(ct); |
1118 | if (acct) { | 1118 | if (acct) { |
1119 | atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); | 1119 | atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets); |
1120 | atomic64_add(skb->len - skb_network_offset(skb), | 1120 | atomic64_add(skb->len - skb_network_offset(skb), |
1121 | &acct[CTINFO2DIR(ctinfo)].bytes); | 1121 | &acct[CTINFO2DIR(ctinfo)].bytes); |
1122 | } | 1122 | } |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | if (del_timer(&ct->timeout)) { | 1125 | if (del_timer(&ct->timeout)) { |
1126 | ct->timeout.function((unsigned long)ct); | 1126 | ct->timeout.function((unsigned long)ct); |
1127 | return true; | 1127 | return true; |
1128 | } | 1128 | } |
1129 | return false; | 1129 | return false; |
1130 | } | 1130 | } |
1131 | EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); | 1131 | EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); |
1132 | 1132 | ||
1133 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1133 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1134 | static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { | 1134 | static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { |
1135 | .len = sizeof(struct nf_conntrack_zone), | 1135 | .len = sizeof(struct nf_conntrack_zone), |
1136 | .align = __alignof__(struct nf_conntrack_zone), | 1136 | .align = __alignof__(struct nf_conntrack_zone), |
1137 | .id = NF_CT_EXT_ZONE, | 1137 | .id = NF_CT_EXT_ZONE, |
1138 | }; | 1138 | }; |
1139 | #endif | 1139 | #endif |
1140 | 1140 | ||
1141 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | 1141 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
1142 | 1142 | ||
1143 | #include <linux/netfilter/nfnetlink.h> | 1143 | #include <linux/netfilter/nfnetlink.h> |
1144 | #include <linux/netfilter/nfnetlink_conntrack.h> | 1144 | #include <linux/netfilter/nfnetlink_conntrack.h> |
1145 | #include <linux/mutex.h> | 1145 | #include <linux/mutex.h> |
1146 | 1146 | ||
1147 | /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be | 1147 | /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be |
1148 | * in ip_conntrack_core, since we don't want the protocols to autoload | 1148 | * in ip_conntrack_core, since we don't want the protocols to autoload |
1149 | * or depend on ctnetlink */ | 1149 | * or depend on ctnetlink */ |
1150 | int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, | 1150 | int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, |
1151 | const struct nf_conntrack_tuple *tuple) | 1151 | const struct nf_conntrack_tuple *tuple) |
1152 | { | 1152 | { |
1153 | if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || | 1153 | if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || |
1154 | nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) | 1154 | nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) |
1155 | goto nla_put_failure; | 1155 | goto nla_put_failure; |
1156 | return 0; | 1156 | return 0; |
1157 | 1157 | ||
1158 | nla_put_failure: | 1158 | nla_put_failure: |
1159 | return -1; | 1159 | return -1; |
1160 | } | 1160 | } |
1161 | EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); | 1161 | EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); |
1162 | 1162 | ||
1163 | const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { | 1163 | const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { |
1164 | [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, | 1164 | [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, |
1165 | [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, | 1165 | [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, |
1166 | }; | 1166 | }; |
1167 | EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); | 1167 | EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); |
1168 | 1168 | ||
1169 | int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], | 1169 | int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], |
1170 | struct nf_conntrack_tuple *t) | 1170 | struct nf_conntrack_tuple *t) |
1171 | { | 1171 | { |
1172 | if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) | 1172 | if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) |
1173 | return -EINVAL; | 1173 | return -EINVAL; |
1174 | 1174 | ||
1175 | t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); | 1175 | t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); |
1176 | t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); | 1176 | t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); |
1177 | 1177 | ||
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
1180 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); | 1180 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); |
1181 | 1181 | ||
1182 | int nf_ct_port_nlattr_tuple_size(void) | 1182 | int nf_ct_port_nlattr_tuple_size(void) |
1183 | { | 1183 | { |
1184 | return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); | 1184 | return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); |
1185 | } | 1185 | } |
1186 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); | 1186 | EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); |
1187 | #endif | 1187 | #endif |
1188 | 1188 | ||
1189 | /* Used by ipt_REJECT and ip6t_REJECT. */ | 1189 | /* Used by ipt_REJECT and ip6t_REJECT. */ |
1190 | static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) | 1190 | static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) |
1191 | { | 1191 | { |
1192 | struct nf_conn *ct; | 1192 | struct nf_conn *ct; |
1193 | enum ip_conntrack_info ctinfo; | 1193 | enum ip_conntrack_info ctinfo; |
1194 | 1194 | ||
1195 | /* This ICMP is in reverse direction to the packet which caused it */ | 1195 | /* This ICMP is in reverse direction to the packet which caused it */ |
1196 | ct = nf_ct_get(skb, &ctinfo); | 1196 | ct = nf_ct_get(skb, &ctinfo); |
1197 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | 1197 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
1198 | ctinfo = IP_CT_RELATED_REPLY; | 1198 | ctinfo = IP_CT_RELATED_REPLY; |
1199 | else | 1199 | else |
1200 | ctinfo = IP_CT_RELATED; | 1200 | ctinfo = IP_CT_RELATED; |
1201 | 1201 | ||
1202 | /* Attach to new skbuff, and increment count */ | 1202 | /* Attach to new skbuff, and increment count */ |
1203 | nskb->nfct = &ct->ct_general; | 1203 | nskb->nfct = &ct->ct_general; |
1204 | nskb->nfctinfo = ctinfo; | 1204 | nskb->nfctinfo = ctinfo; |
1205 | nf_conntrack_get(nskb->nfct); | 1205 | nf_conntrack_get(nskb->nfct); |
1206 | } | 1206 | } |
1207 | 1207 | ||
1208 | /* Bring out ya dead! */ | 1208 | /* Bring out ya dead! */ |
1209 | static struct nf_conn * | 1209 | static struct nf_conn * |
1210 | get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), | 1210 | get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), |
1211 | void *data, unsigned int *bucket) | 1211 | void *data, unsigned int *bucket) |
1212 | { | 1212 | { |
1213 | struct nf_conntrack_tuple_hash *h; | 1213 | struct nf_conntrack_tuple_hash *h; |
1214 | struct nf_conn *ct; | 1214 | struct nf_conn *ct; |
1215 | struct hlist_nulls_node *n; | 1215 | struct hlist_nulls_node *n; |
1216 | 1216 | ||
1217 | spin_lock_bh(&nf_conntrack_lock); | 1217 | spin_lock_bh(&nf_conntrack_lock); |
1218 | for (; *bucket < net->ct.htable_size; (*bucket)++) { | 1218 | for (; *bucket < net->ct.htable_size; (*bucket)++) { |
1219 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { | 1219 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { |
1220 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) | 1220 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) |
1221 | continue; | 1221 | continue; |
1222 | ct = nf_ct_tuplehash_to_ctrack(h); | 1222 | ct = nf_ct_tuplehash_to_ctrack(h); |
1223 | if (iter(ct, data)) | 1223 | if (iter(ct, data)) |
1224 | goto found; | 1224 | goto found; |
1225 | } | 1225 | } |
1226 | } | 1226 | } |
1227 | hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) { | 1227 | hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) { |
1228 | ct = nf_ct_tuplehash_to_ctrack(h); | 1228 | ct = nf_ct_tuplehash_to_ctrack(h); |
1229 | if (iter(ct, data)) | 1229 | if (iter(ct, data)) |
1230 | set_bit(IPS_DYING_BIT, &ct->status); | 1230 | set_bit(IPS_DYING_BIT, &ct->status); |
1231 | } | 1231 | } |
1232 | spin_unlock_bh(&nf_conntrack_lock); | 1232 | spin_unlock_bh(&nf_conntrack_lock); |
1233 | return NULL; | 1233 | return NULL; |
1234 | found: | 1234 | found: |
1235 | atomic_inc(&ct->ct_general.use); | 1235 | atomic_inc(&ct->ct_general.use); |
1236 | spin_unlock_bh(&nf_conntrack_lock); | 1236 | spin_unlock_bh(&nf_conntrack_lock); |
1237 | return ct; | 1237 | return ct; |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | void nf_ct_iterate_cleanup(struct net *net, | 1240 | void nf_ct_iterate_cleanup(struct net *net, |
1241 | int (*iter)(struct nf_conn *i, void *data), | 1241 | int (*iter)(struct nf_conn *i, void *data), |
1242 | void *data) | 1242 | void *data) |
1243 | { | 1243 | { |
1244 | struct nf_conn *ct; | 1244 | struct nf_conn *ct; |
1245 | unsigned int bucket = 0; | 1245 | unsigned int bucket = 0; |
1246 | 1246 | ||
1247 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { | 1247 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { |
1248 | /* Time to push up daisies... */ | 1248 | /* Time to push up daisies... */ |
1249 | if (del_timer(&ct->timeout)) | 1249 | if (del_timer(&ct->timeout)) |
1250 | death_by_timeout((unsigned long)ct); | 1250 | death_by_timeout((unsigned long)ct); |
1251 | /* ... else the timer will get him soon. */ | 1251 | /* ... else the timer will get him soon. */ |
1252 | 1252 | ||
1253 | nf_ct_put(ct); | 1253 | nf_ct_put(ct); |
1254 | } | 1254 | } |
1255 | } | 1255 | } |
1256 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); | 1256 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); |
1257 | 1257 | ||
1258 | struct __nf_ct_flush_report { | 1258 | struct __nf_ct_flush_report { |
1259 | u32 pid; | 1259 | u32 pid; |
1260 | int report; | 1260 | int report; |
1261 | }; | 1261 | }; |
1262 | 1262 | ||
1263 | static int kill_report(struct nf_conn *i, void *data) | 1263 | static int kill_report(struct nf_conn *i, void *data) |
1264 | { | 1264 | { |
1265 | struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; | 1265 | struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; |
1266 | struct nf_conn_tstamp *tstamp; | 1266 | struct nf_conn_tstamp *tstamp; |
1267 | 1267 | ||
1268 | tstamp = nf_conn_tstamp_find(i); | 1268 | tstamp = nf_conn_tstamp_find(i); |
1269 | if (tstamp && tstamp->stop == 0) | 1269 | if (tstamp && tstamp->stop == 0) |
1270 | tstamp->stop = ktime_to_ns(ktime_get_real()); | 1270 | tstamp->stop = ktime_to_ns(ktime_get_real()); |
1271 | 1271 | ||
1272 | /* If we fail to deliver the event, death_by_timeout() will retry */ | 1272 | /* If we fail to deliver the event, death_by_timeout() will retry */ |
1273 | if (nf_conntrack_event_report(IPCT_DESTROY, i, | 1273 | if (nf_conntrack_event_report(IPCT_DESTROY, i, |
1274 | fr->pid, fr->report) < 0) | 1274 | fr->pid, fr->report) < 0) |
1275 | return 1; | 1275 | return 1; |
1276 | 1276 | ||
1277 | /* Avoid the delivery of the destroy event in death_by_timeout(). */ | 1277 | /* Avoid the delivery of the destroy event in death_by_timeout(). */ |
1278 | set_bit(IPS_DYING_BIT, &i->status); | 1278 | set_bit(IPS_DYING_BIT, &i->status); |
1279 | return 1; | 1279 | return 1; |
1280 | } | 1280 | } |
1281 | 1281 | ||
1282 | static int kill_all(struct nf_conn *i, void *data) | 1282 | static int kill_all(struct nf_conn *i, void *data) |
1283 | { | 1283 | { |
1284 | return 1; | 1284 | return 1; |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | void nf_ct_free_hashtable(void *hash, unsigned int size) | 1287 | void nf_ct_free_hashtable(void *hash, unsigned int size) |
1288 | { | 1288 | { |
1289 | if (is_vmalloc_addr(hash)) | 1289 | if (is_vmalloc_addr(hash)) |
1290 | vfree(hash); | 1290 | vfree(hash); |
1291 | else | 1291 | else |
1292 | free_pages((unsigned long)hash, | 1292 | free_pages((unsigned long)hash, |
1293 | get_order(sizeof(struct hlist_head) * size)); | 1293 | get_order(sizeof(struct hlist_head) * size)); |
1294 | } | 1294 | } |
1295 | EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); | 1295 | EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); |
1296 | 1296 | ||
1297 | void nf_conntrack_flush_report(struct net *net, u32 pid, int report) | 1297 | void nf_conntrack_flush_report(struct net *net, u32 pid, int report) |
1298 | { | 1298 | { |
1299 | struct __nf_ct_flush_report fr = { | 1299 | struct __nf_ct_flush_report fr = { |
1300 | .pid = pid, | 1300 | .pid = pid, |
1301 | .report = report, | 1301 | .report = report, |
1302 | }; | 1302 | }; |
1303 | nf_ct_iterate_cleanup(net, kill_report, &fr); | 1303 | nf_ct_iterate_cleanup(net, kill_report, &fr); |
1304 | } | 1304 | } |
1305 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); | 1305 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); |
1306 | 1306 | ||
1307 | static void nf_ct_release_dying_list(struct net *net) | 1307 | static void nf_ct_release_dying_list(struct net *net) |
1308 | { | 1308 | { |
1309 | struct nf_conntrack_tuple_hash *h; | 1309 | struct nf_conntrack_tuple_hash *h; |
1310 | struct nf_conn *ct; | 1310 | struct nf_conn *ct; |
1311 | struct hlist_nulls_node *n; | 1311 | struct hlist_nulls_node *n; |
1312 | 1312 | ||
1313 | spin_lock_bh(&nf_conntrack_lock); | 1313 | spin_lock_bh(&nf_conntrack_lock); |
1314 | hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) { | 1314 | hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) { |
1315 | ct = nf_ct_tuplehash_to_ctrack(h); | 1315 | ct = nf_ct_tuplehash_to_ctrack(h); |
1316 | /* never fails to remove them, no listeners at this point */ | 1316 | /* never fails to remove them, no listeners at this point */ |
1317 | nf_ct_kill(ct); | 1317 | nf_ct_kill(ct); |
1318 | } | 1318 | } |
1319 | spin_unlock_bh(&nf_conntrack_lock); | 1319 | spin_unlock_bh(&nf_conntrack_lock); |
1320 | } | 1320 | } |
1321 | 1321 | ||
1322 | static int untrack_refs(void) | 1322 | static int untrack_refs(void) |
1323 | { | 1323 | { |
1324 | int cnt = 0, cpu; | 1324 | int cnt = 0, cpu; |
1325 | 1325 | ||
1326 | for_each_possible_cpu(cpu) { | 1326 | for_each_possible_cpu(cpu) { |
1327 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); | 1327 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); |
1328 | 1328 | ||
1329 | cnt += atomic_read(&ct->ct_general.use) - 1; | 1329 | cnt += atomic_read(&ct->ct_general.use) - 1; |
1330 | } | 1330 | } |
1331 | return cnt; | 1331 | return cnt; |
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | static void nf_conntrack_cleanup_init_net(void) | 1334 | static void nf_conntrack_cleanup_init_net(void) |
1335 | { | 1335 | { |
1336 | while (untrack_refs() > 0) | 1336 | while (untrack_refs() > 0) |
1337 | schedule(); | 1337 | schedule(); |
1338 | 1338 | ||
1339 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1339 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1340 | nf_ct_extend_unregister(&nf_ct_zone_extend); | 1340 | nf_ct_extend_unregister(&nf_ct_zone_extend); |
1341 | #endif | 1341 | #endif |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | static void nf_conntrack_cleanup_net(struct net *net) | 1344 | static void nf_conntrack_cleanup_net(struct net *net) |
1345 | { | 1345 | { |
1346 | i_see_dead_people: | 1346 | i_see_dead_people: |
1347 | nf_ct_iterate_cleanup(net, kill_all, NULL); | 1347 | nf_ct_iterate_cleanup(net, kill_all, NULL); |
1348 | nf_ct_release_dying_list(net); | 1348 | nf_ct_release_dying_list(net); |
1349 | if (atomic_read(&net->ct.count) != 0) { | 1349 | if (atomic_read(&net->ct.count) != 0) { |
1350 | schedule(); | 1350 | schedule(); |
1351 | goto i_see_dead_people; | 1351 | goto i_see_dead_people; |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); | 1354 | nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); |
1355 | nf_conntrack_helper_fini(net); | 1355 | nf_conntrack_helper_fini(net); |
1356 | nf_conntrack_timeout_fini(net); | 1356 | nf_conntrack_timeout_fini(net); |
1357 | nf_conntrack_ecache_fini(net); | 1357 | nf_conntrack_ecache_fini(net); |
1358 | nf_conntrack_tstamp_fini(net); | 1358 | nf_conntrack_tstamp_fini(net); |
1359 | nf_conntrack_acct_fini(net); | 1359 | nf_conntrack_acct_fini(net); |
1360 | nf_conntrack_expect_fini(net); | 1360 | nf_conntrack_expect_fini(net); |
1361 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); | 1361 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); |
1362 | kfree(net->ct.slabname); | 1362 | kfree(net->ct.slabname); |
1363 | free_percpu(net->ct.stat); | 1363 | free_percpu(net->ct.stat); |
1364 | } | 1364 | } |
1365 | 1365 | ||
1366 | /* Mishearing the voices in his head, our hero wonders how he's | 1366 | /* Mishearing the voices in his head, our hero wonders how he's |
1367 | supposed to kill the mall. */ | 1367 | supposed to kill the mall. */ |
1368 | void nf_conntrack_cleanup(struct net *net) | 1368 | void nf_conntrack_cleanup(struct net *net) |
1369 | { | 1369 | { |
1370 | if (net_eq(net, &init_net)) | 1370 | if (net_eq(net, &init_net)) |
1371 | RCU_INIT_POINTER(ip_ct_attach, NULL); | 1371 | RCU_INIT_POINTER(ip_ct_attach, NULL); |
1372 | 1372 | ||
1373 | /* This makes sure all current packets have passed through | 1373 | /* This makes sure all current packets have passed through |
1374 | the netfilter framework. Roll on, two-stage module | 1374 | the netfilter framework. Roll on, two-stage module |
1375 | delete... */ | 1375 | delete... */ |
1376 | synchronize_net(); | 1376 | synchronize_net(); |
1377 | nf_conntrack_proto_fini(net); | 1377 | nf_conntrack_proto_fini(net); |
1378 | nf_conntrack_cleanup_net(net); | 1378 | nf_conntrack_cleanup_net(net); |
1379 | 1379 | ||
1380 | if (net_eq(net, &init_net)) { | 1380 | if (net_eq(net, &init_net)) { |
1381 | RCU_INIT_POINTER(nf_ct_destroy, NULL); | 1381 | RCU_INIT_POINTER(nf_ct_destroy, NULL); |
1382 | nf_conntrack_cleanup_init_net(); | 1382 | nf_conntrack_cleanup_init_net(); |
1383 | } | 1383 | } |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) | 1386 | void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) |
1387 | { | 1387 | { |
1388 | struct hlist_nulls_head *hash; | 1388 | struct hlist_nulls_head *hash; |
1389 | unsigned int nr_slots, i; | 1389 | unsigned int nr_slots, i; |
1390 | size_t sz; | 1390 | size_t sz; |
1391 | 1391 | ||
1392 | BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); | 1392 | BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); |
1393 | nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); | 1393 | nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); |
1394 | sz = nr_slots * sizeof(struct hlist_nulls_head); | 1394 | sz = nr_slots * sizeof(struct hlist_nulls_head); |
1395 | hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, | 1395 | hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, |
1396 | get_order(sz)); | 1396 | get_order(sz)); |
1397 | if (!hash) { | 1397 | if (!hash) { |
1398 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); | 1398 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); |
1399 | hash = vzalloc(sz); | 1399 | hash = vzalloc(sz); |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | if (hash && nulls) | 1402 | if (hash && nulls) |
1403 | for (i = 0; i < nr_slots; i++) | 1403 | for (i = 0; i < nr_slots; i++) |
1404 | INIT_HLIST_NULLS_HEAD(&hash[i], i); | 1404 | INIT_HLIST_NULLS_HEAD(&hash[i], i); |
1405 | 1405 | ||
1406 | return hash; | 1406 | return hash; |
1407 | } | 1407 | } |
1408 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); | 1408 | EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); |
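nf_ct_alloc_hashtable() rounds the requested bucket count up so whole pages are used, tries the page allocator first (__GFP_NOWARN keeps the fallback quiet) and only then falls back to vzalloc(); nf_ct_free_hashtable() above picks the matching release path via is_vmalloc_addr(). A minimal usage sketch of the pair, assuming a hypothetical caller (example_grow_hash is not part of this file):

    static int example_grow_hash(unsigned int requested)
    {
            struct hlist_nulls_head *hash;
            unsigned int size = requested;          /* may be rounded up by the allocator */

            hash = nf_ct_alloc_hashtable(&size, 1); /* 1 = nulls-terminated buckets */
            if (!hash)
                    return -ENOMEM;

            /* ... relink entries under nf_conntrack_lock, as
             * nf_conntrack_set_hashsize() below does ... */

            nf_ct_free_hashtable(hash, size);       /* size is in buckets, not bytes */
            return 0;
    }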
1409 | 1409 | ||
1410 | int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) | 1410 | int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) |
1411 | { | 1411 | { |
1412 | int i, bucket, rc; | 1412 | int i, bucket, rc; |
1413 | unsigned int hashsize, old_size; | 1413 | unsigned int hashsize, old_size; |
1414 | struct hlist_nulls_head *hash, *old_hash; | 1414 | struct hlist_nulls_head *hash, *old_hash; |
1415 | struct nf_conntrack_tuple_hash *h; | 1415 | struct nf_conntrack_tuple_hash *h; |
1416 | struct nf_conn *ct; | 1416 | struct nf_conn *ct; |
1417 | 1417 | ||
1418 | if (current->nsproxy->net_ns != &init_net) | 1418 | if (current->nsproxy->net_ns != &init_net) |
1419 | return -EOPNOTSUPP; | 1419 | return -EOPNOTSUPP; |
1420 | 1420 | ||
1421 | /* On boot, we can set this without any fancy locking. */ | 1421 | /* On boot, we can set this without any fancy locking. */ |
1422 | if (!nf_conntrack_htable_size) | 1422 | if (!nf_conntrack_htable_size) |
1423 | return param_set_uint(val, kp); | 1423 | return param_set_uint(val, kp); |
1424 | 1424 | ||
1425 | hashsize = simple_strtoul(val, NULL, 0); | ||
1426 | rc = kstrtouint(val, 0, &hashsize); | 1425 | rc = kstrtouint(val, 0, &hashsize); |
1427 | if (rc) | 1426 | if (rc) |
1428 | return rc; | 1427 | return rc; |
1429 | if (!hashsize) | 1428 | if (!hashsize) |
1430 | return -EINVAL; | 1429 | return -EINVAL; |
1431 | 1430 | ||
1432 | hash = nf_ct_alloc_hashtable(&hashsize, 1); | 1431 | hash = nf_ct_alloc_hashtable(&hashsize, 1); |
1433 | if (!hash) | 1432 | if (!hash) |
1434 | return -ENOMEM; | 1433 | return -ENOMEM; |
1435 | 1434 | ||
1436 | /* Lookups in the old hash might happen in parallel, which means we | 1435 | /* Lookups in the old hash might happen in parallel, which means we |
1437 | * might get false negatives during connection lookup. New connections | 1436 | * might get false negatives during connection lookup. New connections |
1438 | * created because of a false negative won't make it into the hash | 1437 | * created because of a false negative won't make it into the hash |
1439 | * though, since that requires taking the lock. | 1438 | * though, since that requires taking the lock. |
1440 | */ | 1439 | */ |
1441 | spin_lock_bh(&nf_conntrack_lock); | 1440 | spin_lock_bh(&nf_conntrack_lock); |
1442 | for (i = 0; i < init_net.ct.htable_size; i++) { | 1441 | for (i = 0; i < init_net.ct.htable_size; i++) { |
1443 | while (!hlist_nulls_empty(&init_net.ct.hash[i])) { | 1442 | while (!hlist_nulls_empty(&init_net.ct.hash[i])) { |
1444 | h = hlist_nulls_entry(init_net.ct.hash[i].first, | 1443 | h = hlist_nulls_entry(init_net.ct.hash[i].first, |
1445 | struct nf_conntrack_tuple_hash, hnnode); | 1444 | struct nf_conntrack_tuple_hash, hnnode); |
1446 | ct = nf_ct_tuplehash_to_ctrack(h); | 1445 | ct = nf_ct_tuplehash_to_ctrack(h); |
1447 | hlist_nulls_del_rcu(&h->hnnode); | 1446 | hlist_nulls_del_rcu(&h->hnnode); |
1448 | bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), | 1447 | bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), |
1449 | hashsize); | 1448 | hashsize); |
1450 | hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); | 1449 | hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); |
1451 | } | 1450 | } |
1452 | } | 1451 | } |
1453 | old_size = init_net.ct.htable_size; | 1452 | old_size = init_net.ct.htable_size; |
1454 | old_hash = init_net.ct.hash; | 1453 | old_hash = init_net.ct.hash; |
1455 | 1454 | ||
1456 | init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; | 1455 | init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; |
1457 | init_net.ct.hash = hash; | 1456 | init_net.ct.hash = hash; |
1458 | spin_unlock_bh(&nf_conntrack_lock); | 1457 | spin_unlock_bh(&nf_conntrack_lock); |
1459 | 1458 | ||
1460 | nf_ct_free_hashtable(old_hash, old_size); | 1459 | nf_ct_free_hashtable(old_hash, old_size); |
1461 | return 0; | 1460 | return 0; |
1462 | } | 1461 | } |
1463 | EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); | 1462 | EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); |
1464 | 1463 | ||
1465 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, | 1464 | module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, |
1466 | &nf_conntrack_htable_size, 0600); | 1465 | &nf_conntrack_htable_size, 0600); |
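The hunk above removes the stray simple_strtoul() call: the kstrtouint() call on the following line already parses the value, and does it more strictly. simple_strtoul() silently stops at the first non-digit and cannot report overflow, while kstrtouint() returns 0 on success, -EINVAL for malformed input and -ERANGE on overflow. A small sketch of the preferred pattern, with parse_hashsize as a hypothetical helper rather than anything in this file:

    static int parse_hashsize(const char *val, unsigned int *out)
    {
            unsigned int v;
            int rc;

            rc = kstrtouint(val, 0, &v);    /* base 0: accepts decimal, 0x hex, 0-prefixed octal */
            if (rc)
                    return rc;              /* -EINVAL or -ERANGE, propagated to the writer */
            if (!v)
                    return -EINVAL;         /* a zero-sized table makes no sense */
            *out = v;
            return 0;
    }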
1467 | 1466 | ||
1468 | void nf_ct_untracked_status_or(unsigned long bits) | 1467 | void nf_ct_untracked_status_or(unsigned long bits) |
1469 | { | 1468 | { |
1470 | int cpu; | 1469 | int cpu; |
1471 | 1470 | ||
1472 | for_each_possible_cpu(cpu) | 1471 | for_each_possible_cpu(cpu) |
1473 | per_cpu(nf_conntrack_untracked, cpu).status |= bits; | 1472 | per_cpu(nf_conntrack_untracked, cpu).status |= bits; |
1474 | } | 1473 | } |
1475 | EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); | 1474 | EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); |
1476 | 1475 | ||
1477 | static int nf_conntrack_init_init_net(void) | 1476 | static int nf_conntrack_init_init_net(void) |
1478 | { | 1477 | { |
1479 | int max_factor = 8; | 1478 | int max_factor = 8; |
1480 | int ret, cpu; | 1479 | int ret, cpu; |
1481 | 1480 | ||
1482 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB | 1481 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB |
1483 | * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ | 1482 | * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ |
1484 | if (!nf_conntrack_htable_size) { | 1483 | if (!nf_conntrack_htable_size) { |
1485 | nf_conntrack_htable_size | 1484 | nf_conntrack_htable_size |
1486 | = (((totalram_pages << PAGE_SHIFT) / 16384) | 1485 | = (((totalram_pages << PAGE_SHIFT) / 16384) |
1487 | / sizeof(struct hlist_head)); | 1486 | / sizeof(struct hlist_head)); |
1488 | if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) | 1487 | if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) |
1489 | nf_conntrack_htable_size = 16384; | 1488 | nf_conntrack_htable_size = 16384; |
1490 | if (nf_conntrack_htable_size < 32) | 1489 | if (nf_conntrack_htable_size < 32) |
1491 | nf_conntrack_htable_size = 32; | 1490 | nf_conntrack_htable_size = 32; |
1492 | 1491 | ||
1493 | /* Use a max. factor of four by default to get the same max as | 1492 | /* Use a max. factor of four by default to get the same max as |
1494 | * with the old struct list_heads. When a table size is given | 1493 | * with the old struct list_heads. When a table size is given |
1495 | * we use the old value of 8 to avoid reducing the max. | 1494 | * we use the old value of 8 to avoid reducing the max. |
1496 | * entries. */ | 1495 | * entries. */ |
1497 | max_factor = 4; | 1496 | max_factor = 4; |
1498 | } | 1497 | } |
1499 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; | 1498 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; |
1500 | 1499 | ||
1501 | printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", | 1500 | printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", |
1502 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, | 1501 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, |
1503 | nf_conntrack_max); | 1502 | nf_conntrack_max); |
1504 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1503 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1505 | ret = nf_ct_extend_register(&nf_ct_zone_extend); | 1504 | ret = nf_ct_extend_register(&nf_ct_zone_extend); |
1506 | if (ret < 0) | 1505 | if (ret < 0) |
1507 | goto err_extend; | 1506 | goto err_extend; |
1508 | #endif | 1507 | #endif |
1509 | /* Set up fake conntrack: to never be deleted, not in any hashes */ | 1508 | /* Set up fake conntrack: to never be deleted, not in any hashes */ |
1510 | for_each_possible_cpu(cpu) { | 1509 | for_each_possible_cpu(cpu) { |
1511 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); | 1510 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); |
1512 | write_pnet(&ct->ct_net, &init_net); | 1511 | write_pnet(&ct->ct_net, &init_net); |
1513 | atomic_set(&ct->ct_general.use, 1); | 1512 | atomic_set(&ct->ct_general.use, 1); |
1514 | } | 1513 | } |
1515 | /* - and make it look like a confirmed connection */ | 1514 | /* - and make it look like a confirmed connection */ |
1516 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); | 1515 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); |
1517 | return 0; | 1516 | return 0; |
1518 | 1517 | ||
1519 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 1518 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
1520 | err_extend: | 1519 | err_extend: |
1521 | #endif | 1520 | #endif |
1522 | return ret; | 1521 | return ret; |
1523 | } | 1522 | } |
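To make the sizing comment above concrete: on a 64-bit kernel with 4 KiB pages and an 8-byte struct hlist_head, a 4 GiB machine would get (4 << 30) / 16384 / 8 = 32768 buckets from the memory heuristic, which the 1 GiB check then clamps to 16384, giving nf_conntrack_max = 4 * 16384 = 65536 tracked connections by default; an administrator-supplied hashsize skips this block entirely and keeps the larger factor of 8.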
1524 | 1523 | ||
1525 | /* | 1524 | /* |
1526 | * We need to use special "null" values, not used in hash table | 1525 | * We need to use special "null" values, not used in hash table |
1527 | */ | 1526 | */ |
1528 | #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) | 1527 | #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) |
1529 | #define DYING_NULLS_VAL ((1<<30)+1) | 1528 | #define DYING_NULLS_VAL ((1<<30)+1) |
1530 | 1529 | ||
1531 | static int nf_conntrack_init_net(struct net *net) | 1530 | static int nf_conntrack_init_net(struct net *net) |
1532 | { | 1531 | { |
1533 | int ret; | 1532 | int ret; |
1534 | 1533 | ||
1535 | atomic_set(&net->ct.count, 0); | 1534 | atomic_set(&net->ct.count, 0); |
1536 | INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); | 1535 | INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); |
1537 | INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); | 1536 | INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); |
1538 | net->ct.stat = alloc_percpu(struct ip_conntrack_stat); | 1537 | net->ct.stat = alloc_percpu(struct ip_conntrack_stat); |
1539 | if (!net->ct.stat) { | 1538 | if (!net->ct.stat) { |
1540 | ret = -ENOMEM; | 1539 | ret = -ENOMEM; |
1541 | goto err_stat; | 1540 | goto err_stat; |
1542 | } | 1541 | } |
1543 | 1542 | ||
1544 | net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); | 1543 | net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); |
1545 | if (!net->ct.slabname) { | 1544 | if (!net->ct.slabname) { |
1546 | ret = -ENOMEM; | 1545 | ret = -ENOMEM; |
1547 | goto err_slabname; | 1546 | goto err_slabname; |
1548 | } | 1547 | } |
1549 | 1548 | ||
1550 | net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, | 1549 | net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, |
1551 | sizeof(struct nf_conn), 0, | 1550 | sizeof(struct nf_conn), 0, |
1552 | SLAB_DESTROY_BY_RCU, NULL); | 1551 | SLAB_DESTROY_BY_RCU, NULL); |
1553 | if (!net->ct.nf_conntrack_cachep) { | 1552 | if (!net->ct.nf_conntrack_cachep) { |
1554 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); | 1553 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); |
1555 | ret = -ENOMEM; | 1554 | ret = -ENOMEM; |
1556 | goto err_cache; | 1555 | goto err_cache; |
1557 | } | 1556 | } |
1558 | 1557 | ||
1559 | net->ct.htable_size = nf_conntrack_htable_size; | 1558 | net->ct.htable_size = nf_conntrack_htable_size; |
1560 | net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); | 1559 | net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); |
1561 | if (!net->ct.hash) { | 1560 | if (!net->ct.hash) { |
1562 | ret = -ENOMEM; | 1561 | ret = -ENOMEM; |
1563 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); | 1562 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); |
1564 | goto err_hash; | 1563 | goto err_hash; |
1565 | } | 1564 | } |
1566 | ret = nf_conntrack_expect_init(net); | 1565 | ret = nf_conntrack_expect_init(net); |
1567 | if (ret < 0) | 1566 | if (ret < 0) |
1568 | goto err_expect; | 1567 | goto err_expect; |
1569 | ret = nf_conntrack_acct_init(net); | 1568 | ret = nf_conntrack_acct_init(net); |
1570 | if (ret < 0) | 1569 | if (ret < 0) |
1571 | goto err_acct; | 1570 | goto err_acct; |
1572 | ret = nf_conntrack_tstamp_init(net); | 1571 | ret = nf_conntrack_tstamp_init(net); |
1573 | if (ret < 0) | 1572 | if (ret < 0) |
1574 | goto err_tstamp; | 1573 | goto err_tstamp; |
1575 | ret = nf_conntrack_ecache_init(net); | 1574 | ret = nf_conntrack_ecache_init(net); |
1576 | if (ret < 0) | 1575 | if (ret < 0) |
1577 | goto err_ecache; | 1576 | goto err_ecache; |
1578 | ret = nf_conntrack_timeout_init(net); | 1577 | ret = nf_conntrack_timeout_init(net); |
1579 | if (ret < 0) | 1578 | if (ret < 0) |
1580 | goto err_timeout; | 1579 | goto err_timeout; |
1581 | ret = nf_conntrack_helper_init(net); | 1580 | ret = nf_conntrack_helper_init(net); |
1582 | if (ret < 0) | 1581 | if (ret < 0) |
1583 | goto err_helper; | 1582 | goto err_helper; |
1584 | return 0; | 1583 | return 0; |
1585 | err_helper: | 1584 | err_helper: |
1586 | nf_conntrack_timeout_fini(net); | 1585 | nf_conntrack_timeout_fini(net); |
1587 | err_timeout: | 1586 | err_timeout: |
1588 | nf_conntrack_ecache_fini(net); | 1587 | nf_conntrack_ecache_fini(net); |
1589 | err_ecache: | 1588 | err_ecache: |
1590 | nf_conntrack_tstamp_fini(net); | 1589 | nf_conntrack_tstamp_fini(net); |
1591 | err_tstamp: | 1590 | err_tstamp: |
1592 | nf_conntrack_acct_fini(net); | 1591 | nf_conntrack_acct_fini(net); |
1593 | err_acct: | 1592 | err_acct: |
1594 | nf_conntrack_expect_fini(net); | 1593 | nf_conntrack_expect_fini(net); |
1595 | err_expect: | 1594 | err_expect: |
1596 | nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); | 1595 | nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); |
1597 | err_hash: | 1596 | err_hash: |
1598 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); | 1597 | kmem_cache_destroy(net->ct.nf_conntrack_cachep); |
1599 | err_cache: | 1598 | err_cache: |
1600 | kfree(net->ct.slabname); | 1599 | kfree(net->ct.slabname); |
1601 | err_slabname: | 1600 | err_slabname: |
1602 | free_percpu(net->ct.stat); | 1601 | free_percpu(net->ct.stat); |
1603 | err_stat: | 1602 | err_stat: |
1604 | return ret; | 1603 | return ret; |
1605 | } | 1604 | } |
1606 | 1605 | ||
1607 | s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, | 1606 | s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, |
1608 | enum ip_conntrack_dir dir, | 1607 | enum ip_conntrack_dir dir, |
1609 | u32 seq); | 1608 | u32 seq); |
1610 | EXPORT_SYMBOL_GPL(nf_ct_nat_offset); | 1609 | EXPORT_SYMBOL_GPL(nf_ct_nat_offset); |
1611 | 1610 | ||
1612 | int nf_conntrack_init(struct net *net) | 1611 | int nf_conntrack_init(struct net *net) |
1613 | { | 1612 | { |
1614 | int ret; | 1613 | int ret; |
1615 | 1614 | ||
1616 | if (net_eq(net, &init_net)) { | 1615 | if (net_eq(net, &init_net)) { |
1617 | ret = nf_conntrack_init_init_net(); | 1616 | ret = nf_conntrack_init_init_net(); |
1618 | if (ret < 0) | 1617 | if (ret < 0) |
1619 | goto out_init_net; | 1618 | goto out_init_net; |
1620 | } | 1619 | } |
1621 | ret = nf_conntrack_proto_init(net); | 1620 | ret = nf_conntrack_proto_init(net); |
1622 | if (ret < 0) | 1621 | if (ret < 0) |
1623 | goto out_proto; | 1622 | goto out_proto; |
1624 | ret = nf_conntrack_init_net(net); | 1623 | ret = nf_conntrack_init_net(net); |
1625 | if (ret < 0) | 1624 | if (ret < 0) |
1626 | goto out_net; | 1625 | goto out_net; |
1627 | 1626 | ||
1628 | if (net_eq(net, &init_net)) { | 1627 | if (net_eq(net, &init_net)) { |
1629 | /* For use by REJECT target */ | 1628 | /* For use by REJECT target */ |
1630 | RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); | 1629 | RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); |
1631 | RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); | 1630 | RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); |
1632 | 1631 | ||
1633 | /* Howto get NAT offsets */ | 1632 | /* Howto get NAT offsets */ |
1634 | RCU_INIT_POINTER(nf_ct_nat_offset, NULL); | 1633 | RCU_INIT_POINTER(nf_ct_nat_offset, NULL); |
1635 | } | 1634 | } |
1636 | return 0; | 1635 | return 0; |
1637 | 1636 | ||
1638 | out_net: | 1637 | out_net: |
1639 | nf_conntrack_proto_fini(net); | 1638 | nf_conntrack_proto_fini(net); |
1640 | out_proto: | 1639 | out_proto: |
1641 | if (net_eq(net, &init_net)) | 1640 | if (net_eq(net, &init_net)) |
1642 | nf_conntrack_cleanup_init_net(); | 1641 | nf_conntrack_cleanup_init_net(); |
1643 | out_init_net: | 1642 | out_init_net: |
1644 | return ret; | 1643 | return ret; |
1645 | } | 1644 | } |
1646 | 1645 |